aboutsummaryrefslogtreecommitdiff
path: root/exec/java-exec/src
diff options
context:
space:
mode:
authorTimothy Farkas <timothyfarkas@apache.org>2018-05-08 11:11:33 -0700
committerSorabh Hamirwasia <sorabh@apache.org>2018-06-06 09:06:22 -0700
commite0c39e070bb696d2bc67f60f18559e5a547208ad (patch)
treebbe50bab85684144d6bf3cdca7c63aa1934667f1 /exec/java-exec/src
parentd05ce7fdf095976a98e66e32d7c27ee016aaf9fc (diff)
DRILL-6389: Fixed building javadocs
- Added documentation about how to build javadocs - Fixed some of the javadoc warnings closes #1276
Diffstat (limited to 'exec/java-exec/src')
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java18
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/expr/CodeGenerator.java4
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionLookupContext.java2
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/ModifiedUnparseVisitor.java2
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java8
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractExchange.java4
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractSingle.java1
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Exchange.java8
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/LateralContract.java11
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/PhysicalOperator.java7
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Sender.java2
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Store.java6
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/MSortTemplate.java2
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/MergeSortWrapper.java4
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/physical/rowSet/model/TupleModel.java8
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/physical/rowSet/model/single/VectorAllocator.java7
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/planner/AbstractPartitionDescriptor.java4
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/planner/DFSFilePartitionLocation.java4
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/planner/PartitionDescriptor.java3
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/DistributionAffinity.java2
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/SimpleParallelizer.java4
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/Wrapper.java6
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerUtil.java4
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePlugin.java4
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/compliant/TextParsingSettings.java2
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/metadata/MetadataVersion.java4
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/util/ImpersonationUtil.java5
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/util/Utilities.java8
-rw-r--r--exec/java-exec/src/main/java/org/apache/drill/exec/vector/accessor/SqlAccessor.java6
-rw-r--r--exec/java-exec/src/test/java/org/apache/drill/exec/physical/unit/PhysicalOpUnitTestBase.java6
30 files changed, 72 insertions, 84 deletions
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java b/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java
index f880b936c..ccd7e3636 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java
@@ -520,7 +520,7 @@ public class DrillClient implements Closeable, ConnectionThrottle {
*
* @return the server name, or null if not connected or if the server
* doesn't provide the name
- * @return
+ * @return The server name.
*/
public String getServerName() {
return (client != null && client.getServerInfos() != null) ? client.getServerInfos().getName() : null;
@@ -533,7 +533,7 @@ public class DrillClient implements Closeable, ConnectionThrottle {
*
* @return the server version, or null if not connected or if the server
* doesn't provide the version
- * @return
+ * @return The server version.
*/
public Version getServerVersion() {
return (client != null && client.getServerInfos() != null) ? UserRpcUtils.getVersion(client.getServerInfos()) : null;
@@ -588,7 +588,7 @@ public class DrillClient implements Closeable, ConnectionThrottle {
* @param type
* @param query
* @param isSplitPlan - option to tell whether to return single or split plans for a query
- * @return list of PlanFragments that can be used later on in {@link #runQuery(QueryType, List, UserResultsListener)}
+ * @return list of PlanFragments that can be used later on in {@link #runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType, java.util.List, org.apache.drill.exec.rpc.user.UserResultsListener)}
* to run a query without additional planning
*/
public DrillRpcFuture<QueryPlanFragments> planQuery(QueryType type, String query, boolean isSplitPlan) {
@@ -663,7 +663,7 @@ public class DrillClient implements Closeable, ConnectionThrottle {
* Get the list of catalogs in <code>INFORMATION_SCHEMA.CATALOGS</code> table satisfying the given filters.
*
* @param catalogNameFilter Filter on <code>catalog name</code>. Pass null to apply no filter.
- * @return
+ * @return The list of catalogs in <code>INFORMATION_SCHEMA.CATALOGS</code> table satisfying the given filters.
*/
public DrillRpcFuture<GetCatalogsResp> getCatalogs(LikeFilter catalogNameFilter) {
final GetCatalogsReq.Builder reqBuilder = GetCatalogsReq.newBuilder();
@@ -679,7 +679,7 @@ public class DrillClient implements Closeable, ConnectionThrottle {
*
* @param catalogNameFilter Filter on <code>catalog name</code>. Pass null to apply no filter.
* @param schemaNameFilter Filter on <code>schema name</code>. Pass null to apply no filter.
- * @return
+ * @return The list of schemas in <code>INFORMATION_SCHEMA.SCHEMATA</code> table satisfying the given filters.
*/
public DrillRpcFuture<GetSchemasResp> getSchemas(LikeFilter catalogNameFilter, LikeFilter schemaNameFilter) {
final GetSchemasReq.Builder reqBuilder = GetSchemasReq.newBuilder();
@@ -701,7 +701,7 @@ public class DrillClient implements Closeable, ConnectionThrottle {
* @param schemaNameFilter Filter on <code>schema name</code>. Pass null to apply no filter.
* @param tableNameFilter Filter in <code>table name</code>. Pass null to apply no filter.
* @param tableTypeFilter Filter in <code>table type</code>. Pass null to apply no filter
- * @return
+ * @return The list of tables in <code>INFORMATION_SCHEMA.TABLES</code> table satisfying the given filters.
*/
public DrillRpcFuture<GetTablesResp> getTables(LikeFilter catalogNameFilter, LikeFilter schemaNameFilter,
LikeFilter tableNameFilter, List<String> tableTypeFilter) {
@@ -732,7 +732,7 @@ public class DrillClient implements Closeable, ConnectionThrottle {
* @param schemaNameFilter Filter on <code>schema name</code>. Pass null to apply no filter.
* @param tableNameFilter Filter in <code>table name</code>. Pass null to apply no filter.
* @param columnNameFilter Filter in <code>column name</code>. Pass null to apply no filter.
- * @return
+ * @return The list of columns in <code>INFORMATION_SCHEMA.COLUMNS</code> table satisfying the given filters.
*/
public DrillRpcFuture<GetColumnsResp> getColumns(LikeFilter catalogNameFilter, LikeFilter schemaNameFilter,
LikeFilter tableNameFilter, LikeFilter columnNameFilter) {
@@ -757,10 +757,10 @@ public class DrillClient implements Closeable, ConnectionThrottle {
}
/**
- * Create a prepared statement for given <code>query</code>.
+ * Create a prepared statement for the given <code>query</code>.
*
* @param query
- * @return
+ * @return The prepared statement for the given <code>query</code>.
*/
public DrillRpcFuture<CreatePreparedStatementResp> createPreparedStatement(final String query) {
final CreatePreparedStatementReq req =
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/CodeGenerator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/CodeGenerator.java
index 480b2c2ea..76d68e22d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/CodeGenerator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/CodeGenerator.java
@@ -46,7 +46,7 @@ import com.sun.codemodel.JDefinedClass;
* class without the normal byte-code manipulations. Plain Java allows
* the option to persist, and debug, the generated code when building new
* generated classes or otherwise working with generated code. To turn
- * on debugging, see the explanation in {@link ClassBuilder}.
+ * on debugging, see the explanation in {@link org.apache.drill.exec.compile.ClassBuilder}.
*
* @param <T>
* The interface that results from compiling and merging the runtime
@@ -149,7 +149,7 @@ public class CodeGenerator<T> {
/**
* Debug-time option to persist the code for the generated class to permit debugging.
* Has effect only when code is generated using the plain Java option. Code
- * is written to the code directory specified in {@link ClassBuilder}.
+ * is written to the code directory specified in {@link org.apache.drill.exec.compile.ClassBuilder}.
* To debug code, set this option, then point your IDE to the code directory
* when the IDE prompts you for the source code location.
*
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionLookupContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionLookupContext.java
index 944041e80..872c13780 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionLookupContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionLookupContext.java
@@ -41,7 +41,7 @@ public interface FunctionLookupContext {
* implementations found on classpath.
*
* @param functionCall - Specifies function name and type of arguments
- * @return
+ * @return AbstractFuncHolder
*/
public AbstractFuncHolder findNonDrillFunction(FunctionCall functionCall);
}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/ModifiedUnparseVisitor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/ModifiedUnparseVisitor.java
index 58e3c3a1f..fe8349057 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/ModifiedUnparseVisitor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/ModifiedUnparseVisitor.java
@@ -43,7 +43,7 @@ public class ModifiedUnparseVisitor extends UnparseVisitor {
}
/**
- * Unparse the given {@link Java.CompilationUnit} to the given {@link Writer}.
+ * Unparse the given {@link org.codehaus.janino.Java.CompilationUnit} to the given {@link java.io.Writer}.
*/
public static void unparse(Java.CompilationUnit cu, Writer w) {
UnparseVisitor.unparse(cu, w);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
index eb32bc650..07742f2e8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
@@ -163,10 +163,10 @@ public class QueryContext implements AutoCloseable, OptimizerRulesContext, Schem
}
/**
- * Create and return a SchemaTree with given <i>schemaConfig</i> but some schemas (from storage plugins)
+ * Create and return a {@link org.apache.calcite.schema.SchemaPlus} with given <i>schemaConfig</i> but some schemas (from storage plugins)
* could be initialized later.
* @param schemaConfig
- * @return
+ * @return A {@link org.apache.calcite.schema.SchemaPlus} with given <i>schemaConfig</i>.
*/
public SchemaPlus getRootSchema(SchemaConfig schemaConfig) {
return schemaTreeProvider.createRootSchema(schemaConfig);
@@ -174,7 +174,7 @@ public class QueryContext implements AutoCloseable, OptimizerRulesContext, Schem
/**
* Create and return a fully initialized SchemaTree with given <i>schemaConfig</i>.
* @param schemaConfig
- * @return
+ * @return A fully initialized SchemaTree with given <i>schemaConfig</i>.
*/
public SchemaPlus getFullRootSchema(SchemaConfig schemaConfig) {
@@ -182,7 +182,7 @@ public class QueryContext implements AutoCloseable, OptimizerRulesContext, Schem
}
/**
* Get the user name of the user who issued the query that is managed by this QueryContext.
- * @return
+ * @return The user name of the user who issued the query that is managed by this QueryContext.
*/
@Override
public String getQueryUserName() {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractExchange.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractExchange.java
index 788516145..96bdbaa34 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractExchange.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractExchange.java
@@ -55,7 +55,7 @@ public abstract class AbstractExchange extends AbstractSingle implements Exchang
/**
* Default sender parallelization width range is [1, Integer.MAX_VALUE] and no endpoint affinity
* @param receiverFragmentEndpoints Endpoints assigned to receiver fragment if available, otherwise an empty list.
- * @return
+ * @return Sender {@link org.apache.drill.exec.planner.fragment.ParallelizationInfo}.
*/
@Override
public ParallelizationInfo getSenderParallelizationInfo(List<DrillbitEndpoint> receiverFragmentEndpoints) {
@@ -66,7 +66,7 @@ public abstract class AbstractExchange extends AbstractSingle implements Exchang
* Default receiver parallelization width range is [1, Integer.MAX_VALUE] and affinity to nodes where sender
* fragments are running.
* @param senderFragmentEndpoints Endpoints assigned to receiver fragment if available, otherwise an empty list.
- * @return
+ * @return Receiver {@link org.apache.drill.exec.planner.fragment.ParallelizationInfo}.
*/
@Override
public ParallelizationInfo getReceiverParallelizationInfo(List<DrillbitEndpoint> senderFragmentEndpoints) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractSingle.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractSingle.java
index 1021465c3..9f3654e8e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractSingle.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractSingle.java
@@ -26,7 +26,6 @@ import com.google.common.collect.Iterators;
/**
* Describes an operator that expects a single child operator as its input.
- * @param <T> The type of Exec model supported.
*/
public abstract class AbstractSingle extends AbstractBase {
static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(AbstractSingle.class);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Exchange.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Exchange.java
index 0aa3b70ab..56e9b9c90 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Exchange.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Exchange.java
@@ -83,7 +83,7 @@ public interface Exchange extends PhysicalOperator {
* max width and affinity to Drillbits.
*
* @param receiverFragmentEndpoints Endpoints assigned to receiver fragment if available, otherwise an empty list.
- * @return
+ * @return Sender {@link org.apache.drill.exec.planner.fragment.ParallelizationInfo}.
*/
@JsonIgnore
ParallelizationInfo getSenderParallelizationInfo(List<DrillbitEndpoint> receiverFragmentEndpoints);
@@ -93,7 +93,7 @@ public interface Exchange extends PhysicalOperator {
* max width and affinity to Drillbits.
*
* @param senderFragmentEndpoints Endpoints assigned to receiver fragment if available, otherwise an empty list
- * @return
+ * @return Receiver {@link org.apache.drill.exec.planner.fragment.ParallelizationInfo}.
*/
@JsonIgnore
ParallelizationInfo getReceiverParallelizationInfo(List<DrillbitEndpoint> senderFragmentEndpoints);
@@ -101,7 +101,7 @@ public interface Exchange extends PhysicalOperator {
/**
* Return the feeding child of this operator node.
*
- * @return
+ * @return The feeding child of this operator node.
*/
PhysicalOperator getChild();
@@ -110,4 +110,4 @@ public interface Exchange extends PhysicalOperator {
*/
@JsonIgnore
ParallelizationDependency getParallelizationDependency();
-} \ No newline at end of file
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/LateralContract.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/LateralContract.java
index 52e601e6f..2f6d20dd7 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/LateralContract.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/LateralContract.java
@@ -27,19 +27,20 @@ import org.apache.drill.exec.record.RecordBatch.IterOutcome;
public interface LateralContract {
/**
- * Get reference to left side incoming of LateralJoinRecordBatch
- * @return
+ * Get reference to left side incoming of {@link org.apache.drill.exec.physical.impl.join.LateralJoinBatch}.
+ * @return The incoming {@link org.apache.drill.exec.record.RecordBatch}
*/
RecordBatch getIncoming();
/**
- * Get current record index in incoming to be processed
- * @return
+ * Get current record index in incoming to be processed.
+ * @return The current record index in incoming to be processed.
*/
int getRecordIndex();
/**
- * Get the current outcome of left incoming batch
+ * Get the current outcome of left incoming batch.
+ * @return The current outcome of left incoming batch.
*/
IterOutcome getLeftOutcome();
}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/PhysicalOperator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/PhysicalOperator.java
index 35138c85e..82fb53bf4 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/PhysicalOperator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/PhysicalOperator.java
@@ -43,16 +43,13 @@ public interface PhysicalOperator extends GraphValue<PhysicalOperator> {
* Describes whether or not a particular physical operator can actually be executed. Most physical operators can be
* executed. However, Exchange nodes cannot be executed. In order to be executed, they must be converted into their
* Exec sub components.
- *
- * @return
*/
@JsonIgnore
boolean isExecutable();
/**
* Describes the SelectionVector Mode for the output steam from this physical op.
- * This property is used during physical plan creating using {@link PhysicalPlanCreator}.
- * @return
+ * This property is used during physical plan creating using {@link org.apache.drill.exec.planner.physical.PhysicalPlanCreator}.
*/
@JsonIgnore
SelectionVectorMode getSVMode();
@@ -61,14 +58,12 @@ public interface PhysicalOperator extends GraphValue<PhysicalOperator> {
* Provides capability to build a set of output based on traversing a query graph tree.
*
* @param physicalVisitor
- * @return
*/
<T, X, E extends Throwable> T accept(PhysicalVisitor<T, X, E> physicalVisitor, X value) throws E;
/**
* Regenerate with this node with a new set of children. This is used in the case of materialization or optimization.
* @param children
- * @return
*/
@JsonIgnore
PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) throws ExecutionSetupException;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Sender.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Sender.java
index c39cc9b0c..340cbcef6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Sender.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Sender.java
@@ -38,7 +38,7 @@ public interface Sender extends FragmentRoot {
/**
* Get the receiver major fragment id that is opposite this sender.
- * @return
+ * @return The receiver major fragment id that is opposite this sender.
*/
@JsonProperty("receiver-major-fragment")
public int getOppositeMajorFragmentId();
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Store.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Store.java
index ffec99e74..24338e0bf 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Store.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/Store.java
@@ -61,14 +61,14 @@ public interface Store extends HasAffinity {
* maxWidth value of 1 will be returned. In the case that there is no limit for parallelization, this method should
* return Integer.MAX_VALUE.
*
- * @return
+ * @return The maximum allowable width for the Store operation.
*/
@JsonIgnore
public abstract int getMaxWidth();
/**
* Get the child of this store operator as this will be needed for parallelization materialization purposes.
- * @return
+ * @return The child of this store operator.
*/
public abstract PhysicalOperator getChild();
-} \ No newline at end of file
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/MSortTemplate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/MSortTemplate.java
index afbc58bb2..49746264b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/MSortTemplate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/MSortTemplate.java
@@ -90,7 +90,7 @@ public abstract class MSortTemplate implements MSorter, IndexedSortable {
* ExternalSortBatch to make decisions about whether to spill or not.
*
* @param recordCount
- * @return
+ * @return The amount of memory MSorter needs for a given record count.
*/
public static long memoryNeeded(final int recordCount) {
// We need 4 bytes (SV4) for each record, power of 2 rounded.
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/MergeSortWrapper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/MergeSortWrapper.java
index bca28f177..7ac00ea45 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/MergeSortWrapper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/MergeSortWrapper.java
@@ -97,9 +97,7 @@ public class MergeSortWrapper extends BaseSortWrapper implements SortResults {
* destination container, indexed by an SV4.
*
* @param batchGroups the complete set of in-memory batches
- * @param batch the record batch (operator) for the sort operator
- * @param destContainer the vector container for the sort operator
- * @return the sv4 for this operator
+ * @param outputBatchSize
*/
public void merge(List<BatchGroup.InputBatch> batchGroups, int outputBatchSize) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/rowSet/model/TupleModel.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/rowSet/model/TupleModel.java
index 767153ab7..28b1d937b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/rowSet/model/TupleModel.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/rowSet/model/TupleModel.java
@@ -46,11 +46,11 @@ import org.apache.drill.exec.record.metadata.TupleMetadata;
* <dt>Visitor</dt>
* <dd>The visitor abstraction (classic Gang-of-Four pattern) allows adding
* functionality without complicating the structure classes. Allows the same
- * abstraction to be used for the testing {@link RowSet} abstractions and
+ * abstraction to be used for the testing <b>RowSet</b> abstractions and
* the scan operator "loader" classes.</dd>
* <dt>Metadata</dt>
* <dd>Metadata is simply data about data. Here, data about tuples and columns.
- * The column metadata mostly expands on that available in {@link MaterializedField},
+ * The column metadata mostly expands on that available in {@link org.apache.drill.exec.record.MaterializedField},
* but also adds allocation hints.
* </dl>
* <p>
@@ -60,7 +60,7 @@ import org.apache.drill.exec.record.metadata.TupleMetadata;
* metadata and visitor behavior to allow much easier processing that is
* possible with the raw container structure.
* <p>
- * A key value of this abstraction is the extended {@link TupleSchema}
+ * A key value of this abstraction is the extended {@link org.apache.drill.exec.record.metadata.TupleSchema}
* associated with the structure. Unlike a
* {@link VectorContainer}, this abstraction keeps the schema in sync
* with vectors as columns are added.
@@ -72,7 +72,7 @@ import org.apache.drill.exec.record.metadata.TupleMetadata;
* <p>
* Tuples provide access to columns by both index and name. Both the schema and
* model classes follow this convention. Compared with the VectorContainer and
- * {@link AbstractMapVector} classes, the vector index is a first-class concept:
+ * {@link org.apache.drill.exec.vector.complex.AbstractMapVector} classes, the vector index is a first-class concept:
* the column model and schema are guaranteed to reside at the same index relative
* to the enclosing tuple. In addition, name access is efficient using a hash
* index.
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/rowSet/model/single/VectorAllocator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/rowSet/model/single/VectorAllocator.java
index f4fc5d495..b9c064a31 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/rowSet/model/single/VectorAllocator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/rowSet/model/single/VectorAllocator.java
@@ -36,9 +36,10 @@ import org.apache.drill.exec.vector.complex.RepeatedMapVector;
* walk the schema tree to allocate new vectors according to a given
* row count and the size information provided in column metadata.
* <p>
- * @see {@link AllocationHelper} - the class which this one replaces
- * @see {@link org.apache.drill.exec.record.VectorInitializer} - an earlier cut at implementation
- * based on data from the {@link org.apache.drill.exec.record.RecordBatchSizer}
+ * {@link org.apache.drill.exec.vector.AllocationHelper} - the class which this one replaces
+ * {@link org.apache.drill.exec.record.VectorInitializer} - an earlier cut at implementation
+ * based on data from the {@link org.apache.drill.exec.record.RecordBatchSizer}
+ * </p>
*/
// TODO: Does not yet handle lists; lists are a simple extension
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/AbstractPartitionDescriptor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/AbstractPartitionDescriptor.java
index 412841a21..b70c764e2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/AbstractPartitionDescriptor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/AbstractPartitionDescriptor.java
@@ -31,7 +31,7 @@ public abstract class AbstractPartitionDescriptor implements PartitionDescriptor
/**
* A sequence of sublists of partition locations combined into a single super list.
- * The size of each sublist is at most {@link PartitionDescriptor.PARTITION_BATCH_SIZE}
+ * The size of each sublist is at most {@link PartitionDescriptor#PARTITION_BATCH_SIZE}
* For example if the size is 3, the complete list could be: {(a, b, c), {d, e, f), (g, h)}
*/
protected List<List<PartitionLocation>> locationSuperList;
@@ -42,7 +42,7 @@ public abstract class AbstractPartitionDescriptor implements PartitionDescriptor
/**
* Create sublists of the partition locations, each sublist of size
- * at most {@link PartitionDescriptor.PARTITION_BATCH_SIZE}
+ * at most {@link PartitionDescriptor#PARTITION_BATCH_SIZE}
*/
protected abstract void createPartitionSublists() ;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/DFSFilePartitionLocation.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/DFSFilePartitionLocation.java
index 228ae6be8..ecfa6220d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/DFSFilePartitionLocation.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/DFSFilePartitionLocation.java
@@ -51,7 +51,7 @@ public class DFSFilePartitionLocation extends SimplePartitionLocation {
/**
* Returns the value for a give partition key
* @param index - Index of the partition key whose value is to be returned
- * @return
+ * @return The value for a partition.
*/
@Override
public String getPartitionValue(int index) {
@@ -61,7 +61,7 @@ public class DFSFilePartitionLocation extends SimplePartitionLocation {
/**
* Return the full location of this partition
- * @return
+ * @return The partition location.
*/
@Override
public String getEntirePartitionLocation() {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PartitionDescriptor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PartitionDescriptor.java
index b29f02e01..220bf291e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PartitionDescriptor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PartitionDescriptor.java
@@ -70,7 +70,6 @@ public interface PartitionDescriptor extends Iterable<List<PartitionLocation>> {
* Method returns the Major type associated with the given column
* @param column - column whose type should be determined
* @param plannerSettings
- * @return
*/
TypeProtos.MajorType getVectorType(SchemaPath column, PlannerSettings plannerSettings);
@@ -78,7 +77,6 @@ public interface PartitionDescriptor extends Iterable<List<PartitionLocation>> {
* Methods create a new TableScan rel node, given the lists of new partitions or new files to SCAN.
* @param newPartitions
* @param wasAllPartitionsPruned
- * @return
* @throws Exception
*/
public TableScan createTableScan(List<PartitionLocation> newPartitions,
@@ -91,7 +89,6 @@ public interface PartitionDescriptor extends Iterable<List<PartitionLocation>> {
* @param cacheFileRoot
* @param wasAllPartitionsPruned
* @param metaContext
- * @return
* @throws Exception
*/
public TableScan createTableScan(List<PartitionLocation> newPartitions, String cacheFileRoot,
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/DistributionAffinity.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/DistributionAffinity.java
index 5ebea255f..b1ab9aab6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/DistributionAffinity.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/DistributionAffinity.java
@@ -55,7 +55,7 @@ public enum DistributionAffinity {
/**
* Is the current DistributionAffinity less restrictive than the given DistributionAffinity?
* @param distributionAffinity
- * @return
+ * @return True if the current DistributionAffinity is less restrictive than the given DistributionAffinity. False otherwise.
*/
public boolean isLessRestrictiveThan(final DistributionAffinity distributionAffinity) {
return ordinal() < distributionAffinity.ordinal();
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/SimpleParallelizer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/SimpleParallelizer.java
index 1ee9ea231..274db31eb 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/SimpleParallelizer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/SimpleParallelizer.java
@@ -140,7 +140,7 @@ public class SimpleParallelizer implements ParallelizationParameters {
* @param rootFragment
* @param session
* @param queryContextInfo
- * @return
+ * @return The {@link QueryWorkUnit}s.
* @throws ExecutionSetupException
*/
public List<QueryWorkUnit> getSplitFragments(OptionList options, DrillbitEndpoint foremanNode, QueryId queryId,
@@ -154,7 +154,7 @@ public class SimpleParallelizer implements ParallelizationParameters {
* Helper method to reuse the code for QueryWorkUnit(s) generation
* @param activeEndpoints
* @param rootFragment
- * @return
+ * @return A {@link PlanningSet}.
* @throws ExecutionSetupException
*/
protected PlanningSet getFragmentsHelper(Collection<DrillbitEndpoint> activeEndpoints, Fragment rootFragment) throws ExecutionSetupException {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/Wrapper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/Wrapper.java
index 3ff00ca6e..329a8a189 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/Wrapper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/Wrapper.java
@@ -184,15 +184,15 @@ public class Wrapper {
/**
* Is the endpoints assignment done for this fragment?
- * @return
+ * @return True if the endpoints assignment is done for this fragment. False otherwise.
*/
public boolean isEndpointsAssignmentDone() {
return endpointsAssigned;
}
/**
- * Get the list of fragements this particular fragment depends for determining its
- * @return
+ * Get the list of fragments this particular fragment depends on.
+ * @return The list of fragments this particular fragment depends on.
*/
public List<Wrapper> getFragmentDependencies() {
return ImmutableList.copyOf(fragmentDependencies);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerUtil.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerUtil.java
index 02f611481..b7c7ff380 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerUtil.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerUtil.java
@@ -237,7 +237,7 @@ public class SqlHandlerUtil {
/**
* Drops table from schema.
* If drop has failed makes concurrency check: checks if table still exists.
- * If table exists, throws {@link @UserException} since drop was unsuccessful,
+ * If table exists, throws {@link org.apache.drill.common.exceptions.UserException} since drop was unsuccessful,
* otherwise assumes that other user had dropped the view and exists without error.
*
* @param drillSchema drill schema
@@ -256,7 +256,7 @@ public class SqlHandlerUtil {
/**
* Drops view from schema.
* If drop has failed makes concurrency check: checks if view still exists.
- * If view exists, throws {@link @UserException} since drop was unsuccessful,
+ * If view exists, throws {@link org.apache.drill.common.exceptions.UserException} since drop was unsuccessful,
* otherwise assumes that other user had dropped the view and exists without error.
*
* @param drillSchema drill schema
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePlugin.java
index 2969d4f43..50f273102 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePlugin.java
@@ -54,7 +54,7 @@ public interface StoragePlugin extends SchemaFactory, AutoCloseable {
*
* @param userName User whom to impersonate when when reading the contents as part of Scan.
* @param selection The configured storage engine specific selection.
- * @return
+ * @return The physical scan operator for the particular GroupScan (read) node.
* @throws IOException
*/
public AbstractGroupScan getPhysicalScan(String userName, JSONOptions selection) throws IOException;
@@ -65,7 +65,7 @@ public interface StoragePlugin extends SchemaFactory, AutoCloseable {
* @param userName User whom to impersonate when when reading the contents as part of Scan.
* @param selection The configured storage engine specific selection.
* @param columns (optional) The list of column names to scan from the data source.
- * @return
+ * @return The physical scan operator for the particular GroupScan (read) node.
* @throws IOException
*/
public AbstractGroupScan getPhysicalScan(String userName, JSONOptions selection, List<SchemaPath> columns)
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/compliant/TextParsingSettings.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/compliant/TextParsingSettings.java
index a01a08214..4bc24bdd2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/compliant/TextParsingSettings.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/compliant/TextParsingSettings.java
@@ -209,7 +209,7 @@ public class TextParsingSettings {
/**
* Configures how to handle unescaped quotes inside quoted values. If set to {@code true}, the parser will parse the quote normally as part of the value.
- * If set the {@code false}, a {@link TextParsingException} will be thrown. Defaults to {@code true}.
+ * If set to {@code false}, a {@link com.univocity.parsers.common.TextParsingException} will be thrown. Defaults to {@code true}.
* @param parseUnescapedQuotes indicates whether or not the CSV parser should accept unescaped quotes inside quoted values.
*/
public void setParseUnescapedQuotes(boolean parseUnescapedQuotes) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/metadata/MetadataVersion.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/metadata/MetadataVersion.java
index 15b4b9da3..797208638 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/metadata/MetadataVersion.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/metadata/MetadataVersion.java
@@ -106,7 +106,7 @@ public class MetadataVersion implements Comparable<MetadataVersion> {
/**
* Supported metadata versions.
* <p>
- * Note: keep them synchronized with {@link Metadata.ParquetTableMetadataBase} versions
+ * Note: keep them synchronized with {@link org.apache.drill.exec.store.parquet.metadata.MetadataBase.ParquetTableMetadataBase} versions
*/
public static class Constants {
/**
@@ -134,7 +134,7 @@ public class MetadataVersion implements Comparable<MetadataVersion> {
/**
* Version 3.2: An array with the components of the field name in
- * {@link Metadata.ColumnTypeMetadata_v3.Key} class is replaced by the SchemaPath.<br>
+ * {@link org.apache.drill.exec.store.parquet.metadata.Metadata_V3.ColumnTypeMetadata_v3.Key} class is replaced by the {@link org.apache.drill.common.expression.SchemaPath}.<br>
* See DRILL-4264
*/
public static final String V3_2 = "3.2";
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/util/ImpersonationUtil.java b/exec/java-exec/src/main/java/org/apache/drill/exec/util/ImpersonationUtil.java
index b74ce1aee..c9039e9b0 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/util/ImpersonationUtil.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/util/ImpersonationUtil.java
@@ -110,7 +110,6 @@ public class ImpersonationUtil {
* @param opUserName Name of the user whom to impersonate while setting up the operator.
* @param queryUserName Name of the user who issues the query. If <i>opUserName</i> is invalid,
* then this parameter must be valid user name.
- * @return
*/
public static UserGroupInformation createProxyUgi(String opUserName, String queryUserName) {
if (!Strings.isNullOrEmpty(opUserName)) {
@@ -131,7 +130,6 @@ public class ImpersonationUtil {
* Create and return proxy user {@link org.apache.hadoop.security.UserGroupInformation} for give user name.
*
* @param proxyUserName Proxy user name (must be valid)
- * @return
*/
public static UserGroupInformation createProxyUgi(String proxyUserName) {
try {
@@ -202,7 +200,6 @@ public class ImpersonationUtil {
*
* @param proxyUserName Name of the user whom to impersonate while accessing the FileSystem contents.
* @param fsConf FileSystem configuration.
- * @return
*/
public static DrillFileSystem createFileSystem(String proxyUserName, Configuration fsConf) {
return createFileSystem(createProxyUgi(proxyUserName), fsConf, null);
@@ -234,7 +231,7 @@ public class ImpersonationUtil {
* @param userName User who is checked for administrative privileges.
* @param adminUsers Comma separated list of admin usernames,
* @param adminGroups Comma separated list of admin usergroups
- * @return
+ * @return True if the user has admin privileges. False otherwise.
*/
public static boolean hasAdminPrivileges(final String userName, final String adminUsers, final String adminGroups) {
// Process user is by default an admin
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/util/Utilities.java b/exec/java-exec/src/main/java/org/apache/drill/exec/util/Utilities.java
index a165d9e32..f8fb8d77a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/util/Utilities.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/util/Utilities.java
@@ -56,11 +56,11 @@ public class Utilities {
}
/**
- * Create QueryContextInformation with given <i>defaultSchemaName</i>. Rest of the members of the
+ * Create {@link org.apache.drill.exec.proto.BitControl.QueryContextInformation} with given <i>defaultSchemaName</i>. Rest of the members of the
* QueryContextInformation is derived from the current state of the process.
*
* @param defaultSchemaName
- * @return
+ * @return A {@link org.apache.drill.exec.proto.BitControl.QueryContextInformation} with given <i>defaultSchemaName</i>.
*/
public static QueryContextInformation createQueryContextInfo(final String defaultSchemaName, final String sessionId) {
final long queryStartTime = System.currentTimeMillis();
@@ -75,7 +75,7 @@ public class Utilities {
/**
* Read the manifest file and get the Drill version number
- * @return
+ * @return The Drill version.
*/
public static String getDrillVersion() {
String v = Utilities.class.getPackage().getImplementationVersion();
@@ -85,7 +85,7 @@ public class Utilities {
/**
* Return true if list of schema path has star column.
* @param projected
- * @return
+ * @return True if the list of {@link org.apache.drill.common.expression.SchemaPath}s has a star column.
*/
public static boolean isStarQuery(Collection<SchemaPath> projected) {
return Iterables.tryFind(Preconditions.checkNotNull(projected, COL_NULL_ERROR), new Predicate<SchemaPath>() {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/vector/accessor/SqlAccessor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/vector/accessor/SqlAccessor.java
index 70fb90eb4..50a257281 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/vector/accessor/SqlAccessor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/vector/accessor/SqlAccessor.java
@@ -43,12 +43,12 @@ import org.apache.drill.common.types.TypeProtos.MajorType;
* </p>
* <li>
* a get method that return primitive type throws an exception (callers are
- * responsible for calling {@link isNull} to check for null before calling
+ * responsible for calling {@link #isNull(int)} to check for null before calling
* such methods)
* </li>
* <li>
* a get method that returns a non-primitive type returns Java {@code null}
- * (the caller does not need to call {@link isNull} to check for nulls)
+ * (the caller does not need to call {@link #isNull(int)} to check for nulls)
* </li>
*/
public interface SqlAccessor {
@@ -129,4 +129,4 @@ public interface SqlAccessor {
/** (See {@link SqlAccessor class description}.) */
Object getObject(int rowOffset) throws InvalidAccessException;
-} \ No newline at end of file
+}
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/unit/PhysicalOpUnitTestBase.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/unit/PhysicalOpUnitTestBase.java
index b8a219ba6..d0875ba1c 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/unit/PhysicalOpUnitTestBase.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/unit/PhysicalOpUnitTestBase.java
@@ -487,12 +487,12 @@ public class PhysicalOpUnitTestBase extends ExecTest {
}
/**
- * Create JSONRecordReader from input strings.
+ * Create {@link org.apache.drill.exec.store.easy.json.JSONRecordReader} from input strings.
* @param jsonBatches : list of input strings, each element represent a batch. Each string could either
* be in the form of "[{...}, {...}, ..., {...}]", or in the form of "{...}".
* @param fragContext : fragment context
* @param columnsToRead : list of schema paths to read from JSON reader.
- * @return
+ * @return The {@link org.apache.drill.exec.store.easy.json.JSONRecordReader} corresponding to each given jsonBatch.
*/
public static Iterator<RecordReader> getJsonReadersFromBatchString(List<String> jsonBatches, FragmentContext fragContext, List<SchemaPath> columnsToRead) {
ObjectMapper mapper = new ObjectMapper();
@@ -515,7 +515,7 @@ public class PhysicalOpUnitTestBase extends ExecTest {
* @param inputPaths : list of .json file paths.
* @param fragContext
* @param columnsToRead
- * @return
+ * @return The {@link org.apache.drill.exec.store.easy.json.JSONRecordReader} corresponding to each given input path.
*/
public static Iterator<RecordReader> getJsonReadersFromInputFiles(DrillFileSystem fs, List<String> inputPaths, FragmentContext fragContext, List<SchemaPath> columnsToRead) {
List<RecordReader> readers = new ArrayList<>();