Binary compatibility report for the succinct-0.1.2 library between 1.3.0 and 1.0.0 versions (relating to the portability of client application succinct-0.1.2.jar)

Test Info


Library Name     succinct-0.1.2
Version #1       1.3.0
Version #2       1.0.0
Java Version     1.7.0_75

Test Results


Total Java ARchives        1
Total Methods / Classes    2349 / 463
Verdict                    Incompatible (83.9%)

Problem Summary


                              Severity   Count
Added Methods                 -          115
Removed Methods               High       1864
Problems with Data Types      High       151
                              Medium     32
                              Low        12
Problems with Methods         High       4
                              Medium     0
                              Low        0
Other Changes in Data Types   -          10
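
The verdict means that most of the spark-sql surface one version exposes is simply absent from the other. In practice such binary incompatibilities surface only at run time, as linkage errors (NoSuchMethodError, NoClassDefFoundError) rather than compile errors. Below is a minimal sketch of the failure mode, assuming a client built against spark-sql_2.10-1.3.0 and then run with spark-sql_2.10-1.0.0 on the classpath; the demo object and data are invented for illustration.

    // Compiles and runs against spark-sql_2.10-1.3.0.
    // Against spark-sql_2.10-1.0.0 the class org.apache.spark.sql.DataFrame does not exist
    // and SQLContext.sql has a different descriptor (it returns SchemaRDD there), so loading
    // or executing this code raises NoClassDefFoundError / NoSuchMethodError instead.
    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext

    object LinkageDemo {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(new SparkConf().setAppName("linkage-demo").setMaster("local"))
        val sqlContext = new SQLContext(sc)
        import sqlContext.implicits._                      // 1.3.0-only implicits (toDF)

        val df = sc.parallelize(Seq(("a", 1), ("b", 2))).toDF("key", "value")
        println(df.count())                                // DataFrame.count() appears under Removed Methods below
        sc.stop()
      }
    }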

Added Methods (115)


spark-sql_2.10-1.0.0.jar, AddExchange.class
package org.apache.spark.sql.execution
AddExchange.logger ( ) [static]  :  com.typesafe.scalalogging.slf4j.Logger
AddExchange.ruleName ( ) [static]  :  String

spark-sql_2.10-1.0.0.jar, Aggregate.class
package org.apache.spark.sql.execution
Aggregate.Aggregate ( boolean partial, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> groupingExpressions, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.NamedExpression> aggregateExpressions, SparkPlan child, org.apache.spark.SparkContext sc )
Aggregate.copy ( boolean partial, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> groupingExpressions, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.NamedExpression> aggregateExpressions, SparkPlan child, org.apache.spark.SparkContext sc )  :  Aggregate
Aggregate.Aggregate..computedAggregates ( )  :  Aggregate.ComputedAggregate[ ]
Aggregate.Aggregate..computedSchema ( )  :  org.apache.spark.sql.catalyst.expressions.AttributeReference[ ]
Aggregate.Aggregate..namedGroups ( )  :  scala.collection.Seq<scala.Tuple2<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Attribute>>
Aggregate.Aggregate..resultExpressions ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
Aggregate.Aggregate..resultMap ( )  :  scala.collection.immutable.Map<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Attribute>
Aggregate.otherCopyArgs ( )  :  scala.collection.immutable.List<org.apache.spark.SparkContext>
Aggregate.otherCopyArgs ( )  :  scala.collection.Seq

spark-sql_2.10-1.0.0.jar, CatalystGroupConverter.class
package org.apache.spark.sql.parquet
CatalystGroupConverter.CatalystGroupConverter ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> schema )
CatalystGroupConverter.CatalystGroupConverter ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> schema, org.apache.spark.sql.catalyst.expressions.GenericMutableRow current )
CatalystGroupConverter.current ( )  :  org.apache.spark.sql.catalyst.expressions.GenericMutableRow
CatalystGroupConverter.getCurrentRecord ( )  :  org.apache.spark.sql.catalyst.expressions.GenericMutableRow

spark-sql_2.10-1.0.0.jar, CatalystPrimitiveConverter.class
package org.apache.spark.sql.parquet
CatalystPrimitiveConverter.CatalystPrimitiveConverter ( CatalystGroupConverter parent, int fieldIndex )

spark-sql_2.10-1.0.0.jar, ColumnBuilder.class
package org.apache.spark.sql.columnar
ColumnBuilder.appendFrom ( org.apache.spark.sql.catalyst.expressions.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.0.0.jar, CompressionScheme.class
package org.apache.spark.sql.columnar.compression
CompressionScheme.encoder ( ) [abstract]  :  Encoder<T>

spark-sql_2.10-1.0.0.jar, Encoder<T>.class
package org.apache.spark.sql.columnar.compression
Encoder<T>.compress ( java.nio.ByteBuffer p1, java.nio.ByteBuffer p2, org.apache.spark.sql.columnar.NativeColumnType<T> p3 ) [abstract]  :  java.nio.ByteBuffer
Encoder<T>.gatherCompressibilityStats ( Object p1, org.apache.spark.sql.columnar.NativeColumnType<T> p2 ) [abstract]  :  void

spark-sql_2.10-1.0.0.jar, InMemoryColumnarTableScan.class
package org.apache.spark.sql.columnar
InMemoryColumnarTableScan.cachedColumnBuffers ( )  :  org.apache.spark.rdd.RDD<java.nio.ByteBuffer[ ]>
InMemoryColumnarTableScan.child ( )  :  org.apache.spark.sql.execution.SparkPlan
InMemoryColumnarTableScan.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes, org.apache.spark.sql.execution.SparkPlan child, boolean useCompression )  :  InMemoryColumnarTableScan
InMemoryColumnarTableScan.InMemoryColumnarTableScan ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes, org.apache.spark.sql.execution.SparkPlan child, boolean useCompression )
InMemoryColumnarTableScan.useCompression ( )  :  boolean

spark-sql_2.10-1.0.0.jar, InsertIntoParquetTable.class
package org.apache.spark.sql.parquet
InsertIntoParquetTable.copy ( ParquetRelation relation, org.apache.spark.sql.execution.SparkPlan child, boolean overwrite, org.apache.spark.SparkContext sc )  :  InsertIntoParquetTable
InsertIntoParquetTable.InsertIntoParquetTable ( ParquetRelation relation, org.apache.spark.sql.execution.SparkPlan child, boolean overwrite, org.apache.spark.SparkContext sc )
InsertIntoParquetTable.otherCopyArgs ( )  :  scala.collection.immutable.List<org.apache.spark.SparkContext>
InsertIntoParquetTable.otherCopyArgs ( )  :  scala.collection.Seq
InsertIntoParquetTable.sc ( )  :  org.apache.spark.SparkContext

spark-sql_2.10-1.0.0.jar, IntColumnStats.class
package org.apache.spark.sql.columnar
IntColumnStats.ASCENDING ( ) [static]  :  int
IntColumnStats.contains ( org.apache.spark.sql.catalyst.expressions.Row row, int ordinal )  :  boolean
IntColumnStats.DESCENDING ( ) [static]  :  int
IntColumnStats.gatherStats ( org.apache.spark.sql.catalyst.expressions.Row row, int ordinal )  :  void
IntColumnStats.initialBounds ( )  :  scala.Tuple2<Object,Object>
IntColumnStats.INITIALIZED ( ) [static]  :  int
IntColumnStats.isAbove ( org.apache.spark.sql.catalyst.expressions.Row row, int ordinal )  :  boolean
IntColumnStats.isAscending ( )  :  boolean
IntColumnStats.isBelow ( org.apache.spark.sql.catalyst.expressions.Row row, int ordinal )  :  boolean
IntColumnStats.isDescending ( )  :  boolean
IntColumnStats.isOrdered ( )  :  boolean
IntColumnStats.maxDelta ( )  :  int
IntColumnStats.UNINITIALIZED ( ) [static]  :  int
IntColumnStats.UNORDERED ( ) [static]  :  int

spark-sql_2.10-1.0.0.jar, Limit.class
package org.apache.spark.sql.execution
Limit.copy ( int limit, SparkPlan child, org.apache.spark.SparkContext sc )  :  Limit
Limit.executeCollect ( )  :  org.apache.spark.sql.catalyst.expressions.Row[ ]
Limit.Limit ( int limit, SparkPlan child, org.apache.spark.SparkContext sc )
Limit.otherCopyArgs ( )  :  scala.collection.immutable.List<org.apache.spark.SparkContext>
Limit.otherCopyArgs ( )  :  scala.collection.Seq

spark-sql_2.10-1.0.0.jar, NativeColumnType<T>.class
package org.apache.spark.sql.columnar
NativeColumnType<T>.dataType ( )  :  T
NativeColumnType<T>.NativeColumnType ( T dataType, int typeId, int defaultSize )  :  public

spark-sql_2.10-1.0.0.jar, NullableColumnBuilder.class
package org.apache.spark.sql.columnar
NullableColumnBuilder.appendFrom ( org.apache.spark.sql.catalyst.expressions.Row p1, int p2 ) [abstract]  :  void
NullableColumnBuilder.NullableColumnBuilder..nullCount ( ) [abstract]  :  int
NullableColumnBuilder.NullableColumnBuilder..nullCount_.eq ( int p1 ) [abstract]  :  void
NullableColumnBuilder.NullableColumnBuilder..nulls ( ) [abstract]  :  java.nio.ByteBuffer
NullableColumnBuilder.NullableColumnBuilder..nulls_.eq ( java.nio.ByteBuffer p1 ) [abstract]  :  void
NullableColumnBuilder.NullableColumnBuilder..super.appendFrom ( org.apache.spark.sql.catalyst.expressions.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.0.0.jar, ParquetRelation.class
package org.apache.spark.sql.parquet
ParquetRelation.copy ( String path )  :  ParquetRelation
ParquetRelation.create ( String p1, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan p2, org.apache.hadoop.conf.Configuration p3 ) [static]  :  ParquetRelation
ParquetRelation.createEmpty ( String p1, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> p2, boolean p3, org.apache.hadoop.conf.Configuration p4 ) [static]  :  ParquetRelation
ParquetRelation.defaultCompression ( ) [static]  :  parquet.hadoop.metadata.CompressionCodecName
ParquetRelation.ParquetRelation ( String path )

spark-sql_2.10-1.0.0.jar, ParquetTableScan.class
package org.apache.spark.sql.parquet
ParquetTableScan.columnPruningPred ( )  :  scala.Option<org.apache.spark.sql.catalyst.expressions.Expression>
ParquetTableScan.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, ParquetRelation relation, scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> columnPruningPred, org.apache.spark.SparkContext sc )  :  ParquetTableScan
ParquetTableScan.otherCopyArgs ( )  :  scala.collection.immutable.List<org.apache.spark.SparkContext>
ParquetTableScan.otherCopyArgs ( )  :  scala.collection.Seq
ParquetTableScan.ParquetTableScan ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, ParquetRelation relation, scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> columnPruningPred, org.apache.spark.SparkContext sc )
ParquetTableScan.sc ( )  :  org.apache.spark.SparkContext

spark-sql_2.10-1.0.0.jar, RowWriteSupport.class
package org.apache.spark.sql.parquet
RowWriteSupport.getSchema ( org.apache.hadoop.conf.Configuration configuration )  :  parquet.schema.MessageType
RowWriteSupport.PARQUET_ROW_SCHEMA ( ) [static]  :  String
RowWriteSupport.setSchema ( parquet.schema.MessageType schema, org.apache.hadoop.conf.Configuration configuration )  :  void
RowWriteSupport.write ( org.apache.spark.sql.catalyst.expressions.Row record )  :  void

spark-sql_2.10-1.0.0.jar, Sort.class
package org.apache.spark.sql.execution
Sort.ordering ( )  :  org.apache.spark.sql.catalyst.expressions.RowOrdering

spark-sql_2.10-1.0.0.jar, SparkPlan.class
package org.apache.spark.sql.execution
SparkPlan.buildRow ( scala.collection.Seq<Object> values )  :  org.apache.spark.sql.catalyst.expressions.Row
SparkPlan.executeCollect ( )  :  org.apache.spark.sql.catalyst.expressions.Row[ ]
SparkPlan.logger ( )  :  com.typesafe.scalalogging.slf4j.Logger

spark-sql_2.10-1.0.0.jar, SparkStrategies.class
package org.apache.spark.sql.execution
SparkStrategies.convertToCatalyst ( Object a )  :  Object
SparkStrategies.PartialAggregation ( )  :  SparkStrategies.PartialAggregation.

spark-sql_2.10-1.0.0.jar, SQLContext.class
package org.apache.spark.sql
SQLContext.binaryToLiteral ( byte[ ] a )  :  catalyst.expressions.Literal
SQLContext.booleanToLiteral ( boolean b )  :  catalyst.expressions.Literal
SQLContext.byteToLiteral ( byte b )  :  catalyst.expressions.Literal
SQLContext.createParquetFile ( String path, boolean allowExisting, org.apache.hadoop.conf.Configuration conf, scala.reflect.api.TypeTags.TypeTag<A> p4 )  :  SchemaRDD
SQLContext.createSchemaRDD ( org.apache.spark.rdd.RDD<A> rdd, scala.reflect.api.TypeTags.TypeTag<A> p2 )  :  SchemaRDD
SQLContext.decimalToLiteral ( scala.math.BigDecimal d )  :  catalyst.expressions.Literal
SQLContext.doubleToLiteral ( double d )  :  catalyst.expressions.Literal
SQLContext.DslAttribute ( catalyst.expressions.AttributeReference a )  :  catalyst.dsl.package.ExpressionConversions.DslAttribute
SQLContext.DslExpression ( catalyst.expressions.Expression e )  :  catalyst.dsl.package.ExpressionConversions.DslExpression
SQLContext.DslString ( String s )  :  catalyst.dsl.package.ExpressionConversions.DslString
SQLContext.DslSymbol ( scala.Symbol sym )  :  catalyst.dsl.package.ExpressionConversions.DslSymbol
SQLContext.floatToLiteral ( float f )  :  catalyst.expressions.Literal
SQLContext.inferSchema ( org.apache.spark.rdd.RDD<scala.collection.immutable.Map<String,Object>> rdd )  :  SchemaRDD
SQLContext.intToLiteral ( int i )  :  catalyst.expressions.Literal
SQLContext.logger ( )  :  com.typesafe.scalalogging.slf4j.Logger
SQLContext.logicalPlanToSparkQuery ( catalyst.plans.logical.LogicalPlan plan )  :  SchemaRDD
SQLContext.longToLiteral ( long l )  :  catalyst.expressions.Literal
SQLContext.optimizer ( )  :  catalyst.optimizer.Optimizer.
SQLContext.parquetFile ( String path )  :  SchemaRDD
SQLContext.parser ( )  :  catalyst.SqlParser
SQLContext.registerRDDAsTable ( SchemaRDD rdd, String tableName )  :  void
SQLContext.shortToLiteral ( short s )  :  catalyst.expressions.Literal
SQLContext.sql ( String sqlText )  :  SchemaRDD
SQLContext.stringToLiteral ( String s )  :  catalyst.expressions.Literal
SQLContext.symbolToUnresolvedAttribute ( scala.Symbol s )  :  catalyst.analysis.UnresolvedAttribute
SQLContext.table ( String tableName )  :  SchemaRDD
SQLContext.timestampToLiteral ( java.sql.Timestamp t )  :  catalyst.expressions.Literal
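
The SQLContext members above are the 1.0.0-era surface that later releases reshaped: sql and parquetFile return SchemaRDD rather than DataFrame, table registration goes through registerRDDAsTable, and the implicit literal/DSL conversions live directly on SQLContext. A hedged sketch of 1.0.0-style client code exercising a few of them follows; the Person case class and the data are invented for illustration.

    // Illustrative 1.0.0-era usage; only methods listed above are exercised.
    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext

    case class Person(name: String, age: Int)

    object SqlContext100Demo {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(new SparkConf().setAppName("sql-1.0-demo").setMaster("local"))
        val sqlContext = new SQLContext(sc)
        import sqlContext.createSchemaRDD                  // implicit RDD[A] => SchemaRDD, listed above

        val people = sc.parallelize(Seq(Person("Ada", 36), Person("Alan", 41)))
        sqlContext.registerRDDAsTable(people, "people")    // 1.0.0 name for temp-table registration
        val adults = sqlContext.sql("SELECT name FROM people WHERE age > 18")  // returns SchemaRDD in 1.0.0
        adults.collect().foreach(println)
        sc.stop()
      }
    }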

spark-sql_2.10-1.0.0.jar, TakeOrdered.class
package org.apache.spark.sql.execution
TakeOrdered.copy ( int limit, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> sortOrder, SparkPlan child, org.apache.spark.SparkContext sc )  :  TakeOrdered
TakeOrdered.executeCollect ( )  :  org.apache.spark.sql.catalyst.expressions.Row[ ]
TakeOrdered.ordering ( )  :  org.apache.spark.sql.catalyst.expressions.RowOrdering
TakeOrdered.otherCopyArgs ( )  :  scala.collection.immutable.List<org.apache.spark.SparkContext>
TakeOrdered.otherCopyArgs ( )  :  scala.collection.Seq
TakeOrdered.TakeOrdered ( int limit, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> sortOrder, SparkPlan child, org.apache.spark.SparkContext sc )

spark-sql_2.10-1.0.0.jar, Union.class
package org.apache.spark.sql.execution
Union.copy ( scala.collection.Seq<SparkPlan> children, org.apache.spark.SparkContext sc )  :  Union
Union.otherCopyArgs ( )  :  scala.collection.immutable.List<org.apache.spark.SparkContext>
Union.otherCopyArgs ( )  :  scala.collection.Seq
Union.Union ( scala.collection.Seq<SparkPlan> children, org.apache.spark.SparkContext sc )

Removed Methods (1864)


spark-sql_2.10-1.3.0.jar, AddExchange.class
package org.apache.spark.sql.execution
AddExchange.AddExchange ( org.apache.spark.sql.SQLContext sqlContext )
AddExchange.andThen ( scala.Function1<AddExchange,A> p1 ) [static]  :  scala.Function1<org.apache.spark.sql.SQLContext,A>
AddExchange.apply ( org.apache.spark.sql.catalyst.trees.TreeNode plan )  :  org.apache.spark.sql.catalyst.trees.TreeNode
AddExchange.canEqual ( Object p1 )  :  boolean
AddExchange.compose ( scala.Function1<A,org.apache.spark.sql.SQLContext> p1 ) [static]  :  scala.Function1<A,AddExchange>
AddExchange.copy ( org.apache.spark.sql.SQLContext sqlContext )  :  AddExchange
AddExchange.equals ( Object p1 )  :  boolean
AddExchange.hashCode ( )  :  int
AddExchange.productArity ( )  :  int
AddExchange.productElement ( int p1 )  :  Object
AddExchange.productIterator ( )  :  scala.collection.Iterator<Object>
AddExchange.productPrefix ( )  :  String
AddExchange.sqlContext ( )  :  org.apache.spark.sql.SQLContext
AddExchange.toString ( )  :  String

spark-sql_2.10-1.3.0.jar, Aggregate.class
package org.apache.spark.sql.execution
Aggregate.Aggregate ( boolean partial, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> groupingExpressions, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.NamedExpression> aggregateExpressions, SparkPlan child )
Aggregate.copy ( boolean partial, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> groupingExpressions, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.NamedExpression> aggregateExpressions, SparkPlan child )  :  Aggregate
Aggregate.curried ( ) [static]  :  scala.Function1<Object,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.NamedExpression>,scala.Function1<SparkPlan,Aggregate>>>>
Aggregate.tupled ( ) [static]  :  scala.Function1<scala.Tuple4<Object,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.NamedExpression>,SparkPlan>,Aggregate>

spark-sql_2.10-1.3.0.jar, AggregateEvaluation.class
package org.apache.spark.sql.execution
AggregateEvaluation.AggregateEvaluation ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> schema, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> initialValues, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> update, org.apache.spark.sql.catalyst.expressions.Expression result )
AggregateEvaluation.canEqual ( Object p1 )  :  boolean
AggregateEvaluation.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> schema, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> initialValues, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> update, org.apache.spark.sql.catalyst.expressions.Expression result )  :  AggregateEvaluation
AggregateEvaluation.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<org.apache.spark.sql.catalyst.expressions.Expression,AggregateEvaluation>>>>
AggregateEvaluation.equals ( Object p1 )  :  boolean
AggregateEvaluation.hashCode ( )  :  int
AggregateEvaluation.initialValues ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
AggregateEvaluation.productArity ( )  :  int
AggregateEvaluation.productElement ( int p1 )  :  Object
AggregateEvaluation.productIterator ( )  :  scala.collection.Iterator<Object>
AggregateEvaluation.productPrefix ( )  :  String
AggregateEvaluation.result ( )  :  org.apache.spark.sql.catalyst.expressions.Expression
AggregateEvaluation.schema ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
AggregateEvaluation.toString ( )  :  String
AggregateEvaluation.tupled ( ) [static]  :  scala.Function1<scala.Tuple4<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,org.apache.spark.sql.catalyst.expressions.Expression>,AggregateEvaluation>
AggregateEvaluation.update ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>

spark-sql_2.10-1.3.0.jar, And.class
package org.apache.spark.sql.sources
And.And ( Filter left, Filter right )
And.canEqual ( Object p1 )  :  boolean
And.copy ( Filter left, Filter right )  :  And
And.curried ( ) [static]  :  scala.Function1<Filter,scala.Function1<Filter,And>>
And.equals ( Object p1 )  :  boolean
And.hashCode ( )  :  int
And.left ( )  :  Filter
And.productArity ( )  :  int
And.productElement ( int p1 )  :  Object
And.productIterator ( )  :  scala.collection.Iterator<Object>
And.productPrefix ( )  :  String
And.right ( )  :  Filter
And.toString ( )  :  String
And.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<Filter,Filter>,And>

spark-sql_2.10-1.3.0.jar, BaseRelation.class
package org.apache.spark.sql.sources
BaseRelation.BaseRelation ( )
BaseRelation.schema ( ) [abstract]  :  org.apache.spark.sql.types.StructType
BaseRelation.sizeInBytes ( )  :  long
BaseRelation.sqlContext ( ) [abstract]  :  org.apache.spark.sql.SQLContext
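
BaseRelation is the root of the external data sources API (org.apache.spark.sql.sources), which exists in 1.3.0 but has no counterpart in 1.0.0, so any client-defined relation loses its superclass entirely on the older version. A minimal sketch satisfying the two abstract members listed above; the class name and schema are invented, and a real source would also mix in a scan interface (for example the CatalystScan trait listed further down) to produce rows.

    // Hedged sketch: the smallest possible relation implementing the abstract members above.
    import org.apache.spark.sql.SQLContext
    import org.apache.spark.sql.sources.BaseRelation
    import org.apache.spark.sql.types.{StringType, StructField, StructType}

    class DummyRelation(override val sqlContext: SQLContext) extends BaseRelation {
      // schema() [abstract] : StructType -- a single nullable string column
      override def schema: StructType =
        StructType(Seq(StructField("value", StringType, nullable = true)))
      // sizeInBytes() already has a default implementation, so it is not overridden here
    }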

spark-sql_2.10-1.3.0.jar, BatchPythonEvaluation.class
package org.apache.spark.sql.execution
BatchPythonEvaluation.BatchPythonEvaluation ( PythonUDF udf, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, SparkPlan child )
BatchPythonEvaluation.canEqual ( Object p1 )  :  boolean
BatchPythonEvaluation.child ( )  :  SparkPlan
BatchPythonEvaluation.children ( )  :  scala.collection.immutable.List<SparkPlan>
BatchPythonEvaluation.children ( )  :  scala.collection.Seq
BatchPythonEvaluation.copy ( PythonUDF udf, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, SparkPlan child )  :  BatchPythonEvaluation
BatchPythonEvaluation.curried ( ) [static]  :  scala.Function1<PythonUDF,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<SparkPlan,BatchPythonEvaluation>>>
BatchPythonEvaluation.equals ( Object p1 )  :  boolean
BatchPythonEvaluation.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
BatchPythonEvaluation.hashCode ( )  :  int
BatchPythonEvaluation.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
BatchPythonEvaluation.productArity ( )  :  int
BatchPythonEvaluation.productElement ( int p1 )  :  Object
BatchPythonEvaluation.productIterator ( )  :  scala.collection.Iterator<Object>
BatchPythonEvaluation.productPrefix ( )  :  String
BatchPythonEvaluation.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<PythonUDF,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,SparkPlan>,BatchPythonEvaluation>
BatchPythonEvaluation.udf ( )  :  PythonUDF

spark-sql_2.10-1.3.0.jar, BinaryColumnStats.class
package org.apache.spark.sql.columnar
BinaryColumnStats.BinaryColumnStats ( )

spark-sql_2.10-1.3.0.jar, BroadcastHashJoin.class
package org.apache.spark.sql.execution.joins
BroadcastHashJoin.BroadcastHashJoin ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, package.BuildSide buildSide, org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )
BroadcastHashJoin.buildKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastHashJoin.buildPlan ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastHashJoin.buildSide ( )  :  package.BuildSide
BroadcastHashJoin.buildSideKeyGenerator ( )  :  org.apache.spark.sql.catalyst.expressions.package.Projection
BroadcastHashJoin.canEqual ( Object p1 )  :  boolean
BroadcastHashJoin.children ( )  :  scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan>
BroadcastHashJoin.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, package.BuildSide buildSide, org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )  :  BroadcastHashJoin
BroadcastHashJoin.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<package.BuildSide,scala.Function1<org.apache.spark.sql.execution.SparkPlan,scala.Function1<org.apache.spark.sql.execution.SparkPlan,BroadcastHashJoin>>>>>
BroadcastHashJoin.equals ( Object p1 )  :  boolean
BroadcastHashJoin.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
BroadcastHashJoin.hashCode ( )  :  int
BroadcastHashJoin.hashJoin ( scala.collection.Iterator<org.apache.spark.sql.Row> streamIter, HashedRelation hashedRelation )  :  scala.collection.Iterator<org.apache.spark.sql.Row>
BroadcastHashJoin.left ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
BroadcastHashJoin.left ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastHashJoin.leftKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastHashJoin.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
BroadcastHashJoin.outputPartitioning ( )  :  org.apache.spark.sql.catalyst.plans.physical.Partitioning
BroadcastHashJoin.productArity ( )  :  int
BroadcastHashJoin.productElement ( int p1 )  :  Object
BroadcastHashJoin.productIterator ( )  :  scala.collection.Iterator<Object>
BroadcastHashJoin.productPrefix ( )  :  String
BroadcastHashJoin.requiredChildDistribution ( )  :  scala.collection.immutable.List<org.apache.spark.sql.catalyst.plans.physical.UnspecifiedDistribution.>
BroadcastHashJoin.requiredChildDistribution ( )  :  scala.collection.Seq
BroadcastHashJoin.right ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
BroadcastHashJoin.right ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastHashJoin.rightKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastHashJoin.streamedKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastHashJoin.streamedPlan ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastHashJoin.streamSideKeyGenerator ( )  :  scala.Function0<org.apache.spark.sql.catalyst.expressions.package.MutableProjection>
BroadcastHashJoin.timeout ( )  :  scala.concurrent.duration.Duration
BroadcastHashJoin.tupled ( ) [static]  :  scala.Function1<scala.Tuple5<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,package.BuildSide,org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.execution.SparkPlan>,BroadcastHashJoin>

spark-sql_2.10-1.3.0.jar, BroadcastLeftSemiJoinHash.class
package org.apache.spark.sql.execution.joins
BroadcastLeftSemiJoinHash.BroadcastLeftSemiJoinHash ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )
BroadcastLeftSemiJoinHash.buildKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastLeftSemiJoinHash.buildPlan ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastLeftSemiJoinHash.buildSide ( )  :  package.BuildRight.
BroadcastLeftSemiJoinHash.buildSide ( )  :  package.BuildSide
BroadcastLeftSemiJoinHash.buildSideKeyGenerator ( )  :  org.apache.spark.sql.catalyst.expressions.package.Projection
BroadcastLeftSemiJoinHash.canEqual ( Object p1 )  :  boolean
BroadcastLeftSemiJoinHash.children ( )  :  scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan>
BroadcastLeftSemiJoinHash.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )  :  BroadcastLeftSemiJoinHash
BroadcastLeftSemiJoinHash.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<org.apache.spark.sql.execution.SparkPlan,scala.Function1<org.apache.spark.sql.execution.SparkPlan,BroadcastLeftSemiJoinHash>>>>
BroadcastLeftSemiJoinHash.equals ( Object p1 )  :  boolean
BroadcastLeftSemiJoinHash.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
BroadcastLeftSemiJoinHash.hashCode ( )  :  int
BroadcastLeftSemiJoinHash.hashJoin ( scala.collection.Iterator<org.apache.spark.sql.Row> streamIter, HashedRelation hashedRelation )  :  scala.collection.Iterator<org.apache.spark.sql.Row>
BroadcastLeftSemiJoinHash.left ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
BroadcastLeftSemiJoinHash.left ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastLeftSemiJoinHash.leftKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastLeftSemiJoinHash.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
BroadcastLeftSemiJoinHash.productArity ( )  :  int
BroadcastLeftSemiJoinHash.productElement ( int p1 )  :  Object
BroadcastLeftSemiJoinHash.productIterator ( )  :  scala.collection.Iterator<Object>
BroadcastLeftSemiJoinHash.productPrefix ( )  :  String
BroadcastLeftSemiJoinHash.right ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
BroadcastLeftSemiJoinHash.right ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastLeftSemiJoinHash.rightKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastLeftSemiJoinHash.streamedKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastLeftSemiJoinHash.streamedPlan ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastLeftSemiJoinHash.streamSideKeyGenerator ( )  :  scala.Function0<org.apache.spark.sql.catalyst.expressions.package.MutableProjection>
BroadcastLeftSemiJoinHash.tupled ( ) [static]  :  scala.Function1<scala.Tuple4<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.execution.SparkPlan>,BroadcastLeftSemiJoinHash>

spark-sql_2.10-1.3.0.jar, BroadcastNestedLoopJoin.class
package org.apache.spark.sql.execution.joins
BroadcastNestedLoopJoin.BroadcastNestedLoopJoin ( org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right, package.BuildSide buildSide, org.apache.spark.sql.catalyst.plans.JoinType joinType, scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> condition )
BroadcastNestedLoopJoin.buildSide ( )  :  package.BuildSide
BroadcastNestedLoopJoin.canEqual ( Object p1 )  :  boolean
BroadcastNestedLoopJoin.children ( )  :  scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan>
BroadcastNestedLoopJoin.condition ( )  :  scala.Option<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastNestedLoopJoin.copy ( org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right, package.BuildSide buildSide, org.apache.spark.sql.catalyst.plans.JoinType joinType, scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> condition )  :  BroadcastNestedLoopJoin
BroadcastNestedLoopJoin.curried ( ) [static]  :  scala.Function1<org.apache.spark.sql.execution.SparkPlan,scala.Function1<org.apache.spark.sql.execution.SparkPlan,scala.Function1<package.BuildSide,scala.Function1<org.apache.spark.sql.catalyst.plans.JoinType,scala.Function1<scala.Option<org.apache.spark.sql.catalyst.expressions.Expression>,BroadcastNestedLoopJoin>>>>>
BroadcastNestedLoopJoin.equals ( Object p1 )  :  boolean
BroadcastNestedLoopJoin.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
BroadcastNestedLoopJoin.hashCode ( )  :  int
BroadcastNestedLoopJoin.joinType ( )  :  org.apache.spark.sql.catalyst.plans.JoinType
BroadcastNestedLoopJoin.left ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
BroadcastNestedLoopJoin.left ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastNestedLoopJoin.BroadcastNestedLoopJoin..boundCondition ( )  :  scala.Function1<org.apache.spark.sql.Row,Object>
BroadcastNestedLoopJoin.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
BroadcastNestedLoopJoin.outputPartitioning ( )  :  org.apache.spark.sql.catalyst.plans.physical.Partitioning
BroadcastNestedLoopJoin.productArity ( )  :  int
BroadcastNestedLoopJoin.productElement ( int p1 )  :  Object
BroadcastNestedLoopJoin.productIterator ( )  :  scala.collection.Iterator<Object>
BroadcastNestedLoopJoin.productPrefix ( )  :  String
BroadcastNestedLoopJoin.right ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
BroadcastNestedLoopJoin.right ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastNestedLoopJoin.tupled ( ) [static]  :  scala.Function1<scala.Tuple5<org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.execution.SparkPlan,package.BuildSide,org.apache.spark.sql.catalyst.plans.JoinType,scala.Option<org.apache.spark.sql.catalyst.expressions.Expression>>,BroadcastNestedLoopJoin>

spark-sql_2.10-1.3.0.jar, CachedBatch.class
package org.apache.spark.sql.columnar
CachedBatch.buffers ( )  :  byte[ ][ ]
CachedBatch.CachedBatch ( byte[ ][ ] buffers, org.apache.spark.sql.Row stats )
CachedBatch.canEqual ( Object p1 )  :  boolean
CachedBatch.copy ( byte[ ][ ] buffers, org.apache.spark.sql.Row stats )  :  CachedBatch
CachedBatch.curried ( ) [static]  :  scala.Function1<byte[ ][ ],scala.Function1<org.apache.spark.sql.Row,CachedBatch>>
CachedBatch.equals ( Object p1 )  :  boolean
CachedBatch.hashCode ( )  :  int
CachedBatch.productArity ( )  :  int
CachedBatch.productElement ( int p1 )  :  Object
CachedBatch.productIterator ( )  :  scala.collection.Iterator<Object>
CachedBatch.productPrefix ( )  :  String
CachedBatch.stats ( )  :  org.apache.spark.sql.Row
CachedBatch.toString ( )  :  String
CachedBatch.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<byte[ ][ ],org.apache.spark.sql.Row>,CachedBatch>

spark-sql_2.10-1.3.0.jar, CachedData.class
package org.apache.spark.sql
CachedData.CachedData ( catalyst.plans.logical.LogicalPlan plan, columnar.InMemoryRelation cachedRepresentation )
CachedData.cachedRepresentation ( )  :  columnar.InMemoryRelation
CachedData.canEqual ( Object p1 )  :  boolean
CachedData.copy ( catalyst.plans.logical.LogicalPlan plan, columnar.InMemoryRelation cachedRepresentation )  :  CachedData
CachedData.curried ( ) [static]  :  scala.Function1<catalyst.plans.logical.LogicalPlan,scala.Function1<columnar.InMemoryRelation,CachedData>>
CachedData.equals ( Object p1 )  :  boolean
CachedData.hashCode ( )  :  int
CachedData.plan ( )  :  catalyst.plans.logical.LogicalPlan
CachedData.productArity ( )  :  int
CachedData.productElement ( int p1 )  :  Object
CachedData.productIterator ( )  :  scala.collection.Iterator<Object>
CachedData.productPrefix ( )  :  String
CachedData.toString ( )  :  String
CachedData.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<catalyst.plans.logical.LogicalPlan,columnar.InMemoryRelation>,CachedData>

spark-sql_2.10-1.3.0.jar, CacheManager.class
package org.apache.spark.sql
CacheManager.CacheManager ( SQLContext sqlContext )
CacheManager.cacheQuery ( DataFrame query, scala.Option<String> tableName, org.apache.spark.storage.StorageLevel storageLevel )  :  void
CacheManager.cacheTable ( String tableName )  :  void
CacheManager.clearCache ( )  :  void
CacheManager.invalidateCache ( catalyst.plans.logical.LogicalPlan plan )  :  void
CacheManager.isCached ( String tableName )  :  boolean
CacheManager.tryUncacheQuery ( DataFrame query, boolean blocking )  :  boolean
CacheManager.uncacheTable ( String tableName )  :  void
CacheManager.useCachedData ( catalyst.plans.logical.LogicalPlan plan )  :  catalyst.plans.logical.LogicalPlan

spark-sql_2.10-1.3.0.jar, CacheTableCommand.class
package org.apache.spark.sql.execution
CacheTableCommand.CacheTableCommand ( String tableName, scala.Option<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan> plan, boolean isLazy )
CacheTableCommand.canEqual ( Object p1 )  :  boolean
CacheTableCommand.copy ( String tableName, scala.Option<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan> plan, boolean isLazy )  :  CacheTableCommand
CacheTableCommand.curried ( ) [static]  :  scala.Function1<String,scala.Function1<scala.Option<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan>,scala.Function1<Object,CacheTableCommand>>>
CacheTableCommand.equals ( Object p1 )  :  boolean
CacheTableCommand.hashCode ( )  :  int
CacheTableCommand.isLazy ( )  :  boolean
CacheTableCommand.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
CacheTableCommand.plan ( )  :  scala.Option<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan>
CacheTableCommand.productArity ( )  :  int
CacheTableCommand.productElement ( int p1 )  :  Object
CacheTableCommand.productIterator ( )  :  scala.collection.Iterator<Object>
CacheTableCommand.productPrefix ( )  :  String
CacheTableCommand.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
CacheTableCommand.tableName ( )  :  String
CacheTableCommand.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<String,scala.Option<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan>,Object>,CacheTableCommand>

spark-sql_2.10-1.3.0.jar, CartesianProduct.class
package org.apache.spark.sql.execution.joins
CartesianProduct.canEqual ( Object p1 )  :  boolean
CartesianProduct.CartesianProduct ( org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )
CartesianProduct.children ( )  :  scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan>
CartesianProduct.copy ( org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )  :  CartesianProduct
CartesianProduct.curried ( ) [static]  :  scala.Function1<org.apache.spark.sql.execution.SparkPlan,scala.Function1<org.apache.spark.sql.execution.SparkPlan,CartesianProduct>>
CartesianProduct.equals ( Object p1 )  :  boolean
CartesianProduct.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
CartesianProduct.hashCode ( )  :  int
CartesianProduct.left ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
CartesianProduct.left ( )  :  org.apache.spark.sql.execution.SparkPlan
CartesianProduct.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
CartesianProduct.productArity ( )  :  int
CartesianProduct.productElement ( int p1 )  :  Object
CartesianProduct.productIterator ( )  :  scala.collection.Iterator<Object>
CartesianProduct.productPrefix ( )  :  String
CartesianProduct.right ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
CartesianProduct.right ( )  :  org.apache.spark.sql.execution.SparkPlan
CartesianProduct.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.execution.SparkPlan>,CartesianProduct>

spark-sql_2.10-1.3.0.jar, CaseInsensitiveMap.class
package org.apache.spark.sql.sources
CaseInsensitiveMap.CaseInsensitiveMap ( scala.collection.immutable.Map<String,String> map )

spark-sql_2.10-1.3.0.jar, CatalystArrayContainsNullConverter.class
package org.apache.spark.sql.parquet
CatalystArrayContainsNullConverter.CatalystArrayContainsNullConverter ( org.apache.spark.sql.types.DataType elementType, int index, CatalystConverter parent )

spark-sql_2.10-1.3.0.jar, CatalystArrayConverter.class
package org.apache.spark.sql.parquet
CatalystArrayConverter.CatalystArrayConverter ( org.apache.spark.sql.types.DataType elementType, int index, CatalystConverter parent )

spark-sql_2.10-1.3.0.jar, CatalystConverter.class
package org.apache.spark.sql.parquet
CatalystConverter.ARRAY_CONTAINS_NULL_BAG_SCHEMA_NAME ( ) [static]  :  String
CatalystConverter.ARRAY_ELEMENTS_SCHEMA_NAME ( ) [static]  :  String
CatalystConverter.CatalystConverter ( )
CatalystConverter.clearBuffer ( ) [abstract]  :  void
CatalystConverter.getCurrentRecord ( )  :  org.apache.spark.sql.Row
CatalystConverter.index ( ) [abstract]  :  int
CatalystConverter.isRootConverter ( )  :  boolean
CatalystConverter.MAP_KEY_SCHEMA_NAME ( ) [static]  :  String
CatalystConverter.MAP_SCHEMA_NAME ( ) [static]  :  String
CatalystConverter.MAP_VALUE_SCHEMA_NAME ( ) [static]  :  String
CatalystConverter.parent ( ) [abstract]  :  CatalystConverter
CatalystConverter.readDecimal ( org.apache.spark.sql.types.Decimal dest, parquet.io.api.Binary value, org.apache.spark.sql.types.DecimalType ctype )  :  void
CatalystConverter.readTimestamp ( parquet.io.api.Binary value )  :  java.sql.Timestamp
CatalystConverter.size ( ) [abstract]  :  int
CatalystConverter.THRIFT_ARRAY_ELEMENTS_SCHEMA_NAME_SUFFIX ( ) [static]  :  String
CatalystConverter.updateBinary ( int fieldIndex, parquet.io.api.Binary value )  :  void
CatalystConverter.updateBoolean ( int fieldIndex, boolean value )  :  void
CatalystConverter.updateByte ( int fieldIndex, byte value )  :  void
CatalystConverter.updateDecimal ( int fieldIndex, parquet.io.api.Binary value, org.apache.spark.sql.types.DecimalType ctype )  :  void
CatalystConverter.updateDouble ( int fieldIndex, double value )  :  void
CatalystConverter.updateField ( int p1, Object p2 ) [abstract]  :  void
CatalystConverter.updateFloat ( int fieldIndex, float value )  :  void
CatalystConverter.updateInt ( int fieldIndex, int value )  :  void
CatalystConverter.updateLong ( int fieldIndex, long value )  :  void
CatalystConverter.updateShort ( int fieldIndex, short value )  :  void
CatalystConverter.updateString ( int fieldIndex, String value )  :  void
CatalystConverter.updateTimestamp ( int fieldIndex, parquet.io.api.Binary value )  :  void

spark-sql_2.10-1.3.0.jar, CatalystGroupConverter.class
package org.apache.spark.sql.parquet
CatalystGroupConverter.buffer ( )  :  scala.collection.mutable.ArrayBuffer<org.apache.spark.sql.Row>
CatalystGroupConverter.buffer_.eq ( scala.collection.mutable.ArrayBuffer<org.apache.spark.sql.Row> p1 )  :  void
CatalystGroupConverter.CatalystGroupConverter ( org.apache.spark.sql.catalyst.expressions.Attribute[ ] attributes )
CatalystGroupConverter.CatalystGroupConverter ( org.apache.spark.sql.types.StructField[ ] schema, int index, CatalystConverter parent )
CatalystGroupConverter.CatalystGroupConverter ( org.apache.spark.sql.types.StructField[ ] schema, int index, CatalystConverter parent, scala.collection.mutable.ArrayBuffer<Object> current, scala.collection.mutable.ArrayBuffer<org.apache.spark.sql.Row> buffer )
CatalystGroupConverter.clearBuffer ( )  :  void
CatalystGroupConverter.current ( )  :  scala.collection.mutable.ArrayBuffer<Object>
CatalystGroupConverter.current_.eq ( scala.collection.mutable.ArrayBuffer<Object> p1 )  :  void
CatalystGroupConverter.getCurrentRecord ( )  :  org.apache.spark.sql.Row
CatalystGroupConverter.index ( )  :  int
CatalystGroupConverter.parent ( )  :  CatalystConverter
CatalystGroupConverter.schema ( )  :  org.apache.spark.sql.types.StructField[ ]
CatalystGroupConverter.size ( )  :  int
CatalystGroupConverter.updateField ( int fieldIndex, Object value )  :  void

spark-sql_2.10-1.3.0.jar, CatalystMapConverter.class
package org.apache.spark.sql.parquet
CatalystMapConverter.CatalystMapConverter ( org.apache.spark.sql.types.StructField[ ] schema, int index, CatalystConverter parent )

spark-sql_2.10-1.3.0.jar, CatalystNativeArrayConverter.class
package org.apache.spark.sql.parquet
CatalystNativeArrayConverter.CatalystNativeArrayConverter ( org.apache.spark.sql.types.NativeType elementType, int index, CatalystConverter parent, int capacity )

spark-sql_2.10-1.3.0.jar, CatalystPrimitiveConverter.class
package org.apache.spark.sql.parquet
CatalystPrimitiveConverter.CatalystPrimitiveConverter ( CatalystConverter parent, int fieldIndex )

spark-sql_2.10-1.3.0.jar, CatalystPrimitiveRowConverter.class
package org.apache.spark.sql.parquet
CatalystPrimitiveRowConverter.CatalystPrimitiveRowConverter ( org.apache.spark.sql.catalyst.expressions.Attribute[ ] attributes )

spark-sql_2.10-1.3.0.jar, CatalystPrimitiveStringConverter.class
package org.apache.spark.sql.parquet
CatalystPrimitiveStringConverter.CatalystPrimitiveStringConverter ( CatalystConverter parent, int fieldIndex )

spark-sql_2.10-1.3.0.jar, CatalystScan.class
package org.apache.spark.sql.sources
CatalystScan.buildScan ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> p1, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> p2 ) [abstract]  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>

spark-sql_2.10-1.3.0.jar, CatalystStructConverter.class
package org.apache.spark.sql.parquet
CatalystStructConverter.CatalystStructConverter ( org.apache.spark.sql.types.StructField[ ] schema, int index, CatalystConverter parent )

spark-sql_2.10-1.3.0.jar, Column.class
package org.apache.spark.sql
Column.and ( Column other )  :  Column
Column.apply ( catalyst.expressions.Expression p1 ) [static]  :  Column
Column.apply ( String p1 ) [static]  :  Column
Column.as ( scala.Symbol alias )  :  Column
Column.as ( String alias )  :  Column
Column.asc ( )  :  Column
Column.cast ( types.DataType to )  :  Column
Column.cast ( String to )  :  Column
Column.Column ( catalyst.expressions.Expression expr )
Column.Column ( String name )
Column.contains ( Object other )  :  Column
Column.desc ( )  :  Column
Column.divide ( Object other )  :  Column
Column.endsWith ( Column other )  :  Column
Column.endsWith ( String literal )  :  Column
Column.eqNullSafe ( Object other )  :  Column
Column.equalTo ( Object other )  :  Column
Column.explain ( boolean extended )  :  void
Column.expr ( )  :  catalyst.expressions.Expression
Column.geq ( Object other )  :  Column
Column.getField ( String fieldName )  :  Column
Column.getItem ( int ordinal )  :  Column
Column.gt ( Object other )  :  Column
Column.in ( Column... list )  :  Column
Column.in ( scala.collection.Seq<Column> list )  :  Column
Column.isNotNull ( )  :  Column
Column.isNull ( )  :  Column
Column.leq ( Object other )  :  Column
Column.like ( String literal )  :  Column
Column.lt ( Object other )  :  Column
Column.minus ( Object other )  :  Column
Column.mod ( Object other )  :  Column
Column.multiply ( Object other )  :  Column
Column.notEqual ( Object other )  :  Column
Column.or ( Column other )  :  Column
Column.plus ( Object other )  :  Column
Column.rlike ( String literal )  :  Column
Column.startsWith ( Column other )  :  Column
Column.startsWith ( String literal )  :  Column
Column.substr ( int startPos, int len )  :  Column
Column.substr ( Column startPos, Column len )  :  Column
Column.toString ( )  :  String
Column.unapply ( Column p1 ) [static]  :  scala.Option<catalyst.expressions.Expression>
Column.unary_.bang ( )  :  Column
Column.unary_.minus ( )  :  Column
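
Column is the expression handle of the DataFrame API introduced in 1.3.0 (the matching DataFrame entries appear later in this list), so none of these members resolve against 1.0.0. A hedged sketch of typical 1.3.0-style code touching a few of the listed members; the data and column names are invented for illustration.

    // Illustrative 1.3.0-era Column/DataFrame usage; everything here is absent from 1.0.0.
    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext

    object ColumnDemo {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(new SparkConf().setAppName("column-demo").setMaster("local"))
        val sqlContext = new SQLContext(sc)
        import sqlContext.implicits._                      // brings toDF into scope

        val df = sc.parallelize(Seq(("Ada", 36), ("Alan", 41))).toDF("name", "age")
        val adults = df.filter(df("age").gt(18))           // Column.gt, DataFrame.filter (both listed)
          .orderBy(df("name").asc)                         // Column.asc, DataFrame.orderBy
          .select(df("name").as("adult_name"))             // Column.as
        adults.collect().foreach(println)
        sc.stop()
      }
    }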

spark-sql_2.10-1.3.0.jar, ColumnBuilder.class
package org.apache.spark.sql.columnar
ColumnBuilder.appendFrom ( org.apache.spark.sql.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.3.0.jar, ColumnName.class
package org.apache.spark.sql
ColumnName.ColumnName ( String name )

spark-sql_2.10-1.3.0.jar, ColumnStats.class
package org.apache.spark.sql.columnar
ColumnStats.collectedStatistics ( ) [abstract]  :  org.apache.spark.sql.Row
ColumnStats.count ( ) [abstract]  :  int
ColumnStats.count_.eq ( int p1 ) [abstract]  :  void
ColumnStats.gatherStats ( org.apache.spark.sql.Row p1, int p2 ) [abstract]  :  void
ColumnStats.nullCount ( ) [abstract]  :  int
ColumnStats.nullCount_.eq ( int p1 ) [abstract]  :  void
ColumnStats.sizeInBytes ( ) [abstract]  :  long
ColumnStats.sizeInBytes_.eq ( long p1 ) [abstract]  :  void

spark-sql_2.10-1.3.0.jar, CompressionScheme.class
package org.apache.spark.sql.columnar.compression
CompressionScheme.encoder ( org.apache.spark.sql.columnar.NativeColumnType<T> p1 ) [abstract]  :  Encoder<T>

spark-sql_2.10-1.3.0.jar, CreatableRelationProvider.class
package org.apache.spark.sql.sources
CreatableRelationProvider.createRelation ( org.apache.spark.sql.SQLContext p1, org.apache.spark.sql.SaveMode p2, scala.collection.immutable.Map<String,String> p3, org.apache.spark.sql.DataFrame p4 ) [abstract]  :  BaseRelation

spark-sql_2.10-1.3.0.jar, CreateTableUsing.class
package org.apache.spark.sql.sources
CreateTableUsing.allowExisting ( )  :  boolean
CreateTableUsing.canEqual ( Object p1 )  :  boolean
CreateTableUsing.copy ( String tableName, scala.Option<org.apache.spark.sql.types.StructType> userSpecifiedSchema, String provider, boolean temporary, scala.collection.immutable.Map<String,String> options, boolean allowExisting, boolean managedIfNoPath )  :  CreateTableUsing
CreateTableUsing.CreateTableUsing ( String tableName, scala.Option<org.apache.spark.sql.types.StructType> userSpecifiedSchema, String provider, boolean temporary, scala.collection.immutable.Map<String,String> options, boolean allowExisting, boolean managedIfNoPath )
CreateTableUsing.curried ( ) [static]  :  scala.Function1<String,scala.Function1<scala.Option<org.apache.spark.sql.types.StructType>,scala.Function1<String,scala.Function1<Object,scala.Function1<scala.collection.immutable.Map<String,String>,scala.Function1<Object,scala.Function1<Object,CreateTableUsing>>>>>>>
CreateTableUsing.equals ( Object p1 )  :  boolean
CreateTableUsing.hashCode ( )  :  int
CreateTableUsing.managedIfNoPath ( )  :  boolean
CreateTableUsing.options ( )  :  scala.collection.immutable.Map<String,String>
CreateTableUsing.productArity ( )  :  int
CreateTableUsing.productElement ( int p1 )  :  Object
CreateTableUsing.productIterator ( )  :  scala.collection.Iterator<Object>
CreateTableUsing.productPrefix ( )  :  String
CreateTableUsing.provider ( )  :  String
CreateTableUsing.tableName ( )  :  String
CreateTableUsing.temporary ( )  :  boolean
CreateTableUsing.tupled ( ) [static]  :  scala.Function1<scala.Tuple7<String,scala.Option<org.apache.spark.sql.types.StructType>,String,Object,scala.collection.immutable.Map<String,String>,Object,Object>,CreateTableUsing>
CreateTableUsing.userSpecifiedSchema ( )  :  scala.Option<org.apache.spark.sql.types.StructType>

spark-sql_2.10-1.3.0.jar, CreateTableUsingAsSelect.class
package org.apache.spark.sql.sources
CreateTableUsingAsSelect.canEqual ( Object p1 )  :  boolean
CreateTableUsingAsSelect.child ( )  :  org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
CreateTableUsingAsSelect.child ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
CreateTableUsingAsSelect.copy ( String tableName, String provider, boolean temporary, org.apache.spark.sql.SaveMode mode, scala.collection.immutable.Map<String,String> options, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan child )  :  CreateTableUsingAsSelect
CreateTableUsingAsSelect.CreateTableUsingAsSelect ( String tableName, String provider, boolean temporary, org.apache.spark.sql.SaveMode mode, scala.collection.immutable.Map<String,String> options, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan child )
CreateTableUsingAsSelect.curried ( ) [static]  :  scala.Function1<String,scala.Function1<String,scala.Function1<Object,scala.Function1<org.apache.spark.sql.SaveMode,scala.Function1<scala.collection.immutable.Map<String,String>,scala.Function1<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,CreateTableUsingAsSelect>>>>>>
CreateTableUsingAsSelect.equals ( Object p1 )  :  boolean
CreateTableUsingAsSelect.hashCode ( )  :  int
CreateTableUsingAsSelect.mode ( )  :  org.apache.spark.sql.SaveMode
CreateTableUsingAsSelect.options ( )  :  scala.collection.immutable.Map<String,String>
CreateTableUsingAsSelect.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
CreateTableUsingAsSelect.productArity ( )  :  int
CreateTableUsingAsSelect.productElement ( int p1 )  :  Object
CreateTableUsingAsSelect.productIterator ( )  :  scala.collection.Iterator<Object>
CreateTableUsingAsSelect.productPrefix ( )  :  String
CreateTableUsingAsSelect.provider ( )  :  String
CreateTableUsingAsSelect.tableName ( )  :  String
CreateTableUsingAsSelect.temporary ( )  :  boolean
CreateTableUsingAsSelect.tupled ( ) [static]  :  scala.Function1<scala.Tuple6<String,String,Object,org.apache.spark.sql.SaveMode,scala.collection.immutable.Map<String,String>,org.apache.spark.sql.catalyst.plans.logical.LogicalPlan>,CreateTableUsingAsSelect>

spark-sql_2.10-1.3.0.jar, CreateTempTableUsing.class
package org.apache.spark.sql.sources
CreateTempTableUsing.canEqual ( Object p1 )  :  boolean
CreateTempTableUsing.copy ( String tableName, scala.Option<org.apache.spark.sql.types.StructType> userSpecifiedSchema, String provider, scala.collection.immutable.Map<String,String> options )  :  CreateTempTableUsing
CreateTempTableUsing.CreateTempTableUsing ( String tableName, scala.Option<org.apache.spark.sql.types.StructType> userSpecifiedSchema, String provider, scala.collection.immutable.Map<String,String> options )
CreateTempTableUsing.curried ( ) [static]  :  scala.Function1<String,scala.Function1<scala.Option<org.apache.spark.sql.types.StructType>,scala.Function1<String,scala.Function1<scala.collection.immutable.Map<String,String>,CreateTempTableUsing>>>>
CreateTempTableUsing.equals ( Object p1 )  :  boolean
CreateTempTableUsing.hashCode ( )  :  int
CreateTempTableUsing.options ( )  :  scala.collection.immutable.Map<String,String>
CreateTempTableUsing.productArity ( )  :  int
CreateTempTableUsing.productElement ( int p1 )  :  Object
CreateTempTableUsing.productIterator ( )  :  scala.collection.Iterator<Object>
CreateTempTableUsing.productPrefix ( )  :  String
CreateTempTableUsing.provider ( )  :  String
CreateTempTableUsing.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<scala.runtime.Nothing.>
CreateTempTableUsing.tableName ( )  :  String
CreateTempTableUsing.tupled ( ) [static]  :  scala.Function1<scala.Tuple4<String,scala.Option<org.apache.spark.sql.types.StructType>,String,scala.collection.immutable.Map<String,String>>,CreateTempTableUsing>
CreateTempTableUsing.userSpecifiedSchema ( )  :  scala.Option<org.apache.spark.sql.types.StructType>

spark-sql_2.10-1.3.0.jar, CreateTempTableUsingAsSelect.class
package org.apache.spark.sql.sources
CreateTempTableUsingAsSelect.canEqual ( Object p1 )  :  boolean
CreateTempTableUsingAsSelect.copy ( String tableName, String provider, org.apache.spark.sql.SaveMode mode, scala.collection.immutable.Map<String,String> options, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan query )  :  CreateTempTableUsingAsSelect
CreateTempTableUsingAsSelect.CreateTempTableUsingAsSelect ( String tableName, String provider, org.apache.spark.sql.SaveMode mode, scala.collection.immutable.Map<String,String> options, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan query )
CreateTempTableUsingAsSelect.curried ( ) [static]  :  scala.Function1<String,scala.Function1<String,scala.Function1<org.apache.spark.sql.SaveMode,scala.Function1<scala.collection.immutable.Map<String,String>,scala.Function1<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,CreateTempTableUsingAsSelect>>>>>
CreateTempTableUsingAsSelect.equals ( Object p1 )  :  boolean
CreateTempTableUsingAsSelect.hashCode ( )  :  int
CreateTempTableUsingAsSelect.mode ( )  :  org.apache.spark.sql.SaveMode
CreateTempTableUsingAsSelect.options ( )  :  scala.collection.immutable.Map<String,String>
CreateTempTableUsingAsSelect.productArity ( )  :  int
CreateTempTableUsingAsSelect.productElement ( int p1 )  :  Object
CreateTempTableUsingAsSelect.productIterator ( )  :  scala.collection.Iterator<Object>
CreateTempTableUsingAsSelect.productPrefix ( )  :  String
CreateTempTableUsingAsSelect.provider ( )  :  String
CreateTempTableUsingAsSelect.query ( )  :  org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
CreateTempTableUsingAsSelect.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<scala.runtime.Nothing.>
CreateTempTableUsingAsSelect.tableName ( )  :  String
CreateTempTableUsingAsSelect.tupled ( ) [static]  :  scala.Function1<scala.Tuple5<String,String,org.apache.spark.sql.SaveMode,scala.collection.immutable.Map<String,String>,org.apache.spark.sql.catalyst.plans.logical.LogicalPlan>,CreateTempTableUsingAsSelect>

spark-sql_2.10-1.3.0.jar, DataFrame.class
package org.apache.spark.sql
DataFrame.agg ( java.util.Map<String,String> exprs )  :  DataFrame
DataFrame.agg ( Column expr, Column... exprs )  :  DataFrame
DataFrame.agg ( Column expr, scala.collection.Seq<Column> exprs )  :  DataFrame
DataFrame.agg ( scala.collection.immutable.Map<String,String> exprs )  :  DataFrame
DataFrame.agg ( scala.Tuple2<String,String> aggExpr, scala.collection.Seq<scala.Tuple2<String,String>> aggExprs )  :  DataFrame
DataFrame.apply ( String colName )  :  Column
DataFrame.as ( scala.Symbol alias )  :  DataFrame
DataFrame.as ( String alias )  :  DataFrame
DataFrame.cache ( )  :  DataFrame
DataFrame.cache ( )  :  RDDApi
DataFrame.col ( String colName )  :  Column
DataFrame.collect ( )  :  Object
DataFrame.collect ( )  :  Row[ ]
DataFrame.collectAsList ( )  :  java.util.List<Row>
DataFrame.columns ( )  :  String[ ]
DataFrame.count ( )  :  long
DataFrame.createJDBCTable ( String url, String table, boolean allowExisting )  :  void
DataFrame.DataFrame ( SQLContext sqlContext, catalyst.plans.logical.LogicalPlan logicalPlan )
DataFrame.DataFrame ( SQLContext sqlContext, SQLContext.QueryExecution queryExecution )
DataFrame.distinct ( )  :  DataFrame
DataFrame.dtypes ( )  :  scala.Tuple2<String,String>[ ]
DataFrame.except ( DataFrame other )  :  DataFrame
DataFrame.explain ( )  :  void
DataFrame.explain ( boolean extended )  :  void
DataFrame.explode ( scala.collection.Seq<Column> input, scala.Function1<Row,scala.collection.TraversableOnce<A>> f, scala.reflect.api.TypeTags.TypeTag<A> p3 )  :  DataFrame
DataFrame.explode ( String inputColumn, String outputColumn, scala.Function1<A,scala.collection.TraversableOnce<B>> f, scala.reflect.api.TypeTags.TypeTag<B> p4 )  :  DataFrame
DataFrame.filter ( Column condition )  :  DataFrame
DataFrame.filter ( String conditionExpr )  :  DataFrame
DataFrame.first ( )  :  Object
DataFrame.first ( )  :  Row
DataFrame.flatMap ( scala.Function1<Row,scala.collection.TraversableOnce<R>> f, scala.reflect.ClassTag<R> p2 )  :  org.apache.spark.rdd.RDD<R>
DataFrame.foreach ( scala.Function1<Row,scala.runtime.BoxedUnit> f )  :  void
DataFrame.foreachPartition ( scala.Function1<scala.collection.Iterator<Row>,scala.runtime.BoxedUnit> f )  :  void
DataFrame.groupBy ( Column... cols )  :  GroupedData
DataFrame.groupBy ( scala.collection.Seq<Column> cols )  :  GroupedData
DataFrame.groupBy ( String col1, scala.collection.Seq<String> cols )  :  GroupedData
DataFrame.groupBy ( String col1, String... cols )  :  GroupedData
DataFrame.head ( )  :  Row
DataFrame.head ( int n )  :  Row[ ]
DataFrame.insertInto ( String tableName )  :  void
DataFrame.insertInto ( String tableName, boolean overwrite )  :  void
DataFrame.insertIntoJDBC ( String url, String table, boolean overwrite )  :  void
DataFrame.intersect ( DataFrame other )  :  DataFrame
DataFrame.isLocal ( )  :  boolean
DataFrame.javaRDD ( )  :  org.apache.spark.api.java.JavaRDD<Row>
DataFrame.javaToPython ( )  :  org.apache.spark.api.java.JavaRDD<byte[ ]>
DataFrame.join ( DataFrame right )  :  DataFrame
DataFrame.join ( DataFrame right, Column joinExprs )  :  DataFrame
DataFrame.join ( DataFrame right, Column joinExprs, String joinType )  :  DataFrame
DataFrame.limit ( int n )  :  DataFrame
DataFrame.logicalPlan ( )  :  catalyst.plans.logical.LogicalPlan
DataFrame.map ( scala.Function1<Row,R> f, scala.reflect.ClassTag<R> p2 )  :  org.apache.spark.rdd.RDD<R>
DataFrame.mapPartitions ( scala.Function1<scala.collection.Iterator<Row>,scala.collection.Iterator<R>> f, scala.reflect.ClassTag<R> p2 )  :  org.apache.spark.rdd.RDD<R>
DataFrame.numericColumns ( )  :  scala.collection.Seq<catalyst.expressions.Expression>
DataFrame.orderBy ( Column... sortExprs )  :  DataFrame
DataFrame.orderBy ( scala.collection.Seq<Column> sortExprs )  :  DataFrame
DataFrame.orderBy ( String sortCol, scala.collection.Seq<String> sortCols )  :  DataFrame
DataFrame.orderBy ( String sortCol, String... sortCols )  :  DataFrame
DataFrame.persist ( )  :  DataFrame
DataFrame.persist ( )  :  RDDApi
DataFrame.persist ( org.apache.spark.storage.StorageLevel newLevel )  :  DataFrame
DataFrame.persist ( org.apache.spark.storage.StorageLevel newLevel )  :  RDDApi
DataFrame.printSchema ( )  :  void
DataFrame.queryExecution ( )  :  SQLContext.QueryExecution
DataFrame.rdd ( )  :  org.apache.spark.rdd.RDD<Row>
DataFrame.registerTempTable ( String tableName )  :  void
DataFrame.repartition ( int numPartitions )  :  DataFrame
DataFrame.resolve ( String colName )  :  catalyst.expressions.NamedExpression
DataFrame.sample ( boolean withReplacement, double fraction )  :  DataFrame
DataFrame.sample ( boolean withReplacement, double fraction, long seed )  :  DataFrame
DataFrame.save ( String path )  :  void
DataFrame.save ( String path, SaveMode mode )  :  void
DataFrame.save ( String path, String source )  :  void
DataFrame.save ( String path, String source, SaveMode mode )  :  void
DataFrame.save ( String source, SaveMode mode, java.util.Map<String,String> options )  :  void
DataFrame.save ( String source, SaveMode mode, scala.collection.immutable.Map<String,String> options )  :  void
DataFrame.saveAsParquetFile ( String path )  :  void
DataFrame.saveAsTable ( String tableName )  :  void
DataFrame.saveAsTable ( String tableName, SaveMode mode )  :  void
DataFrame.saveAsTable ( String tableName, String source )  :  void
DataFrame.saveAsTable ( String tableName, String source, SaveMode mode )  :  void
DataFrame.saveAsTable ( String tableName, String source, SaveMode mode, java.util.Map<String,String> options )  :  void
DataFrame.saveAsTable ( String tableName, String source, SaveMode mode, scala.collection.immutable.Map<String,String> options )  :  void
DataFrame.schema ( )  :  types.StructType
DataFrame.select ( Column... cols )  :  DataFrame
DataFrame.select ( scala.collection.Seq<Column> cols )  :  DataFrame
DataFrame.select ( String col, scala.collection.Seq<String> cols )  :  DataFrame
DataFrame.select ( String col, String... cols )  :  DataFrame
DataFrame.selectExpr ( scala.collection.Seq<String> exprs )  :  DataFrame
DataFrame.selectExpr ( String... exprs )  :  DataFrame
DataFrame.show ( )  :  void
DataFrame.show ( int numRows )  :  void
DataFrame.showString ( int numRows )  :  String
DataFrame.sort ( Column... sortExprs )  :  DataFrame
DataFrame.sort ( scala.collection.Seq<Column> sortExprs )  :  DataFrame
DataFrame.sort ( String sortCol, scala.collection.Seq<String> sortCols )  :  DataFrame
DataFrame.sort ( String sortCol, String... sortCols )  :  DataFrame
DataFrame.sqlContext ( )  :  SQLContext
DataFrame.take ( int n )  :  Object
DataFrame.take ( int n )  :  Row[ ]
DataFrame.toDF ( )  :  DataFrame
DataFrame.toDF ( scala.collection.Seq<String> colNames )  :  DataFrame
DataFrame.toDF ( String... colNames )  :  DataFrame
DataFrame.toJavaRDD ( )  :  org.apache.spark.api.java.JavaRDD<Row>
DataFrame.toJSON ( )  :  org.apache.spark.rdd.RDD<String>
DataFrame.toString ( )  :  String
DataFrame.unionAll ( DataFrame other )  :  DataFrame
DataFrame.unpersist ( )  :  DataFrame
DataFrame.unpersist ( )  :  RDDApi
DataFrame.unpersist ( boolean blocking )  :  DataFrame
DataFrame.unpersist ( boolean blocking )  :  RDDApi
DataFrame.where ( Column condition )  :  DataFrame
DataFrame.withColumn ( String colName, Column col )  :  DataFrame
DataFrame.withColumnRenamed ( String existingName, String newName )  :  DataFrame

spark-sql_2.10-1.3.0.jar, DataFrameHolder.class
package org.apache.spark.sql
DataFrameHolder.andThen ( scala.Function1<DataFrameHolder,A> p1 ) [static]  :  scala.Function1<DataFrame,A>
DataFrameHolder.canEqual ( Object p1 )  :  boolean
DataFrameHolder.compose ( scala.Function1<A,DataFrame> p1 ) [static]  :  scala.Function1<A,DataFrameHolder>
DataFrameHolder.copy ( DataFrame df )  :  DataFrameHolder
DataFrameHolder.DataFrameHolder ( DataFrame df )
DataFrameHolder.df ( )  :  DataFrame
DataFrameHolder.equals ( Object p1 )  :  boolean
DataFrameHolder.hashCode ( )  :  int
DataFrameHolder.productArity ( )  :  int
DataFrameHolder.productElement ( int p1 )  :  Object
DataFrameHolder.productIterator ( )  :  scala.collection.Iterator<Object>
DataFrameHolder.productPrefix ( )  :  String
DataFrameHolder.toDF ( )  :  DataFrame
DataFrameHolder.toDF ( scala.collection.Seq<String> colNames )  :  DataFrame
DataFrameHolder.toString ( )  :  String
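
The DataFrame and DataFrameHolder entries above cover the public DataFrame API that first appeared in 1.3.0; none of these members exist in 1.0.0, which is why a client compiled against 1.3.0 cannot link against the older jar. A minimal Scala sketch of such a client follows. It is illustrative only: the application name, the Person case class and the sample data are assumptions, not taken from this report.

  import org.apache.spark.{SparkConf, SparkContext}
  import org.apache.spark.sql.SQLContext

  case class Person(name: String, age: Int)

  object CompatSketch {
    def main(args: Array[String]): Unit = {
      val sc = new SparkContext(new SparkConf().setAppName("compat-sketch").setMaster("local[*]"))
      val sqlContext = new SQLContext(sc)
      import sqlContext.implicits._            // 1.3.0-only implicits (RDD -> DataFrameHolder)

      // DataFrameHolder.toDF() from the listing above
      val people = sc.parallelize(Seq(Person("Ann", 34), Person("Bo", 15))).toDF()

      val adults = people
        .filter("age >= 18")                   // DataFrame.filter(String)
        .select("name", "age")                 // DataFrame.select(String, String...)
        .orderBy("age")                        // DataFrame.orderBy(String, String...)

      adults.show(20)                          // DataFrame.show(int)
      adults.registerTempTable("adults")       // DataFrame.registerTempTable(String)
      sc.stop()
    }
  }

Every call above maps to a method listed in this section, so the same client jar fails to link when the 1.0.0 artifact is on the classpath.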

spark-sql_2.10-1.3.0.jar, DateColumnAccessor.class
package org.apache.spark.sql.columnar
DateColumnAccessor.DateColumnAccessor ( java.nio.ByteBuffer buffer )

spark-sql_2.10-1.3.0.jar, DateColumnBuilder.class
package org.apache.spark.sql.columnar
DateColumnBuilder.DateColumnBuilder ( )

spark-sql_2.10-1.3.0.jar, DateColumnStats.class
package org.apache.spark.sql.columnar
DateColumnStats.DateColumnStats ( )

spark-sql_2.10-1.3.0.jar, DDLParser.class
package org.apache.spark.sql.sources
DDLParser.apply ( String input, boolean exceptionOnError )  :  scala.Option<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan>
DDLParser.DDLParser ( scala.Function1<String,org.apache.spark.sql.catalyst.plans.logical.LogicalPlan> parseQuery )

spark-sql_2.10-1.3.0.jar, Decoder<T>.class
package org.apache.spark.sql.columnar.compression
Decoder<T>.hasNext ( ) [abstract]  :  boolean
Decoder<T>.next ( org.apache.spark.sql.catalyst.expressions.MutableRow p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.3.0.jar, DescribeCommand.class
package org.apache.spark.sql.execution
DescribeCommand.canEqual ( Object p1 )  :  boolean
DescribeCommand.child ( )  :  SparkPlan
DescribeCommand.copy ( SparkPlan child, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, boolean isExtended )  :  DescribeCommand
DescribeCommand.curried ( ) [static]  :  scala.Function1<SparkPlan,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<Object,DescribeCommand>>>
DescribeCommand.DescribeCommand ( SparkPlan child, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, boolean isExtended )
DescribeCommand.equals ( Object p1 )  :  boolean
DescribeCommand.hashCode ( )  :  int
DescribeCommand.isExtended ( )  :  boolean
DescribeCommand.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
DescribeCommand.productArity ( )  :  int
DescribeCommand.productElement ( int p1 )  :  Object
DescribeCommand.productIterator ( )  :  scala.collection.Iterator<Object>
DescribeCommand.productPrefix ( )  :  String
DescribeCommand.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
DescribeCommand.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<SparkPlan,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,Object>,DescribeCommand>
package org.apache.spark.sql.sources
DescribeCommand.canEqual ( Object p1 )  :  boolean
DescribeCommand.copy ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan table, boolean isExtended )  :  DescribeCommand
DescribeCommand.curried ( ) [static]  :  scala.Function1<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,scala.Function1<Object,DescribeCommand>>
DescribeCommand.DescribeCommand ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan table, boolean isExtended )
DescribeCommand.equals ( Object p1 )  :  boolean
DescribeCommand.hashCode ( )  :  int
DescribeCommand.isExtended ( )  :  boolean
DescribeCommand.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.AttributeReference>
DescribeCommand.productArity ( )  :  int
DescribeCommand.productElement ( int p1 )  :  Object
DescribeCommand.productIterator ( )  :  scala.collection.Iterator<Object>
DescribeCommand.productPrefix ( )  :  String
DescribeCommand.table ( )  :  org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
DescribeCommand.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,Object>,DescribeCommand>

spark-sql_2.10-1.3.0.jar, Distinct.class
package org.apache.spark.sql.execution
Distinct.canEqual ( Object p1 )  :  boolean
Distinct.child ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
Distinct.child ( )  :  SparkPlan
Distinct.children ( )  :  scala.collection.immutable.List<SparkPlan>
Distinct.children ( )  :  scala.collection.Seq
Distinct.copy ( boolean partial, SparkPlan child )  :  Distinct
Distinct.curried ( ) [static]  :  scala.Function1<Object,scala.Function1<SparkPlan,Distinct>>
Distinct.Distinct ( boolean partial, SparkPlan child )
Distinct.equals ( Object p1 )  :  boolean
Distinct.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
Distinct.hashCode ( )  :  int
Distinct.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
Distinct.outputPartitioning ( )  :  org.apache.spark.sql.catalyst.plans.physical.Partitioning
Distinct.partial ( )  :  boolean
Distinct.productArity ( )  :  int
Distinct.productElement ( int p1 )  :  Object
Distinct.productIterator ( )  :  scala.collection.Iterator<Object>
Distinct.productPrefix ( )  :  String
Distinct.requiredChildDistribution ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.plans.physical.Distribution>
Distinct.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<Object,SparkPlan>,Distinct>

spark-sql_2.10-1.3.0.jar, DriverQuirks.class
package org.apache.spark.sql.jdbc
DriverQuirks.DriverQuirks ( )
DriverQuirks.get ( String p1 ) [static]  :  DriverQuirks
DriverQuirks.getCatalystType ( int p1, String p2, int p3, org.apache.spark.sql.types.MetadataBuilder p4 ) [abstract]  :  org.apache.spark.sql.types.DataType
DriverQuirks.getJDBCType ( org.apache.spark.sql.types.DataType p1 ) [abstract]  :  scala.Tuple2<String,scala.Option<Object>>

spark-sql_2.10-1.3.0.jar, Encoder<T>.class
package org.apache.spark.sql.columnar.compression
Encoder<T>.compress ( java.nio.ByteBuffer p1, java.nio.ByteBuffer p2 ) [abstract]  :  java.nio.ByteBuffer
Encoder<T>.gatherCompressibilityStats ( org.apache.spark.sql.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.3.0.jar, EqualTo.class
package org.apache.spark.sql.sources
EqualTo.attribute ( )  :  String
EqualTo.canEqual ( Object p1 )  :  boolean
EqualTo.copy ( String attribute, Object value )  :  EqualTo
EqualTo.curried ( ) [static]  :  scala.Function1<String,scala.Function1<Object,EqualTo>>
EqualTo.equals ( Object p1 )  :  boolean
EqualTo.EqualTo ( String attribute, Object value )
EqualTo.hashCode ( )  :  int
EqualTo.productArity ( )  :  int
EqualTo.productElement ( int p1 )  :  Object
EqualTo.productIterator ( )  :  scala.collection.Iterator<Object>
EqualTo.productPrefix ( )  :  String
EqualTo.toString ( )  :  String
EqualTo.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<String,Object>,EqualTo>
EqualTo.value ( )  :  Object

spark-sql_2.10-1.3.0.jar, EvaluatePython.class
package org.apache.spark.sql.execution
EvaluatePython.canEqual ( Object p1 )  :  boolean
EvaluatePython.child ( )  :  org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
EvaluatePython.child ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
EvaluatePython.copy ( PythonUDF udf, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan child, org.apache.spark.sql.catalyst.expressions.AttributeReference resultAttribute )  :  EvaluatePython
EvaluatePython.equals ( Object p1 )  :  boolean
EvaluatePython.EvaluatePython ( PythonUDF udf, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan child, org.apache.spark.sql.catalyst.expressions.AttributeReference resultAttribute )
EvaluatePython.fromJava ( Object p1, org.apache.spark.sql.types.DataType p2 ) [static]  :  Object
EvaluatePython.hashCode ( )  :  int
EvaluatePython.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
EvaluatePython.productArity ( )  :  int
EvaluatePython.productElement ( int p1 )  :  Object
EvaluatePython.productIterator ( )  :  scala.collection.Iterator<Object>
EvaluatePython.productPrefix ( )  :  String
EvaluatePython.references ( )  :  org.apache.spark.sql.catalyst.expressions.AttributeSet
EvaluatePython.resultAttribute ( )  :  org.apache.spark.sql.catalyst.expressions.AttributeReference
EvaluatePython.rowToArray ( org.apache.spark.sql.Row p1, scala.collection.Seq<org.apache.spark.sql.types.DataType> p2 ) [static]  :  Object[ ]
EvaluatePython.toJava ( Object p1, org.apache.spark.sql.types.DataType p2 ) [static]  :  Object
EvaluatePython.udf ( )  :  PythonUDF

spark-sql_2.10-1.3.0.jar, ExamplePoint.class
package org.apache.spark.sql.test
ExamplePoint.ExamplePoint ( double x, double y )
ExamplePoint.x ( )  :  double
ExamplePoint.y ( )  :  double

spark-sql_2.10-1.3.0.jar, Except.class
package org.apache.spark.sql.execution
Except.canEqual ( Object p1 )  :  boolean
Except.children ( )  :  scala.collection.Seq<SparkPlan>
Except.copy ( SparkPlan left, SparkPlan right )  :  Except
Except.curried ( ) [static]  :  scala.Function1<SparkPlan,scala.Function1<SparkPlan,Except>>
Except.equals ( Object p1 )  :  boolean
Except.Except ( SparkPlan left, SparkPlan right )
Except.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
Except.hashCode ( )  :  int
Except.left ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
Except.left ( )  :  SparkPlan
Except.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
Except.productArity ( )  :  int
Except.productElement ( int p1 )  :  Object
Except.productIterator ( )  :  scala.collection.Iterator<Object>
Except.productPrefix ( )  :  String
Except.right ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
Except.right ( )  :  SparkPlan
Except.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<SparkPlan,SparkPlan>,Except>

spark-sql_2.10-1.3.0.jar, Exchange.class
package org.apache.spark.sql.execution
Exchange.Exchange..bypassMergeThreshold ( )  :  int
Exchange.sortBasedShuffleOn ( )  :  boolean

spark-sql_2.10-1.3.0.jar, ExecutedCommand.class
package org.apache.spark.sql.execution
ExecutedCommand.andThen ( scala.Function1<ExecutedCommand,A> p1 ) [static]  :  scala.Function1<RunnableCommand,A>
ExecutedCommand.canEqual ( Object p1 )  :  boolean
ExecutedCommand.children ( )  :  scala.collection.immutable.Nil.
ExecutedCommand.children ( )  :  scala.collection.Seq
ExecutedCommand.cmd ( )  :  RunnableCommand
ExecutedCommand.compose ( scala.Function1<A,RunnableCommand> p1 ) [static]  :  scala.Function1<A,ExecutedCommand>
ExecutedCommand.copy ( RunnableCommand cmd )  :  ExecutedCommand
ExecutedCommand.equals ( Object p1 )  :  boolean
ExecutedCommand.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
ExecutedCommand.executeCollect ( )  :  org.apache.spark.sql.Row[ ]
ExecutedCommand.ExecutedCommand ( RunnableCommand cmd )
ExecutedCommand.executeTake ( int limit )  :  org.apache.spark.sql.Row[ ]
ExecutedCommand.hashCode ( )  :  int
ExecutedCommand.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
ExecutedCommand.productArity ( )  :  int
ExecutedCommand.productElement ( int p1 )  :  Object
ExecutedCommand.productIterator ( )  :  scala.collection.Iterator<Object>
ExecutedCommand.productPrefix ( )  :  String
ExecutedCommand.sideEffectResult ( )  :  scala.collection.Seq<org.apache.spark.sql.Row>

spark-sql_2.10-1.3.0.jar, Expand.class
package org.apache.spark.sql.execution
Expand.canEqual ( Object p1 )  :  boolean
Expand.child ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
Expand.child ( )  :  SparkPlan
Expand.children ( )  :  scala.collection.immutable.List<SparkPlan>
Expand.children ( )  :  scala.collection.Seq
Expand.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.GroupExpression> projections, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, SparkPlan child )  :  Expand
Expand.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.GroupExpression>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<SparkPlan,Expand>>>
Expand.equals ( Object p1 )  :  boolean
Expand.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
Expand.Expand ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.GroupExpression> projections, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, SparkPlan child )
Expand.hashCode ( )  :  int
Expand.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
Expand.outputPartitioning ( )  :  org.apache.spark.sql.catalyst.plans.physical.Partitioning
Expand.productArity ( )  :  int
Expand.productElement ( int p1 )  :  Object
Expand.productIterator ( )  :  scala.collection.Iterator<Object>
Expand.productPrefix ( )  :  String
Expand.projections ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.GroupExpression>
Expand.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.GroupExpression>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,SparkPlan>,Expand>

spark-sql_2.10-1.3.0.jar, ExperimentalMethods.class
package org.apache.spark.sql
ExperimentalMethods.ExperimentalMethods ( SQLContext sqlContext )
ExperimentalMethods.extraStrategies ( )  :  scala.collection.Seq<catalyst.planning.GenericStrategy<execution.SparkPlan>>

spark-sql_2.10-1.3.0.jar, ExplainCommand.class
package org.apache.spark.sql.execution
ExplainCommand.canEqual ( Object p1 )  :  boolean
ExplainCommand.copy ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan logicalPlan, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, boolean extended )  :  ExplainCommand
ExplainCommand.curried ( ) [static]  :  scala.Function1<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<Object,ExplainCommand>>>
ExplainCommand.equals ( Object p1 )  :  boolean
ExplainCommand.ExplainCommand ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan logicalPlan, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, boolean extended )
ExplainCommand.extended ( )  :  boolean
ExplainCommand.hashCode ( )  :  int
ExplainCommand.logicalPlan ( )  :  org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
ExplainCommand.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
ExplainCommand.productArity ( )  :  int
ExplainCommand.productElement ( int p1 )  :  Object
ExplainCommand.productIterator ( )  :  scala.collection.Iterator<Object>
ExplainCommand.productPrefix ( )  :  String
ExplainCommand.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
ExplainCommand.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,Object>,ExplainCommand>

spark-sql_2.10-1.3.0.jar, ExternalSort.class
package org.apache.spark.sql.execution
ExternalSort.canEqual ( Object p1 )  :  boolean
ExternalSort.child ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
ExternalSort.child ( )  :  SparkPlan
ExternalSort.children ( )  :  scala.collection.immutable.List<SparkPlan>
ExternalSort.children ( )  :  scala.collection.Seq
ExternalSort.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> sortOrder, boolean global, SparkPlan child )  :  ExternalSort
ExternalSort.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder>,scala.Function1<Object,scala.Function1<SparkPlan,ExternalSort>>>
ExternalSort.equals ( Object p1 )  :  boolean
ExternalSort.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
ExternalSort.ExternalSort ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> sortOrder, boolean global, SparkPlan child )
ExternalSort.global ( )  :  boolean
ExternalSort.hashCode ( )  :  int
ExternalSort.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
ExternalSort.outputPartitioning ( )  :  org.apache.spark.sql.catalyst.plans.physical.Partitioning
ExternalSort.productArity ( )  :  int
ExternalSort.productElement ( int p1 )  :  Object
ExternalSort.productIterator ( )  :  scala.collection.Iterator<Object>
ExternalSort.productPrefix ( )  :  String
ExternalSort.requiredChildDistribution ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.plans.physical.Distribution>
ExternalSort.sortOrder ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder>
ExternalSort.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder>,Object,SparkPlan>,ExternalSort>

spark-sql_2.10-1.3.0.jar, Filter.class
package org.apache.spark.sql.execution
Filter.conditionEvaluator ( )  :  scala.Function1<org.apache.spark.sql.Row,Object>
package org.apache.spark.sql.sources
Filter.Filter ( )
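
The sources.Filter base class above and its concrete case classes listed in this section (EqualTo earlier; GreaterThan, GreaterThanOrEqual, In, IsNull, IsNotNull, LessThan, LessThanOrEqual below) are what a data source receives for predicate push-down, e.g. through JDBCRelation.buildScan(String[], Filter[]) further down. A hedged sketch of the usual pattern match, in the spirit of the JDBCRDD..compileFilter entry below; the FilterCompiler object and its quote helper are hypothetical names, not part of the report:

  import org.apache.spark.sql.sources._

  object FilterCompiler {
    // Render a pushed-down value as a SQL literal (illustrative only).
    def quote(v: Any): String = v match {
      case s: String => "'" + s.replace("'", "''") + "'"
      case other     => String.valueOf(other)
    }

    // Translate a pushed-down filter into a WHERE fragment; None means the
    // filter is not handled here and Spark re-evaluates it itself.
    def compileFilter(f: Filter): Option[String] = f match {
      case EqualTo(attr, v)            => Some(s"$attr = ${quote(v)}")
      case GreaterThan(attr, v)        => Some(s"$attr > ${quote(v)}")
      case GreaterThanOrEqual(attr, v) => Some(s"$attr >= ${quote(v)}")
      case LessThan(attr, v)           => Some(s"$attr < ${quote(v)}")
      case LessThanOrEqual(attr, v)    => Some(s"$attr <= ${quote(v)}")
      case IsNull(attr)                => Some(s"$attr IS NULL")
      case IsNotNull(attr)             => Some(s"$attr IS NOT NULL")
      case In(attr, values)            => Some(values.map(quote(_)).mkString(s"$attr IN (", ", ", ")"))
      case _                           => None
    }
  }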

spark-sql_2.10-1.3.0.jar, GeneralHashedRelation.class
package org.apache.spark.sql.execution.joins
GeneralHashedRelation.GeneralHashedRelation ( java.util.HashMap<org.apache.spark.sql.Row,org.apache.spark.util.collection.CompactBuffer<org.apache.spark.sql.Row>> hashTable )

spark-sql_2.10-1.3.0.jar, Generate.class
package org.apache.spark.sql.execution
Generate.boundGenerator ( )  :  org.apache.spark.sql.catalyst.expressions.Generator
Generate.generatorOutput ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>

spark-sql_2.10-1.3.0.jar, GeneratedAggregate.class
package org.apache.spark.sql.execution
GeneratedAggregate.aggregateExpressions ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.NamedExpression>
GeneratedAggregate.canEqual ( Object p1 )  :  boolean
GeneratedAggregate.child ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
GeneratedAggregate.child ( )  :  SparkPlan
GeneratedAggregate.children ( )  :  scala.collection.immutable.List<SparkPlan>
GeneratedAggregate.children ( )  :  scala.collection.Seq
GeneratedAggregate.copy ( boolean partial, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> groupingExpressions, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.NamedExpression> aggregateExpressions, SparkPlan child )  :  GeneratedAggregate
GeneratedAggregate.curried ( ) [static]  :  scala.Function1<Object,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.NamedExpression>,scala.Function1<SparkPlan,GeneratedAggregate>>>>
GeneratedAggregate.equals ( Object p1 )  :  boolean
GeneratedAggregate.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
GeneratedAggregate.GeneratedAggregate ( boolean partial, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> groupingExpressions, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.NamedExpression> aggregateExpressions, SparkPlan child )
GeneratedAggregate.groupingExpressions ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
GeneratedAggregate.hashCode ( )  :  int
GeneratedAggregate.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
GeneratedAggregate.outputPartitioning ( )  :  org.apache.spark.sql.catalyst.plans.physical.Partitioning
GeneratedAggregate.partial ( )  :  boolean
GeneratedAggregate.productArity ( )  :  int
GeneratedAggregate.productElement ( int p1 )  :  Object
GeneratedAggregate.productIterator ( )  :  scala.collection.Iterator<Object>
GeneratedAggregate.productPrefix ( )  :  String
GeneratedAggregate.requiredChildDistribution ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.plans.physical.Distribution>
GeneratedAggregate.tupled ( ) [static]  :  scala.Function1<scala.Tuple4<Object,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.NamedExpression>,SparkPlan>,GeneratedAggregate>

spark-sql_2.10-1.3.0.jar, GenericColumnStats.class
package org.apache.spark.sql.columnar
GenericColumnStats.GenericColumnStats ( )

spark-sql_2.10-1.3.0.jar, GreaterThan.class
package org.apache.spark.sql.sources
GreaterThan.attribute ( )  :  String
GreaterThan.canEqual ( Object p1 )  :  boolean
GreaterThan.copy ( String attribute, Object value )  :  GreaterThan
GreaterThan.curried ( ) [static]  :  scala.Function1<String,scala.Function1<Object,GreaterThan>>
GreaterThan.equals ( Object p1 )  :  boolean
GreaterThan.GreaterThan ( String attribute, Object value )
GreaterThan.hashCode ( )  :  int
GreaterThan.productArity ( )  :  int
GreaterThan.productElement ( int p1 )  :  Object
GreaterThan.productIterator ( )  :  scala.collection.Iterator<Object>
GreaterThan.productPrefix ( )  :  String
GreaterThan.toString ( )  :  String
GreaterThan.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<String,Object>,GreaterThan>
GreaterThan.value ( )  :  Object

spark-sql_2.10-1.3.0.jar, GreaterThanOrEqual.class
package org.apache.spark.sql.sources
GreaterThanOrEqual.attribute ( )  :  String
GreaterThanOrEqual.canEqual ( Object p1 )  :  boolean
GreaterThanOrEqual.copy ( String attribute, Object value )  :  GreaterThanOrEqual
GreaterThanOrEqual.curried ( ) [static]  :  scala.Function1<String,scala.Function1<Object,GreaterThanOrEqual>>
GreaterThanOrEqual.equals ( Object p1 )  :  boolean
GreaterThanOrEqual.GreaterThanOrEqual ( String attribute, Object value )
GreaterThanOrEqual.hashCode ( )  :  int
GreaterThanOrEqual.productArity ( )  :  int
GreaterThanOrEqual.productElement ( int p1 )  :  Object
GreaterThanOrEqual.productIterator ( )  :  scala.collection.Iterator<Object>
GreaterThanOrEqual.productPrefix ( )  :  String
GreaterThanOrEqual.toString ( )  :  String
GreaterThanOrEqual.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<String,Object>,GreaterThanOrEqual>
GreaterThanOrEqual.value ( )  :  Object

spark-sql_2.10-1.3.0.jar, GroupedData.class
package org.apache.spark.sql
GroupedData.agg ( java.util.Map<String,String> exprs )  :  DataFrame
GroupedData.agg ( Column expr, scala.collection.Seq<Column> exprs )  :  DataFrame
GroupedData.agg ( scala.collection.immutable.Map<String,String> exprs )  :  DataFrame
GroupedData.agg ( scala.Tuple2<String,String> aggExpr, scala.collection.Seq<scala.Tuple2<String,String>> aggExprs )  :  DataFrame
GroupedData.count ( )  :  DataFrame
GroupedData.GroupedData ( DataFrame df, scala.collection.Seq<catalyst.expressions.Expression> groupingExprs )
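
Continuing the earlier sketch, the removed grouping API is reached from DataFrame.groupBy (listed earlier in this section) and then the GroupedData methods above; the column and aggregate names here are assumptions for illustration.

  val byAge  = people.groupBy("age")              // DataFrame.groupBy(String, String...)
  val counts = byAge.count()                      // GroupedData.count()
  val oldest = byAge.agg(Map("age" -> "max"))     // GroupedData.agg(immutable.Map[String, String])
  oldest.show()                                   // DataFrame.show()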

spark-sql_2.10-1.3.0.jar, HashedRelation.class
package org.apache.spark.sql.execution.joins
HashedRelation.get ( org.apache.spark.sql.Row p1 ) [abstract]  :  org.apache.spark.util.collection.CompactBuffer<org.apache.spark.sql.Row>

spark-sql_2.10-1.3.0.jar, HashJoin.class
package org.apache.spark.sql.execution.joins
HashJoin.buildKeys ( ) [abstract]  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
HashJoin.buildPlan ( ) [abstract]  :  org.apache.spark.sql.execution.SparkPlan
HashJoin.buildSide ( ) [abstract]  :  package.BuildSide
HashJoin.buildSideKeyGenerator ( ) [abstract]  :  org.apache.spark.sql.catalyst.expressions.package.Projection
HashJoin.hashJoin ( scala.collection.Iterator<org.apache.spark.sql.Row> p1, HashedRelation p2 ) [abstract]  :  scala.collection.Iterator<org.apache.spark.sql.Row>
HashJoin.left ( ) [abstract]  :  org.apache.spark.sql.execution.SparkPlan
HashJoin.leftKeys ( ) [abstract]  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
HashJoin.output ( ) [abstract]  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
HashJoin.right ( ) [abstract]  :  org.apache.spark.sql.execution.SparkPlan
HashJoin.rightKeys ( ) [abstract]  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
HashJoin.streamedKeys ( ) [abstract]  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
HashJoin.streamedPlan ( ) [abstract]  :  org.apache.spark.sql.execution.SparkPlan
HashJoin.streamSideKeyGenerator ( ) [abstract]  :  scala.Function0<org.apache.spark.sql.catalyst.expressions.package.MutableProjection>

spark-sql_2.10-1.3.0.jar, HashOuterJoin.class
package org.apache.spark.sql.execution.joins
HashOuterJoin.canEqual ( Object p1 )  :  boolean
HashOuterJoin.children ( )  :  scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan>
HashOuterJoin.condition ( )  :  scala.Option<org.apache.spark.sql.catalyst.expressions.Expression>
HashOuterJoin.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, org.apache.spark.sql.catalyst.plans.JoinType joinType, scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> condition, org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )  :  HashOuterJoin
HashOuterJoin.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<org.apache.spark.sql.catalyst.plans.JoinType,scala.Function1<scala.Option<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<org.apache.spark.sql.execution.SparkPlan,scala.Function1<org.apache.spark.sql.execution.SparkPlan,HashOuterJoin>>>>>>
HashOuterJoin.equals ( Object p1 )  :  boolean
HashOuterJoin.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
HashOuterJoin.hashCode ( )  :  int
HashOuterJoin.HashOuterJoin ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, org.apache.spark.sql.catalyst.plans.JoinType joinType, scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> condition, org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )
HashOuterJoin.joinType ( )  :  org.apache.spark.sql.catalyst.plans.JoinType
HashOuterJoin.left ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
HashOuterJoin.left ( )  :  org.apache.spark.sql.execution.SparkPlan
HashOuterJoin.leftKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
HashOuterJoin.HashOuterJoin..boundCondition ( )  :  scala.Function1<org.apache.spark.sql.Row,Object>
HashOuterJoin.HashOuterJoin..buildHashTable ( scala.collection.Iterator<org.apache.spark.sql.Row> iter, org.apache.spark.sql.catalyst.expressions.package.Projection keyGenerator )  :  java.util.HashMap<org.apache.spark.sql.Row,org.apache.spark.util.collection.CompactBuffer<org.apache.spark.sql.Row>>
HashOuterJoin.HashOuterJoin..DUMMY_LIST ( )  :  scala.collection.Seq<org.apache.spark.sql.Row>
HashOuterJoin.HashOuterJoin..EMPTY_LIST ( )  :  scala.collection.Seq<org.apache.spark.sql.Row>
HashOuterJoin.HashOuterJoin..fullOuterIterator ( org.apache.spark.sql.Row key, scala.collection.Iterable<org.apache.spark.sql.Row> leftIter, scala.collection.Iterable<org.apache.spark.sql.Row> rightIter, org.apache.spark.sql.catalyst.expressions.JoinedRow joinedRow )  :  scala.collection.Iterator<org.apache.spark.sql.Row>
HashOuterJoin.HashOuterJoin..leftNullRow ( )  :  org.apache.spark.sql.catalyst.expressions.GenericRow
HashOuterJoin.HashOuterJoin..leftOuterIterator ( org.apache.spark.sql.Row key, org.apache.spark.sql.catalyst.expressions.JoinedRow joinedRow, scala.collection.Iterable<org.apache.spark.sql.Row> rightIter )  :  scala.collection.Iterator<org.apache.spark.sql.Row>
HashOuterJoin.HashOuterJoin..rightNullRow ( )  :  org.apache.spark.sql.catalyst.expressions.GenericRow
HashOuterJoin.HashOuterJoin..rightOuterIterator ( org.apache.spark.sql.Row key, scala.collection.Iterable<org.apache.spark.sql.Row> leftIter, org.apache.spark.sql.catalyst.expressions.JoinedRow joinedRow )  :  scala.collection.Iterator<org.apache.spark.sql.Row>
HashOuterJoin.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
HashOuterJoin.outputPartitioning ( )  :  org.apache.spark.sql.catalyst.plans.physical.Partitioning
HashOuterJoin.productArity ( )  :  int
HashOuterJoin.productElement ( int p1 )  :  Object
HashOuterJoin.productIterator ( )  :  scala.collection.Iterator<Object>
HashOuterJoin.productPrefix ( )  :  String
HashOuterJoin.requiredChildDistribution ( )  :  scala.collection.immutable.List<org.apache.spark.sql.catalyst.plans.physical.ClusteredDistribution>
HashOuterJoin.requiredChildDistribution ( )  :  scala.collection.Seq
HashOuterJoin.right ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
HashOuterJoin.right ( )  :  org.apache.spark.sql.execution.SparkPlan
HashOuterJoin.rightKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
HashOuterJoin.tupled ( ) [static]  :  scala.Function1<scala.Tuple6<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,org.apache.spark.sql.catalyst.plans.JoinType,scala.Option<org.apache.spark.sql.catalyst.expressions.Expression>,org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.execution.SparkPlan>,HashOuterJoin>

spark-sql_2.10-1.3.0.jar, In.class
package org.apache.spark.sql.sources
In.attribute ( )  :  String
In.canEqual ( Object p1 )  :  boolean
In.copy ( String attribute, Object[ ] values )  :  In
In.curried ( ) [static]  :  scala.Function1<String,scala.Function1<Object[ ],In>>
In.equals ( Object p1 )  :  boolean
In.hashCode ( )  :  int
In.In ( String attribute, Object[ ] values )
In.productArity ( )  :  int
In.productElement ( int p1 )  :  Object
In.productIterator ( )  :  scala.collection.Iterator<Object>
In.productPrefix ( )  :  String
In.toString ( )  :  String
In.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<String,Object[ ]>,In>
In.values ( )  :  Object[ ]

spark-sql_2.10-1.3.0.jar, InMemoryColumnarTableScan.class
package org.apache.spark.sql.columnar
InMemoryColumnarTableScan.buildFilter ( )  :  scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression>
InMemoryColumnarTableScan.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> predicates, InMemoryRelation relation )  :  InMemoryColumnarTableScan
InMemoryColumnarTableScan.InMemoryColumnarTableScan ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> predicates, InMemoryRelation relation )
InMemoryColumnarTableScan.InMemoryColumnarTableScan..inMemoryPartitionPruningEnabled ( )  :  boolean
InMemoryColumnarTableScan.InMemoryColumnarTableScan..statsFor ( org.apache.spark.sql.catalyst.expressions.Attribute a )  :  ColumnStatisticsSchema
InMemoryColumnarTableScan.partitionFilters ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
InMemoryColumnarTableScan.predicates ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
InMemoryColumnarTableScan.readBatches ( )  :  org.apache.spark.Accumulator<Object>
InMemoryColumnarTableScan.readPartitions ( )  :  org.apache.spark.Accumulator<Object>
InMemoryColumnarTableScan.relation ( )  :  InMemoryRelation

spark-sql_2.10-1.3.0.jar, InMemoryRelation.class
package org.apache.spark.sql.columnar
InMemoryRelation.batchSize ( )  :  int
InMemoryRelation.cachedColumnBuffers ( )  :  org.apache.spark.rdd.RDD<CachedBatch>
InMemoryRelation.canEqual ( Object p1 )  :  boolean
InMemoryRelation.child ( )  :  org.apache.spark.sql.execution.SparkPlan
InMemoryRelation.children ( )  :  scala.collection.Seq<scala.runtime.Nothing.>
InMemoryRelation.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, boolean useCompression, int batchSize, org.apache.spark.storage.StorageLevel storageLevel, org.apache.spark.sql.execution.SparkPlan child, scala.Option<String> tableName, org.apache.spark.rdd.RDD<CachedBatch> _cachedColumnBuffers, org.apache.spark.sql.catalyst.plans.logical.Statistics _statistics )  :  InMemoryRelation
InMemoryRelation.equals ( Object p1 )  :  boolean
InMemoryRelation.hashCode ( )  :  int
InMemoryRelation.InMemoryRelation ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, boolean useCompression, int batchSize, org.apache.spark.storage.StorageLevel storageLevel, org.apache.spark.sql.execution.SparkPlan child, scala.Option<String> tableName, org.apache.spark.rdd.RDD<CachedBatch> _cachedColumnBuffers, org.apache.spark.sql.catalyst.plans.logical.Statistics _statistics )
InMemoryRelation.newInstance ( )  :  org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
InMemoryRelation.newInstance ( )  :  InMemoryRelation
InMemoryRelation.InMemoryRelation..batchStats ( )  :  org.apache.spark.Accumulable<scala.collection.mutable.ArrayBuffer<org.apache.spark.sql.Row>,org.apache.spark.sql.Row>
InMemoryRelation.otherCopyArgs ( )  :  scala.collection.Seq<Object>
InMemoryRelation.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
InMemoryRelation.partitionStatistics ( )  :  PartitionStatistics
InMemoryRelation.productArity ( )  :  int
InMemoryRelation.productElement ( int p1 )  :  Object
InMemoryRelation.productIterator ( )  :  scala.collection.Iterator<Object>
InMemoryRelation.productPrefix ( )  :  String
InMemoryRelation.recache ( )  :  void
InMemoryRelation.statistics ( )  :  org.apache.spark.sql.catalyst.plans.logical.Statistics
InMemoryRelation.storageLevel ( )  :  org.apache.spark.storage.StorageLevel
InMemoryRelation.tableName ( )  :  scala.Option<String>
InMemoryRelation.useCompression ( )  :  boolean
InMemoryRelation.withOutput ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> newOutput )  :  InMemoryRelation

spark-sql_2.10-1.3.0.jar, InsertableRelation.class
package org.apache.spark.sql.sources
InsertableRelation.insert ( org.apache.spark.sql.DataFrame p1, boolean p2 ) [abstract]  :  void
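
InsertableRelation is the write-side contract a 1.3.0 data source implements (JSONRelation.insert below is one concrete example), and it is driven by the InsertIntoDataSource.run entry listed further down. A minimal illustrative shape follows, assuming BaseRelation from the same org.apache.spark.sql.sources package supplies sqlContext and schema; the class name and body are assumptions, not part of the report:

  import org.apache.spark.sql.{DataFrame, SQLContext}
  import org.apache.spark.sql.sources.{BaseRelation, InsertableRelation}
  import org.apache.spark.sql.types.StructType

  class LoggingRelation(val sqlContext: SQLContext, val schema: StructType)
    extends BaseRelation with InsertableRelation {

    // Invoked when a plan inserts into this relation; a real implementation
    // would persist `data`, this sketch only counts the rows.
    override def insert(data: DataFrame, overwrite: Boolean): Unit = {
      println(s"insert(overwrite=$overwrite): ${data.count()} rows")
    }
  }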

spark-sql_2.10-1.3.0.jar, InsertIntoDataSource.class
package org.apache.spark.sql.sources
InsertIntoDataSource.canEqual ( Object p1 )  :  boolean
InsertIntoDataSource.copy ( LogicalRelation logicalRelation, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan query, boolean overwrite )  :  InsertIntoDataSource
InsertIntoDataSource.curried ( ) [static]  :  scala.Function1<LogicalRelation,scala.Function1<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,scala.Function1<Object,InsertIntoDataSource>>>
InsertIntoDataSource.equals ( Object p1 )  :  boolean
InsertIntoDataSource.hashCode ( )  :  int
InsertIntoDataSource.InsertIntoDataSource ( LogicalRelation logicalRelation, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan query, boolean overwrite )
InsertIntoDataSource.logicalRelation ( )  :  LogicalRelation
InsertIntoDataSource.overwrite ( )  :  boolean
InsertIntoDataSource.productArity ( )  :  int
InsertIntoDataSource.productElement ( int p1 )  :  Object
InsertIntoDataSource.productIterator ( )  :  scala.collection.Iterator<Object>
InsertIntoDataSource.productPrefix ( )  :  String
InsertIntoDataSource.query ( )  :  org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
InsertIntoDataSource.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
InsertIntoDataSource.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<LogicalRelation,org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,Object>,InsertIntoDataSource>

spark-sql_2.10-1.3.0.jar, InsertIntoParquetTable.class
package org.apache.spark.sql.parquet
InsertIntoParquetTable.copy ( ParquetRelation relation, org.apache.spark.sql.execution.SparkPlan child, boolean overwrite )  :  InsertIntoParquetTable
InsertIntoParquetTable.curried ( ) [static]  :  scala.Function1<ParquetRelation,scala.Function1<org.apache.spark.sql.execution.SparkPlan,scala.Function1<Object,InsertIntoParquetTable>>>
InsertIntoParquetTable.InsertIntoParquetTable ( ParquetRelation relation, org.apache.spark.sql.execution.SparkPlan child, boolean overwrite )
InsertIntoParquetTable.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<ParquetRelation,org.apache.spark.sql.execution.SparkPlan,Object>,InsertIntoParquetTable>

spark-sql_2.10-1.3.0.jar, IntColumnStats.class
package org.apache.spark.sql.columnar
IntColumnStats.collectedStatistics ( )  :  org.apache.spark.sql.Row
IntColumnStats.count ( )  :  int
IntColumnStats.count_.eq ( int p1 )  :  void
IntColumnStats.gatherStats ( org.apache.spark.sql.Row row, int ordinal )  :  void
IntColumnStats.lower ( )  :  int
IntColumnStats.lower_.eq ( int p1 )  :  void
IntColumnStats.nullCount ( )  :  int
IntColumnStats.nullCount_.eq ( int p1 )  :  void
IntColumnStats.sizeInBytes ( )  :  long
IntColumnStats.sizeInBytes_.eq ( long p1 )  :  void
IntColumnStats.upper ( )  :  int
IntColumnStats.upper_.eq ( int p1 )  :  void

spark-sql_2.10-1.3.0.jar, IntegerHashSetSerializer.class
package org.apache.spark.sql.execution
IntegerHashSetSerializer.IntegerHashSetSerializer ( )

spark-sql_2.10-1.3.0.jar, Intersect.class
package org.apache.spark.sql.execution
Intersect.canEqual ( Object p1 )  :  boolean
Intersect.children ( )  :  scala.collection.Seq<SparkPlan>
Intersect.copy ( SparkPlan left, SparkPlan right )  :  Intersect
Intersect.curried ( ) [static]  :  scala.Function1<SparkPlan,scala.Function1<SparkPlan,Intersect>>
Intersect.equals ( Object p1 )  :  boolean
Intersect.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
Intersect.hashCode ( )  :  int
Intersect.Intersect ( SparkPlan left, SparkPlan right )
Intersect.left ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
Intersect.left ( )  :  SparkPlan
Intersect.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
Intersect.productArity ( )  :  int
Intersect.productElement ( int p1 )  :  Object
Intersect.productIterator ( )  :  scala.collection.Iterator<Object>
Intersect.productPrefix ( )  :  String
Intersect.right ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
Intersect.right ( )  :  SparkPlan
Intersect.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<SparkPlan,SparkPlan>,Intersect>

spark-sql_2.10-1.3.0.jar, IsNotNull.class
package org.apache.spark.sql.sources
IsNotNull.andThen ( scala.Function1<IsNotNull,A> p1 ) [static]  :  scala.Function1<String,A>
IsNotNull.attribute ( )  :  String
IsNotNull.canEqual ( Object p1 )  :  boolean
IsNotNull.compose ( scala.Function1<A,String> p1 ) [static]  :  scala.Function1<A,IsNotNull>
IsNotNull.copy ( String attribute )  :  IsNotNull
IsNotNull.equals ( Object p1 )  :  boolean
IsNotNull.hashCode ( )  :  int
IsNotNull.IsNotNull ( String attribute )
IsNotNull.productArity ( )  :  int
IsNotNull.productElement ( int p1 )  :  Object
IsNotNull.productIterator ( )  :  scala.collection.Iterator<Object>
IsNotNull.productPrefix ( )  :  String
IsNotNull.toString ( )  :  String

spark-sql_2.10-1.3.0.jar, IsNull.class
package org.apache.spark.sql.sources
IsNull.andThen ( scala.Function1<IsNull,A> p1 ) [static]  :  scala.Function1<String,A>
IsNull.attribute ( )  :  String
IsNull.canEqual ( Object p1 )  :  boolean
IsNull.compose ( scala.Function1<A,String> p1 ) [static]  :  scala.Function1<A,IsNull>
IsNull.copy ( String attribute )  :  IsNull
IsNull.equals ( Object p1 )  :  boolean
IsNull.hashCode ( )  :  int
IsNull.IsNull ( String attribute )
IsNull.productArity ( )  :  int
IsNull.productElement ( int p1 )  :  Object
IsNull.productIterator ( )  :  scala.collection.Iterator<Object>
IsNull.productPrefix ( )  :  String
IsNull.toString ( )  :  String

spark-sql_2.10-1.3.0.jar, JavaBigDecimalSerializer.class
package org.apache.spark.sql.execution
JavaBigDecimalSerializer.JavaBigDecimalSerializer ( )

spark-sql_2.10-1.3.0.jar, JDBCPartition.class
package org.apache.spark.sql.jdbc
JDBCPartition.canEqual ( Object p1 )  :  boolean
JDBCPartition.copy ( String whereClause, int idx )  :  JDBCPartition
JDBCPartition.curried ( ) [static]  :  scala.Function1<String,scala.Function1<Object,JDBCPartition>>
JDBCPartition.equals ( Object p1 )  :  boolean
JDBCPartition.hashCode ( )  :  int
JDBCPartition.idx ( )  :  int
JDBCPartition.index ( )  :  int
JDBCPartition.JDBCPartition ( String whereClause, int idx )
JDBCPartition.productArity ( )  :  int
JDBCPartition.productElement ( int p1 )  :  Object
JDBCPartition.productIterator ( )  :  scala.collection.Iterator<Object>
JDBCPartition.productPrefix ( )  :  String
JDBCPartition.toString ( )  :  String
JDBCPartition.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<String,Object>,JDBCPartition>
JDBCPartition.whereClause ( )  :  String

spark-sql_2.10-1.3.0.jar, JDBCPartitioningInfo.class
package org.apache.spark.sql.jdbc
JDBCPartitioningInfo.canEqual ( Object p1 )  :  boolean
JDBCPartitioningInfo.column ( )  :  String
JDBCPartitioningInfo.copy ( String column, long lowerBound, long upperBound, int numPartitions )  :  JDBCPartitioningInfo
JDBCPartitioningInfo.curried ( ) [static]  :  scala.Function1<String,scala.Function1<Object,scala.Function1<Object,scala.Function1<Object,JDBCPartitioningInfo>>>>
JDBCPartitioningInfo.equals ( Object p1 )  :  boolean
JDBCPartitioningInfo.hashCode ( )  :  int
JDBCPartitioningInfo.JDBCPartitioningInfo ( String column, long lowerBound, long upperBound, int numPartitions )
JDBCPartitioningInfo.lowerBound ( )  :  long
JDBCPartitioningInfo.numPartitions ( )  :  int
JDBCPartitioningInfo.productArity ( )  :  int
JDBCPartitioningInfo.productElement ( int p1 )  :  Object
JDBCPartitioningInfo.productIterator ( )  :  scala.collection.Iterator<Object>
JDBCPartitioningInfo.productPrefix ( )  :  String
JDBCPartitioningInfo.toString ( )  :  String
JDBCPartitioningInfo.tupled ( ) [static]  :  scala.Function1<scala.Tuple4<String,Object,Object,Object>,JDBCPartitioningInfo>
JDBCPartitioningInfo.upperBound ( )  :  long

spark-sql_2.10-1.3.0.jar, JDBCRDD.class
package org.apache.spark.sql.jdbc
JDBCRDD.BinaryConversion ( )  :  JDBCRDD.BinaryConversion.
JDBCRDD.BinaryLongConversion ( )  :  JDBCRDD.BinaryLongConversion.
JDBCRDD.BooleanConversion ( )  :  JDBCRDD.BooleanConversion.
JDBCRDD.compute ( org.apache.spark.Partition thePart, org.apache.spark.TaskContext context )  :  Object
JDBCRDD.DateConversion ( )  :  JDBCRDD.DateConversion.
JDBCRDD.DecimalConversion ( )  :  JDBCRDD.DecimalConversion.
JDBCRDD.DoubleConversion ( )  :  JDBCRDD.DoubleConversion.
JDBCRDD.FloatConversion ( )  :  JDBCRDD.FloatConversion.
JDBCRDD.getConnector ( String p1, String p2 ) [static]  :  scala.Function0<java.sql.Connection>
JDBCRDD.getConversions ( org.apache.spark.sql.types.StructType schema )  :  JDBCRDD.JDBCConversion[ ]
JDBCRDD.getPartitions ( )  :  org.apache.spark.Partition[ ]
JDBCRDD.IntegerConversion ( )  :  JDBCRDD.IntegerConversion.
JDBCRDD.JDBCRDD ( org.apache.spark.SparkContext sc, scala.Function0<java.sql.Connection> getConnection, org.apache.spark.sql.types.StructType schema, String fqTable, String[ ] columns, org.apache.spark.sql.sources.Filter[ ] filters, org.apache.spark.Partition[ ] partitions )
JDBCRDD.LongConversion ( )  :  JDBCRDD.LongConversion.
JDBCRDD.JDBCRDD..columnList ( )  :  String
JDBCRDD.JDBCRDD..compileFilter ( org.apache.spark.sql.sources.Filter f )  :  String
JDBCRDD.JDBCRDD..getWhereClause ( JDBCPartition part )  :  String
JDBCRDD.resolveTable ( String p1, String p2 ) [static]  :  org.apache.spark.sql.types.StructType
JDBCRDD.scanTable ( org.apache.spark.SparkContext p1, org.apache.spark.sql.types.StructType p2, String p3, String p4, String p5, String[ ] p6, org.apache.spark.sql.sources.Filter[ ] p7, org.apache.spark.Partition[ ] p8 ) [static]  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
JDBCRDD.StringConversion ( )  :  JDBCRDD.StringConversion.
JDBCRDD.TimestampConversion ( )  :  JDBCRDD.TimestampConversion.

spark-sql_2.10-1.3.0.jar, JDBCRelation.class
package org.apache.spark.sql.jdbc
JDBCRelation.buildScan ( String[ ] requiredColumns, org.apache.spark.sql.sources.Filter[ ] filters )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
JDBCRelation.canEqual ( Object p1 )  :  boolean
JDBCRelation.columnPartition ( JDBCPartitioningInfo p1 ) [static]  :  org.apache.spark.Partition[ ]
JDBCRelation.copy ( String url, String table, org.apache.spark.Partition[ ] parts, org.apache.spark.sql.SQLContext sqlContext )  :  JDBCRelation
JDBCRelation.equals ( Object p1 )  :  boolean
JDBCRelation.hashCode ( )  :  int
JDBCRelation.JDBCRelation ( String url, String table, org.apache.spark.Partition[ ] parts, org.apache.spark.sql.SQLContext sqlContext )
JDBCRelation.parts ( )  :  org.apache.spark.Partition[ ]
JDBCRelation.productArity ( )  :  int
JDBCRelation.productElement ( int p1 )  :  Object
JDBCRelation.productIterator ( )  :  scala.collection.Iterator<Object>
JDBCRelation.productPrefix ( )  :  String
JDBCRelation.schema ( )  :  org.apache.spark.sql.types.StructType
JDBCRelation.sqlContext ( )  :  org.apache.spark.sql.SQLContext
JDBCRelation.table ( )  :  String
JDBCRelation.toString ( )  :  String
JDBCRelation.url ( )  :  String
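
JDBCRelation above is the read side of the JDBC support introduced in 1.3.0; the write side is the pair of DataFrame methods createJDBCTable and insertIntoJDBC listed earlier in this section. A hedged continuation of the earlier sketch, with a placeholder connection string:

  val url = "jdbc:h2:mem:compat;user=sa"          // hypothetical URL for illustration
  adults.createJDBCTable(url, "ADULTS", false)    // allowExisting = false: create the table, then insert
  adults.insertIntoJDBC(url, "ADULTS", false)     // overwrite = false: append into the existing table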

spark-sql_2.10-1.3.0.jar, JSONRelation.class
package org.apache.spark.sql.json
JSONRelation.buildScan ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
JSONRelation.canEqual ( Object p1 )  :  boolean
JSONRelation.copy ( String path, double samplingRatio, scala.Option<org.apache.spark.sql.types.StructType> userSpecifiedSchema, org.apache.spark.sql.SQLContext sqlContext )  :  JSONRelation
JSONRelation.equals ( Object other )  :  boolean
JSONRelation.hashCode ( )  :  int
JSONRelation.insert ( org.apache.spark.sql.DataFrame data, boolean overwrite )  :  void
JSONRelation.JSONRelation ( String path, double samplingRatio, scala.Option<org.apache.spark.sql.types.StructType> userSpecifiedSchema, org.apache.spark.sql.SQLContext sqlContext )
JSONRelation.JSONRelation..baseRDD ( )  :  org.apache.spark.rdd.RDD<String>
JSONRelation.path ( )  :  String
JSONRelation.productArity ( )  :  int
JSONRelation.productElement ( int p1 )  :  Object
JSONRelation.productIterator ( )  :  scala.collection.Iterator<Object>
JSONRelation.productPrefix ( )  :  String
JSONRelation.samplingRatio ( )  :  double
JSONRelation.schema ( )  :  org.apache.spark.sql.types.StructType
JSONRelation.sqlContext ( )  :  org.apache.spark.sql.SQLContext
JSONRelation.toString ( )  :  String
JSONRelation.userSpecifiedSchema ( )  :  scala.Option<org.apache.spark.sql.types.StructType>

spark-sql_2.10-1.3.0.jar, KryoResourcePool.class
package org.apache.spark.sql.execution
KryoResourcePool.KryoResourcePool ( int size )

spark-sql_2.10-1.3.0.jar, LeftSemiJoinBNL.class
package org.apache.spark.sql.execution.joins
LeftSemiJoinBNL.broadcast ( )  :  org.apache.spark.sql.execution.SparkPlan
LeftSemiJoinBNL.canEqual ( Object p1 )  :  boolean
LeftSemiJoinBNL.children ( )  :  scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan>
LeftSemiJoinBNL.condition ( )  :  scala.Option<org.apache.spark.sql.catalyst.expressions.Expression>
LeftSemiJoinBNL.copy ( org.apache.spark.sql.execution.SparkPlan streamed, org.apache.spark.sql.execution.SparkPlan broadcast, scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> condition )  :  LeftSemiJoinBNL
LeftSemiJoinBNL.curried ( ) [static]  :  scala.Function1<org.apache.spark.sql.execution.SparkPlan,scala.Function1<org.apache.spark.sql.execution.SparkPlan,scala.Function1<scala.Option<org.apache.spark.sql.catalyst.expressions.Expression>,LeftSemiJoinBNL>>>
LeftSemiJoinBNL.equals ( Object p1 )  :  boolean
LeftSemiJoinBNL.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
LeftSemiJoinBNL.hashCode ( )  :  int
LeftSemiJoinBNL.left ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
LeftSemiJoinBNL.left ( )  :  org.apache.spark.sql.execution.SparkPlan
LeftSemiJoinBNL.LeftSemiJoinBNL ( org.apache.spark.sql.execution.SparkPlan streamed, org.apache.spark.sql.execution.SparkPlan broadcast, scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> condition )
LeftSemiJoinBNL.LeftSemiJoinBNL..boundCondition ( )  :  scala.Function1<org.apache.spark.sql.Row,Object>
LeftSemiJoinBNL.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
LeftSemiJoinBNL.outputPartitioning ( )  :  org.apache.spark.sql.catalyst.plans.physical.Partitioning
LeftSemiJoinBNL.productArity ( )  :  int
LeftSemiJoinBNL.productElement ( int p1 )  :  Object
LeftSemiJoinBNL.productIterator ( )  :  scala.collection.Iterator<Object>
LeftSemiJoinBNL.productPrefix ( )  :  String
LeftSemiJoinBNL.right ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
LeftSemiJoinBNL.right ( )  :  org.apache.spark.sql.execution.SparkPlan
LeftSemiJoinBNL.streamed ( )  :  org.apache.spark.sql.execution.SparkPlan
LeftSemiJoinBNL.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.execution.SparkPlan,scala.Option<org.apache.spark.sql.catalyst.expressions.Expression>>,LeftSemiJoinBNL>

spark-sql_2.10-1.3.0.jar, LeftSemiJoinHash.class
package org.apache.spark.sql.execution.joins
LeftSemiJoinHash.buildKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
LeftSemiJoinHash.buildPlan ( )  :  org.apache.spark.sql.execution.SparkPlan
LeftSemiJoinHash.buildSide ( )  :  package.BuildRight.
LeftSemiJoinHash.buildSide ( )  :  package.BuildSide
LeftSemiJoinHash.buildSideKeyGenerator ( )  :  org.apache.spark.sql.catalyst.expressions.package.Projection
LeftSemiJoinHash.canEqual ( Object p1 )  :  boolean
LeftSemiJoinHash.children ( )  :  scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan>
LeftSemiJoinHash.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )  :  LeftSemiJoinHash
LeftSemiJoinHash.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<org.apache.spark.sql.execution.SparkPlan,scala.Function1<org.apache.spark.sql.execution.SparkPlan,LeftSemiJoinHash>>>>
LeftSemiJoinHash.equals ( Object p1 )  :  boolean
LeftSemiJoinHash.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
LeftSemiJoinHash.hashCode ( )  :  int
LeftSemiJoinHash.hashJoin ( scala.collection.Iterator<org.apache.spark.sql.Row> streamIter, HashedRelation hashedRelation )  :  scala.collection.Iterator<org.apache.spark.sql.Row>
LeftSemiJoinHash.left ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
LeftSemiJoinHash.left ( )  :  org.apache.spark.sql.execution.SparkPlan
LeftSemiJoinHash.leftKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
LeftSemiJoinHash.LeftSemiJoinHash ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )
LeftSemiJoinHash.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
LeftSemiJoinHash.productArity ( )  :  int
LeftSemiJoinHash.productElement ( int p1 )  :  Object
LeftSemiJoinHash.productIterator ( )  :  scala.collection.Iterator<Object>
LeftSemiJoinHash.productPrefix ( )  :  String
LeftSemiJoinHash.requiredChildDistribution ( )  :  scala.collection.immutable.List<org.apache.spark.sql.catalyst.plans.physical.ClusteredDistribution>
LeftSemiJoinHash.requiredChildDistribution ( )  :  scala.collection.Seq
LeftSemiJoinHash.right ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
LeftSemiJoinHash.right ( )  :  org.apache.spark.sql.execution.SparkPlan
LeftSemiJoinHash.rightKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
LeftSemiJoinHash.streamedKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
LeftSemiJoinHash.streamedPlan ( )  :  org.apache.spark.sql.execution.SparkPlan
LeftSemiJoinHash.streamSideKeyGenerator ( )  :  scala.Function0<org.apache.spark.sql.catalyst.expressions.package.MutableProjection>
LeftSemiJoinHash.tupled ( ) [static]  :  scala.Function1<scala.Tuple4<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.execution.SparkPlan>,LeftSemiJoinHash>

spark-sql_2.10-1.3.0.jar, LessThan.class
package org.apache.spark.sql.sources
LessThan.attribute ( )  :  String
LessThan.canEqual ( Object p1 )  :  boolean
LessThan.copy ( String attribute, Object value )  :  LessThan
LessThan.curried ( ) [static]  :  scala.Function1<String,scala.Function1<Object,LessThan>>
LessThan.equals ( Object p1 )  :  boolean
LessThan.hashCode ( )  :  int
LessThan.LessThan ( String attribute, Object value )
LessThan.productArity ( )  :  int
LessThan.productElement ( int p1 )  :  Object
LessThan.productIterator ( )  :  scala.collection.Iterator<Object>
LessThan.productPrefix ( )  :  String
LessThan.toString ( )  :  String
LessThan.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<String,Object>,LessThan>
LessThan.value ( )  :  Object
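
LessThan is a case class, so the generated companion helpers listed above (curried, tupled) can stand in for direct construction. A minimal sketch, with an invented column name and value:

    import org.apache.spark.sql.sources.LessThan

    val f1 = LessThan("age", 21)              // direct construction
    val f2 = LessThan.curried("age")(21)      // via the generated curried form
    val f3 = LessThan.tupled(("age", 21))     // via the generated tupled form

    // attribute() and value() return the two constructor arguments;
    // case-class equality makes all three instances equal.
    assert(f1 == f2 && f2 == f3 && f1.attribute == "age" && f1.value == 21)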

spark-sql_2.10-1.3.0.jar, LessThanOrEqual.class
package org.apache.spark.sql.sources
LessThanOrEqual.attribute ( )  :  String
LessThanOrEqual.canEqual ( Object p1 )  :  boolean
LessThanOrEqual.copy ( String attribute, Object value )  :  LessThanOrEqual
LessThanOrEqual.curried ( ) [static]  :  scala.Function1<String,scala.Function1<Object,LessThanOrEqual>>
LessThanOrEqual.equals ( Object p1 )  :  boolean
LessThanOrEqual.hashCode ( )  :  int
LessThanOrEqual.LessThanOrEqual ( String attribute, Object value )
LessThanOrEqual.productArity ( )  :  int
LessThanOrEqual.productElement ( int p1 )  :  Object
LessThanOrEqual.productIterator ( )  :  scala.collection.Iterator<Object>
LessThanOrEqual.productPrefix ( )  :  String
LessThanOrEqual.toString ( )  :  String
LessThanOrEqual.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<String,Object>,LessThanOrEqual>
LessThanOrEqual.value ( )  :  Object

spark-sql_2.10-1.3.0.jar, Limit.class
package org.apache.spark.sql.execution
Limit.copy ( int limit, SparkPlan child )  :  Limit
Limit.curried ( ) [static]  :  scala.Function1<Object,scala.Function1<SparkPlan,Limit>>
Limit.Limit ( int limit, SparkPlan child )
Limit.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<Object,SparkPlan>,Limit>

spark-sql_2.10-1.3.0.jar, LocalTableScan.class
package org.apache.spark.sql.execution
LocalTableScan.canEqual ( Object p1 )  :  boolean
LocalTableScan.children ( )  :  scala.collection.immutable.Nil.
LocalTableScan.children ( )  :  scala.collection.Seq
LocalTableScan.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, scala.collection.Seq<org.apache.spark.sql.Row> rows )  :  LocalTableScan
LocalTableScan.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.Row>,LocalTableScan>>
LocalTableScan.equals ( Object p1 )  :  boolean
LocalTableScan.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
LocalTableScan.executeCollect ( )  :  org.apache.spark.sql.Row[ ]
LocalTableScan.executeTake ( int limit )  :  org.apache.spark.sql.Row[ ]
LocalTableScan.hashCode ( )  :  int
LocalTableScan.LocalTableScan ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, scala.collection.Seq<org.apache.spark.sql.Row> rows )
LocalTableScan.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
LocalTableScan.productArity ( )  :  int
LocalTableScan.productElement ( int p1 )  :  Object
LocalTableScan.productIterator ( )  :  scala.collection.Iterator<Object>
LocalTableScan.productPrefix ( )  :  String
LocalTableScan.rows ( )  :  scala.collection.Seq<org.apache.spark.sql.Row>
LocalTableScan.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.collection.Seq<org.apache.spark.sql.Row>>,LocalTableScan>

spark-sql_2.10-1.3.0.jar, LogicalLocalTable.class
package org.apache.spark.sql.execution
LogicalLocalTable.canEqual ( Object p1 )  :  boolean
LogicalLocalTable.children ( )  :  scala.collection.immutable.Nil.
LogicalLocalTable.children ( )  :  scala.collection.Seq
LogicalLocalTable.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, scala.collection.Seq<org.apache.spark.sql.Row> rows, org.apache.spark.sql.SQLContext sqlContext )  :  LogicalLocalTable
LogicalLocalTable.equals ( Object p1 )  :  boolean
LogicalLocalTable.hashCode ( )  :  int
LogicalLocalTable.LogicalLocalTable ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, scala.collection.Seq<org.apache.spark.sql.Row> rows, org.apache.spark.sql.SQLContext sqlContext )
LogicalLocalTable.newInstance ( )  :  org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
LogicalLocalTable.newInstance ( )  :  LogicalLocalTable
LogicalLocalTable.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
LogicalLocalTable.productArity ( )  :  int
LogicalLocalTable.productElement ( int p1 )  :  Object
LogicalLocalTable.productIterator ( )  :  scala.collection.Iterator<Object>
LogicalLocalTable.productPrefix ( )  :  String
LogicalLocalTable.rows ( )  :  scala.collection.Seq<org.apache.spark.sql.Row>
LogicalLocalTable.sameResult ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan plan )  :  boolean
LogicalLocalTable.statistics ( )  :  org.apache.spark.sql.catalyst.plans.logical.Statistics

spark-sql_2.10-1.3.0.jar, LogicalRDD.class
package org.apache.spark.sql.execution
LogicalRDD.canEqual ( Object p1 )  :  boolean
LogicalRDD.children ( )  :  scala.collection.immutable.Nil.
LogicalRDD.children ( )  :  scala.collection.Seq
LogicalRDD.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, org.apache.spark.rdd.RDD<org.apache.spark.sql.Row> rdd, org.apache.spark.sql.SQLContext sqlContext )  :  LogicalRDD
LogicalRDD.equals ( Object p1 )  :  boolean
LogicalRDD.hashCode ( )  :  int
LogicalRDD.LogicalRDD ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, org.apache.spark.rdd.RDD<org.apache.spark.sql.Row> rdd, org.apache.spark.sql.SQLContext sqlContext )
LogicalRDD.newInstance ( )  :  org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
LogicalRDD.newInstance ( )  :  LogicalRDD
LogicalRDD.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
LogicalRDD.productArity ( )  :  int
LogicalRDD.productElement ( int p1 )  :  Object
LogicalRDD.productIterator ( )  :  scala.collection.Iterator<Object>
LogicalRDD.productPrefix ( )  :  String
LogicalRDD.rdd ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
LogicalRDD.sameResult ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan plan )  :  boolean
LogicalRDD.statistics ( )  :  org.apache.spark.sql.catalyst.plans.logical.Statistics

spark-sql_2.10-1.3.0.jar, LogicalRelation.class
package org.apache.spark.sql.sources
LogicalRelation.andThen ( scala.Function1<LogicalRelation,A> p1 ) [static]  :  scala.Function1<BaseRelation,A>
LogicalRelation.attributeMap ( )  :  org.apache.spark.sql.catalyst.expressions.AttributeMap<org.apache.spark.sql.catalyst.expressions.AttributeReference>
LogicalRelation.canEqual ( Object p1 )  :  boolean
LogicalRelation.compose ( scala.Function1<A,BaseRelation> p1 ) [static]  :  scala.Function1<A,LogicalRelation>
LogicalRelation.copy ( BaseRelation relation )  :  LogicalRelation
LogicalRelation.equals ( Object other )  :  boolean
LogicalRelation.hashCode ( )  :  int
LogicalRelation.LogicalRelation ( BaseRelation relation )
LogicalRelation.newInstance ( )  :  org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
LogicalRelation.newInstance ( )  :  LogicalRelation
LogicalRelation.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.AttributeReference>
LogicalRelation.productArity ( )  :  int
LogicalRelation.productElement ( int p1 )  :  Object
LogicalRelation.productIterator ( )  :  scala.collection.Iterator<Object>
LogicalRelation.productPrefix ( )  :  String
LogicalRelation.relation ( )  :  BaseRelation
LogicalRelation.sameResult ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan otherPlan )  :  boolean
LogicalRelation.simpleString ( )  :  String
LogicalRelation.statistics ( )  :  org.apache.spark.sql.catalyst.plans.logical.Statistics

spark-sql_2.10-1.3.0.jar, LongHashSetSerializer.class
package org.apache.spark.sql.execution
LongHashSetSerializer.LongHashSetSerializer ( )

spark-sql_2.10-1.3.0.jar, MySQLQuirks.class
package org.apache.spark.sql.jdbc
MySQLQuirks.MySQLQuirks ( )

spark-sql_2.10-1.3.0.jar, NanoTime.class
package org.apache.spark.sql.parquet.timestamp
NanoTime.getJulianDay ( )  :  int
NanoTime.getTimeOfDayNanos ( )  :  long
NanoTime.NanoTime ( )
NanoTime.set ( int julianDay, long timeOfDayNanos )  :  NanoTime
NanoTime.toBinary ( )  :  parquet.io.api.Binary

spark-sql_2.10-1.3.0.jar, NativeColumnType<T>.class
package org.apache.spark.sql.columnar
NativeColumnType<T>.dataType ( )  :  T
NativeColumnType<T>.NativeColumnType ( T dataType, int typeId, int defaultSize )

spark-sql_2.10-1.3.0.jar, NoQuirks.class
package org.apache.spark.sql.jdbc
NoQuirks.NoQuirks ( )

spark-sql_2.10-1.3.0.jar, Not.class
package org.apache.spark.sql.sources
Not.andThen ( scala.Function1<Not,A> p1 ) [static]  :  scala.Function1<Filter,A>
Not.canEqual ( Object p1 )  :  boolean
Not.child ( )  :  Filter
Not.compose ( scala.Function1<A,Filter> p1 ) [static]  :  scala.Function1<A,Not>
Not.copy ( Filter child )  :  Not
Not.equals ( Object p1 )  :  boolean
Not.hashCode ( )  :  int
Not.Not ( Filter child )
Not.productArity ( )  :  int
Not.productElement ( int p1 )  :  Object
Not.productIterator ( )  :  scala.collection.Iterator<Object>
Not.productPrefix ( )  :  String
Not.toString ( )  :  String

spark-sql_2.10-1.3.0.jar, NullableColumnBuilder.class
package org.apache.spark.sql.columnar
NullableColumnBuilder.appendFrom ( org.apache.spark.sql.Row p1, int p2 ) [abstract]  :  void
NullableColumnBuilder.buildNonNulls ( ) [abstract]  :  java.nio.ByteBuffer
NullableColumnBuilder.nullCount ( ) [abstract]  :  int
NullableColumnBuilder.nullCount_.eq ( int p1 ) [abstract]  :  void
NullableColumnBuilder.nulls ( ) [abstract]  :  java.nio.ByteBuffer
NullableColumnBuilder.nulls_.eq ( java.nio.ByteBuffer p1 ) [abstract]  :  void
NullableColumnBuilder.NullableColumnBuilder..super.appendFrom ( org.apache.spark.sql.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.3.0.jar, OpenHashSetSerializer.class
package org.apache.spark.sql.execution
OpenHashSetSerializer.OpenHashSetSerializer ( )

spark-sql_2.10-1.3.0.jar, Or.class
package org.apache.spark.sql.sources
Or.canEqual ( Object p1 )  :  boolean
Or.copy ( Filter left, Filter right )  :  Or
Or.curried ( ) [static]  :  scala.Function1<Filter,scala.Function1<Filter,Or>>
Or.equals ( Object p1 )  :  boolean
Or.hashCode ( )  :  int
Or.left ( )  :  Filter
Or.Or ( Filter left, Filter right )
Or.productArity ( )  :  int
Or.productElement ( int p1 )  :  Object
Or.productIterator ( )  :  scala.collection.Iterator<Object>
Or.productPrefix ( )  :  String
Or.right ( )  :  Filter
Or.toString ( )  :  String
Or.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<Filter,Filter>,Or>
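
Not and Or wrap other filters, so a compound predicate pushed down to a data source is just a tree of nested case-class instances. A minimal sketch, with invented column names and values:

    import org.apache.spark.sql.sources.{Filter, LessThan, LessThanOrEqual, Not, Or}

    // (price < 10) OR NOT (rating <= 3); left(), right() and child()
    // return the wrapped filters.
    val pushed: Filter = Or(LessThan("price", 10), Not(LessThanOrEqual("rating", 3)))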

spark-sql_2.10-1.3.0.jar, OutputFaker.class
package org.apache.spark.sql.execution
OutputFaker.canEqual ( Object p1 )  :  boolean
OutputFaker.child ( )  :  SparkPlan
OutputFaker.children ( )  :  scala.collection.immutable.List<SparkPlan>
OutputFaker.children ( )  :  scala.collection.Seq
OutputFaker.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, SparkPlan child )  :  OutputFaker
OutputFaker.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<SparkPlan,OutputFaker>>
OutputFaker.equals ( Object p1 )  :  boolean
OutputFaker.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
OutputFaker.hashCode ( )  :  int
OutputFaker.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
OutputFaker.OutputFaker ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, SparkPlan child )
OutputFaker.productArity ( )  :  int
OutputFaker.productElement ( int p1 )  :  Object
OutputFaker.productIterator ( )  :  scala.collection.Iterator<Object>
OutputFaker.productPrefix ( )  :  String
OutputFaker.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,SparkPlan>,OutputFaker>

spark-sql_2.10-1.3.0.jar, ParquetRelation.class
package org.apache.spark.sql.parquet
ParquetRelation.attributeMap ( )  :  org.apache.spark.sql.catalyst.expressions.AttributeMap<org.apache.spark.sql.catalyst.expressions.Attribute>
ParquetRelation.conf ( )  :  scala.Option<org.apache.hadoop.conf.Configuration>
ParquetRelation.copy ( String path, scala.Option<org.apache.hadoop.conf.Configuration> conf, org.apache.spark.sql.SQLContext sqlContext, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> partitioningAttributes )  :  ParquetRelation
ParquetRelation.create ( String p1, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan p2, org.apache.hadoop.conf.Configuration p3, org.apache.spark.sql.SQLContext p4 ) [static]  :  ParquetRelation
ParquetRelation.createEmpty ( String p1, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> p2, boolean p3, org.apache.hadoop.conf.Configuration p4, org.apache.spark.sql.SQLContext p5 ) [static]  :  ParquetRelation
ParquetRelation.ParquetRelation ( String path, scala.Option<org.apache.hadoop.conf.Configuration> conf, org.apache.spark.sql.SQLContext sqlContext, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> partitioningAttributes )
ParquetRelation.partitioningAttributes ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
ParquetRelation.shortParquetCompressionCodecNames ( ) [static]  :  scala.collection.immutable.Map<String,parquet.hadoop.metadata.CompressionCodecName>
ParquetRelation.sqlContext ( )  :  org.apache.spark.sql.SQLContext
ParquetRelation.statistics ( )  :  org.apache.spark.sql.catalyst.plans.logical.Statistics

spark-sql_2.10-1.3.0.jar, ParquetRelation2.class
package org.apache.spark.sql.parquet
ParquetRelation2.buildScan ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> predicates )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
ParquetRelation2.canEqual ( Object p1 )  :  boolean
ParquetRelation2.copy ( scala.collection.Seq<String> paths, scala.collection.immutable.Map<String,String> parameters, scala.Option<org.apache.spark.sql.types.StructType> maybeSchema, scala.Option<PartitionSpec> maybePartitionSpec, org.apache.spark.sql.SQLContext sqlContext )  :  ParquetRelation2
ParquetRelation2.DEFAULT_PARTITION_NAME ( ) [static]  :  String
ParquetRelation2.equals ( Object other )  :  boolean
ParquetRelation2.hashCode ( )  :  int
ParquetRelation2.insert ( org.apache.spark.sql.DataFrame data, boolean overwrite )  :  void
ParquetRelation2.isPartitioned ( )  :  boolean
ParquetRelation2.isTraceEnabled ( )  :  boolean
ParquetRelation2.log ( )  :  org.slf4j.Logger
ParquetRelation2.logDebug ( scala.Function0<String> msg )  :  void
ParquetRelation2.logDebug ( scala.Function0<String> msg, Throwable throwable )  :  void
ParquetRelation2.logError ( scala.Function0<String> msg )  :  void
ParquetRelation2.logError ( scala.Function0<String> msg, Throwable throwable )  :  void
ParquetRelation2.logInfo ( scala.Function0<String> msg )  :  void
ParquetRelation2.logInfo ( scala.Function0<String> msg, Throwable throwable )  :  void
ParquetRelation2.logName ( )  :  String
ParquetRelation2.logTrace ( scala.Function0<String> msg )  :  void
ParquetRelation2.logTrace ( scala.Function0<String> msg, Throwable throwable )  :  void
ParquetRelation2.logWarning ( scala.Function0<String> msg )  :  void
ParquetRelation2.logWarning ( scala.Function0<String> msg, Throwable throwable )  :  void
ParquetRelation2.maybePartitionSpec ( )  :  scala.Option<PartitionSpec>
ParquetRelation2.maybeSchema ( )  :  scala.Option<org.apache.spark.sql.types.StructType>
ParquetRelation2.MERGE_SCHEMA ( ) [static]  :  String
ParquetRelation2.newJobContext ( org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.mapreduce.JobID jobId )  :  org.apache.hadoop.mapreduce.JobContext
ParquetRelation2.newTaskAttemptContext ( org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.mapreduce.TaskAttemptID attemptId )  :  org.apache.hadoop.mapreduce.TaskAttemptContext
ParquetRelation2.newTaskAttemptID ( String jtIdentifier, int jobId, boolean isMap, int taskId, int attemptId )  :  org.apache.hadoop.mapreduce.TaskAttemptID
ParquetRelation2.org.apache.spark.Logging..log_ ( )  :  org.slf4j.Logger
ParquetRelation2.org.apache.spark.Logging..log__.eq ( org.slf4j.Logger p1 )  :  void
ParquetRelation2.ParquetRelation2..defaultPartitionName ( )  :  String
ParquetRelation2.ParquetRelation2..isSummaryFile ( org.apache.hadoop.fs.Path file )  :  boolean
ParquetRelation2.ParquetRelation2..maybeMetastoreSchema ( )  :  scala.Option<org.apache.spark.sql.types.StructType>
ParquetRelation2.ParquetRelation2..metadataCache ( )  :  ParquetRelation2.MetadataCache
ParquetRelation2.ParquetRelation2..shouldMergeSchemas ( )  :  boolean
ParquetRelation2.parameters ( )  :  scala.collection.immutable.Map<String,String>
ParquetRelation2.ParquetRelation2 ( scala.collection.Seq<String> paths, scala.collection.immutable.Map<String,String> parameters, scala.Option<org.apache.spark.sql.types.StructType> maybeSchema, scala.Option<PartitionSpec> maybePartitionSpec, org.apache.spark.sql.SQLContext sqlContext )
ParquetRelation2.partitionColumns ( )  :  org.apache.spark.sql.types.StructType
ParquetRelation2.partitions ( )  :  scala.collection.Seq<Partition>
ParquetRelation2.partitionSpec ( )  :  PartitionSpec
ParquetRelation2.paths ( )  :  scala.collection.Seq<String>
ParquetRelation2.productArity ( )  :  int
ParquetRelation2.productElement ( int p1 )  :  Object
ParquetRelation2.productIterator ( )  :  scala.collection.Iterator<Object>
ParquetRelation2.productPrefix ( )  :  String
ParquetRelation2.schema ( )  :  org.apache.spark.sql.types.StructType
ParquetRelation2.sizeInBytes ( )  :  long
ParquetRelation2.sparkContext ( )  :  org.apache.spark.SparkContext
ParquetRelation2.sqlContext ( )  :  org.apache.spark.sql.SQLContext
ParquetRelation2.toString ( )  :  String

spark-sql_2.10-1.3.0.jar, ParquetTableScan.class
package org.apache.spark.sql.parquet
ParquetTableScan.attributes ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
ParquetTableScan.columnPruningPred ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
ParquetTableScan.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes, ParquetRelation relation, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> columnPruningPred )  :  ParquetTableScan
ParquetTableScan.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<ParquetRelation,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,ParquetTableScan>>>
ParquetTableScan.ParquetTableScan ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes, ParquetRelation relation, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> columnPruningPred )
ParquetTableScan.requestedPartitionOrdinals ( )  :  scala.Tuple2<Object,Object>[ ]
ParquetTableScan.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,ParquetRelation,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>>,ParquetTableScan>

spark-sql_2.10-1.3.0.jar, ParquetTest.class
package org.apache.spark.sql.parquet
ParquetTest.configuration ( ) [abstract]  :  org.apache.hadoop.conf.Configuration
ParquetTest.makeParquetFile ( org.apache.spark.sql.DataFrame p1, java.io.File p2, scala.reflect.ClassTag<T> p3, scala.reflect.api.TypeTags.TypeTag<T> p4 ) [abstract]  :  void
ParquetTest.makeParquetFile ( scala.collection.Seq<T> p1, java.io.File p2, scala.reflect.ClassTag<T> p3, scala.reflect.api.TypeTags.TypeTag<T> p4 ) [abstract]  :  void
ParquetTest.makePartitionDir ( java.io.File p1, String p2, scala.collection.Seq<scala.Tuple2<String,Object>> p3 ) [abstract]  :  java.io.File
ParquetTest.sqlContext ( ) [abstract]  :  org.apache.spark.sql.SQLContext
ParquetTest.withParquetDataFrame ( scala.collection.Seq<T> p1, scala.Function1<org.apache.spark.sql.DataFrame,scala.runtime.BoxedUnit> p2, scala.reflect.ClassTag<T> p3, scala.reflect.api.TypeTags.TypeTag<T> p4 ) [abstract]  :  void
ParquetTest.withParquetFile ( scala.collection.Seq<T> p1, scala.Function1<String,scala.runtime.BoxedUnit> p2, scala.reflect.ClassTag<T> p3, scala.reflect.api.TypeTags.TypeTag<T> p4 ) [abstract]  :  void
ParquetTest.withParquetTable ( scala.collection.Seq<T> p1, String p2, scala.Function0<scala.runtime.BoxedUnit> p3, scala.reflect.ClassTag<T> p4, scala.reflect.api.TypeTags.TypeTag<T> p5 ) [abstract]  :  void
ParquetTest.withSQLConf ( scala.collection.Seq<scala.Tuple2<String,String>> p1, scala.Function0<scala.runtime.BoxedUnit> p2 ) [abstract]  :  void
ParquetTest.withTempDir ( scala.Function1<java.io.File,scala.runtime.BoxedUnit> p1 ) [abstract]  :  void
ParquetTest.withTempPath ( scala.Function1<java.io.File,scala.runtime.BoxedUnit> p1 ) [abstract]  :  void
ParquetTest.withTempTable ( String p1, scala.Function0<scala.runtime.BoxedUnit> p2 ) [abstract]  :  void

spark-sql_2.10-1.3.0.jar, ParquetTypeInfo.class
package org.apache.spark.sql.parquet
ParquetTypeInfo.canEqual ( Object p1 )  :  boolean
ParquetTypeInfo.copy ( parquet.schema.PrimitiveType.PrimitiveTypeName primitiveType, scala.Option<parquet.schema.OriginalType> originalType, scala.Option<parquet.schema.DecimalMetadata> decimalMetadata, scala.Option<Object> length )  :  ParquetTypeInfo
ParquetTypeInfo.curried ( ) [static]  :  scala.Function1<parquet.schema.PrimitiveType.PrimitiveTypeName,scala.Function1<scala.Option<parquet.schema.OriginalType>,scala.Function1<scala.Option<parquet.schema.DecimalMetadata>,scala.Function1<scala.Option<Object>,ParquetTypeInfo>>>>
ParquetTypeInfo.decimalMetadata ( )  :  scala.Option<parquet.schema.DecimalMetadata>
ParquetTypeInfo.equals ( Object p1 )  :  boolean
ParquetTypeInfo.hashCode ( )  :  int
ParquetTypeInfo.length ( )  :  scala.Option<Object>
ParquetTypeInfo.originalType ( )  :  scala.Option<parquet.schema.OriginalType>
ParquetTypeInfo.ParquetTypeInfo ( parquet.schema.PrimitiveType.PrimitiveTypeName primitiveType, scala.Option<parquet.schema.OriginalType> originalType, scala.Option<parquet.schema.DecimalMetadata> decimalMetadata, scala.Option<Object> length )
ParquetTypeInfo.primitiveType ( )  :  parquet.schema.PrimitiveType.PrimitiveTypeName
ParquetTypeInfo.productArity ( )  :  int
ParquetTypeInfo.productElement ( int p1 )  :  Object
ParquetTypeInfo.productIterator ( )  :  scala.collection.Iterator<Object>
ParquetTypeInfo.productPrefix ( )  :  String
ParquetTypeInfo.toString ( )  :  String
ParquetTypeInfo.tupled ( ) [static]  :  scala.Function1<scala.Tuple4<parquet.schema.PrimitiveType.PrimitiveTypeName,scala.Option<parquet.schema.OriginalType>,scala.Option<parquet.schema.DecimalMetadata>,scala.Option<Object>>,ParquetTypeInfo>

spark-sql_2.10-1.3.0.jar, Partition.class
package org.apache.spark.sql.parquet
Partition.canEqual ( Object p1 )  :  boolean
Partition.copy ( org.apache.spark.sql.Row values, String path )  :  Partition
Partition.curried ( ) [static]  :  scala.Function1<org.apache.spark.sql.Row,scala.Function1<String,Partition>>
Partition.equals ( Object p1 )  :  boolean
Partition.hashCode ( )  :  int
Partition.Partition ( org.apache.spark.sql.Row values, String path )
Partition.path ( )  :  String
Partition.productArity ( )  :  int
Partition.productElement ( int p1 )  :  Object
Partition.productIterator ( )  :  scala.collection.Iterator<Object>
Partition.productPrefix ( )  :  String
Partition.toString ( )  :  String
Partition.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<org.apache.spark.sql.Row,String>,Partition>
Partition.values ( )  :  org.apache.spark.sql.Row

spark-sql_2.10-1.3.0.jar, PartitionSpec.class
package org.apache.spark.sql.parquet
PartitionSpec.canEqual ( Object p1 )  :  boolean
PartitionSpec.copy ( org.apache.spark.sql.types.StructType partitionColumns, scala.collection.Seq<Partition> partitions )  :  PartitionSpec
PartitionSpec.curried ( ) [static]  :  scala.Function1<org.apache.spark.sql.types.StructType,scala.Function1<scala.collection.Seq<Partition>,PartitionSpec>>
PartitionSpec.equals ( Object p1 )  :  boolean
PartitionSpec.hashCode ( )  :  int
PartitionSpec.partitionColumns ( )  :  org.apache.spark.sql.types.StructType
PartitionSpec.partitions ( )  :  scala.collection.Seq<Partition>
PartitionSpec.PartitionSpec ( org.apache.spark.sql.types.StructType partitionColumns, scala.collection.Seq<Partition> partitions )
PartitionSpec.productArity ( )  :  int
PartitionSpec.productElement ( int p1 )  :  Object
PartitionSpec.productIterator ( )  :  scala.collection.Iterator<Object>
PartitionSpec.productPrefix ( )  :  String
PartitionSpec.toString ( )  :  String
PartitionSpec.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<org.apache.spark.sql.types.StructType,scala.collection.Seq<Partition>>,PartitionSpec>

spark-sql_2.10-1.3.0.jar, PartitionStatistics.class
package org.apache.spark.sql.columnar
PartitionStatistics.forAttribute ( )  :  org.apache.spark.sql.catalyst.expressions.AttributeMap<ColumnStatisticsSchema>
PartitionStatistics.PartitionStatistics ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> tableSchema )
PartitionStatistics.schema ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>

spark-sql_2.10-1.3.0.jar, PhysicalRDD.class
package org.apache.spark.sql.execution
PhysicalRDD.canEqual ( Object p1 )  :  boolean
PhysicalRDD.children ( )  :  scala.collection.immutable.Nil.
PhysicalRDD.children ( )  :  scala.collection.Seq
PhysicalRDD.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, org.apache.spark.rdd.RDD<org.apache.spark.sql.Row> rdd )  :  PhysicalRDD
PhysicalRDD.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>,PhysicalRDD>>
PhysicalRDD.equals ( Object p1 )  :  boolean
PhysicalRDD.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
PhysicalRDD.hashCode ( )  :  int
PhysicalRDD.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
PhysicalRDD.PhysicalRDD ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, org.apache.spark.rdd.RDD<org.apache.spark.sql.Row> rdd )
PhysicalRDD.productArity ( )  :  int
PhysicalRDD.productElement ( int p1 )  :  Object
PhysicalRDD.productIterator ( )  :  scala.collection.Iterator<Object>
PhysicalRDD.productPrefix ( )  :  String
PhysicalRDD.rdd ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
PhysicalRDD.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>>,PhysicalRDD>

spark-sql_2.10-1.3.0.jar, PostgresQuirks.class
package org.apache.spark.sql.jdbc
PostgresQuirks.PostgresQuirks ( )

spark-sql_2.10-1.3.0.jar, PreWriteCheck.class
package org.apache.spark.sql.sources
PreWriteCheck.andThen ( scala.Function1<scala.runtime.BoxedUnit,A> g )  :  scala.Function1<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,A>
PreWriteCheck.andThen.mcDD.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcDF.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcDI.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcDJ.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcFD.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcFF.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcFI.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcFJ.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcID.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcIF.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcII.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcIJ.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcJD.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcJF.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcJI.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcJJ.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcVD.sp ( scala.Function1<scala.runtime.BoxedUnit,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcVF.sp ( scala.Function1<scala.runtime.BoxedUnit,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcVI.sp ( scala.Function1<scala.runtime.BoxedUnit,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcVJ.sp ( scala.Function1<scala.runtime.BoxedUnit,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcZD.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcZF.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcZI.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcZJ.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.apply ( Object v1 )  :  Object
PreWriteCheck.apply ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan plan )  :  void
PreWriteCheck.apply.mcDD.sp ( double v1 )  :  double
PreWriteCheck.apply.mcDF.sp ( float v1 )  :  double
PreWriteCheck.apply.mcDI.sp ( int v1 )  :  double
PreWriteCheck.apply.mcDJ.sp ( long v1 )  :  double
PreWriteCheck.apply.mcFD.sp ( double v1 )  :  float
PreWriteCheck.apply.mcFF.sp ( float v1 )  :  float
PreWriteCheck.apply.mcFI.sp ( int v1 )  :  float
PreWriteCheck.apply.mcFJ.sp ( long v1 )  :  float
PreWriteCheck.apply.mcID.sp ( double v1 )  :  int
PreWriteCheck.apply.mcIF.sp ( float v1 )  :  int
PreWriteCheck.apply.mcII.sp ( int v1 )  :  int
PreWriteCheck.apply.mcIJ.sp ( long v1 )  :  int
PreWriteCheck.apply.mcJD.sp ( double v1 )  :  long
PreWriteCheck.apply.mcJF.sp ( float v1 )  :  long
PreWriteCheck.apply.mcJI.sp ( int v1 )  :  long
PreWriteCheck.apply.mcJJ.sp ( long v1 )  :  long
PreWriteCheck.apply.mcVD.sp ( double v1 )  :  void
PreWriteCheck.apply.mcVF.sp ( float v1 )  :  void
PreWriteCheck.apply.mcVI.sp ( int v1 )  :  void
PreWriteCheck.apply.mcVJ.sp ( long v1 )  :  void
PreWriteCheck.apply.mcZD.sp ( double v1 )  :  boolean
PreWriteCheck.apply.mcZF.sp ( float v1 )  :  boolean
PreWriteCheck.apply.mcZI.sp ( int v1 )  :  boolean
PreWriteCheck.apply.mcZJ.sp ( long v1 )  :  boolean
PreWriteCheck.canEqual ( Object p1 )  :  boolean
PreWriteCheck.catalog ( )  :  org.apache.spark.sql.catalyst.analysis.Catalog
PreWriteCheck.compose ( scala.Function1<A,org.apache.spark.sql.catalyst.plans.logical.LogicalPlan> g )  :  scala.Function1<A,scala.runtime.BoxedUnit>
PreWriteCheck.compose.mcDD.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcDF.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcDI.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcDJ.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcFD.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcFF.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcFI.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcFJ.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcID.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcIF.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcII.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcIJ.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcJD.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcJF.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcJI.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcJJ.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcVD.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,scala.runtime.BoxedUnit>
PreWriteCheck.compose.mcVF.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,scala.runtime.BoxedUnit>
PreWriteCheck.compose.mcVI.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,scala.runtime.BoxedUnit>
PreWriteCheck.compose.mcVJ.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,scala.runtime.BoxedUnit>
PreWriteCheck.compose.mcZD.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcZF.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcZI.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcZJ.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.copy ( org.apache.spark.sql.catalyst.analysis.Catalog catalog )  :  PreWriteCheck
PreWriteCheck.equals ( Object p1 )  :  boolean
PreWriteCheck.failAnalysis ( String msg )  :  scala.runtime.Nothing.
PreWriteCheck.hashCode ( )  :  int
PreWriteCheck.PreWriteCheck ( org.apache.spark.sql.catalyst.analysis.Catalog catalog )
PreWriteCheck.productArity ( )  :  int
PreWriteCheck.productElement ( int p1 )  :  Object
PreWriteCheck.productIterator ( )  :  scala.collection.Iterator<Object>
PreWriteCheck.productPrefix ( )  :  String
PreWriteCheck.toString ( )  :  String

spark-sql_2.10-1.3.0.jar, Project.class
package org.apache.spark.sql.execution
Project.buildProjection ( )  :  scala.Function0<org.apache.spark.sql.catalyst.expressions.package.MutableProjection>

spark-sql_2.10-1.3.0.jar, PrunedFilteredScan.class
package org.apache.spark.sql.sources
PrunedFilteredScan.buildScan ( String[ ] p1, Filter[ ] p2 ) [abstract]  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
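
A minimal sketch of a relation mixing in PrunedFilteredScan, assuming a small in-memory Seq[(String, Int)] as the backing data; the class name, columns and filter handling are invented for illustration. Push-down is advisory in this API, so any filter the relation does not recognise can simply be ignored:

    import org.apache.spark.rdd.RDD
    import org.apache.spark.sql.{Row, SQLContext}
    import org.apache.spark.sql.sources._
    import org.apache.spark.sql.types._

    // Hypothetical relation over a small in-memory data set.
    class AgesRelation(val sqlContext: SQLContext, data: Seq[(String, Int)])
      extends BaseRelation with PrunedFilteredScan {

      override def schema: StructType =
        StructType(Seq(StructField("name", StringType), StructField("age", IntegerType)))

      // Interpret a (simplified) subset of the pushed-down filters.
      private def keep(name: String, age: Int, f: Filter): Boolean = f match {
        case LessThan("age", v: Int)        => age < v
        case LessThanOrEqual("age", v: Int) => age <= v
        case Not(child)                     => !keep(name, age, child)
        case Or(l, r)                       => keep(name, age, l) || keep(name, age, r)
        case _                              => true // unhandled filters are ignored here
      }

      override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
        val rows = data
          .filter { case (n, a) => filters.forall(keep(n, a, _)) }
          .map { case (n, a) =>
            Row.fromSeq(requiredColumns.map {
              case "name" => n
              case "age"  => a
            })
          }
        sqlContext.sparkContext.parallelize(rows)
      }
    }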

spark-sql_2.10-1.3.0.jar, PythonUDF.class
package org.apache.spark.sql.execution
PythonUDF.accumulator ( )  :  org.apache.spark.Accumulator<java.util.List<byte[ ]>>
PythonUDF.broadcastVars ( )  :  java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>>
PythonUDF.canEqual ( Object p1 )  :  boolean
PythonUDF.children ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
PythonUDF.command ( )  :  byte[ ]
PythonUDF.copy ( String name, byte[ ] command, java.util.Map<String,String> envVars, java.util.List<String> pythonIncludes, String pythonExec, java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>> broadcastVars, org.apache.spark.Accumulator<java.util.List<byte[ ]>> accumulator, org.apache.spark.sql.types.DataType dataType, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> children )  :  PythonUDF
PythonUDF.curried ( ) [static]  :  scala.Function1<String,scala.Function1<byte[ ],scala.Function1<java.util.Map<String,String>,scala.Function1<java.util.List<String>,scala.Function1<String,scala.Function1<java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>>,scala.Function1<org.apache.spark.Accumulator<java.util.List<byte[ ]>>,scala.Function1<org.apache.spark.sql.types.DataType,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,PythonUDF>>>>>>>>>
PythonUDF.dataType ( )  :  org.apache.spark.sql.types.DataType
PythonUDF.envVars ( )  :  java.util.Map<String,String>
PythonUDF.equals ( Object p1 )  :  boolean
PythonUDF.eval ( org.apache.spark.sql.Row input )  :  Object
PythonUDF.eval ( org.apache.spark.sql.Row input )  :  scala.runtime.Nothing.
PythonUDF.hashCode ( )  :  int
PythonUDF.isTraceEnabled ( )  :  boolean
PythonUDF.log ( )  :  org.slf4j.Logger
PythonUDF.logDebug ( scala.Function0<String> msg )  :  void
PythonUDF.logDebug ( scala.Function0<String> msg, Throwable throwable )  :  void
PythonUDF.logError ( scala.Function0<String> msg )  :  void
PythonUDF.logError ( scala.Function0<String> msg, Throwable throwable )  :  void
PythonUDF.logInfo ( scala.Function0<String> msg )  :  void
PythonUDF.logInfo ( scala.Function0<String> msg, Throwable throwable )  :  void
PythonUDF.logName ( )  :  String
PythonUDF.logTrace ( scala.Function0<String> msg )  :  void
PythonUDF.logTrace ( scala.Function0<String> msg, Throwable throwable )  :  void
PythonUDF.logWarning ( scala.Function0<String> msg )  :  void
PythonUDF.logWarning ( scala.Function0<String> msg, Throwable throwable )  :  void
PythonUDF.name ( )  :  String
PythonUDF.nullable ( )  :  boolean
PythonUDF.org.apache.spark.Logging..log_ ( )  :  org.slf4j.Logger
PythonUDF.org.apache.spark.Logging..log__.eq ( org.slf4j.Logger p1 )  :  void
PythonUDF.productArity ( )  :  int
PythonUDF.productElement ( int p1 )  :  Object
PythonUDF.productIterator ( )  :  scala.collection.Iterator<Object>
PythonUDF.productPrefix ( )  :  String
PythonUDF.pythonExec ( )  :  String
PythonUDF.pythonIncludes ( )  :  java.util.List<String>
PythonUDF.PythonUDF ( String name, byte[ ] command, java.util.Map<String,String> envVars, java.util.List<String> pythonIncludes, String pythonExec, java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>> broadcastVars, org.apache.spark.Accumulator<java.util.List<byte[ ]>> accumulator, org.apache.spark.sql.types.DataType dataType, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> children )
PythonUDF.toString ( )  :  String
PythonUDF.tupled ( ) [static]  :  scala.Function1<scala.Tuple9<String,byte[ ],java.util.Map<String,String>,java.util.List<String>,String,java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>>,org.apache.spark.Accumulator<java.util.List<byte[ ]>>,org.apache.spark.sql.types.DataType,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>>,PythonUDF>

spark-sql_2.10-1.3.0.jar, RefreshTable.class
package org.apache.spark.sql.sources
RefreshTable.canEqual ( Object p1 )  :  boolean
RefreshTable.copy ( String databaseName, String tableName )  :  RefreshTable
RefreshTable.curried ( ) [static]  :  scala.Function1<String,scala.Function1<String,RefreshTable>>
RefreshTable.databaseName ( )  :  String
RefreshTable.equals ( Object p1 )  :  boolean
RefreshTable.hashCode ( )  :  int
RefreshTable.productArity ( )  :  int
RefreshTable.productElement ( int p1 )  :  Object
RefreshTable.productIterator ( )  :  scala.collection.Iterator<Object>
RefreshTable.productPrefix ( )  :  String
RefreshTable.RefreshTable ( String databaseName, String tableName )
RefreshTable.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
RefreshTable.tableName ( )  :  String
RefreshTable.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<String,String>,RefreshTable>

spark-sql_2.10-1.3.0.jar, RelationProvider.class
package org.apache.spark.sql.sources
RelationProvider.createRelation ( org.apache.spark.sql.SQLContext p1, scala.collection.immutable.Map<String,String> p2 ) [abstract]  :  BaseRelation
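
A RelationProvider is what a client names when loading a source. A minimal sketch (the class name, the "ages" option key, and its encoding are all invented) that builds the hypothetical AgesRelation sketched after the PrunedFilteredScan listing above:

    import org.apache.spark.sql.SQLContext
    import org.apache.spark.sql.sources.{BaseRelation, RelationProvider}

    // Hypothetical provider; "ages" is an invented option key holding
    // comma-separated name:age pairs.
    class AgesProvider extends RelationProvider {
      override def createRelation(
          sqlContext: SQLContext,
          parameters: Map[String, String]): BaseRelation = {
        val pairs = parameters.getOrElse("ages", "")
          .split(",").filter(_.nonEmpty)
          .map { kv => val Array(n, a) = kv.split(":"); (n, a.toInt) }
        new AgesRelation(sqlContext, pairs)
      }
    }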

spark-sql_2.10-1.3.0.jar, ResolvedDataSource.class
package org.apache.spark.sql.sources
ResolvedDataSource.apply ( org.apache.spark.sql.SQLContext p1, scala.Option<org.apache.spark.sql.types.StructType> p2, String p3, scala.collection.immutable.Map<String,String> p4 ) [static]  :  ResolvedDataSource
ResolvedDataSource.apply ( org.apache.spark.sql.SQLContext p1, String p2, org.apache.spark.sql.SaveMode p3, scala.collection.immutable.Map<String,String> p4, org.apache.spark.sql.DataFrame p5 ) [static]  :  ResolvedDataSource
ResolvedDataSource.canEqual ( Object p1 )  :  boolean
ResolvedDataSource.copy ( Class<?> provider, BaseRelation relation )  :  ResolvedDataSource
ResolvedDataSource.equals ( Object p1 )  :  boolean
ResolvedDataSource.hashCode ( )  :  int
ResolvedDataSource.lookupDataSource ( String p1 ) [static]  :  Class<?>
ResolvedDataSource.productArity ( )  :  int
ResolvedDataSource.productElement ( int p1 )  :  Object
ResolvedDataSource.productIterator ( )  :  scala.collection.Iterator<Object>
ResolvedDataSource.productPrefix ( )  :  String
ResolvedDataSource.provider ( )  :  Class<?>
ResolvedDataSource.relation ( )  :  BaseRelation
ResolvedDataSource.ResolvedDataSource ( Class<?> provider, BaseRelation relation )
ResolvedDataSource.toString ( )  :  String
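
ResolvedDataSource.lookupDataSource and apply are the machinery behind loading a source by provider name. A hedged usage sketch, assuming an existing SQLContext named sqlContext and the invented provider from the previous sketch, packaged here under the equally invented name com.example.AgesProvider:

    // Goes through ResolvedDataSource.lookupDataSource / apply internally.
    val df = sqlContext.load(
      "com.example.AgesProvider",
      Map("ages" -> "alice:30,bob:42"))
    df.show()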

spark-sql_2.10-1.3.0.jar, RowRecordMaterializer.class
package org.apache.spark.sql.parquet
RowRecordMaterializer.RowRecordMaterializer ( parquet.schema.MessageType parquetSchema, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes )

spark-sql_2.10-1.3.0.jar, RowWriteSupport.class
package org.apache.spark.sql.parquet
RowWriteSupport.attributes ( )  :  org.apache.spark.sql.catalyst.expressions.Attribute[ ]
RowWriteSupport.attributes_.eq ( org.apache.spark.sql.catalyst.expressions.Attribute[ ] p1 )  :  void
RowWriteSupport.getSchema ( org.apache.hadoop.conf.Configuration p1 ) [static]  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
RowWriteSupport.logName ( )  :  String
RowWriteSupport.setSchema ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> p1, org.apache.hadoop.conf.Configuration p2 ) [static]  :  void
RowWriteSupport.SPARK_ROW_SCHEMA ( ) [static]  :  String
RowWriteSupport.write ( org.apache.spark.sql.Row record )  :  void
RowWriteSupport.writeArray ( org.apache.spark.sql.types.ArrayType schema, scala.collection.Seq<Object> array )  :  void
RowWriteSupport.writeDecimal ( org.apache.spark.sql.types.Decimal decimal, int precision )  :  void
RowWriteSupport.writeMap ( org.apache.spark.sql.types.MapType schema, scala.collection.immutable.Map<?,Object> map )  :  void
RowWriteSupport.writePrimitive ( org.apache.spark.sql.types.DataType schema, Object value )  :  void
RowWriteSupport.writeStruct ( org.apache.spark.sql.types.StructType schema, org.apache.spark.sql.Row struct )  :  void
RowWriteSupport.writeTimestamp ( java.sql.Timestamp ts )  :  void
RowWriteSupport.writeValue ( org.apache.spark.sql.types.DataType schema, Object value )  :  void

spark-sql_2.10-1.3.0.jar, RunnableCommand.class
package org.apache.spark.sql.execution
RunnableCommand.run ( org.apache.spark.sql.SQLContext p1 ) [abstract]  :  scala.collection.Seq<org.apache.spark.sql.Row>

spark-sql_2.10-1.3.0.jar, SaveMode.class
package org.apache.spark.sql
SaveMode.valueOf ( String name ) [static]  :  SaveMode
SaveMode.values ( ) [static]  :  SaveMode[ ]
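
SaveMode is a plain Java enum, so the listed valueOf and values behave as on any enum (the standard constants are Append, Overwrite, ErrorIfExists and Ignore). A small sketch:

    import org.apache.spark.sql.SaveMode

    val mode = SaveMode.valueOf("Overwrite")   // parse a constant by name
    val all  = SaveMode.values()               // all constants, in declaration order
    assert(all.contains(mode))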

spark-sql_2.10-1.3.0.jar, ScalaBigDecimalSerializer.class
package org.apache.spark.sql.execution
ScalaBigDecimalSerializer.ScalaBigDecimalSerializer ( )

spark-sql_2.10-1.3.0.jar, SchemaRelationProvider.class
package org.apache.spark.sql.sources
SchemaRelationProvider.createRelation ( org.apache.spark.sql.SQLContext p1, scala.collection.immutable.Map<String,String> p2, org.apache.spark.sql.types.StructType p3 ) [abstract]  :  BaseRelation

spark-sql_2.10-1.3.0.jar, SetCommand.class
package org.apache.spark.sql.execution
SetCommand.canEqual ( Object p1 )  :  boolean
SetCommand.copy ( scala.Option<scala.Tuple2<String,scala.Option<String>>> kv, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output )  :  SetCommand
SetCommand.curried ( ) [static]  :  scala.Function1<scala.Option<scala.Tuple2<String,scala.Option<String>>>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,SetCommand>>
SetCommand.equals ( Object p1 )  :  boolean
SetCommand.hashCode ( )  :  int
SetCommand.kv ( )  :  scala.Option<scala.Tuple2<String,scala.Option<String>>>
SetCommand.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
SetCommand.productArity ( )  :  int
SetCommand.productElement ( int p1 )  :  Object
SetCommand.productIterator ( )  :  scala.collection.Iterator<Object>
SetCommand.productPrefix ( )  :  String
SetCommand.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
SetCommand.SetCommand ( scala.Option<scala.Tuple2<String,scala.Option<String>>> kv, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output )
SetCommand.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<scala.Option<scala.Tuple2<String,scala.Option<String>>>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>>,SetCommand>

spark-sql_2.10-1.3.0.jar, ShowTablesCommand.class
package org.apache.spark.sql.execution
ShowTablesCommand.andThen ( scala.Function1<ShowTablesCommand,A> p1 ) [static]  :  scala.Function1<scala.Option<String>,A>
ShowTablesCommand.canEqual ( Object p1 )  :  boolean
ShowTablesCommand.compose ( scala.Function1<A,scala.Option<String>> p1 ) [static]  :  scala.Function1<A,ShowTablesCommand>
ShowTablesCommand.copy ( scala.Option<String> databaseName )  :  ShowTablesCommand
ShowTablesCommand.databaseName ( )  :  scala.Option<String>
ShowTablesCommand.equals ( Object p1 )  :  boolean
ShowTablesCommand.hashCode ( )  :  int
ShowTablesCommand.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.AttributeReference>
ShowTablesCommand.productArity ( )  :  int
ShowTablesCommand.productElement ( int p1 )  :  Object
ShowTablesCommand.productIterator ( )  :  scala.collection.Iterator<Object>
ShowTablesCommand.productPrefix ( )  :  String
ShowTablesCommand.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
ShowTablesCommand.ShowTablesCommand ( scala.Option<String> databaseName )

spark-sql_2.10-1.3.0.jar, ShuffledHashJoin.class
package org.apache.spark.sql.execution.joins
ShuffledHashJoin.buildKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
ShuffledHashJoin.buildPlan ( )  :  org.apache.spark.sql.execution.SparkPlan
ShuffledHashJoin.buildSide ( )  :  package.BuildSide
ShuffledHashJoin.buildSideKeyGenerator ( )  :  org.apache.spark.sql.catalyst.expressions.package.Projection
ShuffledHashJoin.canEqual ( Object p1 )  :  boolean
ShuffledHashJoin.children ( )  :  scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan>
ShuffledHashJoin.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, package.BuildSide buildSide, org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )  :  ShuffledHashJoin
ShuffledHashJoin.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<package.BuildSide,scala.Function1<org.apache.spark.sql.execution.SparkPlan,scala.Function1<org.apache.spark.sql.execution.SparkPlan,ShuffledHashJoin>>>>>
ShuffledHashJoin.equals ( Object p1 )  :  boolean
ShuffledHashJoin.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
ShuffledHashJoin.hashCode ( )  :  int
ShuffledHashJoin.hashJoin ( scala.collection.Iterator<org.apache.spark.sql.Row> streamIter, HashedRelation hashedRelation )  :  scala.collection.Iterator<org.apache.spark.sql.Row>
ShuffledHashJoin.left ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
ShuffledHashJoin.left ( )  :  org.apache.spark.sql.execution.SparkPlan
ShuffledHashJoin.leftKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
ShuffledHashJoin.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
ShuffledHashJoin.outputPartitioning ( )  :  org.apache.spark.sql.catalyst.plans.physical.Partitioning
ShuffledHashJoin.productArity ( )  :  int
ShuffledHashJoin.productElement ( int p1 )  :  Object
ShuffledHashJoin.productIterator ( )  :  scala.collection.Iterator<Object>
ShuffledHashJoin.productPrefix ( )  :  String
ShuffledHashJoin.requiredChildDistribution ( )  :  scala.collection.immutable.List<org.apache.spark.sql.catalyst.plans.physical.ClusteredDistribution>
ShuffledHashJoin.requiredChildDistribution ( )  :  scala.collection.Seq
ShuffledHashJoin.right ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
ShuffledHashJoin.right ( )  :  org.apache.spark.sql.execution.SparkPlan
ShuffledHashJoin.rightKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
ShuffledHashJoin.ShuffledHashJoin ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, package.BuildSide buildSide, org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )
ShuffledHashJoin.streamedKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
ShuffledHashJoin.streamedPlan ( )  :  org.apache.spark.sql.execution.SparkPlan
ShuffledHashJoin.streamSideKeyGenerator ( )  :  scala.Function0<org.apache.spark.sql.catalyst.expressions.package.MutableProjection>
ShuffledHashJoin.tupled ( ) [static]  :  scala.Function1<scala.Tuple5<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,package.BuildSide,org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.execution.SparkPlan>,ShuffledHashJoin>

spark-sql_2.10-1.3.0.jar, SparkPlan.class
package org.apache.spark.sql.execution
SparkPlan.codegenEnabled ( )  :  boolean
SparkPlan.executeCollect ( )  :  org.apache.spark.sql.Row[ ]
SparkPlan.executeTake ( int n )  :  org.apache.spark.sql.Row[ ]
SparkPlan.isTraceEnabled ( )  :  boolean
SparkPlan.log ( )  :  org.slf4j.Logger
SparkPlan.logDebug ( scala.Function0<String> msg )  :  void
SparkPlan.logDebug ( scala.Function0<String> msg, Throwable throwable )  :  void
SparkPlan.logError ( scala.Function0<String> msg )  :  void
SparkPlan.logError ( scala.Function0<String> msg, Throwable throwable )  :  void
SparkPlan.logInfo ( scala.Function0<String> msg )  :  void
SparkPlan.logInfo ( scala.Function0<String> msg, Throwable throwable )  :  void
SparkPlan.logName ( )  :  String
SparkPlan.logTrace ( scala.Function0<String> msg )  :  void
SparkPlan.logTrace ( scala.Function0<String> msg, Throwable throwable )  :  void
SparkPlan.logWarning ( scala.Function0<String> msg )  :  void
SparkPlan.logWarning ( scala.Function0<String> msg, Throwable throwable )  :  void
SparkPlan.makeCopy ( Object[ ] newArgs )  :  org.apache.spark.sql.catalyst.trees.TreeNode
SparkPlan.makeCopy ( Object[ ] newArgs )  :  SparkPlan
SparkPlan.newMutableProjection ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> expressions, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> inputSchema )  :  scala.Function0<org.apache.spark.sql.catalyst.expressions.package.MutableProjection>
SparkPlan.newOrdering ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> order, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> inputSchema )  :  scala.math.Ordering<org.apache.spark.sql.Row>
SparkPlan.newPredicate ( org.apache.spark.sql.catalyst.expressions.Expression expression, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> inputSchema )  :  scala.Function1<org.apache.spark.sql.Row,Object>
SparkPlan.newProjection ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> expressions, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> inputSchema )  :  org.apache.spark.sql.catalyst.expressions.package.Projection
SparkPlan.org.apache.spark.Logging..log_ ( )  :  org.slf4j.Logger
SparkPlan.org.apache.spark.Logging..log__.eq ( org.slf4j.Logger p1 )  :  void
SparkPlan.sparkContext ( )  :  org.apache.spark.SparkContext
SparkPlan.sqlContext ( )  :  org.apache.spark.sql.SQLContext

spark-sql_2.10-1.3.0.jar, SparkSQLParser.class
package org.apache.spark.sql
SparkSQLParser.AS ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.CACHE ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.CLEAR ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.IN ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.LAZY ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.SparkSQLParser..others ( )  :  scala.util.parsing.combinator.Parsers.Parser<catalyst.plans.logical.LogicalPlan>
SparkSQLParser.SparkSQLParser..set ( )  :  scala.util.parsing.combinator.Parsers.Parser<catalyst.plans.logical.LogicalPlan>
SparkSQLParser.SparkSQLParser..SetCommandParser ( )  :  SparkSQLParser.SetCommandParser.
SparkSQLParser.SparkSQLParser..show ( )  :  scala.util.parsing.combinator.Parsers.Parser<catalyst.plans.logical.LogicalPlan>
SparkSQLParser.SparkSQLParser..uncache ( )  :  scala.util.parsing.combinator.Parsers.Parser<catalyst.plans.logical.LogicalPlan>
SparkSQLParser.SET ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.SHOW ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.SparkSQLParser ( scala.Function1<String,catalyst.plans.logical.LogicalPlan> fallback )
SparkSQLParser.start ( )  :  scala.util.parsing.combinator.Parsers.Parser<catalyst.plans.logical.LogicalPlan>
SparkSQLParser.TABLE ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.TABLES ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.UNCACHE ( )  :  catalyst.AbstractSparkSQLParser.Keyword
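
The keywords above cover the cache and metadata commands that SparkSQLParser handles itself before delegating everything else to the fallback parser. A minimal sketch of the statement shapes they combine into, assuming a SQLContext named sqlContext and a registered table named records (both hypothetical names):

    // Cache / metadata statements recognized by SparkSQLParser.
    sqlContext.sql("CACHE TABLE records")
    sqlContext.sql("CACHE LAZY TABLE recent AS SELECT * FROM records WHERE id > 10")
    sqlContext.sql("SHOW TABLES")                  // or: SHOW TABLES IN someDatabase
    sqlContext.sql("UNCACHE TABLE records")
    sqlContext.sql("SET spark.sql.shuffle.partitions=8")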

spark-sql_2.10-1.3.0.jar, SparkStrategies.class
package org.apache.spark.sql.execution
SparkStrategies.DDLStrategy ( )  :  SparkStrategies.DDLStrategy.
SparkStrategies.HashAggregation ( )  :  SparkStrategies.HashAggregation.
SparkStrategies.InMemoryScans ( )  :  SparkStrategies.InMemoryScans.
SparkStrategies.LeftSemiJoin ( )  :  SparkStrategies.LeftSemiJoin.

spark-sql_2.10-1.3.0.jar, SQLConf.class
package org.apache.spark.sql
SQLConf.autoBroadcastJoinThreshold ( )  :  int
SQLConf.broadcastTimeout ( )  :  int
SQLConf.codegenEnabled ( )  :  boolean
SQLConf.columnNameOfCorruptRecord ( )  :  String
SQLConf.dataFrameEagerAnalysis ( )  :  boolean
SQLConf.defaultDataSourceName ( )  :  String
SQLConf.defaultSizeInBytes ( )  :  long
SQLConf.dialect ( )  :  String
SQLConf.externalSortEnabled ( )  :  boolean
SQLConf.getAllConfs ( )  :  scala.collection.immutable.Map<String,String>
SQLConf.getConf ( String key )  :  String
SQLConf.getConf ( String key, String defaultValue )  :  String
SQLConf.inMemoryPartitionPruning ( )  :  boolean
SQLConf.isParquetBinaryAsString ( )  :  boolean
SQLConf.isParquetINT96AsTimestamp ( )  :  boolean
SQLConf.numShufflePartitions ( )  :  int
SQLConf.parquetCompressionCodec ( )  :  String
SQLConf.parquetFilterPushDown ( )  :  boolean
SQLConf.parquetUseDataSourceApi ( )  :  boolean
SQLConf.setConf ( java.util.Properties props )  :  void
SQLConf.setConf ( String key, String value )  :  void
SQLConf.SQLConf ( )
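
The SQLConf getters above back the per-session settings that client code normally reads and writes through SQLContext.setConf / getConf (listed below under SQLContext). A minimal sketch, assuming a SQLContext named sqlContext; "spark.sql.shuffle.partitions" is the standard key behind numShufflePartitions:

    // Write a session-level setting, then read it back with a default fallback.
    sqlContext.setConf("spark.sql.shuffle.partitions", "8")
    val shufflePartitions = sqlContext.getConf("spark.sql.shuffle.partitions", "200")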

spark-sql_2.10-1.3.0.jar, SQLContext.class
package org.apache.spark.sql
SQLContext.applySchemaToPythonRDD ( org.apache.spark.rdd.RDD<Object[ ]> rdd, types.StructType schema )  :  DataFrame
SQLContext.applySchemaToPythonRDD ( org.apache.spark.rdd.RDD<Object[ ]> rdd, String schemaString )  :  DataFrame
SQLContext.baseRelationToDataFrame ( sources.BaseRelation baseRelation )  :  DataFrame
SQLContext.cacheManager ( )  :  CacheManager
SQLContext.checkAnalysis ( )  :  catalyst.analysis.CheckAnalysis
SQLContext.clearCache ( )  :  void
SQLContext.conf ( )  :  SQLConf
SQLContext.createDataFrame ( org.apache.spark.api.java.JavaRDD<?> rdd, Class<?> beanClass )  :  DataFrame
SQLContext.createDataFrame ( org.apache.spark.api.java.JavaRDD<Row> rowRDD, java.util.List<String> columns )  :  DataFrame
SQLContext.createDataFrame ( org.apache.spark.api.java.JavaRDD<Row> rowRDD, types.StructType schema )  :  DataFrame
SQLContext.createDataFrame ( org.apache.spark.rdd.RDD<?> rdd, Class<?> beanClass )  :  DataFrame
SQLContext.createDataFrame ( org.apache.spark.rdd.RDD<A> rdd, scala.reflect.api.TypeTags.TypeTag<A> p2 )  :  DataFrame
SQLContext.createDataFrame ( org.apache.spark.rdd.RDD<Row> rowRDD, types.StructType schema )  :  DataFrame
SQLContext.createDataFrame ( scala.collection.Seq<A> data, scala.reflect.api.TypeTags.TypeTag<A> p2 )  :  DataFrame
SQLContext.createExternalTable ( String tableName, String path )  :  DataFrame
SQLContext.createExternalTable ( String tableName, String path, String source )  :  DataFrame
SQLContext.createExternalTable ( String tableName, String source, java.util.Map<String,String> options )  :  DataFrame
SQLContext.createExternalTable ( String tableName, String source, types.StructType schema, java.util.Map<String,String> options )  :  DataFrame
SQLContext.createExternalTable ( String tableName, String source, types.StructType schema, scala.collection.immutable.Map<String,String> options )  :  DataFrame
SQLContext.createExternalTable ( String tableName, String source, scala.collection.immutable.Map<String,String> options )  :  DataFrame
SQLContext.ddlParser ( )  :  sources.DDLParser
SQLContext.dropTempTable ( String tableName )  :  void
SQLContext.emptyDataFrame ( )  :  DataFrame
SQLContext.emptyResult ( )  :  org.apache.spark.rdd.RDD<Row>
SQLContext.experimental ( )  :  ExperimentalMethods
SQLContext.functionRegistry ( )  :  catalyst.analysis.FunctionRegistry
SQLContext.getAllConfs ( )  :  scala.collection.immutable.Map<String,String>
SQLContext.getConf ( String key )  :  String
SQLContext.getConf ( String key, String defaultValue )  :  String
SQLContext.getSchema ( Class<?> beanClass )  :  scala.collection.Seq<catalyst.expressions.AttributeReference>
SQLContext.implicits ( )  :  SQLContext.implicits.
SQLContext.isCached ( String tableName )  :  boolean
SQLContext.isTraceEnabled ( )  :  boolean
SQLContext.jdbc ( String url, String table )  :  DataFrame
SQLContext.jdbc ( String url, String table, String columnName, long lowerBound, long upperBound, int numPartitions )  :  DataFrame
SQLContext.jdbc ( String url, String table, String[ ] theParts )  :  DataFrame
SQLContext.jsonFile ( String path )  :  DataFrame
SQLContext.jsonFile ( String path, double samplingRatio )  :  DataFrame
SQLContext.jsonFile ( String path, types.StructType schema )  :  DataFrame
SQLContext.jsonRDD ( org.apache.spark.api.java.JavaRDD<String> json )  :  DataFrame
SQLContext.jsonRDD ( org.apache.spark.api.java.JavaRDD<String> json, double samplingRatio )  :  DataFrame
SQLContext.jsonRDD ( org.apache.spark.api.java.JavaRDD<String> json, types.StructType schema )  :  DataFrame
SQLContext.jsonRDD ( org.apache.spark.rdd.RDD<String> json )  :  DataFrame
SQLContext.jsonRDD ( org.apache.spark.rdd.RDD<String> json, double samplingRatio )  :  DataFrame
SQLContext.jsonRDD ( org.apache.spark.rdd.RDD<String> json, types.StructType schema )  :  DataFrame
SQLContext.load ( String path )  :  DataFrame
SQLContext.load ( String path, String source )  :  DataFrame
SQLContext.load ( String source, java.util.Map<String,String> options )  :  DataFrame
SQLContext.load ( String source, types.StructType schema, java.util.Map<String,String> options )  :  DataFrame
SQLContext.load ( String source, types.StructType schema, scala.collection.immutable.Map<String,String> options )  :  DataFrame
SQLContext.load ( String source, scala.collection.immutable.Map<String,String> options )  :  DataFrame
SQLContext.log ( )  :  org.slf4j.Logger
SQLContext.logDebug ( scala.Function0<String> msg )  :  void
SQLContext.logDebug ( scala.Function0<String> msg, Throwable throwable )  :  void
SQLContext.logError ( scala.Function0<String> msg )  :  void
SQLContext.logError ( scala.Function0<String> msg, Throwable throwable )  :  void
SQLContext.logInfo ( scala.Function0<String> msg )  :  void
SQLContext.logInfo ( scala.Function0<String> msg, Throwable throwable )  :  void
SQLContext.logName ( )  :  String
SQLContext.logTrace ( scala.Function0<String> msg )  :  void
SQLContext.logTrace ( scala.Function0<String> msg, Throwable throwable )  :  void
SQLContext.logWarning ( scala.Function0<String> msg )  :  void
SQLContext.logWarning ( scala.Function0<String> msg, Throwable throwable )  :  void
SQLContext.optimizer ( )  :  catalyst.optimizer.Optimizer
SQLContext.org.apache.spark.Logging..log_ ( )  :  org.slf4j.Logger
SQLContext.org.apache.spark.Logging..log__.eq ( org.slf4j.Logger p1 )  :  void
SQLContext.parquetFile ( scala.collection.Seq<String> paths )  :  DataFrame
SQLContext.parquetFile ( String... paths )  :  DataFrame
SQLContext.parseDataType ( String dataTypeString )  :  types.DataType
SQLContext.registerDataFrameAsTable ( DataFrame df, String tableName )  :  void
SQLContext.setConf ( java.util.Properties props )  :  void
SQLContext.setConf ( String key, String value )  :  void
SQLContext.sql ( String sqlText )  :  DataFrame
SQLContext.SQLContext ( org.apache.spark.api.java.JavaSparkContext sparkContext )
SQLContext.sqlParser ( )  :  SparkSQLParser
SQLContext.table ( String tableName )  :  DataFrame
SQLContext.tableNames ( )  :  String[ ]
SQLContext.tableNames ( String databaseName )  :  String[ ]
SQLContext.tables ( )  :  DataFrame
SQLContext.tables ( String databaseName )  :  DataFrame
SQLContext.udf ( )  :  UDFRegistration
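
Typical 1.3.0 client code touches several of the SQLContext members listed above (createDataFrame, registerDataFrameAsTable, sql, tableNames). A minimal, self-contained sketch; the Record case class, table name, and local master URL are hypothetical:

    import org.apache.spark.SparkContext
    import org.apache.spark.sql.SQLContext

    // Defined at top level so a TypeTag can be derived for createDataFrame.
    case class Record(id: Int, name: String)

    object SqlContextDemo {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext("local[2]", "compat-demo")
        val sqlContext = new SQLContext(sc)

        val df = sqlContext.createDataFrame(sc.parallelize(Seq(Record(1, "a"), Record(2, "b"))))
        sqlContext.registerDataFrameAsTable(df, "records")
        sqlContext.sql("SELECT name FROM records WHERE id = 1").show()
        println(sqlContext.tableNames().mkString(", "))

        sc.stop()
      }
    }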

spark-sql_2.10-1.3.0.jar, TableScan.class
package org.apache.spark.sql.sources
TableScan.buildScan ( ) [abstract]  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
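
TableScan is the simplest of the data source scan contracts: a relation implements buildScan to return all of its rows as an RDD of Row. A minimal sketch of a custom relation (the RangeRelation name is hypothetical); the result can be wrapped as a DataFrame with SQLContext.baseRelationToDataFrame, listed earlier:

    import org.apache.spark.rdd.RDD
    import org.apache.spark.sql.{Row, SQLContext}
    import org.apache.spark.sql.sources.{BaseRelation, TableScan}
    import org.apache.spark.sql.types.{IntegerType, StructField, StructType}

    // Exposes the integers 0..99 as a single-column table.
    class RangeRelation(val sqlContext: SQLContext) extends BaseRelation with TableScan {
      override def schema: StructType =
        StructType(StructField("id", IntegerType, nullable = false) :: Nil)

      override def buildScan(): RDD[Row] =
        sqlContext.sparkContext.parallelize(0 until 100).map(Row(_))
    }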

spark-sql_2.10-1.3.0.jar, TakeOrdered.class
package org.apache.spark.sql.execution
TakeOrdered.copy ( int limit, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> sortOrder, SparkPlan child )  :  TakeOrdered
TakeOrdered.curried ( ) [static]  :  scala.Function1<Object,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder>,scala.Function1<SparkPlan,TakeOrdered>>>
TakeOrdered.ord ( )  :  org.apache.spark.sql.catalyst.expressions.RowOrdering
TakeOrdered.TakeOrdered ( int limit, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> sortOrder, SparkPlan child )
TakeOrdered.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<Object,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder>,SparkPlan>,TakeOrdered>
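
TakeOrdered is the physical operator the planner typically selects for an ORDER BY immediately followed by a LIMIT, so only the top rows are collected instead of a full sort. A minimal sketch of a query shape that would normally exercise it, assuming a registered table named records with an integer column id (hypothetical names):

    // ORDER BY + LIMIT is usually planned as TakeOrdered(limit, sortOrder, child).
    sqlContext.sql("SELECT id, name FROM records ORDER BY id LIMIT 10").collect()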

spark-sql_2.10-1.3.0.jar, TestGroupWriteSupport.class
package org.apache.spark.sql.parquet
TestGroupWriteSupport.TestGroupWriteSupport ( parquet.schema.MessageType schema )

spark-sql_2.10-1.3.0.jar, TimestampColumnAccessor.class
package org.apache.spark.sql.columnar
TimestampColumnAccessor.TimestampColumnAccessor ( java.nio.ByteBuffer buffer )

spark-sql_2.10-1.3.0.jar, TimestampColumnBuilder.class
package org.apache.spark.sql.columnar
TimestampColumnBuilder.TimestampColumnBuilder ( )

spark-sql_2.10-1.3.0.jar, TimestampColumnStats.class
package org.apache.spark.sql.columnar
TimestampColumnStats.TimestampColumnStats ( )

spark-sql_2.10-1.3.0.jar, UDFRegistration.class
package org.apache.spark.sql
UDFRegistration.UDFRegistration ( SQLContext sqlContext )

spark-sql_2.10-1.3.0.jar, UncacheTableCommand.class
package org.apache.spark.sql.execution
UncacheTableCommand.andThen ( scala.Function1<UncacheTableCommand,A> p1 ) [static]  :  scala.Function1<String,A>
UncacheTableCommand.canEqual ( Object p1 )  :  boolean
UncacheTableCommand.compose ( scala.Function1<A,String> p1 ) [static]  :  scala.Function1<A,UncacheTableCommand>
UncacheTableCommand.copy ( String tableName )  :  UncacheTableCommand
UncacheTableCommand.equals ( Object p1 )  :  boolean
UncacheTableCommand.hashCode ( )  :  int
UncacheTableCommand.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
UncacheTableCommand.productArity ( )  :  int
UncacheTableCommand.productElement ( int p1 )  :  Object
UncacheTableCommand.productIterator ( )  :  scala.collection.Iterator<Object>
UncacheTableCommand.productPrefix ( )  :  String
UncacheTableCommand.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
UncacheTableCommand.tableName ( )  :  String
UncacheTableCommand.UncacheTableCommand ( String tableName )

spark-sql_2.10-1.3.0.jar, Union.class
package org.apache.spark.sql.execution
Union.andThen ( scala.Function1<Union,A> p1 ) [static]  :  scala.Function1<scala.collection.Seq<SparkPlan>,A>
Union.compose ( scala.Function1<A,scala.collection.Seq<SparkPlan>> p1 ) [static]  :  scala.Function1<A,Union>
Union.copy ( scala.collection.Seq<SparkPlan> children )  :  Union
Union.Union ( scala.collection.Seq<SparkPlan> children )

spark-sql_2.10-1.3.0.jar, UniqueKeyHashedRelation.class
package org.apache.spark.sql.execution.joins
UniqueKeyHashedRelation.UniqueKeyHashedRelation ( java.util.HashMap<org.apache.spark.sql.Row,org.apache.spark.sql.Row> hashTable )

spark-sql_2.10-1.3.0.jar, UserDefinedFunction.class
package org.apache.spark.sql
UserDefinedFunction.apply ( scala.collection.Seq<Column> exprs )  :  Column
UserDefinedFunction.canEqual ( Object p1 )  :  boolean
UserDefinedFunction.copy ( Object f, types.DataType dataType )  :  UserDefinedFunction
UserDefinedFunction.curried ( ) [static]  :  scala.Function1<Object,scala.Function1<types.DataType,UserDefinedFunction>>
UserDefinedFunction.dataType ( )  :  types.DataType
UserDefinedFunction.equals ( Object p1 )  :  boolean
UserDefinedFunction.f ( )  :  Object
UserDefinedFunction.hashCode ( )  :  int
UserDefinedFunction.productArity ( )  :  int
UserDefinedFunction.productElement ( int p1 )  :  Object
UserDefinedFunction.productIterator ( )  :  scala.collection.Iterator<Object>
UserDefinedFunction.productPrefix ( )  :  String
UserDefinedFunction.toString ( )  :  String
UserDefinedFunction.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<Object,types.DataType>,UserDefinedFunction>
UserDefinedFunction.UserDefinedFunction ( Object f, types.DataType dataType )
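
UserDefinedFunction is the value returned by org.apache.spark.sql.functions.udf in 1.3.0; applying it to columns corresponds to the apply(Seq<Column>) method above. A minimal sketch, assuming a DataFrame named df with an integer column id (hypothetical):

    import org.apache.spark.sql.functions.udf

    // Wrap a plain Scala function as a UDF and apply it column-wise.
    val plusOne = udf((x: Int) => x + 1)
    val withIncrement = df.select(plusOne(df("id")).as("id_plus_one"))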

spark-sql_2.10-1.3.0.jar, UserDefinedPythonFunction.class
package org.apache.spark.sql
UserDefinedPythonFunction.accumulator ( )  :  org.apache.spark.Accumulator<java.util.List<byte[ ]>>
UserDefinedPythonFunction.apply ( scala.collection.Seq<Column> exprs )  :  Column
UserDefinedPythonFunction.broadcastVars ( )  :  java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>>
UserDefinedPythonFunction.canEqual ( Object p1 )  :  boolean
UserDefinedPythonFunction.command ( )  :  byte[ ]
UserDefinedPythonFunction.copy ( String name, byte[ ] command, java.util.Map<String,String> envVars, java.util.List<String> pythonIncludes, String pythonExec, java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>> broadcastVars, org.apache.spark.Accumulator<java.util.List<byte[ ]>> accumulator, types.DataType dataType )  :  UserDefinedPythonFunction
UserDefinedPythonFunction.curried ( ) [static]  :  scala.Function1<String,scala.Function1<byte[ ],scala.Function1<java.util.Map<String,String>,scala.Function1<java.util.List<String>,scala.Function1<String,scala.Function1<java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>>,scala.Function1<org.apache.spark.Accumulator<java.util.List<byte[ ]>>,scala.Function1<types.DataType,UserDefinedPythonFunction>>>>>>>>
UserDefinedPythonFunction.dataType ( )  :  types.DataType
UserDefinedPythonFunction.envVars ( )  :  java.util.Map<String,String>
UserDefinedPythonFunction.equals ( Object p1 )  :  boolean
UserDefinedPythonFunction.hashCode ( )  :  int
UserDefinedPythonFunction.name ( )  :  String
UserDefinedPythonFunction.productArity ( )  :  int
UserDefinedPythonFunction.productElement ( int p1 )  :  Object
UserDefinedPythonFunction.productIterator ( )  :  scala.collection.Iterator<Object>
UserDefinedPythonFunction.productPrefix ( )  :  String
UserDefinedPythonFunction.pythonExec ( )  :  String
UserDefinedPythonFunction.pythonIncludes ( )  :  java.util.List<String>
UserDefinedPythonFunction.toString ( )  :  String
UserDefinedPythonFunction.tupled ( ) [static]  :  scala.Function1<scala.Tuple8<String,byte[ ],java.util.Map<String,String>,java.util.List<String>,String,java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>>,org.apache.spark.Accumulator<java.util.List<byte[ ]>>,types.DataType>,UserDefinedPythonFunction>
UserDefinedPythonFunction.UserDefinedPythonFunction ( String name, byte[ ] command, java.util.Map<String,String> envVars, java.util.List<String> pythonIncludes, String pythonExec, java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>> broadcastVars, org.apache.spark.Accumulator<java.util.List<byte[ ]>> accumulator, types.DataType dataType )


Problems with Data Types, High Severity (151)


spark-sql_2.10-1.3.0.jar
package org.apache.spark.sql
[+] CachedData (1)
[+] CacheManager (1)
[+] Column (1)
[+] ColumnName (1)
[+] DataFrame (1)
[+] DataFrameHolder (1)
[+] ExperimentalMethods (1)
[+] GroupedData (1)
[+] SaveMode (1)
[+] SparkSQLParser (1)
[+] SQLConf (1)
[+] SQLContext (1)
[+] SQLContext.QueryExecution (1)
[+] UDFRegistration (1)
[+] UserDefinedFunction (1)
[+] UserDefinedPythonFunction (1)

package org.apache.spark.sql.columnar
[+] BinaryColumnStats (1)
[+] BooleanColumnStats (1)
[+] ByteColumnStats (1)
[+] CachedBatch (1)
[+] ColumnBuilder (1)
[+] DateColumnAccessor (1)
[+] DateColumnBuilder (1)
[+] DateColumnStats (1)
[+] DoubleColumnStats (1)
[+] FloatColumnStats (1)
[+] GenericColumnStats (1)
[+] InMemoryRelation (1)
[+] IntColumnStats (1)
[+] LongColumnStats (1)
[+] NullableColumnBuilder (7)
[+] PartitionStatistics (1)
[+] ShortColumnStats (1)
[+] StringColumnStats (1)
[+] TimestampColumnAccessor (1)
[+] TimestampColumnBuilder (1)
[+] TimestampColumnStats (1)

package org.apache.spark.sql.columnar.compression
[+] CompressionScheme (1)
[+] Decoder<T> (2)
[+] Encoder<T> (2)

package org.apache.spark.sql.execution
[+] AddExchange (3)
[+] AggregateEvaluation (1)
[+] BatchPythonEvaluation (1)
[+] CacheTableCommand (1)
[+] DescribeCommand (1)
[+] Distinct (1)
[+] EvaluatePython (1)
[+] Except (1)
[+] ExecutedCommand (1)
[+] Expand (1)
[+] ExplainCommand (1)
[+] ExternalSort (1)
[+] GeneratedAggregate (1)
[+] IntegerHashSetSerializer (1)
[+] Intersect (1)
[+] JavaBigDecimalSerializer (1)
[+] KryoResourcePool (1)
[+] LocalTableScan (1)
[+] LogicalLocalTable (1)
[+] LogicalRDD (1)
[+] LongHashSetSerializer (1)
[+] OpenHashSetSerializer (1)
[+] OutputFaker (1)
[+] PhysicalRDD (1)
[+] PythonUDF (1)
[+] RunnableCommand (1)
[+] ScalaBigDecimalSerializer (1)
[+] SetCommand (1)
[+] ShowTablesCommand (1)
[+] SparkPlan (2)
[+] UncacheTableCommand (1)

package org.apache.spark.sql.execution.joins
[+] BroadcastHashJoin (1)
[+] BroadcastLeftSemiJoinHash (1)
[+] BroadcastNestedLoopJoin (1)
[+] CartesianProduct (1)
[+] GeneralHashedRelation (1)
[+] HashedRelation (1)
[+] HashJoin (1)
[+] HashOuterJoin (1)
[+] LeftSemiJoinBNL (1)
[+] LeftSemiJoinHash (1)
[+] ShuffledHashJoin (1)
[+] UniqueKeyHashedRelation (1)

package org.apache.spark.sql.jdbc
[+] DriverQuirks (1)
[+] JDBCPartition (1)
[+] JDBCPartitioningInfo (1)
[+] JDBCRDD (1)
[+] JDBCRelation (1)
[+] MySQLQuirks (1)
[+] NoQuirks (1)
[+] PostgresQuirks (1)

package org.apache.spark.sql.json
[+] JSONRelation (1)

package org.apache.spark.sql.parquet
[+] CatalystArrayContainsNullConverter (1)
[+] CatalystArrayConverter (1)
[+] CatalystConverter (1)
[+] CatalystMapConverter (1)
[+] CatalystNativeArrayConverter (1)
[+] CatalystPrimitiveRowConverter (1)
[+] CatalystStructConverter (1)
[+] InsertIntoParquetTable (1)
[+] ParquetRelation2 (1)
[+] ParquetTest (1)
[+] ParquetTypeInfo (1)
[+] Partition (1)
[+] PartitionSpec (1)
[+] TestGroupWriteSupport (1)

package org.apache.spark.sql.parquet.timestamp
[+] NanoTime (1)

package org.apache.spark.sql.sources
[+] And (1)
[+] BaseRelation (1)
[+] CaseInsensitiveMap (1)
[+] CatalystScan (1)
[+] CreatableRelationProvider (1)
[+] CreateTableUsing (1)
[+] CreateTableUsingAsSelect (1)
[+] CreateTempTableUsing (1)
[+] CreateTempTableUsingAsSelect (1)
[+] DDLParser (1)
[+] DescribeCommand (1)
[+] EqualTo (1)
[+] Filter (1)
[+] GreaterThan (1)
[+] GreaterThanOrEqual (1)
[+] In (1)
[+] InsertableRelation (1)
[+] InsertIntoDataSource (1)
[+] IsNotNull (1)
[+] IsNull (1)
[+] LessThan (1)
[+] LessThanOrEqual (1)
[+] LogicalRelation (1)
[+] Not (1)
[+] Or (1)
[+] PreWriteCheck (1)
[+] PrunedFilteredScan (1)
[+] RefreshTable (1)
[+] RelationProvider (1)
[+] ResolvedDataSource (1)
[+] SchemaRelationProvider (1)
[+] TableScan (1)

package org.apache.spark.sql.test
[+] ExamplePoint (1)


Problems with Methods, High Severity (4)


spark-sql_2.10-1.3.0.jar, AddExchange
package org.apache.spark.sql.execution
[+] AddExchange.apply ( SparkPlan plan )  :  SparkPlan (1)
[+] AddExchange.numPartitions ( )  :  int (1)

spark-sql_2.10-1.3.0.jar, RowWriteSupport
package org.apache.spark.sql.parquet
[+] RowWriteSupport.writer ( )  :  parquet.io.api.RecordConsumer (1)
[+] RowWriteSupport.writer_.eq ( parquet.io.api.RecordConsumer p1 )  :  void (1)


Problems with Data Types, Medium Severity (32)


spark-sql_2.10-1.3.0.jar
package org.apache.spark.sql.columnar
[+] BinaryColumnAccessor (1)
[+] BinaryColumnBuilder (1)
[+] BooleanColumnAccessor (1)
[+] BooleanColumnBuilder (1)
[+] ByteColumnAccessor (1)
[+] ByteColumnBuilder (1)
[+] DoubleColumnAccessor (1)
[+] DoubleColumnBuilder (1)
[+] FloatColumnAccessor (1)
[+] FloatColumnBuilder (1)
[+] GenericColumnAccessor (1)
[+] GenericColumnBuilder (1)
[+] IntColumnAccessor (1)
[+] IntColumnBuilder (1)
[+] LongColumnAccessor (1)
[+] LongColumnBuilder (1)
[+] ShortColumnAccessor (1)
[+] ShortColumnBuilder (1)
[+] StringColumnAccessor (1)
[+] StringColumnBuilder (1)

package org.apache.spark.sql.execution
[+] AddExchange (1)
[+] SparkPlan (1)
[+] SparkStrategies.BasicOperators. (1)
[+] SparkStrategies.BroadcastNestedLoopJoin. (1)
[+] SparkStrategies.CartesianProduct. (1)
[+] SparkStrategies.HashJoin. (1)
[+] SparkStrategies.ParquetOperations. (1)
[+] SparkStrategies.TakeOrdered. (1)

package org.apache.spark.sql.parquet
[+] AppendingParquetOutputFormat (1)
[+] CatalystGroupConverter (1)
[+] RowReadSupport (1)
[+] RowWriteSupport (1)


Problems with Data Types, Low Severity (12)


spark-sql_2.10-1.3.0.jar
package org.apache.spark.sql.columnar
[+] BooleanColumnStats (1)
[+] ByteColumnStats (1)
[+] DoubleColumnStats (1)
[+] FloatColumnStats (1)
[+] IntColumnStats (1)
[+] LongColumnStats (1)
[+] ShortColumnStats (1)
[+] StringColumnStats (1)

package org.apache.spark.sql.execution
[+] Limit (2)
[+] TakeOrdered (2)


Other Changes in Data Types (10)


spark-sql_2.10-1.3.0.jar
package org.apache.spark.sql.columnar
[+] ColumnBuilder (1)
[+] NullableColumnBuilder (6)

package org.apache.spark.sql.columnar.compression
[+] CompressionScheme (1)
[+] Encoder<T> (2)


Java ARchives (1)


spark-sql_2.10-1.3.0.jar





Generated on Wed Oct 28 11:07:31 2015 for succinct-0.1.2 by Java API Compliance Checker 1.4.1  
A tool for checking backward compatibility of a Java library API