Binary compatibility report for the succinct-0.1.2 library between 1.3.0 and 1.2.0 versions (relating to the portability of client application succinct-0.1.2.jar)

Test Info


Library Name: succinct-0.1.2
Version #1: 1.3.0
Version #2: 1.2.0
Java Version: 1.7.0_75

Test Results


Total Java ARchives: 1
Total Methods / Classes: 2445 / 463
Verdict: Incompatible (44.2%)

Problem Summary


Added Methods: 211
Removed Methods: 896 (High)
Problems with Data Types: 75 High, 38 Medium, 12 Low
Problems with Methods: 1 High, 0 Medium, 0 Low
Other Changes in Data Types: 8
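
The High-severity removed methods are what drive the Incompatible verdict: bytecode in the client jar that was compiled against spark-sql_2.10-1.3.0 refers to method descriptors that the 1.2.0 jar no longer provides, so resolution fails at link time rather than at compile time. A minimal sketch of such a failure, assuming a hypothetical client class (the class name and the "people" table are placeholders, not taken from succinct):

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext

    object ClientApp {
      def main(args: Array[String]): Unit = {
        val sc  = new SparkContext(new SparkConf().setAppName("client").setMaster("local[*]"))
        val ctx = new SQLContext(sc)

        // Compiled against 1.3.0 this call site is linked as
        //   table(Ljava/lang/String;)Lorg/apache/spark/sql/DataFrame;
        // The 1.2.0 jar only provides table(String): SchemaRDD (see the Added
        // Methods list below), so the descriptor cannot be resolved and the JVM
        // throws java.lang.NoSuchMethodError here.
        val people = ctx.table("people")
        people.show() // DataFrame.show() likewise has no 1.2.0 counterpart
      }
    }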

Added Methods (211)


spark-sql_2.10-1.2.0.jar, BaseRelation.class
package org.apache.spark.sql.sources
BaseRelation.schema ( ) [abstract]  :  org.apache.spark.sql.catalyst.types.StructType

spark-sql_2.10-1.2.0.jar, CachedBatch.class
package org.apache.spark.sql.columnar
CachedBatch.CachedBatch ( byte[ ][ ] buffers, org.apache.spark.sql.catalyst.expressions.Row stats )
CachedBatch.copy ( byte[ ][ ] buffers, org.apache.spark.sql.catalyst.expressions.Row stats )  :  CachedBatch
CachedBatch.stats ( )  :  org.apache.spark.sql.catalyst.expressions.Row

spark-sql_2.10-1.2.0.jar, CacheTableCommand.class
package org.apache.spark.sql.execution
CacheTableCommand.children ( )  :  scala.collection.immutable.Nil.
CacheTableCommand.children ( )  :  scala.collection.Seq
CacheTableCommand.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.catalyst.expressions.Row>
CacheTableCommand.executeCollect ( )  :  org.apache.spark.sql.catalyst.expressions.Row[ ]
CacheTableCommand.sideEffectResult ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Row>

spark-sql_2.10-1.2.0.jar, CatalystConverter.class
package org.apache.spark.sql.parquet
CatalystConverter.getCurrentRecord ( )  :  org.apache.spark.sql.catalyst.expressions.Row
CatalystConverter.readDecimal ( org.apache.spark.sql.catalyst.types.decimal.Decimal dest, parquet.io.api.Binary value, org.apache.spark.sql.catalyst.types.DecimalType ctype )  :  void
CatalystConverter.updateDecimal ( int fieldIndex, parquet.io.api.Binary value, org.apache.spark.sql.catalyst.types.DecimalType ctype )  :  void
CatalystConverter.updateString ( int fieldIndex, parquet.io.api.Binary value )  :  void

spark-sql_2.10-1.2.0.jar, CatalystGroupConverter.class
package org.apache.spark.sql.parquet
CatalystGroupConverter.CatalystGroupConverter ( org.apache.spark.sql.catalyst.types.StructField[ ] schema, int index, CatalystConverter parent )
CatalystGroupConverter.CatalystGroupConverter ( org.apache.spark.sql.catalyst.types.StructField[ ] schema, int index, CatalystConverter parent, scala.collection.mutable.ArrayBuffer<Object> current, scala.collection.mutable.ArrayBuffer<org.apache.spark.sql.catalyst.expressions.Row> buffer )
CatalystGroupConverter.getCurrentRecord ( )  :  org.apache.spark.sql.catalyst.expressions.Row
CatalystGroupConverter.schema ( )  :  org.apache.spark.sql.catalyst.types.StructField[ ]

spark-sql_2.10-1.2.0.jar, CatalystScan.class
package org.apache.spark.sql.sources
CatalystScan.CatalystScan ( )

spark-sql_2.10-1.2.0.jar, ColumnBuilder.class
package org.apache.spark.sql.columnar
ColumnBuilder.appendFrom ( org.apache.spark.sql.catalyst.expressions.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.2.0.jar, ColumnStats.class
package org.apache.spark.sql.columnar
ColumnStats.collectedStatistics ( ) [abstract]  :  org.apache.spark.sql.catalyst.expressions.Row
ColumnStats.gatherStats ( org.apache.spark.sql.catalyst.expressions.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.2.0.jar, CreateTableUsing.class
package org.apache.spark.sql.sources
CreateTableUsing.copy ( String tableName, String provider, scala.collection.immutable.Map<String,String> options )  :  CreateTableUsing
CreateTableUsing.CreateTableUsing ( String tableName, String provider, scala.collection.immutable.Map<String,String> options )
CreateTableUsing.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<scala.runtime.Nothing.>

spark-sql_2.10-1.2.0.jar, DescribeCommand.class
package org.apache.spark.sql.execution
DescribeCommand.children ( )  :  scala.collection.immutable.Nil.
DescribeCommand.children ( )  :  scala.collection.Seq
DescribeCommand.copy ( SparkPlan child, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, org.apache.spark.sql.SQLContext context )  :  DescribeCommand
DescribeCommand.DescribeCommand ( SparkPlan child, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, org.apache.spark.sql.SQLContext context )
DescribeCommand.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.catalyst.expressions.Row>
DescribeCommand.executeCollect ( )  :  org.apache.spark.sql.catalyst.expressions.Row[ ]
DescribeCommand.sideEffectResult ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Row>

spark-sql_2.10-1.2.0.jar, Encoder<T>.class
package org.apache.spark.sql.columnar.compression
Encoder<T>.gatherCompressibilityStats ( org.apache.spark.sql.catalyst.expressions.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.2.0.jar, EvaluatePython.class
package org.apache.spark.sql.execution
EvaluatePython.fromJava ( Object p1, org.apache.spark.sql.catalyst.types.DataType p2 ) [static]  :  Object
EvaluatePython.rowToArray ( org.apache.spark.sql.catalyst.expressions.Row p1, scala.collection.Seq<org.apache.spark.sql.catalyst.types.DataType> p2 ) [static]  :  Object[ ]
EvaluatePython.toJava ( Object p1, org.apache.spark.sql.catalyst.types.DataType p2 ) [static]  :  Object

spark-sql_2.10-1.2.0.jar, ExecutedCommand.class
package org.apache.spark.sql.execution
ExecutedCommand.executeCollect ( )  :  org.apache.spark.sql.catalyst.expressions.Row[ ]

spark-sql_2.10-1.2.0.jar, ExplainCommand.class
package org.apache.spark.sql.execution
ExplainCommand.children ( )  :  scala.collection.immutable.Nil.
ExplainCommand.children ( )  :  scala.collection.Seq
ExplainCommand.copy ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan logicalPlan, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, boolean extended, org.apache.spark.sql.SQLContext context )  :  ExplainCommand
ExplainCommand.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.catalyst.expressions.Row>
ExplainCommand.executeCollect ( )  :  org.apache.spark.sql.catalyst.expressions.Row[ ]
ExplainCommand.ExplainCommand ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan logicalPlan, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, boolean extended, org.apache.spark.sql.SQLContext context )
ExplainCommand.otherCopyArgs ( )  :  scala.collection.immutable.List<org.apache.spark.sql.SQLContext>
ExplainCommand.otherCopyArgs ( )  :  scala.collection.Seq
ExplainCommand.sideEffectResult ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Row>

spark-sql_2.10-1.2.0.jar, HashedRelation.class
package org.apache.spark.sql.execution.joins
HashedRelation.get ( org.apache.spark.sql.catalyst.expressions.Row p1 ) [abstract]  :  org.apache.spark.util.collection.CompactBuffer<org.apache.spark.sql.catalyst.expressions.Row>

spark-sql_2.10-1.2.0.jar, HashOuterJoin.class
package org.apache.spark.sql.execution.joins
HashOuterJoin.HashOuterJoin..fullOuterIterator ( org.apache.spark.sql.catalyst.expressions.Row key, scala.collection.Iterable<org.apache.spark.sql.catalyst.expressions.Row> leftIter, scala.collection.Iterable<org.apache.spark.sql.catalyst.expressions.Row> rightIter )  :  scala.collection.Iterator<org.apache.spark.sql.catalyst.expressions.Row>
HashOuterJoin.HashOuterJoin..leftOuterIterator ( org.apache.spark.sql.catalyst.expressions.Row key, scala.collection.Iterable<org.apache.spark.sql.catalyst.expressions.Row> leftIter, scala.collection.Iterable<org.apache.spark.sql.catalyst.expressions.Row> rightIter )  :  scala.collection.Iterator<org.apache.spark.sql.catalyst.expressions.Row>
HashOuterJoin.HashOuterJoin..rightOuterIterator ( org.apache.spark.sql.catalyst.expressions.Row key, scala.collection.Iterable<org.apache.spark.sql.catalyst.expressions.Row> leftIter, scala.collection.Iterable<org.apache.spark.sql.catalyst.expressions.Row> rightIter )  :  scala.collection.Iterator<org.apache.spark.sql.catalyst.expressions.Row>

spark-sql_2.10-1.2.0.jar, InMemoryColumnarTableScan.class
package org.apache.spark.sql.columnar
InMemoryColumnarTableScan.sqlContext ( )  :  org.apache.spark.sql.SQLContext

spark-sql_2.10-1.2.0.jar, IntColumnStats.class
package org.apache.spark.sql.columnar
IntColumnStats.collectedStatistics ( )  :  org.apache.spark.sql.catalyst.expressions.Row
IntColumnStats.gatherStats ( org.apache.spark.sql.catalyst.expressions.Row row, int ordinal )  :  void

spark-sql_2.10-1.2.0.jar, JSONRelation.class
package org.apache.spark.sql.json
JSONRelation.copy ( String fileName, double samplingRatio, org.apache.spark.sql.SQLContext sqlContext )  :  JSONRelation
JSONRelation.fileName ( )  :  String
JSONRelation.JSONRelation ( String fileName, double samplingRatio, org.apache.spark.sql.SQLContext sqlContext )
JSONRelation.schema ( )  :  org.apache.spark.sql.catalyst.types.StructType

spark-sql_2.10-1.2.0.jar, Limit.class
package org.apache.spark.sql.execution
Limit.executeCollect ( )  :  org.apache.spark.sql.catalyst.expressions.Row[ ]

spark-sql_2.10-1.2.0.jar, NativeColumnType<T>.class
package org.apache.spark.sql.columnar
NativeColumnType<T>.dataType ( )  :  T
NativeColumnType<T>.NativeColumnType ( T dataType, int typeId, int defaultSize )

spark-sql_2.10-1.2.0.jar, NullableColumnBuilder.class
package org.apache.spark.sql.columnar
NullableColumnBuilder.appendFrom ( org.apache.spark.sql.catalyst.expressions.Row p1, int p2 ) [abstract]  :  void
NullableColumnBuilder.NullableColumnBuilder..super.appendFrom ( org.apache.spark.sql.catalyst.expressions.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.2.0.jar, ParquetRelation2.class
package org.apache.spark.sql.parquet
ParquetRelation2.copy ( String path, org.apache.spark.sql.SQLContext sqlContext )  :  ParquetRelation2
ParquetRelation2.dataIncludesKey ( )  :  boolean
ParquetRelation2.dataSchema ( )  :  org.apache.spark.sql.catalyst.types.StructType
ParquetRelation2.ParquetRelation2..partitionKeys ( )  :  scala.collection.Seq<String>
ParquetRelation2.ParquetRelation2 ( String path, org.apache.spark.sql.SQLContext sqlContext )
ParquetRelation2.path ( )  :  String
ParquetRelation2.schema ( )  :  org.apache.spark.sql.catalyst.types.StructType

spark-sql_2.10-1.2.0.jar, ParquetTableScan.class
package org.apache.spark.sql.parquet
ParquetTableScan.normalOutput ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
ParquetTableScan.partOutput ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>

spark-sql_2.10-1.2.0.jar, Partition.class
package org.apache.spark.sql.parquet
Partition.copy ( scala.collection.immutable.Map<String,Object> partitionValues, scala.collection.Seq<org.apache.hadoop.fs.FileStatus> files )  :  Partition
Partition.files ( )  :  scala.collection.Seq<org.apache.hadoop.fs.FileStatus>
Partition.Partition ( scala.collection.immutable.Map<String,Object> partitionValues, scala.collection.Seq<org.apache.hadoop.fs.FileStatus> files )
Partition.partitionValues ( )  :  scala.collection.immutable.Map<String,Object>

spark-sql_2.10-1.2.0.jar, PrunedFilteredScan.class
package org.apache.spark.sql.sources
PrunedFilteredScan.PrunedFilteredScan ( )

spark-sql_2.10-1.2.0.jar, PythonUDF.class
package org.apache.spark.sql.execution
PythonUDF.copy ( String name, byte[ ] command, java.util.Map<String,String> envVars, java.util.List<String> pythonIncludes, String pythonExec, java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>> broadcastVars, org.apache.spark.Accumulator<java.util.List<byte[ ]>> accumulator, org.apache.spark.sql.catalyst.types.DataType dataType, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> children )  :  PythonUDF
PythonUDF.dataType ( )  :  org.apache.spark.sql.catalyst.types.DataType
PythonUDF.eval ( org.apache.spark.sql.catalyst.expressions.Row input )  :  Object
PythonUDF.eval ( org.apache.spark.sql.catalyst.expressions.Row input )  :  scala.runtime.Nothing.
PythonUDF.PythonUDF ( String name, byte[ ] command, java.util.Map<String,String> envVars, java.util.List<String> pythonIncludes, String pythonExec, java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>> broadcastVars, org.apache.spark.Accumulator<java.util.List<byte[ ]>> accumulator, org.apache.spark.sql.catalyst.types.DataType dataType, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> children )

spark-sql_2.10-1.2.0.jar, RowWriteSupport.class
package org.apache.spark.sql.parquet
RowWriteSupport.attributes ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
RowWriteSupport.attributes_.eq ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> p1 )  :  void
RowWriteSupport.write ( org.apache.spark.sql.catalyst.expressions.Row record )  :  void
RowWriteSupport.writeArray ( org.apache.spark.sql.catalyst.types.ArrayType schema, scala.collection.Seq<Object> array )  :  void
RowWriteSupport.writeDecimal ( org.apache.spark.sql.catalyst.types.decimal.Decimal decimal, int precision )  :  void
RowWriteSupport.writeMap ( org.apache.spark.sql.catalyst.types.MapType schema, scala.collection.immutable.Map<?,Object> map )  :  void
RowWriteSupport.writePrimitive ( org.apache.spark.sql.catalyst.types.PrimitiveType schema, Object value )  :  void
RowWriteSupport.writeStruct ( org.apache.spark.sql.catalyst.types.StructType schema, scala.collection.Seq<Object> struct )  :  void
RowWriteSupport.writeValue ( org.apache.spark.sql.catalyst.types.DataType schema, Object value )  :  void

spark-sql_2.10-1.2.0.jar, RunnableCommand.class
package org.apache.spark.sql.execution
RunnableCommand.output ( ) [abstract]  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>

spark-sql_2.10-1.2.0.jar, SetCommand.class
package org.apache.spark.sql.execution
SetCommand.children ( )  :  scala.collection.immutable.Nil.
SetCommand.children ( )  :  scala.collection.Seq
SetCommand.copy ( scala.Option<scala.Tuple2<String,scala.Option<String>>> kv, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, org.apache.spark.sql.SQLContext context )  :  SetCommand
SetCommand.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.catalyst.expressions.Row>
SetCommand.executeCollect ( )  :  org.apache.spark.sql.catalyst.expressions.Row[ ]
SetCommand.otherCopyArgs ( )  :  scala.collection.immutable.List<org.apache.spark.sql.SQLContext>
SetCommand.otherCopyArgs ( )  :  scala.collection.Seq
SetCommand.SetCommand ( scala.Option<scala.Tuple2<String,scala.Option<String>>> kv, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, org.apache.spark.sql.SQLContext context )
SetCommand.sideEffectResult ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Row>

spark-sql_2.10-1.2.0.jar, SparkPlan.class
package org.apache.spark.sql.execution
SparkPlan.executeCollect ( )  :  org.apache.spark.sql.catalyst.expressions.Row[ ]

spark-sql_2.10-1.2.0.jar, SparkStrategies.class
package org.apache.spark.sql.execution
SparkStrategies.CommandStrategy ( )  :  SparkStrategies.CommandStrategy.

spark-sql_2.10-1.2.0.jar, SQLContext.class
package org.apache.spark.sql
SQLContext.abs ( catalyst.expressions.Expression e )  :  catalyst.expressions.Abs
SQLContext.applySchema ( org.apache.spark.rdd.RDD<catalyst.expressions.Row> rowRDD, catalyst.types.StructType schema )  :  SchemaRDD
SQLContext.applySchemaToPythonRDD ( org.apache.spark.rdd.RDD<Object[ ]> rdd, catalyst.types.StructType schema )  :  SchemaRDD
SQLContext.applySchemaToPythonRDD ( org.apache.spark.rdd.RDD<Object[ ]> rdd, String schemaString )  :  SchemaRDD
SQLContext.approxCountDistinct ( catalyst.expressions.Expression e, double rsd )  :  catalyst.expressions.ApproxCountDistinct
SQLContext.autoBroadcastJoinThreshold ( )  :  int
SQLContext.avg ( catalyst.expressions.Expression e )  :  catalyst.expressions.Average
SQLContext.baseRelationToSchemaRDD ( sources.BaseRelation baseRelation )  :  SchemaRDD
SQLContext.bigDecimalToLiteral ( scala.math.BigDecimal d )  :  catalyst.expressions.Literal
SQLContext.binaryToLiteral ( byte[ ] a )  :  catalyst.expressions.Literal
SQLContext.booleanToLiteral ( boolean b )  :  catalyst.expressions.Literal
SQLContext.byteToLiteral ( byte b )  :  catalyst.expressions.Literal
SQLContext.cacheQuery ( SchemaRDD query, scala.Option<String> tableName, org.apache.spark.storage.StorageLevel storageLevel )  :  void
SQLContext.clear ( )  :  void
SQLContext.codegenEnabled ( )  :  boolean
SQLContext.columnBatchSize ( )  :  int
SQLContext.columnNameOfCorruptRecord ( )  :  String
SQLContext.count ( catalyst.expressions.Expression e )  :  catalyst.expressions.Count
SQLContext.countDistinct ( scala.collection.Seq<catalyst.expressions.Expression> e )  :  catalyst.expressions.CountDistinct
SQLContext.createParquetFile ( String path, boolean allowExisting, org.apache.hadoop.conf.Configuration conf, scala.reflect.api.TypeTags.TypeTag<A> p4 )  :  SchemaRDD
SQLContext.createSchemaRDD ( org.apache.spark.rdd.RDD<A> rdd, scala.reflect.api.TypeTags.TypeTag<A> p2 )  :  SchemaRDD
SQLContext.dateToLiteral ( java.sql.Date d )  :  catalyst.expressions.Literal
SQLContext.decimalToLiteral ( catalyst.types.decimal.Decimal d )  :  catalyst.expressions.Literal
SQLContext.defaultSizeInBytes ( )  :  long
SQLContext.dialect ( )  :  String
SQLContext.doubleToLiteral ( double d )  :  catalyst.expressions.Literal
SQLContext.DslAttribute ( catalyst.expressions.AttributeReference a )  :  catalyst.dsl.package.ExpressionConversions.DslAttribute
SQLContext.DslExpression ( catalyst.expressions.Expression e )  :  catalyst.dsl.package.ExpressionConversions.DslExpression
SQLContext.DslString ( String s )  :  catalyst.dsl.package.ExpressionConversions.DslString
SQLContext.DslSymbol ( scala.Symbol sym )  :  catalyst.dsl.package.ExpressionConversions.DslSymbol
SQLContext.externalSortEnabled ( )  :  boolean
SQLContext.extraStrategies ( )  :  scala.collection.Seq<catalyst.planning.GenericStrategy<execution.SparkPlan>>
SQLContext.extraStrategies_.eq ( scala.collection.Seq<catalyst.planning.GenericStrategy<execution.SparkPlan>> p1 )  :  void
SQLContext.first ( catalyst.expressions.Expression e )  :  catalyst.expressions.First
SQLContext.floatToLiteral ( float f )  :  catalyst.expressions.Literal
SQLContext.inMemoryPartitionPruning ( )  :  boolean
SQLContext.intToLiteral ( int i )  :  catalyst.expressions.Literal
SQLContext.invalidateCache ( catalyst.plans.logical.LogicalPlan plan )  :  void
SQLContext.isParquetBinaryAsString ( )  :  boolean
SQLContext.jsonFile ( String path )  :  SchemaRDD
SQLContext.jsonFile ( String path, double samplingRatio )  :  SchemaRDD
SQLContext.jsonFile ( String path, catalyst.types.StructType schema )  :  SchemaRDD
SQLContext.jsonRDD ( org.apache.spark.rdd.RDD<String> json )  :  SchemaRDD
SQLContext.jsonRDD ( org.apache.spark.rdd.RDD<String> json, double samplingRatio )  :  SchemaRDD
SQLContext.jsonRDD ( org.apache.spark.rdd.RDD<String> json, catalyst.types.StructType schema )  :  SchemaRDD
SQLContext.last ( catalyst.expressions.Expression e )  :  catalyst.expressions.Last
SQLContext.logicalPlanToSparkQuery ( catalyst.plans.logical.LogicalPlan plan )  :  SchemaRDD
SQLContext.longToLiteral ( long l )  :  catalyst.expressions.Literal
SQLContext.lookupCachedData ( catalyst.plans.logical.LogicalPlan plan )  :  scala.Option<CachedData>
SQLContext.lookupCachedData ( SchemaRDD query )  :  scala.Option<CachedData>
SQLContext.lower ( catalyst.expressions.Expression e )  :  catalyst.expressions.Lower
SQLContext.max ( catalyst.expressions.Expression e )  :  catalyst.expressions.Max
SQLContext.min ( catalyst.expressions.Expression e )  :  catalyst.expressions.Min
SQLContext.numShufflePartitions ( )  :  int
SQLContext.CacheManager..cachedData ( )  :  scala.collection.mutable.ArrayBuffer<CachedData>
SQLContext.CacheManager..cacheLock ( )  :  java.util.concurrent.locks.ReentrantReadWriteLock
SQLContext.CacheManager._setter_.CacheManager..cachedData_.eq ( scala.collection.mutable.ArrayBuffer p1 )  :  void
SQLContext.CacheManager._setter_.CacheManager..cacheLock_.eq ( java.util.concurrent.locks.ReentrantReadWriteLock p1 )  :  void
SQLContext.SQLConf._setter_.settings_.eq ( java.util.Map p1 )  :  void
SQLContext.parquetCompressionCodec ( )  :  String
SQLContext.parquetFile ( String path )  :  SchemaRDD
SQLContext.parquetFilterPushDown ( )  :  boolean
SQLContext.parseDataType ( String dataTypeString )  :  catalyst.types.DataType
SQLContext.registerFunction ( String name, scala.Function10<?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function11<?,?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function12<?,?,?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function13<?,?,?,?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function14<?,?,?,?,?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function15<?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function16<?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function17<?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function18<?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function19<?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function1<?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function20<?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function21<?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function22<?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function2<?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function3<?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function4<?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function5<?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function6<?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function7<?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function8<?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerFunction ( String name, scala.Function9<?,?,?,?,?,?,?,?,?,T> func, scala.reflect.api.TypeTags.TypeTag<T> p3 )  :  void
SQLContext.registerPython ( String name, byte[ ] command, java.util.Map<String,String> envVars, java.util.List<String> pythonIncludes, String pythonExec, java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>> broadcastVars, org.apache.spark.Accumulator<java.util.List<byte[ ]>> accumulator, String stringDataType )  :  void
SQLContext.registerRDDAsTable ( SchemaRDD rdd, String tableName )  :  void
SQLContext.settings ( )  :  java.util.Map<String,String>
SQLContext.shortToLiteral ( short s )  :  catalyst.expressions.Literal
SQLContext.sql ( String sqlText )  :  SchemaRDD
SQLContext.sqlParser ( )  :  catalyst.SparkSQLParser
SQLContext.sqrt ( catalyst.expressions.Expression e )  :  catalyst.expressions.Sqrt
SQLContext.stringToLiteral ( String s )  :  catalyst.expressions.Literal
SQLContext.sum ( catalyst.expressions.Expression e )  :  catalyst.expressions.Sum
SQLContext.sumDistinct ( catalyst.expressions.Expression e )  :  catalyst.expressions.SumDistinct
SQLContext.symbolToUnresolvedAttribute ( scala.Symbol s )  :  catalyst.analysis.UnresolvedAttribute
SQLContext.table ( String tableName )  :  SchemaRDD
SQLContext.timestampToLiteral ( java.sql.Timestamp t )  :  catalyst.expressions.Literal
SQLContext.tryUncacheQuery ( SchemaRDD query, boolean blocking )  :  boolean
SQLContext.uncacheQuery ( SchemaRDD query, boolean blocking )  :  void
SQLContext.upper ( catalyst.expressions.Expression e )  :  catalyst.expressions.Upper
SQLContext.useCachedData ( catalyst.plans.logical.LogicalPlan plan )  :  catalyst.plans.logical.LogicalPlan
SQLContext.useCompression ( )  :  boolean
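
Most of these 1.2.0-only SQLContext signatures exist because the pre-DataFrame API works in terms of SchemaRDD and the catalyst.* type and expression classes, both of which were reorganized in 1.3.0. A rough sketch of how a 1.2.0 client might use a few of the methods listed above (the JSON path and table name are placeholders; in 1.3.0 the equivalent calls return DataFrame and UDF registration goes through sqlContext.udf instead):

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext

    object OneTwoClient {
      def main(args: Array[String]): Unit = {
        val sc  = new SparkContext(new SparkConf().setAppName("client-1.2").setMaster("local[*]"))
        val ctx = new SQLContext(sc)

        // jsonFile and registerRDDAsTable are listed above with SchemaRDD-based
        // signatures that are absent from the 1.3.0 jar.
        val people = ctx.jsonFile("/tmp/people.json")
        ctx.registerRDDAsTable(people, "people")

        // registerFunction(name, FunctionN) is also 1.2.0-only in this binary form.
        ctx.registerFunction("strlen", (s: String) => s.length)
        ctx.sql("SELECT name, strlen(name) FROM people").collect().foreach(println)
      }
    }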

spark-sql_2.10-1.2.0.jar, TableScan.class
package org.apache.spark.sql.sources
TableScan.TableScan ( )

spark-sql_2.10-1.2.0.jar, TakeOrdered.class
package org.apache.spark.sql.execution
TakeOrdered.executeCollect ( )  :  org.apache.spark.sql.catalyst.expressions.Row[ ]

spark-sql_2.10-1.2.0.jar, UncacheTableCommand.class
package org.apache.spark.sql.execution
UncacheTableCommand.children ( )  :  scala.collection.immutable.Nil.
UncacheTableCommand.children ( )  :  scala.collection.Seq
UncacheTableCommand.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.catalyst.expressions.Row>
UncacheTableCommand.executeCollect ( )  :  org.apache.spark.sql.catalyst.expressions.Row[ ]
UncacheTableCommand.sideEffectResult ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Row>

Removed Methods (896)


spark-sql_2.10-1.3.0.jar, And.class
package org.apache.spark.sql.sources
And.And ( Filter left, Filter right )
And.canEqual ( Object p1 )  :  boolean
And.copy ( Filter left, Filter right )  :  And
And.curried ( ) [static]  :  scala.Function1<Filter,scala.Function1<Filter,And>>
And.equals ( Object p1 )  :  boolean
And.hashCode ( )  :  int
And.left ( )  :  Filter
And.productArity ( )  :  int
And.productElement ( int p1 )  :  Object
And.productIterator ( )  :  scala.collection.Iterator<Object>
And.productPrefix ( )  :  String
And.right ( )  :  Filter
And.toString ( )  :  String
And.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<Filter,Filter>,And>

spark-sql_2.10-1.3.0.jar, BaseRelation.class
package org.apache.spark.sql.sources
BaseRelation.schema ( ) [abstract]  :  org.apache.spark.sql.types.StructType
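
The only change to BaseRelation.schema is the package of its return type: the 1.2.0 signature (under Added Methods above) uses org.apache.spark.sql.catalyst.types.StructType, while 1.3.0 uses org.apache.spark.sql.types.StructType, so a data source compiled against one version does not link against the other even though the source code looks nearly identical. A minimal 1.3.0-style relation for illustration (the class and column names are hypothetical):

    import org.apache.spark.rdd.RDD
    import org.apache.spark.sql.{Row, SQLContext}
    import org.apache.spark.sql.sources.{BaseRelation, TableScan}
    import org.apache.spark.sql.types.{IntegerType, StructField, StructType}

    // Against 1.2.0 the same override would have to return
    // org.apache.spark.sql.catalyst.types.StructType instead.
    class SingleColumnRelation(val sqlContext: SQLContext)
        extends BaseRelation with TableScan {

      override def schema: StructType =
        StructType(StructField("value", IntegerType, nullable = false) :: Nil)

      override def buildScan(): RDD[Row] =
        sqlContext.sparkContext.parallelize(1 to 10).map(Row(_))
    }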

spark-sql_2.10-1.3.0.jar, BroadcastHashJoin.class
package org.apache.spark.sql.execution.joins
BroadcastHashJoin.timeout ( )  :  scala.concurrent.duration.Duration

spark-sql_2.10-1.3.0.jar, BroadcastLeftSemiJoinHash.class
package org.apache.spark.sql.execution.joins
BroadcastLeftSemiJoinHash.BroadcastLeftSemiJoinHash ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )
BroadcastLeftSemiJoinHash.buildKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastLeftSemiJoinHash.buildPlan ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastLeftSemiJoinHash.buildSide ( )  :  package.BuildRight.
BroadcastLeftSemiJoinHash.buildSide ( )  :  package.BuildSide
BroadcastLeftSemiJoinHash.buildSideKeyGenerator ( )  :  org.apache.spark.sql.catalyst.expressions.package.Projection
BroadcastLeftSemiJoinHash.canEqual ( Object p1 )  :  boolean
BroadcastLeftSemiJoinHash.children ( )  :  scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan>
BroadcastLeftSemiJoinHash.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, org.apache.spark.sql.execution.SparkPlan left, org.apache.spark.sql.execution.SparkPlan right )  :  BroadcastLeftSemiJoinHash
BroadcastLeftSemiJoinHash.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.Function1<org.apache.spark.sql.execution.SparkPlan,scala.Function1<org.apache.spark.sql.execution.SparkPlan,BroadcastLeftSemiJoinHash>>>>
BroadcastLeftSemiJoinHash.equals ( Object p1 )  :  boolean
BroadcastLeftSemiJoinHash.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
BroadcastLeftSemiJoinHash.hashCode ( )  :  int
BroadcastLeftSemiJoinHash.hashJoin ( scala.collection.Iterator<org.apache.spark.sql.Row> streamIter, HashedRelation hashedRelation )  :  scala.collection.Iterator<org.apache.spark.sql.Row>
BroadcastLeftSemiJoinHash.left ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
BroadcastLeftSemiJoinHash.left ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastLeftSemiJoinHash.leftKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastLeftSemiJoinHash.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
BroadcastLeftSemiJoinHash.productArity ( )  :  int
BroadcastLeftSemiJoinHash.productElement ( int p1 )  :  Object
BroadcastLeftSemiJoinHash.productIterator ( )  :  scala.collection.Iterator<Object>
BroadcastLeftSemiJoinHash.productPrefix ( )  :  String
BroadcastLeftSemiJoinHash.right ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
BroadcastLeftSemiJoinHash.right ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastLeftSemiJoinHash.rightKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastLeftSemiJoinHash.streamedKeys ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>
BroadcastLeftSemiJoinHash.streamedPlan ( )  :  org.apache.spark.sql.execution.SparkPlan
BroadcastLeftSemiJoinHash.streamSideKeyGenerator ( )  :  scala.Function0<org.apache.spark.sql.catalyst.expressions.package.MutableProjection>
BroadcastLeftSemiJoinHash.tupled ( ) [static]  :  scala.Function1<scala.Tuple4<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>,org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.execution.SparkPlan>,BroadcastLeftSemiJoinHash>

spark-sql_2.10-1.3.0.jar, CachedBatch.class
package org.apache.spark.sql.columnar
CachedBatch.CachedBatch ( byte[ ][ ] buffers, org.apache.spark.sql.Row stats )
CachedBatch.copy ( byte[ ][ ] buffers, org.apache.spark.sql.Row stats )  :  CachedBatch
CachedBatch.stats ( )  :  org.apache.spark.sql.Row

spark-sql_2.10-1.3.0.jar, CacheManager.class
package org.apache.spark.sql
CacheManager.CacheManager ( SQLContext sqlContext )
CacheManager.cacheQuery ( DataFrame query, scala.Option<String> tableName, org.apache.spark.storage.StorageLevel storageLevel )  :  void
CacheManager.tryUncacheQuery ( DataFrame query, boolean blocking )  :  boolean

spark-sql_2.10-1.3.0.jar, CacheTableCommand.class
package org.apache.spark.sql.execution
CacheTableCommand.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>

spark-sql_2.10-1.3.0.jar, CaseInsensitiveMap.class
package org.apache.spark.sql.sources
CaseInsensitiveMap.CaseInsensitiveMap ( scala.collection.immutable.Map<String,String> map )

spark-sql_2.10-1.3.0.jar, CatalystArrayContainsNullConverter.class
package org.apache.spark.sql.parquet
CatalystArrayContainsNullConverter.CatalystArrayContainsNullConverter ( org.apache.spark.sql.types.DataType elementType, int index, CatalystConverter parent )

spark-sql_2.10-1.3.0.jar, CatalystArrayConverter.class
package org.apache.spark.sql.parquet
CatalystArrayConverter.CatalystArrayConverter ( org.apache.spark.sql.types.DataType elementType, int index, CatalystConverter parent )

spark-sql_2.10-1.3.0.jar, CatalystConverter.class
package org.apache.spark.sql.parquet
CatalystConverter.getCurrentRecord ( )  :  org.apache.spark.sql.Row
CatalystConverter.readDecimal ( org.apache.spark.sql.types.Decimal dest, parquet.io.api.Binary value, org.apache.spark.sql.types.DecimalType ctype )  :  void
CatalystConverter.readTimestamp ( parquet.io.api.Binary value )  :  java.sql.Timestamp
CatalystConverter.THRIFT_ARRAY_ELEMENTS_SCHEMA_NAME_SUFFIX ( ) [static]  :  String
CatalystConverter.updateDecimal ( int fieldIndex, parquet.io.api.Binary value, org.apache.spark.sql.types.DecimalType ctype )  :  void
CatalystConverter.updateString ( int fieldIndex, String value )  :  void
CatalystConverter.updateTimestamp ( int fieldIndex, parquet.io.api.Binary value )  :  void

spark-sql_2.10-1.3.0.jar, CatalystGroupConverter.class
package org.apache.spark.sql.parquet
CatalystGroupConverter.CatalystGroupConverter ( org.apache.spark.sql.types.StructField[ ] schema, int index, CatalystConverter parent )
CatalystGroupConverter.CatalystGroupConverter ( org.apache.spark.sql.types.StructField[ ] schema, int index, CatalystConverter parent, scala.collection.mutable.ArrayBuffer<Object> current, scala.collection.mutable.ArrayBuffer<org.apache.spark.sql.Row> buffer )
CatalystGroupConverter.schema ( )  :  org.apache.spark.sql.types.StructField[ ]

spark-sql_2.10-1.3.0.jar, CatalystMapConverter.class
package org.apache.spark.sql.parquet
CatalystMapConverter.CatalystMapConverter ( org.apache.spark.sql.types.StructField[ ] schema, int index, CatalystConverter parent )

spark-sql_2.10-1.3.0.jar, CatalystNativeArrayConverter.class
package org.apache.spark.sql.parquet
CatalystNativeArrayConverter.CatalystNativeArrayConverter ( org.apache.spark.sql.types.NativeType elementType, int index, CatalystConverter parent, int capacity )

spark-sql_2.10-1.3.0.jar, CatalystPrimitiveStringConverter.class
package org.apache.spark.sql.parquet
CatalystPrimitiveStringConverter.CatalystPrimitiveStringConverter ( CatalystConverter parent, int fieldIndex )

spark-sql_2.10-1.3.0.jar, CatalystStructConverter.class
package org.apache.spark.sql.parquet
CatalystStructConverter.CatalystStructConverter ( org.apache.spark.sql.types.StructField[ ] schema, int index, CatalystConverter parent )

spark-sql_2.10-1.3.0.jar, Column.class
package org.apache.spark.sql
Column.and ( Column other )  :  Column
Column.apply ( catalyst.expressions.Expression p1 ) [static]  :  Column
Column.apply ( String p1 ) [static]  :  Column
Column.as ( scala.Symbol alias )  :  Column
Column.as ( String alias )  :  Column
Column.asc ( )  :  Column
Column.cast ( types.DataType to )  :  Column
Column.cast ( String to )  :  Column
Column.Column ( catalyst.expressions.Expression expr )
Column.Column ( String name )
Column.contains ( Object other )  :  Column
Column.desc ( )  :  Column
Column.divide ( Object other )  :  Column
Column.endsWith ( Column other )  :  Column
Column.endsWith ( String literal )  :  Column
Column.eqNullSafe ( Object other )  :  Column
Column.equalTo ( Object other )  :  Column
Column.explain ( boolean extended )  :  void
Column.expr ( )  :  catalyst.expressions.Expression
Column.geq ( Object other )  :  Column
Column.getField ( String fieldName )  :  Column
Column.getItem ( int ordinal )  :  Column
Column.gt ( Object other )  :  Column
Column.in ( Column... list )  :  Column
Column.in ( scala.collection.Seq<Column> list )  :  Column
Column.isNotNull ( )  :  Column
Column.isNull ( )  :  Column
Column.leq ( Object other )  :  Column
Column.like ( String literal )  :  Column
Column.lt ( Object other )  :  Column
Column.minus ( Object other )  :  Column
Column.mod ( Object other )  :  Column
Column.multiply ( Object other )  :  Column
Column.notEqual ( Object other )  :  Column
Column.or ( Column other )  :  Column
Column.plus ( Object other )  :  Column
Column.rlike ( String literal )  :  Column
Column.startsWith ( Column other )  :  Column
Column.startsWith ( String literal )  :  Column
Column.substr ( int startPos, int len )  :  Column
Column.substr ( Column startPos, Column len )  :  Column
Column.toString ( )  :  String
Column.unapply ( Column p1 ) [static]  :  scala.Option<catalyst.expressions.Expression>
Column.unary_.bang ( )  :  Column
Column.unary_.minus ( )  :  Column

spark-sql_2.10-1.3.0.jar, ColumnBuilder.class
package org.apache.spark.sql.columnar
ColumnBuilder.appendFrom ( org.apache.spark.sql.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.3.0.jar, ColumnName.class
package org.apache.spark.sql
ColumnName.ColumnName ( String name )

spark-sql_2.10-1.3.0.jar, ColumnStats.class
package org.apache.spark.sql.columnar
ColumnStats.collectedStatistics ( ) [abstract]  :  org.apache.spark.sql.Row
ColumnStats.gatherStats ( org.apache.spark.sql.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.3.0.jar, CreatableRelationProvider.class
package org.apache.spark.sql.sources
CreatableRelationProvider.createRelation ( org.apache.spark.sql.SQLContext p1, org.apache.spark.sql.SaveMode p2, scala.collection.immutable.Map<String,String> p3, org.apache.spark.sql.DataFrame p4 ) [abstract]  :  BaseRelation

spark-sql_2.10-1.3.0.jar, CreateTableUsing.class
package org.apache.spark.sql.sources
CreateTableUsing.allowExisting ( )  :  boolean
CreateTableUsing.copy ( String tableName, scala.Option<org.apache.spark.sql.types.StructType> userSpecifiedSchema, String provider, boolean temporary, scala.collection.immutable.Map<String,String> options, boolean allowExisting, boolean managedIfNoPath )  :  CreateTableUsing
CreateTableUsing.CreateTableUsing ( String tableName, scala.Option<org.apache.spark.sql.types.StructType> userSpecifiedSchema, String provider, boolean temporary, scala.collection.immutable.Map<String,String> options, boolean allowExisting, boolean managedIfNoPath )
CreateTableUsing.managedIfNoPath ( )  :  boolean
CreateTableUsing.temporary ( )  :  boolean
CreateTableUsing.userSpecifiedSchema ( )  :  scala.Option<org.apache.spark.sql.types.StructType>

spark-sql_2.10-1.3.0.jar, CreateTableUsingAsSelect.class
package org.apache.spark.sql.sources
CreateTableUsingAsSelect.canEqual ( Object p1 )  :  boolean
CreateTableUsingAsSelect.child ( )  :  org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
CreateTableUsingAsSelect.child ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
CreateTableUsingAsSelect.copy ( String tableName, String provider, boolean temporary, org.apache.spark.sql.SaveMode mode, scala.collection.immutable.Map<String,String> options, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan child )  :  CreateTableUsingAsSelect
CreateTableUsingAsSelect.CreateTableUsingAsSelect ( String tableName, String provider, boolean temporary, org.apache.spark.sql.SaveMode mode, scala.collection.immutable.Map<String,String> options, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan child )
CreateTableUsingAsSelect.curried ( ) [static]  :  scala.Function1<String,scala.Function1<String,scala.Function1<Object,scala.Function1<org.apache.spark.sql.SaveMode,scala.Function1<scala.collection.immutable.Map<String,String>,scala.Function1<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,CreateTableUsingAsSelect>>>>>>
CreateTableUsingAsSelect.equals ( Object p1 )  :  boolean
CreateTableUsingAsSelect.hashCode ( )  :  int
CreateTableUsingAsSelect.mode ( )  :  org.apache.spark.sql.SaveMode
CreateTableUsingAsSelect.options ( )  :  scala.collection.immutable.Map<String,String>
CreateTableUsingAsSelect.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
CreateTableUsingAsSelect.productArity ( )  :  int
CreateTableUsingAsSelect.productElement ( int p1 )  :  Object
CreateTableUsingAsSelect.productIterator ( )  :  scala.collection.Iterator<Object>
CreateTableUsingAsSelect.productPrefix ( )  :  String
CreateTableUsingAsSelect.provider ( )  :  String
CreateTableUsingAsSelect.tableName ( )  :  String
CreateTableUsingAsSelect.temporary ( )  :  boolean
CreateTableUsingAsSelect.tupled ( ) [static]  :  scala.Function1<scala.Tuple6<String,String,Object,org.apache.spark.sql.SaveMode,scala.collection.immutable.Map<String,String>,org.apache.spark.sql.catalyst.plans.logical.LogicalPlan>,CreateTableUsingAsSelect>

spark-sql_2.10-1.3.0.jar, CreateTempTableUsing.class
package org.apache.spark.sql.sources
CreateTempTableUsing.canEqual ( Object p1 )  :  boolean
CreateTempTableUsing.copy ( String tableName, scala.Option<org.apache.spark.sql.types.StructType> userSpecifiedSchema, String provider, scala.collection.immutable.Map<String,String> options )  :  CreateTempTableUsing
CreateTempTableUsing.CreateTempTableUsing ( String tableName, scala.Option<org.apache.spark.sql.types.StructType> userSpecifiedSchema, String provider, scala.collection.immutable.Map<String,String> options )
CreateTempTableUsing.curried ( ) [static]  :  scala.Function1<String,scala.Function1<scala.Option<org.apache.spark.sql.types.StructType>,scala.Function1<String,scala.Function1<scala.collection.immutable.Map<String,String>,CreateTempTableUsing>>>>
CreateTempTableUsing.equals ( Object p1 )  :  boolean
CreateTempTableUsing.hashCode ( )  :  int
CreateTempTableUsing.options ( )  :  scala.collection.immutable.Map<String,String>
CreateTempTableUsing.productArity ( )  :  int
CreateTempTableUsing.productElement ( int p1 )  :  Object
CreateTempTableUsing.productIterator ( )  :  scala.collection.Iterator<Object>
CreateTempTableUsing.productPrefix ( )  :  String
CreateTempTableUsing.provider ( )  :  String
CreateTempTableUsing.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<scala.runtime.Nothing.>
CreateTempTableUsing.tableName ( )  :  String
CreateTempTableUsing.tupled ( ) [static]  :  scala.Function1<scala.Tuple4<String,scala.Option<org.apache.spark.sql.types.StructType>,String,scala.collection.immutable.Map<String,String>>,CreateTempTableUsing>
CreateTempTableUsing.userSpecifiedSchema ( )  :  scala.Option<org.apache.spark.sql.types.StructType>

spark-sql_2.10-1.3.0.jar, CreateTempTableUsingAsSelect.class
package org.apache.spark.sql.sources
CreateTempTableUsingAsSelect.canEqual ( Object p1 )  :  boolean
CreateTempTableUsingAsSelect.copy ( String tableName, String provider, org.apache.spark.sql.SaveMode mode, scala.collection.immutable.Map<String,String> options, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan query )  :  CreateTempTableUsingAsSelect
CreateTempTableUsingAsSelect.CreateTempTableUsingAsSelect ( String tableName, String provider, org.apache.spark.sql.SaveMode mode, scala.collection.immutable.Map<String,String> options, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan query )
CreateTempTableUsingAsSelect.curried ( ) [static]  :  scala.Function1<String,scala.Function1<String,scala.Function1<org.apache.spark.sql.SaveMode,scala.Function1<scala.collection.immutable.Map<String,String>,scala.Function1<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,CreateTempTableUsingAsSelect>>>>>
CreateTempTableUsingAsSelect.equals ( Object p1 )  :  boolean
CreateTempTableUsingAsSelect.hashCode ( )  :  int
CreateTempTableUsingAsSelect.mode ( )  :  org.apache.spark.sql.SaveMode
CreateTempTableUsingAsSelect.options ( )  :  scala.collection.immutable.Map<String,String>
CreateTempTableUsingAsSelect.productArity ( )  :  int
CreateTempTableUsingAsSelect.productElement ( int p1 )  :  Object
CreateTempTableUsingAsSelect.productIterator ( )  :  scala.collection.Iterator<Object>
CreateTempTableUsingAsSelect.productPrefix ( )  :  String
CreateTempTableUsingAsSelect.provider ( )  :  String
CreateTempTableUsingAsSelect.query ( )  :  org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
CreateTempTableUsingAsSelect.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<scala.runtime.Nothing.>
CreateTempTableUsingAsSelect.tableName ( )  :  String
CreateTempTableUsingAsSelect.tupled ( ) [static]  :  scala.Function1<scala.Tuple5<String,String,org.apache.spark.sql.SaveMode,scala.collection.immutable.Map<String,String>,org.apache.spark.sql.catalyst.plans.logical.LogicalPlan>,CreateTempTableUsingAsSelect>

spark-sql_2.10-1.3.0.jar, DataFrame.class
package org.apache.spark.sql
DataFrame.agg ( java.util.Map<String,String> exprs )  :  DataFrame
DataFrame.agg ( Column expr, Column... exprs )  :  DataFrame
DataFrame.agg ( Column expr, scala.collection.Seq<Column> exprs )  :  DataFrame
DataFrame.agg ( scala.collection.immutable.Map<String,String> exprs )  :  DataFrame
DataFrame.agg ( scala.Tuple2<String,String> aggExpr, scala.collection.Seq<scala.Tuple2<String,String>> aggExprs )  :  DataFrame
DataFrame.apply ( String colName )  :  Column
DataFrame.as ( scala.Symbol alias )  :  DataFrame
DataFrame.as ( String alias )  :  DataFrame
DataFrame.cache ( )  :  DataFrame
DataFrame.cache ( )  :  RDDApi
DataFrame.col ( String colName )  :  Column
DataFrame.collect ( )  :  Object
DataFrame.collect ( )  :  Row[ ]
DataFrame.collectAsList ( )  :  java.util.List<Row>
DataFrame.columns ( )  :  String[ ]
DataFrame.count ( )  :  long
DataFrame.createJDBCTable ( String url, String table, boolean allowExisting )  :  void
DataFrame.DataFrame ( SQLContext sqlContext, catalyst.plans.logical.LogicalPlan logicalPlan )
DataFrame.DataFrame ( SQLContext sqlContext, SQLContext.QueryExecution queryExecution )
DataFrame.distinct ( )  :  DataFrame
DataFrame.dtypes ( )  :  scala.Tuple2<String,String>[ ]
DataFrame.except ( DataFrame other )  :  DataFrame
DataFrame.explain ( )  :  void
DataFrame.explain ( boolean extended )  :  void
DataFrame.explode ( scala.collection.Seq<Column> input, scala.Function1<Row,scala.collection.TraversableOnce<A>> f, scala.reflect.api.TypeTags.TypeTag<A> p3 )  :  DataFrame
DataFrame.explode ( String inputColumn, String outputColumn, scala.Function1<A,scala.collection.TraversableOnce<B>> f, scala.reflect.api.TypeTags.TypeTag<B> p4 )  :  DataFrame
DataFrame.filter ( Column condition )  :  DataFrame
DataFrame.filter ( String conditionExpr )  :  DataFrame
DataFrame.first ( )  :  Object
DataFrame.first ( )  :  Row
DataFrame.flatMap ( scala.Function1<Row,scala.collection.TraversableOnce<R>> f, scala.reflect.ClassTag<R> p2 )  :  org.apache.spark.rdd.RDD<R>
DataFrame.foreach ( scala.Function1<Row,scala.runtime.BoxedUnit> f )  :  void
DataFrame.foreachPartition ( scala.Function1<scala.collection.Iterator<Row>,scala.runtime.BoxedUnit> f )  :  void
DataFrame.groupBy ( Column... cols )  :  GroupedData
DataFrame.groupBy ( scala.collection.Seq<Column> cols )  :  GroupedData
DataFrame.groupBy ( String col1, scala.collection.Seq<String> cols )  :  GroupedData
DataFrame.groupBy ( String col1, String... cols )  :  GroupedData
DataFrame.head ( )  :  Row
DataFrame.head ( int n )  :  Row[ ]
DataFrame.insertInto ( String tableName )  :  void
DataFrame.insertInto ( String tableName, boolean overwrite )  :  void
DataFrame.insertIntoJDBC ( String url, String table, boolean overwrite )  :  void
DataFrame.intersect ( DataFrame other )  :  DataFrame
DataFrame.isLocal ( )  :  boolean
DataFrame.javaRDD ( )  :  org.apache.spark.api.java.JavaRDD<Row>
DataFrame.javaToPython ( )  :  org.apache.spark.api.java.JavaRDD<byte[ ]>
DataFrame.join ( DataFrame right )  :  DataFrame
DataFrame.join ( DataFrame right, Column joinExprs )  :  DataFrame
DataFrame.join ( DataFrame right, Column joinExprs, String joinType )  :  DataFrame
DataFrame.limit ( int n )  :  DataFrame
DataFrame.logicalPlan ( )  :  catalyst.plans.logical.LogicalPlan
DataFrame.map ( scala.Function1<Row,R> f, scala.reflect.ClassTag<R> p2 )  :  org.apache.spark.rdd.RDD<R>
DataFrame.mapPartitions ( scala.Function1<scala.collection.Iterator<Row>,scala.collection.Iterator<R>> f, scala.reflect.ClassTag<R> p2 )  :  org.apache.spark.rdd.RDD<R>
DataFrame.numericColumns ( )  :  scala.collection.Seq<catalyst.expressions.Expression>
DataFrame.orderBy ( Column... sortExprs )  :  DataFrame
DataFrame.orderBy ( scala.collection.Seq<Column> sortExprs )  :  DataFrame
DataFrame.orderBy ( String sortCol, scala.collection.Seq<String> sortCols )  :  DataFrame
DataFrame.orderBy ( String sortCol, String... sortCols )  :  DataFrame
DataFrame.persist ( )  :  DataFrame
DataFrame.persist ( )  :  RDDApi
DataFrame.persist ( org.apache.spark.storage.StorageLevel newLevel )  :  DataFrame
DataFrame.persist ( org.apache.spark.storage.StorageLevel newLevel )  :  RDDApi
DataFrame.printSchema ( )  :  void
DataFrame.queryExecution ( )  :  SQLContext.QueryExecution
DataFrame.rdd ( )  :  org.apache.spark.rdd.RDD<Row>
DataFrame.registerTempTable ( String tableName )  :  void
DataFrame.repartition ( int numPartitions )  :  DataFrame
DataFrame.resolve ( String colName )  :  catalyst.expressions.NamedExpression
DataFrame.sample ( boolean withReplacement, double fraction )  :  DataFrame
DataFrame.sample ( boolean withReplacement, double fraction, long seed )  :  DataFrame
DataFrame.save ( String path )  :  void
DataFrame.save ( String path, SaveMode mode )  :  void
DataFrame.save ( String path, String source )  :  void
DataFrame.save ( String path, String source, SaveMode mode )  :  void
DataFrame.save ( String source, SaveMode mode, java.util.Map<String,String> options )  :  void
DataFrame.save ( String source, SaveMode mode, scala.collection.immutable.Map<String,String> options )  :  void
DataFrame.saveAsParquetFile ( String path )  :  void
DataFrame.saveAsTable ( String tableName )  :  void
DataFrame.saveAsTable ( String tableName, SaveMode mode )  :  void
DataFrame.saveAsTable ( String tableName, String source )  :  void
DataFrame.saveAsTable ( String tableName, String source, SaveMode mode )  :  void
DataFrame.saveAsTable ( String tableName, String source, SaveMode mode, java.util.Map<String,String> options )  :  void
DataFrame.saveAsTable ( String tableName, String source, SaveMode mode, scala.collection.immutable.Map<String,String> options )  :  void
DataFrame.schema ( )  :  types.StructType
DataFrame.select ( Column... cols )  :  DataFrame
DataFrame.select ( scala.collection.Seq<Column> cols )  :  DataFrame
DataFrame.select ( String col, scala.collection.Seq<String> cols )  :  DataFrame
DataFrame.select ( String col, String... cols )  :  DataFrame
DataFrame.selectExpr ( scala.collection.Seq<String> exprs )  :  DataFrame
DataFrame.selectExpr ( String... exprs )  :  DataFrame
DataFrame.show ( )  :  void
DataFrame.show ( int numRows )  :  void
DataFrame.showString ( int numRows )  :  String
DataFrame.sort ( Column... sortExprs )  :  DataFrame
DataFrame.sort ( scala.collection.Seq<Column> sortExprs )  :  DataFrame
DataFrame.sort ( String sortCol, scala.collection.Seq<String> sortCols )  :  DataFrame
DataFrame.sort ( String sortCol, String... sortCols )  :  DataFrame
DataFrame.sqlContext ( )  :  SQLContext
DataFrame.take ( int n )  :  Object
DataFrame.take ( int n )  :  Row[ ]
DataFrame.toDF ( )  :  DataFrame
DataFrame.toDF ( scala.collection.Seq<String> colNames )  :  DataFrame
DataFrame.toDF ( String... colNames )  :  DataFrame
DataFrame.toJavaRDD ( )  :  org.apache.spark.api.java.JavaRDD<Row>
DataFrame.toJSON ( )  :  org.apache.spark.rdd.RDD<String>
DataFrame.toString ( )  :  String
DataFrame.unionAll ( DataFrame other )  :  DataFrame
DataFrame.unpersist ( )  :  DataFrame
DataFrame.unpersist ( )  :  RDDApi
DataFrame.unpersist ( boolean blocking )  :  DataFrame
DataFrame.unpersist ( boolean blocking )  :  RDDApi
DataFrame.where ( Column condition )  :  DataFrame
DataFrame.withColumn ( String colName, Column col )  :  DataFrame
DataFrame.withColumnRenamed ( String existingName, String newName )  :  DataFrame
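
DataFrame is the 1.3.0 replacement for SchemaRDD, and neither it nor the companion Column and GroupedData classes exist in 1.2.0, so client code written against this API cannot be ported by swapping jars. A short hypothetical usage sketch built only from methods in the list above (the path and column names are placeholders):

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext

    object DataFrameClient {
      def main(args: Array[String]): Unit = {
        val sc  = new SparkContext(new SparkConf().setAppName("df-client").setMaster("local[*]"))
        val ctx = new SQLContext(sc)

        // jsonFile returns a DataFrame in 1.3.0; on 1.2.0 this fails at link time,
        // either with NoSuchMethodError for the changed jsonFile descriptor or with
        // NoClassDefFoundError once DataFrame itself is resolved.
        val people = ctx.jsonFile("/tmp/people.json")
        people
          .filter(people("age").gt(21))   // DataFrame.apply, Column.gt, DataFrame.filter
          .groupBy("name")                // DataFrame.groupBy(String, String...)
          .agg(Map("age" -> "max"))       // GroupedData.agg(Map<String,String>)
          .show()                         // DataFrame.show()
      }
    }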

spark-sql_2.10-1.3.0.jar, DataFrameHolder.class
package org.apache.spark.sql
DataFrameHolder.andThen ( scala.Function1<DataFrameHolder,A> p1 ) [static]  :  scala.Function1<DataFrame,A>
DataFrameHolder.canEqual ( Object p1 )  :  boolean
DataFrameHolder.compose ( scala.Function1<A,DataFrame> p1 ) [static]  :  scala.Function1<A,DataFrameHolder>
DataFrameHolder.copy ( DataFrame df )  :  DataFrameHolder
DataFrameHolder.DataFrameHolder ( DataFrame df )
DataFrameHolder.df ( )  :  DataFrame
DataFrameHolder.equals ( Object p1 )  :  boolean
DataFrameHolder.hashCode ( )  :  int
DataFrameHolder.productArity ( )  :  int
DataFrameHolder.productElement ( int p1 )  :  Object
DataFrameHolder.productIterator ( )  :  scala.collection.Iterator<Object>
DataFrameHolder.productPrefix ( )  :  String
DataFrameHolder.toDF ( )  :  DataFrame
DataFrameHolder.toDF ( scala.collection.Seq<String> colNames )  :  DataFrame
DataFrameHolder.toString ( )  :  String

spark-sql_2.10-1.3.0.jar, DDLParser.class
package org.apache.spark.sql.sources
DDLParser.apply ( String input, boolean exceptionOnError )  :  scala.Option<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan>
DDLParser.DDLParser ( scala.Function1<String,org.apache.spark.sql.catalyst.plans.logical.LogicalPlan> parseQuery )

spark-sql_2.10-1.3.0.jar, DescribeCommand.class
package org.apache.spark.sql.execution
DescribeCommand.copy ( SparkPlan child, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, boolean isExtended )  :  DescribeCommand
DescribeCommand.curried ( ) [static]  :  scala.Function1<SparkPlan,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<Object,DescribeCommand>>>
DescribeCommand.DescribeCommand ( SparkPlan child, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, boolean isExtended )
DescribeCommand.isExtended ( )  :  boolean
DescribeCommand.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
DescribeCommand.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<SparkPlan,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,Object>,DescribeCommand>

spark-sql_2.10-1.3.0.jar, DescribeCommand.class
package org.apache.spark.sql.sources
DescribeCommand.canEqual ( Object p1 )  :  boolean
DescribeCommand.copy ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan table, boolean isExtended )  :  DescribeCommand
DescribeCommand.curried ( ) [static]  :  scala.Function1<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,scala.Function1<Object,DescribeCommand>>
DescribeCommand.DescribeCommand ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan table, boolean isExtended )
DescribeCommand.equals ( Object p1 )  :  boolean
DescribeCommand.hashCode ( )  :  int
DescribeCommand.isExtended ( )  :  boolean
DescribeCommand.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.AttributeReference>
DescribeCommand.productArity ( )  :  int
DescribeCommand.productElement ( int p1 )  :  Object
DescribeCommand.productIterator ( )  :  scala.collection.Iterator<Object>
DescribeCommand.productPrefix ( )  :  String
DescribeCommand.table ( )  :  org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
DescribeCommand.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,Object>,DescribeCommand>

spark-sql_2.10-1.3.0.jar, DriverQuirks.class
package org.apache.spark.sql.jdbc
DriverQuirks.DriverQuirks ( )
DriverQuirks.get ( String p1 ) [static]  :  DriverQuirks
DriverQuirks.getCatalystType ( int p1, String p2, int p3, org.apache.spark.sql.types.MetadataBuilder p4 ) [abstract]  :  org.apache.spark.sql.types.DataType
DriverQuirks.getJDBCType ( org.apache.spark.sql.types.DataType p1 ) [abstract]  :  scala.Tuple2<String,scala.Option<Object>>

spark-sql_2.10-1.3.0.jar, Encoder<T>.class
package org.apache.spark.sql.columnar.compression
Encoder<T>.gatherCompressibilityStats ( org.apache.spark.sql.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.3.0.jar, EvaluatePython.class
package org.apache.spark.sql.execution
EvaluatePython.fromJava ( Object p1, org.apache.spark.sql.types.DataType p2 ) [static]  :  Object
EvaluatePython.references ( )  :  org.apache.spark.sql.catalyst.expressions.AttributeSet
EvaluatePython.rowToArray ( org.apache.spark.sql.Row p1, scala.collection.Seq<org.apache.spark.sql.types.DataType> p2 ) [static]  :  Object[ ]
EvaluatePython.toJava ( Object p1, org.apache.spark.sql.types.DataType p2 ) [static]  :  Object

spark-sql_2.10-1.3.0.jar, ExecutedCommand.class
package org.apache.spark.sql.execution
ExecutedCommand.executeTake ( int limit )  :  org.apache.spark.sql.Row[ ]

spark-sql_2.10-1.3.0.jar, Expand.class
package org.apache.spark.sql.execution
Expand.canEqual ( Object p1 )  :  boolean
Expand.child ( )  :  org.apache.spark.sql.catalyst.trees.TreeNode
Expand.child ( )  :  SparkPlan
Expand.children ( )  :  scala.collection.immutable.List<SparkPlan>
Expand.children ( )  :  scala.collection.Seq
Expand.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.GroupExpression> projections, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, SparkPlan child )  :  Expand
Expand.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.GroupExpression>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<SparkPlan,Expand>>>
Expand.equals ( Object p1 )  :  boolean
Expand.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
Expand.Expand ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.GroupExpression> projections, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, SparkPlan child )
Expand.hashCode ( )  :  int
Expand.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
Expand.outputPartitioning ( )  :  org.apache.spark.sql.catalyst.plans.physical.Partitioning
Expand.productArity ( )  :  int
Expand.productElement ( int p1 )  :  Object
Expand.productIterator ( )  :  scala.collection.Iterator<Object>
Expand.productPrefix ( )  :  String
Expand.projections ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.GroupExpression>
Expand.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.GroupExpression>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,SparkPlan>,Expand>

spark-sql_2.10-1.3.0.jar, ExperimentalMethods.class
package org.apache.spark.sql
ExperimentalMethods.ExperimentalMethods ( SQLContext sqlContext )
ExperimentalMethods.extraStrategies ( )  :  scala.collection.Seq<catalyst.planning.GenericStrategy<execution.SparkPlan>>

spark-sql_2.10-1.3.0.jar, ExplainCommand.class
package org.apache.spark.sql.execution
ExplainCommand.copy ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan logicalPlan, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, boolean extended )  :  ExplainCommand
ExplainCommand.curried ( ) [static]  :  scala.Function1<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<Object,ExplainCommand>>>
ExplainCommand.ExplainCommand ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan logicalPlan, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, boolean extended )
ExplainCommand.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
ExplainCommand.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,Object>,ExplainCommand>
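
The ExplainCommand above backs the SQL-level EXPLAIN statement. A minimal, hedged sketch of exercising it from client code, assuming an existing SQLContext named sqlContext and a registered temporary table "people" (both assumptions, not part of this report):

    // EXPLAIN [EXTENDED] <query> is parsed into an ExplainCommand plan.
    val plans = sqlContext.sql("EXPLAIN EXTENDED SELECT name FROM people WHERE age > 21")
    plans.collect().foreach(row => println(row.getString(0)))  // each Row carries one line of the plan text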

spark-sql_2.10-1.3.0.jar, GroupedData.class
package org.apache.spark.sql
GroupedData.agg ( java.util.Map<String,String> exprs )  :  DataFrame
GroupedData.agg ( Column expr, scala.collection.Seq<Column> exprs )  :  DataFrame
GroupedData.agg ( scala.collection.immutable.Map<String,String> exprs )  :  DataFrame
GroupedData.agg ( scala.Tuple2<String,String> aggExpr, scala.collection.Seq<scala.Tuple2<String,String>> aggExprs )  :  DataFrame
GroupedData.count ( )  :  DataFrame
GroupedData.GroupedData ( DataFrame df, scala.collection.Seq<catalyst.expressions.Expression> groupingExprs )
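
GroupedData is what DataFrame.groupBy returns in 1.3; the agg overloads above accept either a name-to-function map or Column expressions. A hedged sketch, assuming a DataFrame df with "department", "salary" and "age" columns:

    import org.apache.spark.sql.functions.max

    val byDept = df.groupBy("department")                       // GroupedData
    byDept.count().show()                                       // one row per department plus a "count" column
    byDept.agg(Map("salary" -> "avg", "age" -> "max")).show()   // map form: column name -> aggregate function
    byDept.agg(max(df("salary"))).show()                        // Column form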

spark-sql_2.10-1.3.0.jar, HashedRelation.class
package org.apache.spark.sql.execution.joins
HashedRelation.get ( org.apache.spark.sql.Row p1 ) [abstract]  :  org.apache.spark.util.collection.CompactBuffer<org.apache.spark.sql.Row>

spark-sql_2.10-1.3.0.jar, HashOuterJoin.class
package org.apache.spark.sql.execution.joins
HashOuterJoin.HashOuterJoin..boundCondition ( )  :  scala.Function1<org.apache.spark.sql.Row,Object>
HashOuterJoin.HashOuterJoin..fullOuterIterator ( org.apache.spark.sql.Row key, scala.collection.Iterable<org.apache.spark.sql.Row> leftIter, scala.collection.Iterable<org.apache.spark.sql.Row> rightIter, org.apache.spark.sql.catalyst.expressions.JoinedRow joinedRow )  :  scala.collection.Iterator<org.apache.spark.sql.Row>
HashOuterJoin.HashOuterJoin..leftNullRow ( )  :  org.apache.spark.sql.catalyst.expressions.GenericRow
HashOuterJoin.HashOuterJoin..leftOuterIterator ( org.apache.spark.sql.Row key, org.apache.spark.sql.catalyst.expressions.JoinedRow joinedRow, scala.collection.Iterable<org.apache.spark.sql.Row> rightIter )  :  scala.collection.Iterator<org.apache.spark.sql.Row>
HashOuterJoin.HashOuterJoin..rightNullRow ( )  :  org.apache.spark.sql.catalyst.expressions.GenericRow
HashOuterJoin.HashOuterJoin..rightOuterIterator ( org.apache.spark.sql.Row key, scala.collection.Iterable<org.apache.spark.sql.Row> leftIter, org.apache.spark.sql.catalyst.expressions.JoinedRow joinedRow )  :  scala.collection.Iterator<org.apache.spark.sql.Row>

spark-sql_2.10-1.3.0.jar, InsertableRelation.class
package org.apache.spark.sql.sources
InsertableRelation.insert ( org.apache.spark.sql.DataFrame p1, boolean p2 ) [abstract]  :  void
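
InsertableRelation is the hook a data source implements so that INSERT/save calls can hand it a DataFrame to write. A minimal sketch under stated assumptions; the class name and the write logic are illustrative, not from this report:

    import org.apache.spark.sql.{DataFrame, SQLContext}
    import org.apache.spark.sql.sources.{BaseRelation, InsertableRelation}
    import org.apache.spark.sql.types.StructType

    // Illustrative relation that accepts inserts; a real implementation would write to its store.
    class ExampleRelation(val sqlContext: SQLContext, val schema: StructType)
        extends BaseRelation with InsertableRelation {

      override def insert(data: DataFrame, overwrite: Boolean): Unit = {
        if (overwrite) {
          // assumption: the backing store would be truncated here before writing
        }
        data.rdd.foreachPartition(_ => ())  // placeholder for the actual write
      }
    }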

spark-sql_2.10-1.3.0.jar, InsertIntoDataSource.class
package org.apache.spark.sql.sources
InsertIntoDataSource.canEqual ( Object p1 )  :  boolean
InsertIntoDataSource.copy ( LogicalRelation logicalRelation, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan query, boolean overwrite )  :  InsertIntoDataSource
InsertIntoDataSource.curried ( ) [static]  :  scala.Function1<LogicalRelation,scala.Function1<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,scala.Function1<Object,InsertIntoDataSource>>>
InsertIntoDataSource.equals ( Object p1 )  :  boolean
InsertIntoDataSource.hashCode ( )  :  int
InsertIntoDataSource.InsertIntoDataSource ( LogicalRelation logicalRelation, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan query, boolean overwrite )
InsertIntoDataSource.logicalRelation ( )  :  LogicalRelation
InsertIntoDataSource.overwrite ( )  :  boolean
InsertIntoDataSource.productArity ( )  :  int
InsertIntoDataSource.productElement ( int p1 )  :  Object
InsertIntoDataSource.productIterator ( )  :  scala.collection.Iterator<Object>
InsertIntoDataSource.productPrefix ( )  :  String
InsertIntoDataSource.query ( )  :  org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
InsertIntoDataSource.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
InsertIntoDataSource.tupled ( ) [static]  :  scala.Function1<scala.Tuple3<LogicalRelation,org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,Object>,InsertIntoDataSource>

spark-sql_2.10-1.3.0.jar, IntColumnStats.class
package org.apache.spark.sql.columnar
IntColumnStats.collectedStatistics ( )  :  org.apache.spark.sql.Row
IntColumnStats.gatherStats ( org.apache.spark.sql.Row row, int ordinal )  :  void

spark-sql_2.10-1.3.0.jar, IsNotNull.class
package org.apache.spark.sql.sources
IsNotNull.andThen ( scala.Function1<IsNotNull,A> p1 ) [static]  :  scala.Function1<String,A>
IsNotNull.attribute ( )  :  String
IsNotNull.canEqual ( Object p1 )  :  boolean
IsNotNull.compose ( scala.Function1<A,String> p1 ) [static]  :  scala.Function1<A,IsNotNull>
IsNotNull.copy ( String attribute )  :  IsNotNull
IsNotNull.equals ( Object p1 )  :  boolean
IsNotNull.hashCode ( )  :  int
IsNotNull.IsNotNull ( String attribute )
IsNotNull.productArity ( )  :  int
IsNotNull.productElement ( int p1 )  :  Object
IsNotNull.productIterator ( )  :  scala.collection.Iterator<Object>
IsNotNull.productPrefix ( )  :  String
IsNotNull.toString ( )  :  String

spark-sql_2.10-1.3.0.jar, IsNull.class
package org.apache.spark.sql.sources
IsNull.andThen ( scala.Function1<IsNull,A> p1 ) [static]  :  scala.Function1<String,A>
IsNull.attribute ( )  :  String
IsNull.canEqual ( Object p1 )  :  boolean
IsNull.compose ( scala.Function1<A,String> p1 ) [static]  :  scala.Function1<A,IsNull>
IsNull.copy ( String attribute )  :  IsNull
IsNull.equals ( Object p1 )  :  boolean
IsNull.hashCode ( )  :  int
IsNull.IsNull ( String attribute )
IsNull.productArity ( )  :  int
IsNull.productElement ( int p1 )  :  Object
IsNull.productIterator ( )  :  scala.collection.Iterator<Object>
IsNull.productPrefix ( )  :  String
IsNull.toString ( )  :  String

spark-sql_2.10-1.3.0.jar, JavaBigDecimalSerializer.class
package org.apache.spark.sql.execution
JavaBigDecimalSerializer.JavaBigDecimalSerializer ( )

spark-sql_2.10-1.3.0.jar, JDBCPartition.class
package org.apache.spark.sql.jdbc
JDBCPartition.canEqual ( Object p1 )  :  boolean
JDBCPartition.copy ( String whereClause, int idx )  :  JDBCPartition
JDBCPartition.curried ( ) [static]  :  scala.Function1<String,scala.Function1<Object,JDBCPartition>>
JDBCPartition.equals ( Object p1 )  :  boolean
JDBCPartition.hashCode ( )  :  int
JDBCPartition.idx ( )  :  int
JDBCPartition.index ( )  :  int
JDBCPartition.JDBCPartition ( String whereClause, int idx )
JDBCPartition.productArity ( )  :  int
JDBCPartition.productElement ( int p1 )  :  Object
JDBCPartition.productIterator ( )  :  scala.collection.Iterator<Object>
JDBCPartition.productPrefix ( )  :  String
JDBCPartition.toString ( )  :  String
JDBCPartition.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<String,Object>,JDBCPartition>
JDBCPartition.whereClause ( )  :  String

spark-sql_2.10-1.3.0.jar, JDBCPartitioningInfo.class
package org.apache.spark.sql.jdbc
JDBCPartitioningInfo.canEqual ( Object p1 )  :  boolean
JDBCPartitioningInfo.column ( )  :  String
JDBCPartitioningInfo.copy ( String column, long lowerBound, long upperBound, int numPartitions )  :  JDBCPartitioningInfo
JDBCPartitioningInfo.curried ( ) [static]  :  scala.Function1<String,scala.Function1<Object,scala.Function1<Object,scala.Function1<Object,JDBCPartitioningInfo>>>>
JDBCPartitioningInfo.equals ( Object p1 )  :  boolean
JDBCPartitioningInfo.hashCode ( )  :  int
JDBCPartitioningInfo.JDBCPartitioningInfo ( String column, long lowerBound, long upperBound, int numPartitions )
JDBCPartitioningInfo.lowerBound ( )  :  long
JDBCPartitioningInfo.numPartitions ( )  :  int
JDBCPartitioningInfo.productArity ( )  :  int
JDBCPartitioningInfo.productElement ( int p1 )  :  Object
JDBCPartitioningInfo.productIterator ( )  :  scala.collection.Iterator<Object>
JDBCPartitioningInfo.productPrefix ( )  :  String
JDBCPartitioningInfo.toString ( )  :  String
JDBCPartitioningInfo.tupled ( ) [static]  :  scala.Function1<scala.Tuple4<String,Object,Object,Object>,JDBCPartitioningInfo>
JDBCPartitioningInfo.upperBound ( )  :  long
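
The fields of JDBCPartitioningInfo (column, lowerBound, upperBound, numPartitions) mirror the arguments of the partitioned SQLContext.jdbc overload listed further down. A hedged sketch, assuming an existing SQLContext named sqlContext; the URL and table are placeholders:

    // Read "people" as 4 partitions, splitting the numeric "id" column over [0, 100000].
    val people = sqlContext.jdbc(
      "jdbc:postgresql://dbhost/app",  // url (placeholder)
      "people",                        // table
      "id",                            // partitioning column
      0L,                              // lowerBound
      100000L,                         // upperBound
      4)                               // numPartitions
    people.printSchema()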

spark-sql_2.10-1.3.0.jar, JDBCRDD.class
package org.apache.spark.sql.jdbc
JDBCRDD.BinaryConversion ( )  :  JDBCRDD.BinaryConversion.
JDBCRDD.BinaryLongConversion ( )  :  JDBCRDD.BinaryLongConversion.
JDBCRDD.BooleanConversion ( )  :  JDBCRDD.BooleanConversion.
JDBCRDD.compute ( org.apache.spark.Partition thePart, org.apache.spark.TaskContext context )  :  Object
JDBCRDD.DateConversion ( )  :  JDBCRDD.DateConversion.
JDBCRDD.DecimalConversion ( )  :  JDBCRDD.DecimalConversion.
JDBCRDD.DoubleConversion ( )  :  JDBCRDD.DoubleConversion.
JDBCRDD.FloatConversion ( )  :  JDBCRDD.FloatConversion.
JDBCRDD.getConnector ( String p1, String p2 ) [static]  :  scala.Function0<java.sql.Connection>
JDBCRDD.getConversions ( org.apache.spark.sql.types.StructType schema )  :  JDBCRDD.JDBCConversion[ ]
JDBCRDD.getPartitions ( )  :  org.apache.spark.Partition[ ]
JDBCRDD.IntegerConversion ( )  :  JDBCRDD.IntegerConversion.
JDBCRDD.JDBCRDD ( org.apache.spark.SparkContext sc, scala.Function0<java.sql.Connection> getConnection, org.apache.spark.sql.types.StructType schema, String fqTable, String[ ] columns, org.apache.spark.sql.sources.Filter[ ] filters, org.apache.spark.Partition[ ] partitions )
JDBCRDD.LongConversion ( )  :  JDBCRDD.LongConversion.
JDBCRDD.JDBCRDD..columnList ( )  :  String
JDBCRDD.JDBCRDD..compileFilter ( org.apache.spark.sql.sources.Filter f )  :  String
JDBCRDD.JDBCRDD..getWhereClause ( JDBCPartition part )  :  String
JDBCRDD.resolveTable ( String p1, String p2 ) [static]  :  org.apache.spark.sql.types.StructType
JDBCRDD.scanTable ( org.apache.spark.SparkContext p1, org.apache.spark.sql.types.StructType p2, String p3, String p4, String p5, String[ ] p6, org.apache.spark.sql.sources.Filter[ ] p7, org.apache.spark.Partition[ ] p8 ) [static]  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
JDBCRDD.StringConversion ( )  :  JDBCRDD.StringConversion.
JDBCRDD.TimestampConversion ( )  :  JDBCRDD.TimestampConversion.

spark-sql_2.10-1.3.0.jar, JDBCRelation.class
package org.apache.spark.sql.jdbc
JDBCRelation.buildScan ( String[ ] requiredColumns, org.apache.spark.sql.sources.Filter[ ] filters )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
JDBCRelation.canEqual ( Object p1 )  :  boolean
JDBCRelation.columnPartition ( JDBCPartitioningInfo p1 ) [static]  :  org.apache.spark.Partition[ ]
JDBCRelation.copy ( String url, String table, org.apache.spark.Partition[ ] parts, org.apache.spark.sql.SQLContext sqlContext )  :  JDBCRelation
JDBCRelation.equals ( Object p1 )  :  boolean
JDBCRelation.hashCode ( )  :  int
JDBCRelation.JDBCRelation ( String url, String table, org.apache.spark.Partition[ ] parts, org.apache.spark.sql.SQLContext sqlContext )
JDBCRelation.parts ( )  :  org.apache.spark.Partition[ ]
JDBCRelation.productArity ( )  :  int
JDBCRelation.productElement ( int p1 )  :  Object
JDBCRelation.productIterator ( )  :  scala.collection.Iterator<Object>
JDBCRelation.productPrefix ( )  :  String
JDBCRelation.schema ( )  :  org.apache.spark.sql.types.StructType
JDBCRelation.sqlContext ( )  :  org.apache.spark.sql.SQLContext
JDBCRelation.table ( )  :  String
JDBCRelation.toString ( )  :  String
JDBCRelation.url ( )  :  String

spark-sql_2.10-1.3.0.jar, JSONRelation.class
package org.apache.spark.sql.json
JSONRelation.copy ( String path, double samplingRatio, scala.Option<org.apache.spark.sql.types.StructType> userSpecifiedSchema, org.apache.spark.sql.SQLContext sqlContext )  :  JSONRelation
JSONRelation.insert ( org.apache.spark.sql.DataFrame data, boolean overwrite )  :  void
JSONRelation.JSONRelation ( String path, double samplingRatio, scala.Option<org.apache.spark.sql.types.StructType> userSpecifiedSchema, org.apache.spark.sql.SQLContext sqlContext )
JSONRelation.JSONRelation..baseRDD ( )  :  org.apache.spark.rdd.RDD<String>
JSONRelation.path ( )  :  String
JSONRelation.userSpecifiedSchema ( )  :  scala.Option<org.apache.spark.sql.types.StructType>

spark-sql_2.10-1.3.0.jar, LocalTableScan.class
package org.apache.spark.sql.execution
LocalTableScan.canEqual ( Object p1 )  :  boolean
LocalTableScan.children ( )  :  scala.collection.immutable.Nil.
LocalTableScan.children ( )  :  scala.collection.Seq
LocalTableScan.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, scala.collection.Seq<org.apache.spark.sql.Row> rows )  :  LocalTableScan
LocalTableScan.curried ( ) [static]  :  scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.Row>,LocalTableScan>>
LocalTableScan.equals ( Object p1 )  :  boolean
LocalTableScan.execute ( )  :  org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
LocalTableScan.executeCollect ( )  :  org.apache.spark.sql.Row[ ]
LocalTableScan.executeTake ( int limit )  :  org.apache.spark.sql.Row[ ]
LocalTableScan.hashCode ( )  :  int
LocalTableScan.LocalTableScan ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, scala.collection.Seq<org.apache.spark.sql.Row> rows )
LocalTableScan.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
LocalTableScan.productArity ( )  :  int
LocalTableScan.productElement ( int p1 )  :  Object
LocalTableScan.productIterator ( )  :  scala.collection.Iterator<Object>
LocalTableScan.productPrefix ( )  :  String
LocalTableScan.rows ( )  :  scala.collection.Seq<org.apache.spark.sql.Row>
LocalTableScan.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,scala.collection.Seq<org.apache.spark.sql.Row>>,LocalTableScan>

spark-sql_2.10-1.3.0.jar, LogicalLocalTable.class
package org.apache.spark.sql.execution
LogicalLocalTable.canEqual ( Object p1 )  :  boolean
LogicalLocalTable.children ( )  :  scala.collection.immutable.Nil.
LogicalLocalTable.children ( )  :  scala.collection.Seq
LogicalLocalTable.copy ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, scala.collection.Seq<org.apache.spark.sql.Row> rows, org.apache.spark.sql.SQLContext sqlContext )  :  LogicalLocalTable
LogicalLocalTable.equals ( Object p1 )  :  boolean
LogicalLocalTable.hashCode ( )  :  int
LogicalLocalTable.LogicalLocalTable ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output, scala.collection.Seq<org.apache.spark.sql.Row> rows, org.apache.spark.sql.SQLContext sqlContext )
LogicalLocalTable.newInstance ( )  :  org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
LogicalLocalTable.newInstance ( )  :  LogicalLocalTable
LogicalLocalTable.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>
LogicalLocalTable.productArity ( )  :  int
LogicalLocalTable.productElement ( int p1 )  :  Object
LogicalLocalTable.productIterator ( )  :  scala.collection.Iterator<Object>
LogicalLocalTable.productPrefix ( )  :  String
LogicalLocalTable.rows ( )  :  scala.collection.Seq<org.apache.spark.sql.Row>
LogicalLocalTable.sameResult ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan plan )  :  boolean
LogicalLocalTable.statistics ( )  :  org.apache.spark.sql.catalyst.plans.logical.Statistics

spark-sql_2.10-1.3.0.jar, MySQLQuirks.class
package org.apache.spark.sql.jdbc
MySQLQuirks.MySQLQuirks ( )

spark-sql_2.10-1.3.0.jar, NanoTime.class
package org.apache.spark.sql.parquet.timestamp
NanoTime.getJulianDay ( )  :  int
NanoTime.getTimeOfDayNanos ( )  :  long
NanoTime.NanoTime ( )
NanoTime.set ( int julianDay, long timeOfDayNanos )  :  NanoTime
NanoTime.toBinary ( )  :  parquet.io.api.Binary

spark-sql_2.10-1.3.0.jar, NativeColumnType<T>.class
package org.apache.spark.sql.columnar
NativeColumnType<T>.dataType ( )  :  T
NativeColumnType<T>.NativeColumnType ( T dataType, int typeId, int defaultSize )  :  public

spark-sql_2.10-1.3.0.jar, NoQuirks.class
package org.apache.spark.sql.jdbc
NoQuirks.NoQuirks ( )

spark-sql_2.10-1.3.0.jar, Not.class
package org.apache.spark.sql.sources
Not.andThen ( scala.Function1<Not,A> p1 ) [static]  :  scala.Function1<Filter,A>
Not.canEqual ( Object p1 )  :  boolean
Not.child ( )  :  Filter
Not.compose ( scala.Function1<A,Filter> p1 ) [static]  :  scala.Function1<A,Not>
Not.copy ( Filter child )  :  Not
Not.equals ( Object p1 )  :  boolean
Not.hashCode ( )  :  int
Not.Not ( Filter child )
Not.productArity ( )  :  int
Not.productElement ( int p1 )  :  Object
Not.productIterator ( )  :  scala.collection.Iterator<Object>
Not.productPrefix ( )  :  String
Not.toString ( )  :  String

spark-sql_2.10-1.3.0.jar, NullableColumnBuilder.class
package org.apache.spark.sql.columnar
NullableColumnBuilder.appendFrom ( org.apache.spark.sql.Row p1, int p2 ) [abstract]  :  void
NullableColumnBuilder.NullableColumnBuilder..super.appendFrom ( org.apache.spark.sql.Row p1, int p2 ) [abstract]  :  void

spark-sql_2.10-1.3.0.jar, Or.class
package org.apache.spark.sql.sources
Or.canEqual ( Object p1 )  :  boolean
Or.copy ( Filter left, Filter right )  :  Or
Or.curried ( ) [static]  :  scala.Function1<Filter,scala.Function1<Filter,Or>>
Or.equals ( Object p1 )  :  boolean
Or.hashCode ( )  :  int
Or.left ( )  :  Filter
Or.Or ( Filter left, Filter right )
Or.productArity ( )  :  int
Or.productElement ( int p1 )  :  Object
Or.productIterator ( )  :  scala.collection.Iterator<Object>
Or.productPrefix ( )  :  String
Or.right ( )  :  Filter
Or.toString ( )  :  String
Or.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<Filter,Filter>,Or>
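
IsNull, IsNotNull, Not and Or (together with And and the comparison filters) are the predicates Spark SQL pushes down to PrunedFilteredScan sources. A hedged sketch of interpreting them against rows inside a data source; the helper and its ordinal map are illustrative, and unhandled filters are simply kept because Spark re-evaluates every filter after the scan:

    import org.apache.spark.sql.Row
    import org.apache.spark.sql.sources._

    // Illustrative: evaluate a pushed-down Filter against a Row, given column ordinals by name.
    def evalFilter(f: Filter, ordinal: Map[String, Int])(row: Row): Boolean = f match {
      case IsNull(attr)     => row.isNullAt(ordinal(attr))
      case IsNotNull(attr)  => !row.isNullAt(ordinal(attr))
      case Not(child)       => !evalFilter(child, ordinal)(row)
      case Or(left, right)  => evalFilter(left, ordinal)(row) || evalFilter(right, ordinal)(row)
      case And(left, right) => evalFilter(left, ordinal)(row) && evalFilter(right, ordinal)(row)
      case _                => true  // unhandled filters: keep the row, Spark filters again downstream
    }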

spark-sql_2.10-1.3.0.jar, ParquetRelation.class
package org.apache.spark.sql.parquet
ParquetRelation.attributeMap ( )  :  org.apache.spark.sql.catalyst.expressions.AttributeMap<org.apache.spark.sql.catalyst.expressions.Attribute>

spark-sql_2.10-1.3.0.jar, ParquetRelation2.class
package org.apache.spark.sql.parquet
ParquetRelation2.copy ( scala.collection.Seq<String> paths, scala.collection.immutable.Map<String,String> parameters, scala.Option<org.apache.spark.sql.types.StructType> maybeSchema, scala.Option<PartitionSpec> maybePartitionSpec, org.apache.spark.sql.SQLContext sqlContext )  :  ParquetRelation2
ParquetRelation2.DEFAULT_PARTITION_NAME ( ) [static]  :  String
ParquetRelation2.insert ( org.apache.spark.sql.DataFrame data, boolean overwrite )  :  void
ParquetRelation2.isPartitioned ( )  :  boolean
ParquetRelation2.maybePartitionSpec ( )  :  scala.Option<PartitionSpec>
ParquetRelation2.maybeSchema ( )  :  scala.Option<org.apache.spark.sql.types.StructType>
ParquetRelation2.MERGE_SCHEMA ( ) [static]  :  String
ParquetRelation2.newJobContext ( org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.mapreduce.JobID jobId )  :  org.apache.hadoop.mapreduce.JobContext
ParquetRelation2.newTaskAttemptContext ( org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.mapreduce.TaskAttemptID attemptId )  :  org.apache.hadoop.mapreduce.TaskAttemptContext
ParquetRelation2.newTaskAttemptID ( String jtIdentifier, int jobId, boolean isMap, int taskId, int attemptId )  :  org.apache.hadoop.mapreduce.TaskAttemptID
ParquetRelation2.ParquetRelation2..defaultPartitionName ( )  :  String
ParquetRelation2.ParquetRelation2..isSummaryFile ( org.apache.hadoop.fs.Path file )  :  boolean
ParquetRelation2.ParquetRelation2..maybeMetastoreSchema ( )  :  scala.Option<org.apache.spark.sql.types.StructType>
ParquetRelation2.ParquetRelation2..metadataCache ( )  :  ParquetRelation2.MetadataCache
ParquetRelation2.ParquetRelation2..shouldMergeSchemas ( )  :  boolean
ParquetRelation2.parameters ( )  :  scala.collection.immutable.Map<String,String>
ParquetRelation2.ParquetRelation2 ( scala.collection.Seq<String> paths, scala.collection.immutable.Map<String,String> parameters, scala.Option<org.apache.spark.sql.types.StructType> maybeSchema, scala.Option<PartitionSpec> maybePartitionSpec, org.apache.spark.sql.SQLContext sqlContext )
ParquetRelation2.partitionColumns ( )  :  org.apache.spark.sql.types.StructType
ParquetRelation2.partitionSpec ( )  :  PartitionSpec
ParquetRelation2.paths ( )  :  scala.collection.Seq<String>
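
ParquetRelation2 is the data-source-API Parquet implementation behind partition discovery (see the partitionSpec/partitionColumns members above). A hedged sketch of the user-visible effect, assuming an existing SQLContext named sqlContext, a placeholder path, and that the parquetUseDataSourceApi flag listed under SQLConf is left at its default:

    // /data/events is assumed to contain sub-directories such as date=2015-10-01/.
    val events = sqlContext.parquetFile("/data/events")
    events.printSchema()                                   // includes the discovered "date" partition column
    events.filter(events("date") === "2015-10-01").count()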

spark-sql_2.10-1.3.0.jar, ParquetTableScan.class
package org.apache.spark.sql.parquet
ParquetTableScan.requestedPartitionOrdinals ( )  :  scala.Tuple2<Object,Object>[ ]

spark-sql_2.10-1.3.0.jar, ParquetTest.class
package org.apache.spark.sql.parquet
ParquetTest.configuration ( ) [abstract]  :  org.apache.hadoop.conf.Configuration
ParquetTest.makeParquetFile ( org.apache.spark.sql.DataFrame p1, java.io.File p2, scala.reflect.ClassTag<T> p3, scala.reflect.api.TypeTags.TypeTag<T> p4 ) [abstract]  :  void
ParquetTest.makeParquetFile ( scala.collection.Seq<T> p1, java.io.File p2, scala.reflect.ClassTag<T> p3, scala.reflect.api.TypeTags.TypeTag<T> p4 ) [abstract]  :  void
ParquetTest.makePartitionDir ( java.io.File p1, String p2, scala.collection.Seq<scala.Tuple2<String,Object>> p3 ) [abstract]  :  java.io.File
ParquetTest.sqlContext ( ) [abstract]  :  org.apache.spark.sql.SQLContext
ParquetTest.withParquetDataFrame ( scala.collection.Seq<T> p1, scala.Function1<org.apache.spark.sql.DataFrame,scala.runtime.BoxedUnit> p2, scala.reflect.ClassTag<T> p3, scala.reflect.api.TypeTags.TypeTag<T> p4 ) [abstract]  :  void
ParquetTest.withParquetFile ( scala.collection.Seq<T> p1, scala.Function1<String,scala.runtime.BoxedUnit> p2, scala.reflect.ClassTag<T> p3, scala.reflect.api.TypeTags.TypeTag<T> p4 ) [abstract]  :  void
ParquetTest.withParquetTable ( scala.collection.Seq<T> p1, String p2, scala.Function0<scala.runtime.BoxedUnit> p3, scala.reflect.ClassTag<T> p4, scala.reflect.api.TypeTags.TypeTag<T> p5 ) [abstract]  :  void
ParquetTest.withSQLConf ( scala.collection.Seq<scala.Tuple2<String,String>> p1, scala.Function0<scala.runtime.BoxedUnit> p2 ) [abstract]  :  void
ParquetTest.withTempDir ( scala.Function1<java.io.File,scala.runtime.BoxedUnit> p1 ) [abstract]  :  void
ParquetTest.withTempPath ( scala.Function1<java.io.File,scala.runtime.BoxedUnit> p1 ) [abstract]  :  void
ParquetTest.withTempTable ( String p1, scala.Function0<scala.runtime.BoxedUnit> p2 ) [abstract]  :  void

spark-sql_2.10-1.3.0.jar, Partition.class
package org.apache.spark.sql.parquet
Partition.copy ( org.apache.spark.sql.Row values, String path )  :  Partition
Partition.Partition ( org.apache.spark.sql.Row values, String path )
Partition.path ( )  :  String
Partition.values ( )  :  org.apache.spark.sql.Row

spark-sql_2.10-1.3.0.jar, PartitionSpec.class
package org.apache.spark.sql.parquet
PartitionSpec.canEqual ( Object p1 )  :  boolean
PartitionSpec.copy ( org.apache.spark.sql.types.StructType partitionColumns, scala.collection.Seq<Partition> partitions )  :  PartitionSpec
PartitionSpec.curried ( ) [static]  :  scala.Function1<org.apache.spark.sql.types.StructType,scala.Function1<scala.collection.Seq<Partition>,PartitionSpec>>
PartitionSpec.equals ( Object p1 )  :  boolean
PartitionSpec.hashCode ( )  :  int
PartitionSpec.partitionColumns ( )  :  org.apache.spark.sql.types.StructType
PartitionSpec.partitions ( )  :  scala.collection.Seq<Partition>
PartitionSpec.PartitionSpec ( org.apache.spark.sql.types.StructType partitionColumns, scala.collection.Seq<Partition> partitions )
PartitionSpec.productArity ( )  :  int
PartitionSpec.productElement ( int p1 )  :  Object
PartitionSpec.productIterator ( )  :  scala.collection.Iterator<Object>
PartitionSpec.productPrefix ( )  :  String
PartitionSpec.toString ( )  :  String
PartitionSpec.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<org.apache.spark.sql.types.StructType,scala.collection.Seq<Partition>>,PartitionSpec>

spark-sql_2.10-1.3.0.jar, PostgresQuirks.class
package org.apache.spark.sql.jdbc
PostgresQuirks.PostgresQuirks ( )

spark-sql_2.10-1.3.0.jar, PreWriteCheck.class
package org.apache.spark.sql.sources
PreWriteCheck.andThen ( scala.Function1<scala.runtime.BoxedUnit,A> g )  :  scala.Function1<org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,A>
PreWriteCheck.andThen.mcDD.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcDF.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcDI.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcDJ.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcFD.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcFF.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcFI.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcFJ.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcID.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcIF.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcII.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcIJ.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcJD.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcJF.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcJI.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcJJ.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcVD.sp ( scala.Function1<scala.runtime.BoxedUnit,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcVF.sp ( scala.Function1<scala.runtime.BoxedUnit,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcVI.sp ( scala.Function1<scala.runtime.BoxedUnit,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcVJ.sp ( scala.Function1<scala.runtime.BoxedUnit,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcZD.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcZF.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcZI.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.andThen.mcZJ.sp ( scala.Function1<Object,A> g )  :  scala.Function1<Object,A>
PreWriteCheck.apply ( Object v1 )  :  Object
PreWriteCheck.apply ( org.apache.spark.sql.catalyst.plans.logical.LogicalPlan plan )  :  void
PreWriteCheck.apply.mcDD.sp ( double v1 )  :  double
PreWriteCheck.apply.mcDF.sp ( float v1 )  :  double
PreWriteCheck.apply.mcDI.sp ( int v1 )  :  double
PreWriteCheck.apply.mcDJ.sp ( long v1 )  :  double
PreWriteCheck.apply.mcFD.sp ( double v1 )  :  float
PreWriteCheck.apply.mcFF.sp ( float v1 )  :  float
PreWriteCheck.apply.mcFI.sp ( int v1 )  :  float
PreWriteCheck.apply.mcFJ.sp ( long v1 )  :  float
PreWriteCheck.apply.mcID.sp ( double v1 )  :  int
PreWriteCheck.apply.mcIF.sp ( float v1 )  :  int
PreWriteCheck.apply.mcII.sp ( int v1 )  :  int
PreWriteCheck.apply.mcIJ.sp ( long v1 )  :  int
PreWriteCheck.apply.mcJD.sp ( double v1 )  :  long
PreWriteCheck.apply.mcJF.sp ( float v1 )  :  long
PreWriteCheck.apply.mcJI.sp ( int v1 )  :  long
PreWriteCheck.apply.mcJJ.sp ( long v1 )  :  long
PreWriteCheck.apply.mcVD.sp ( double v1 )  :  void
PreWriteCheck.apply.mcVF.sp ( float v1 )  :  void
PreWriteCheck.apply.mcVI.sp ( int v1 )  :  void
PreWriteCheck.apply.mcVJ.sp ( long v1 )  :  void
PreWriteCheck.apply.mcZD.sp ( double v1 )  :  boolean
PreWriteCheck.apply.mcZF.sp ( float v1 )  :  boolean
PreWriteCheck.apply.mcZI.sp ( int v1 )  :  boolean
PreWriteCheck.apply.mcZJ.sp ( long v1 )  :  boolean
PreWriteCheck.canEqual ( Object p1 )  :  boolean
PreWriteCheck.catalog ( )  :  org.apache.spark.sql.catalyst.analysis.Catalog
PreWriteCheck.compose ( scala.Function1<A,org.apache.spark.sql.catalyst.plans.logical.LogicalPlan> g )  :  scala.Function1<A,scala.runtime.BoxedUnit>
PreWriteCheck.compose.mcDD.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcDF.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcDI.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcDJ.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcFD.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcFF.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcFI.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcFJ.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcID.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcIF.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcII.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcIJ.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcJD.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcJF.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcJI.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcJJ.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcVD.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,scala.runtime.BoxedUnit>
PreWriteCheck.compose.mcVF.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,scala.runtime.BoxedUnit>
PreWriteCheck.compose.mcVI.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,scala.runtime.BoxedUnit>
PreWriteCheck.compose.mcVJ.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,scala.runtime.BoxedUnit>
PreWriteCheck.compose.mcZD.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcZF.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcZI.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.compose.mcZJ.sp ( scala.Function1<A,Object> g )  :  scala.Function1<A,Object>
PreWriteCheck.copy ( org.apache.spark.sql.catalyst.analysis.Catalog catalog )  :  PreWriteCheck
PreWriteCheck.equals ( Object p1 )  :  boolean
PreWriteCheck.failAnalysis ( String msg )  :  scala.runtime.Nothing.
PreWriteCheck.hashCode ( )  :  int
PreWriteCheck.PreWriteCheck ( org.apache.spark.sql.catalyst.analysis.Catalog catalog )
PreWriteCheck.productArity ( )  :  int
PreWriteCheck.productElement ( int p1 )  :  Object
PreWriteCheck.productIterator ( )  :  scala.collection.Iterator<Object>
PreWriteCheck.productPrefix ( )  :  String
PreWriteCheck.toString ( )  :  String

spark-sql_2.10-1.3.0.jar, PythonUDF.class
package org.apache.spark.sql.execution
PythonUDF.copy ( String name, byte[ ] command, java.util.Map<String,String> envVars, java.util.List<String> pythonIncludes, String pythonExec, java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>> broadcastVars, org.apache.spark.Accumulator<java.util.List<byte[ ]>> accumulator, org.apache.spark.sql.types.DataType dataType, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> children )  :  PythonUDF
PythonUDF.dataType ( )  :  org.apache.spark.sql.types.DataType
PythonUDF.eval ( org.apache.spark.sql.Row input )  :  Object
PythonUDF.eval ( org.apache.spark.sql.Row input )  :  scala.runtime.Nothing.
PythonUDF.PythonUDF ( String name, byte[ ] command, java.util.Map<String,String> envVars, java.util.List<String> pythonIncludes, String pythonExec, java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>> broadcastVars, org.apache.spark.Accumulator<java.util.List<byte[ ]>> accumulator, org.apache.spark.sql.types.DataType dataType, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> children )

spark-sql_2.10-1.3.0.jar, RefreshTable.class
package org.apache.spark.sql.sources
RefreshTable.canEqual ( Object p1 )  :  boolean
RefreshTable.copy ( String databaseName, String tableName )  :  RefreshTable
RefreshTable.curried ( ) [static]  :  scala.Function1<String,scala.Function1<String,RefreshTable>>
RefreshTable.databaseName ( )  :  String
RefreshTable.equals ( Object p1 )  :  boolean
RefreshTable.hashCode ( )  :  int
RefreshTable.productArity ( )  :  int
RefreshTable.productElement ( int p1 )  :  Object
RefreshTable.productIterator ( )  :  scala.collection.Iterator<Object>
RefreshTable.productPrefix ( )  :  String
RefreshTable.RefreshTable ( String databaseName, String tableName )
RefreshTable.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
RefreshTable.tableName ( )  :  String
RefreshTable.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<String,String>,RefreshTable>

spark-sql_2.10-1.3.0.jar, ResolvedDataSource.class
package org.apache.spark.sql.sources
ResolvedDataSource.apply ( org.apache.spark.sql.SQLContext p1, scala.Option<org.apache.spark.sql.types.StructType> p2, String p3, scala.collection.immutable.Map<String,String> p4 ) [static]  :  ResolvedDataSource
ResolvedDataSource.apply ( org.apache.spark.sql.SQLContext p1, String p2, org.apache.spark.sql.SaveMode p3, scala.collection.immutable.Map<String,String> p4, org.apache.spark.sql.DataFrame p5 ) [static]  :  ResolvedDataSource
ResolvedDataSource.canEqual ( Object p1 )  :  boolean
ResolvedDataSource.copy ( Class<?> provider, BaseRelation relation )  :  ResolvedDataSource
ResolvedDataSource.equals ( Object p1 )  :  boolean
ResolvedDataSource.hashCode ( )  :  int
ResolvedDataSource.lookupDataSource ( String p1 ) [static]  :  Class<?>
ResolvedDataSource.productArity ( )  :  int
ResolvedDataSource.productElement ( int p1 )  :  Object
ResolvedDataSource.productIterator ( )  :  scala.collection.Iterator<Object>
ResolvedDataSource.productPrefix ( )  :  String
ResolvedDataSource.provider ( )  :  Class<?>
ResolvedDataSource.relation ( )  :  BaseRelation
ResolvedDataSource.ResolvedDataSource ( Class<?> provider, BaseRelation relation )
ResolvedDataSource.toString ( )  :  String

spark-sql_2.10-1.3.0.jar, RowWriteSupport.class
package org.apache.spark.sql.parquet
RowWriteSupport.attributes ( )  :  org.apache.spark.sql.catalyst.expressions.Attribute[ ]
RowWriteSupport.attributes_.eq ( org.apache.spark.sql.catalyst.expressions.Attribute[ ] p1 )  :  void
RowWriteSupport.write ( org.apache.spark.sql.Row record )  :  void
RowWriteSupport.writeArray ( org.apache.spark.sql.types.ArrayType schema, scala.collection.Seq<Object> array )  :  void
RowWriteSupport.writeDecimal ( org.apache.spark.sql.types.Decimal decimal, int precision )  :  void
RowWriteSupport.writeMap ( org.apache.spark.sql.types.MapType schema, scala.collection.immutable.Map<?,Object> map )  :  void
RowWriteSupport.writePrimitive ( org.apache.spark.sql.types.DataType schema, Object value )  :  void
RowWriteSupport.writeStruct ( org.apache.spark.sql.types.StructType schema, org.apache.spark.sql.Row struct )  :  void
RowWriteSupport.writeTimestamp ( java.sql.Timestamp ts )  :  void
RowWriteSupport.writeValue ( org.apache.spark.sql.types.DataType schema, Object value )  :  void

spark-sql_2.10-1.3.0.jar, SaveMode.class
package org.apache.spark.sql
SaveMode.valueOf ( String name ) [static]  :  SaveMode
SaveMode.values ( ) [static]  :  SaveMode[ ]
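
SaveMode is a plain Java enum, so the two listed members are the compiler-generated enum helpers. A trivial sketch:

    import org.apache.spark.sql.SaveMode

    SaveMode.values().foreach(println)        // the four modes: Append, Overwrite, ErrorIfExists, Ignore
    val mode = SaveMode.valueOf("Overwrite")  // look a mode up by name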

spark-sql_2.10-1.3.0.jar, ScalaBigDecimalSerializer.class
package org.apache.spark.sql.execution
ScalaBigDecimalSerializer.ScalaBigDecimalSerializer ( )

spark-sql_2.10-1.3.0.jar, SchemaRelationProvider.class
package org.apache.spark.sql.sources
SchemaRelationProvider.createRelation ( org.apache.spark.sql.SQLContext p1, scala.collection.immutable.Map<String,String> p2, org.apache.spark.sql.types.StructType p3 ) [abstract]  :  BaseRelation
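
SchemaRelationProvider is the provider variant that receives a user-specified schema (for example from CREATE TEMPORARY TABLE t (columns) USING source). A minimal sketch under stated assumptions; the provider class and its empty relation are illustrative:

    import org.apache.spark.rdd.RDD
    import org.apache.spark.sql.{Row, SQLContext}
    import org.apache.spark.sql.sources.{BaseRelation, SchemaRelationProvider, TableScan}
    import org.apache.spark.sql.types.StructType

    // Illustrative provider that materializes an empty relation with the caller's schema.
    class ExampleProvider extends SchemaRelationProvider {
      override def createRelation(
          ctx: SQLContext,
          parameters: Map[String, String],
          userSchema: StructType): BaseRelation =
        new BaseRelation with TableScan {
          override def sqlContext: SQLContext = ctx
          override def schema: StructType = userSchema
          override def buildScan(): RDD[Row] = ctx.sparkContext.parallelize(Seq.empty[Row])
        }
    }

In practice such a provider is typically exposed as a class named DefaultSource inside the data source's package so the short package name can be used in the USING clause.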

spark-sql_2.10-1.3.0.jar, SetCommand.class
package org.apache.spark.sql.execution
SetCommand.copy ( scala.Option<scala.Tuple2<String,scala.Option<String>>> kv, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output )  :  SetCommand
SetCommand.curried ( ) [static]  :  scala.Function1<scala.Option<scala.Tuple2<String,scala.Option<String>>>,scala.Function1<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>,SetCommand>>
SetCommand.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
SetCommand.SetCommand ( scala.Option<scala.Tuple2<String,scala.Option<String>>> kv, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output )
SetCommand.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<scala.Option<scala.Tuple2<String,scala.Option<String>>>,scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>>,SetCommand>

spark-sql_2.10-1.3.0.jar, ShowTablesCommand.class
package org.apache.spark.sql.execution
ShowTablesCommand.andThen ( scala.Function1<ShowTablesCommand,A> p1 ) [static]  :  scala.Function1<scala.Option<String>,A>
ShowTablesCommand.canEqual ( Object p1 )  :  boolean
ShowTablesCommand.compose ( scala.Function1<A,scala.Option<String>> p1 ) [static]  :  scala.Function1<A,ShowTablesCommand>
ShowTablesCommand.copy ( scala.Option<String> databaseName )  :  ShowTablesCommand
ShowTablesCommand.databaseName ( )  :  scala.Option<String>
ShowTablesCommand.equals ( Object p1 )  :  boolean
ShowTablesCommand.hashCode ( )  :  int
ShowTablesCommand.output ( )  :  scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.AttributeReference>
ShowTablesCommand.productArity ( )  :  int
ShowTablesCommand.productElement ( int p1 )  :  Object
ShowTablesCommand.productIterator ( )  :  scala.collection.Iterator<Object>
ShowTablesCommand.productPrefix ( )  :  String
ShowTablesCommand.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>
ShowTablesCommand.ShowTablesCommand ( scala.Option<String> databaseName )

spark-sql_2.10-1.3.0.jar, SparkPlan.class
package org.apache.spark.sql.execution
SparkPlan.executeCollect ( )  :  org.apache.spark.sql.Row[ ]
SparkPlan.executeTake ( int n )  :  org.apache.spark.sql.Row[ ]

spark-sql_2.10-1.3.0.jar, SparkSQLParser.class
package org.apache.spark.sql
SparkSQLParser.AS ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.CACHE ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.CLEAR ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.IN ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.LAZY ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.SparkSQLParser..others ( )  :  scala.util.parsing.combinator.Parsers.Parser<catalyst.plans.logical.LogicalPlan>
SparkSQLParser.SparkSQLParser..set ( )  :  scala.util.parsing.combinator.Parsers.Parser<catalyst.plans.logical.LogicalPlan>
SparkSQLParser.SparkSQLParser..SetCommandParser ( )  :  SparkSQLParser.SetCommandParser.
SparkSQLParser.SparkSQLParser..show ( )  :  scala.util.parsing.combinator.Parsers.Parser<catalyst.plans.logical.LogicalPlan>
SparkSQLParser.SparkSQLParser..uncache ( )  :  scala.util.parsing.combinator.Parsers.Parser<catalyst.plans.logical.LogicalPlan>
SparkSQLParser.SET ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.SHOW ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.SparkSQLParser ( scala.Function1<String,catalyst.plans.logical.LogicalPlan> fallback )
SparkSQLParser.start ( )  :  scala.util.parsing.combinator.Parsers.Parser<catalyst.plans.logical.LogicalPlan>
SparkSQLParser.TABLE ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.TABLES ( )  :  catalyst.AbstractSparkSQLParser.Keyword
SparkSQLParser.UNCACHE ( )  :  catalyst.AbstractSparkSQLParser.Keyword

spark-sql_2.10-1.3.0.jar, SparkStrategies.class
package org.apache.spark.sql.execution
SparkStrategies.DDLStrategy ( )  :  SparkStrategies.DDLStrategy.

spark-sql_2.10-1.3.0.jar, SQLConf.class
package org.apache.spark.sql
SQLConf.broadcastTimeout ( )  :  int
SQLConf.dataFrameEagerAnalysis ( )  :  boolean
SQLConf.defaultDataSourceName ( )  :  String
SQLConf.isParquetINT96AsTimestamp ( )  :  boolean
SQLConf.parquetUseDataSourceApi ( )  :  boolean
SQLConf.SQLConf ( )

spark-sql_2.10-1.3.0.jar, SQLContext.class
package org.apache.spark.sql
SQLContext.applySchemaToPythonRDD ( org.apache.spark.rdd.RDD<Object[ ]> rdd, types.StructType schema )  :  DataFrame
SQLContext.applySchemaToPythonRDD ( org.apache.spark.rdd.RDD<Object[ ]> rdd, String schemaString )  :  DataFrame
SQLContext.baseRelationToDataFrame ( sources.BaseRelation baseRelation )  :  DataFrame
SQLContext.cacheManager ( )  :  CacheManager
SQLContext.checkAnalysis ( )  :  catalyst.analysis.CheckAnalysis
SQLContext.conf ( )  :  SQLConf
SQLContext.createDataFrame ( org.apache.spark.api.java.JavaRDD<?> rdd, Class<?> beanClass )  :  DataFrame
SQLContext.createDataFrame ( org.apache.spark.api.java.JavaRDD<Row> rowRDD, java.util.List<String> columns )  :  DataFrame
SQLContext.createDataFrame ( org.apache.spark.api.java.JavaRDD<Row> rowRDD, types.StructType schema )  :  DataFrame
SQLContext.createDataFrame ( org.apache.spark.rdd.RDD<?> rdd, Class<?> beanClass )  :  DataFrame
SQLContext.createDataFrame ( org.apache.spark.rdd.RDD<A> rdd, scala.reflect.api.TypeTags.TypeTag<A> p2 )  :  DataFrame
SQLContext.createDataFrame ( org.apache.spark.rdd.RDD<Row> rowRDD, types.StructType schema )  :  DataFrame
SQLContext.createDataFrame ( scala.collection.Seq<A> data, scala.reflect.api.TypeTags.TypeTag<A> p2 )  :  DataFrame
SQLContext.createExternalTable ( String tableName, String path )  :  DataFrame
SQLContext.createExternalTable ( String tableName, String path, String source )  :  DataFrame
SQLContext.createExternalTable ( String tableName, String source, java.util.Map<String,String> options )  :  DataFrame
SQLContext.createExternalTable ( String tableName, String source, types.StructType schema, java.util.Map<String,String> options )  :  DataFrame
SQLContext.createExternalTable ( String tableName, String source, types.StructType schema, scala.collection.immutable.Map<String,String> options )  :  DataFrame
SQLContext.createExternalTable ( String tableName, String source, scala.collection.immutable.Map<String,String> options )  :  DataFrame
SQLContext.emptyDataFrame ( )  :  DataFrame
SQLContext.experimental ( )  :  ExperimentalMethods
SQLContext.getSchema ( Class<?> beanClass )  :  scala.collection.Seq<catalyst.expressions.AttributeReference>
SQLContext.implicits ( )  :  SQLContext.implicits.
SQLContext.jdbc ( String url, String table )  :  DataFrame
SQLContext.jdbc ( String url, String table, String columnName, long lowerBound, long upperBound, int numPartitions )  :  DataFrame
SQLContext.jdbc ( String url, String table, String[ ] theParts )  :  DataFrame
SQLContext.jsonFile ( String path )  :  DataFrame
SQLContext.jsonFile ( String path, double samplingRatio )  :  DataFrame
SQLContext.jsonFile ( String path, types.StructType schema )  :  DataFrame
SQLContext.jsonRDD ( org.apache.spark.api.java.JavaRDD<String> json )  :  DataFrame
SQLContext.jsonRDD ( org.apache.spark.api.java.JavaRDD<String> json, double samplingRatio )  :  DataFrame
SQLContext.jsonRDD ( org.apache.spark.api.java.JavaRDD<String> json, types.StructType schema )  :  DataFrame
SQLContext.jsonRDD ( org.apache.spark.rdd.RDD<String> json )  :  DataFrame
SQLContext.jsonRDD ( org.apache.spark.rdd.RDD<String> json, double samplingRatio )  :  DataFrame
SQLContext.jsonRDD ( org.apache.spark.rdd.RDD<String> json, types.StructType schema )  :  DataFrame
SQLContext.load ( String path )  :  DataFrame
SQLContext.load ( String path, String source )  :  DataFrame
SQLContext.load ( String source, java.util.Map<String,String> options )  :  DataFrame
SQLContext.load ( String source, types.StructType schema, java.util.Map<String,String> options )  :  DataFrame
SQLContext.load ( String source, types.StructType schema, scala.collection.immutable.Map<String,String> options )  :  DataFrame
SQLContext.load ( String source, scala.collection.immutable.Map<String,String> options )  :  DataFrame
SQLContext.parquetFile ( scala.collection.Seq<String> paths )  :  DataFrame
SQLContext.parquetFile ( String... paths )  :  DataFrame
SQLContext.parseDataType ( String dataTypeString )  :  types.DataType
SQLContext.registerDataFrameAsTable ( DataFrame df, String tableName )  :  void
SQLContext.sql ( String sqlText )  :  DataFrame
SQLContext.SQLContext ( org.apache.spark.api.java.JavaSparkContext sparkContext )
SQLContext.sqlParser ( )  :  SparkSQLParser
SQLContext.table ( String tableName )  :  DataFrame
SQLContext.tableNames ( )  :  String[ ]
SQLContext.tableNames ( String databaseName )  :  String[ ]
SQLContext.tables ( )  :  DataFrame
SQLContext.tables ( String databaseName )  :  DataFrame
SQLContext.udf ( )  :  UDFRegistration
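
Most of the entry points above produce the DataFrame type that replaced SchemaRDD in 1.3. A hedged end-to-end sketch tying a few of the listed methods together; the application, table and data are placeholders:

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext

    case class Person(name: String, age: Int)   // top-level so a TypeTag is available for createDataFrame

    object SqlContextSketch {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(new SparkConf().setAppName("sketch").setMaster("local[*]"))
        val sqlContext = new SQLContext(sc)

        val people = sqlContext.createDataFrame(Seq(Person("Ann", 30), Person("Bob", 17)))
        sqlContext.registerDataFrameAsTable(people, "people")

        sqlContext.sql("SELECT name FROM people WHERE age >= 18").show()
        println(sqlContext.tableNames().mkString(", "))

        sc.stop()
      }
    }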

spark-sql_2.10-1.3.0.jar, UDFRegistration.class
package org.apache.spark.sql
UDFRegistration.UDFRegistration ( SQLContext sqlContext )

spark-sql_2.10-1.3.0.jar, UncacheTableCommand.class
package org.apache.spark.sql.execution
UncacheTableCommand.run ( org.apache.spark.sql.SQLContext sqlContext )  :  scala.collection.Seq<org.apache.spark.sql.Row>

spark-sql_2.10-1.3.0.jar, UserDefinedFunction.class
package org.apache.spark.sql
UserDefinedFunction.apply ( scala.collection.Seq<Column> exprs )  :  Column
UserDefinedFunction.canEqual ( Object p1 )  :  boolean
UserDefinedFunction.copy ( Object f, types.DataType dataType )  :  UserDefinedFunction
UserDefinedFunction.curried ( ) [static]  :  scala.Function1<Object,scala.Function1<types.DataType,UserDefinedFunction>>
UserDefinedFunction.dataType ( )  :  types.DataType
UserDefinedFunction.equals ( Object p1 )  :  boolean
UserDefinedFunction.f ( )  :  Object
UserDefinedFunction.hashCode ( )  :  int
UserDefinedFunction.productArity ( )  :  int
UserDefinedFunction.productElement ( int p1 )  :  Object
UserDefinedFunction.productIterator ( )  :  scala.collection.Iterator<Object>
UserDefinedFunction.productPrefix ( )  :  String
UserDefinedFunction.toString ( )  :  String
UserDefinedFunction.tupled ( ) [static]  :  scala.Function1<scala.Tuple2<Object,types.DataType>,UserDefinedFunction>
UserDefinedFunction.UserDefinedFunction ( Object f, types.DataType dataType )
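
UserDefinedFunction is the value returned by functions.udf and applied to Column expressions; UDFRegistration (listed above) handles the SQL-name registration path. A hedged sketch, assuming an existing SQLContext named sqlContext and a DataFrame df with a "name" column:

    import org.apache.spark.sql.functions.udf

    val upperName = udf((s: String) => if (s == null) null else s.toUpperCase)  // a UserDefinedFunction
    df.select(upperName(df("name"))).show()

    // The same logic made callable from SQL text via UDFRegistration:
    sqlContext.udf.register("upper_name", (s: String) => s.toUpperCase)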

spark-sql_2.10-1.3.0.jar, UserDefinedPythonFunction.class
package org.apache.spark.sql
UserDefinedPythonFunction.accumulator ( )  :  org.apache.spark.Accumulator<java.util.List<byte[ ]>>
UserDefinedPythonFunction.apply ( scala.collection.Seq<Column> exprs )  :  Column
UserDefinedPythonFunction.broadcastVars ( )  :  java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>>
UserDefinedPythonFunction.canEqual ( Object p1 )  :  boolean
UserDefinedPythonFunction.command ( )  :  byte[ ]
UserDefinedPythonFunction.copy ( String name, byte[ ] command, java.util.Map<String,String> envVars, java.util.List<String> pythonIncludes, String pythonExec, java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>> broadcastVars, org.apache.spark.Accumulator<java.util.List<byte[ ]>> accumulator, types.DataType dataType )  :  UserDefinedPythonFunction
UserDefinedPythonFunction.curried ( ) [static]  :  scala.Function1<String,scala.Function1<byte[ ],scala.Function1<java.util.Map<String,String>,scala.Function1<java.util.List<String>,scala.Function1<String,scala.Function1<java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>>,scala.Function1<org.apache.spark.Accumulator<java.util.List<byte[ ]>>,scala.Function1<types.DataType,UserDefinedPythonFunction>>>>>>>>
UserDefinedPythonFunction.dataType ( )  :  types.DataType
UserDefinedPythonFunction.envVars ( )  :  java.util.Map<String,String>
UserDefinedPythonFunction.equals ( Object p1 )  :  boolean
UserDefinedPythonFunction.hashCode ( )  :  int
UserDefinedPythonFunction.name ( )  :  String
UserDefinedPythonFunction.productArity ( )  :  int
UserDefinedPythonFunction.productElement ( int p1 )  :  Object
UserDefinedPythonFunction.productIterator ( )  :  scala.collection.Iterator<Object>
UserDefinedPythonFunction.productPrefix ( )  :  String
UserDefinedPythonFunction.pythonExec ( )  :  String
UserDefinedPythonFunction.pythonIncludes ( )  :  java.util.List<String>
UserDefinedPythonFunction.toString ( )  :  String
UserDefinedPythonFunction.tupled ( ) [static]  :  scala.Function1<scala.Tuple8<String,byte[ ],java.util.Map<String,String>,java.util.List<String>,String,java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>>,org.apache.spark.Accumulator<java.util.List<byte[ ]>>,types.DataType>,UserDefinedPythonFunction>
UserDefinedPythonFunction.UserDefinedPythonFunction ( String name, byte[ ] command, java.util.Map<String,String> envVars, java.util.List<String> pythonIncludes, String pythonExec, java.util.List<org.apache.spark.broadcast.Broadcast<org.apache.spark.api.python.PythonBroadcast>> broadcastVars, org.apache.spark.Accumulator<java.util.List<byte[ ]>> accumulator, types.DataType dataType )


Problems with Data Types, High Severity (75)


spark-sql_2.10-1.3.0.jar
package org.apache.spark.sql
[+] CacheManager (1)
[+] Column (1)
[+] ColumnName (1)
[+] DataFrame (1)
[+] DataFrameHolder (1)
[+] ExperimentalMethods (1)
[+] GroupedData (1)
[+] SaveMode (1)
[+] SparkSQLParser (1)
[+] SQLConf (1)
[+] SQLContext.QueryExecution (1)
[+] UserDefinedFunction (1)
[+] UserDefinedPythonFunction (1)

package org.apache.spark.sql.columnar
[+] ColumnBuilder (1)
[+] ColumnStats (2)
[+] NullableColumnBuilder (2)

package org.apache.spark.sql.columnar.compression
[+] Encoder<T> (1)

package org.apache.spark.sql.execution
[+] CacheTableCommand (2)
[+] DescribeCommand (2)
[+] Expand (1)
[+] ExplainCommand (2)
[+] JavaBigDecimalSerializer (1)
[+] LocalTableScan (1)
[+] LogicalLocalTable (1)
[+] ScalaBigDecimalSerializer (1)
[+] SetCommand (2)
[+] ShowTablesCommand (1)
[+] UncacheTableCommand (2)

package org.apache.spark.sql.execution.joins
[+] BroadcastLeftSemiJoinHash (1)
[+] HashedRelation (1)

package org.apache.spark.sql.jdbc
[+] DriverQuirks (1)
[+] JDBCPartition (1)
[+] JDBCPartitioningInfo (1)
[+] JDBCRDD (1)
[+] JDBCRelation (1)
[+] MySQLQuirks (1)
[+] NoQuirks (1)
[+] PostgresQuirks (1)

package org.apache.spark.sql.json
[+] JSONRelation (2)

package org.apache.spark.sql.parquet
[+] CatalystPrimitiveStringConverter (1)
[+] ParquetRelation2 (3)
[+] ParquetTest (1)
[+] PartitionSpec (1)

package org.apache.spark.sql.parquet.timestamp
[+] NanoTime (1)

package org.apache.spark.sql.sources
[+] And (1)
[+] BaseRelation (1)
[+] CaseInsensitiveMap (1)
[+] CatalystScan (1)
[+] CreatableRelationProvider (1)
[+] CreateTableUsingAsSelect (1)
[+] CreateTempTableUsing (1)
[+] CreateTempTableUsingAsSelect (1)
[+] DescribeCommand (1)
[+] InsertableRelation (1)
[+] InsertIntoDataSource (1)
[+] IsNotNull (1)
[+] IsNull (1)
[+] Not (1)
[+] Or (1)
[+] PreWriteCheck (1)
[+] PrunedFilteredScan (1)
[+] RefreshTable (1)
[+] ResolvedDataSource (1)
[+] SchemaRelationProvider (1)
[+] TableScan (1)


Problems with Methods, High Severity (1)


spark-sql_2.10-1.3.0.jar, ParquetRelation2
package org.apache.spark.sql.parquet
[+] ParquetRelation2.partitions ( )  :  scala.collection.Seq<Partition> (1)


Problems with Data Types, Medium Severity (38)


spark-sql_2.10-1.3.0.jar
package org.apache.spark.sql.columnar
[+] BinaryColumnAccessor (1)
[+] BinaryColumnBuilder (1)
[+] BooleanColumnAccessor (1)
[+] BooleanColumnBuilder (1)
[+] ByteColumnAccessor (1)
[+] ByteColumnBuilder (1)
[+] DateColumnAccessor (1)
[+] DateColumnBuilder (1)
[+] DateColumnStats (1)
[+] DoubleColumnAccessor (1)
[+] DoubleColumnBuilder (1)
[+] FloatColumnAccessor (1)
[+] FloatColumnBuilder (1)
[+] GenericColumnAccessor (1)
[+] GenericColumnBuilder (1)
[+] IntColumnAccessor (1)
[+] IntColumnBuilder (1)
[+] LongColumnAccessor (1)
[+] LongColumnBuilder (1)
[+] ShortColumnAccessor (1)
[+] ShortColumnBuilder (1)
[+] StringColumnAccessor (1)
[+] StringColumnBuilder (1)
[+] TimestampColumnAccessor (1)
[+] TimestampColumnBuilder (1)

package org.apache.spark.sql.execution
[+] CacheTableCommand (1)
[+] DescribeCommand (1)
[+] ExplainCommand (1)
[+] SetCommand (1)
[+] UncacheTableCommand (1)

package org.apache.spark.sql.json
[+] JSONRelation (1)

package org.apache.spark.sql.parquet
[+] AppendingParquetOutputFormat (1)
[+] ParquetRelation2 (1)
[+] RowReadSupport (1)
[+] RowRecordMaterializer (1)
[+] RowWriteSupport (1)

package org.apache.spark.sql.sources
[+] BaseRelation (1)
[+] DDLParser (1)


Problems with Data Types, Low Severity (12)


spark-sql_2.10-1.3.0.jar
package org.apache.spark.sql.columnar
[+] InMemoryColumnarTableScan (1)

package org.apache.spark.sql.execution
[+] CacheTableCommand (1)
[+] DescribeCommand (1)
[+] ExecutedCommand (1)
[+] ExplainCommand (1)
[+] Limit (1)
[+] SetCommand (1)
[+] TakeOrdered (1)
[+] UncacheTableCommand (1)

package org.apache.spark.sql.json
[+] JSONRelation (1)

package org.apache.spark.sql.parquet
[+] CatalystGroupConverter (1)
[+] ParquetRelation2 (1)


Other Changes in Data Types (8)


spark-sql_2.10-1.3.0.jar
package org.apache.spark.sql.columnar
[+] ColumnBuilder (1)
[+] ColumnStats (2)
[+] NullableColumnBuilder (2)

package org.apache.spark.sql.columnar.compression
[+] Encoder<T> (1)

package org.apache.spark.sql.execution
[+] RunnableCommand (1)

package org.apache.spark.sql.execution.joins
[+] HashedRelation (1)


Java ARchives (1)


spark-sql_2.10-1.3.0.jar





Generated on Wed Oct 28 11:08:19 2015 for succinct-0.1.2 by Java API Compliance Checker 1.4.1  
A tool for checking backward compatibility of a Java library API