Binary compatibility report for the spark-iqmulus-0.1.0-s_2.10 library between versions 1.5.0 and 1.3.0 (concerning the portability of the client application spark-iqmulus-0.1.0-s_2.10.jar)
Test Info
| Library Name | spark-iqmulus-0.1.0-s_2.10 |
|---|---|
| Version #1 | 1.5.0 |
| Version #2 | 1.3.0 |
| Java Version | 1.7.0_85 |
Test Results
| Total Java ARchives | 2 |
|---|---|
| Total Methods / Classes | 371 / 2659 |
| Verdict | Incompatible (54.9%) |
Problem Summary
| | Severity | Count |
|---|---|---|
| Added Methods | - | 13 |
| Removed Methods | High | 163 |
| Problems with Data Types | High | 10 |
| | Medium | 1 |
| | Low | 1 |
| Problems with Methods | High | 1 |
| | Medium | 0 |
| | Low | 0 |
Added Methods (13)
spark-sql_2.10-1.3.0.jar, DataFrame.class
package org.apache.spark.sql
DataFrame.cache ( ) : RDDApi
[mangled: org/apache/spark/sql/DataFrame.cache:()Lorg/apache/spark/sql/RDDApi;]
DataFrame.collect ( ) : Object
[mangled: org/apache/spark/sql/DataFrame.collect:()Ljava/lang/Object;]
DataFrame.first ( ) : Object
[mangled: org/apache/spark/sql/DataFrame.first:()Ljava/lang/Object;]
DataFrame.persist ( ) : RDDApi
[mangled: org/apache/spark/sql/DataFrame.persist:()Lorg/apache/spark/sql/RDDApi;]
DataFrame.persist ( org.apache.spark.storage.StorageLevel newLevel ) : RDDApi
[mangled: org/apache/spark/sql/DataFrame.persist:(Lorg/apache/spark/storage/StorageLevel;)Lorg/apache/spark/sql/RDDApi;]
DataFrame.showString ( int numRows ) : String
[mangled: org/apache/spark/sql/DataFrame.showString:(I)Ljava/lang/String;]
DataFrame.take ( int n ) : Object
[mangled: org/apache/spark/sql/DataFrame.take:(I)Ljava/lang/Object;]
DataFrame.unpersist ( ) : RDDApi
[mangled: org/apache/spark/sql/DataFrame.unpersist:()Lorg/apache/spark/sql/RDDApi;]
DataFrame.unpersist ( boolean blocking ) : RDDApi
[mangled: org/apache/spark/sql/DataFrame.unpersist:(Z)Lorg/apache/spark/sql/RDDApi;]
spark-sql_2.10-1.3.0.jar, SQLContext.class
package org.apache.spark.sql
SQLContext.cacheManager ( ) : CacheManager
[mangled: org/apache/spark/sql/SQLContext.cacheManager:()Lorg/apache/spark/sql/CacheManager;]
SQLContext.checkAnalysis ( ) : catalyst.analysis.CheckAnalysis
[mangled: org/apache/spark/sql/SQLContext.checkAnalysis:()Lorg/apache/spark/sql/catalyst/analysis/CheckAnalysis;]
SQLContext.createDataFrame ( org.apache.spark.api.java.JavaRDD<Row> rowRDD, java.util.List<String> columns ) : DataFrame
[mangled: org/apache/spark/sql/SQLContext.createDataFrame:(Lorg/apache/spark/api/java/JavaRDD;Ljava/util/List;)Lorg/apache/spark/sql/DataFrame;]
SQLContext.ddlParser ( ) : sources.DDLParser
[mangled: org/apache/spark/sql/SQLContext.ddlParser:()Lorg/apache/spark/sql/sources/DDLParser;]
Removed Methods (163)
spark-core_2.10-1.5.0.jar, SparkHadoopUtil.class
package org.apache.spark.deploy
SparkHadoopUtil.listLeafDirStatuses ( org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path basePath ) : scala.collection.Seq<org.apache.hadoop.fs.FileStatus>
[mangled: org/apache/spark/deploy/SparkHadoopUtil.listLeafDirStatuses:(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;)Lscala/collection/Seq;]
spark-sql_2.10-1.5.0.jar, DataFrame.class
package org.apache.spark.sql
DataFrame.coalesce ( int numPartitions ) : DataFrame
[mangled: org/apache/spark/sql/DataFrame.coalesce:(I)Lorg/apache/spark/sql/DataFrame;]
DataFrame.cube ( Column... cols ) : GroupedData
[mangled: org/apache/spark/sql/DataFrame.cube:([Lorg/apache/spark/sql/Column;)Lorg/apache/spark/sql/GroupedData;]
DataFrame.cube ( scala.collection.Seq<Column> cols ) : GroupedData
[mangled: org/apache/spark/sql/DataFrame.cube:(Lscala/collection/Seq;)Lorg/apache/spark/sql/GroupedData;]
DataFrame.cube ( String col1, scala.collection.Seq<String> cols ) : GroupedData
[mangled: org/apache/spark/sql/DataFrame.cube:(Ljava/lang/String;Lscala/collection/Seq;)Lorg/apache/spark/sql/GroupedData;]
DataFrame.cube ( String col1, String... cols ) : GroupedData
[mangled: org/apache/spark/sql/DataFrame.cube:(Ljava/lang/String;[Ljava/lang/String;)Lorg/apache/spark/sql/GroupedData;]
DataFrame.describe ( scala.collection.Seq<String> cols ) : DataFrame
[mangled: org/apache/spark/sql/DataFrame.describe:(Lscala/collection/Seq;)Lorg/apache/spark/sql/DataFrame;]
DataFrame.describe ( String... cols ) : DataFrame
[mangled: org/apache/spark/sql/DataFrame.describe:([Ljava/lang/String;)Lorg/apache/spark/sql/DataFrame;]
DataFrame.drop ( Column col ) : DataFrame
[mangled: org/apache/spark/sql/DataFrame.drop:(Lorg/apache/spark/sql/Column;)Lorg/apache/spark/sql/DataFrame;]
DataFrame.drop ( String colName ) : DataFrame
[mangled: org/apache/spark/sql/DataFrame.drop:(Ljava/lang/String;)Lorg/apache/spark/sql/DataFrame;]
DataFrame.dropDuplicates ( ) : DataFrame
[mangled: org/apache/spark/sql/DataFrame.dropDuplicates:()Lorg/apache/spark/sql/DataFrame;]
DataFrame.dropDuplicates ( scala.collection.Seq<String> colNames ) : DataFrame
[mangled: org/apache/spark/sql/DataFrame.dropDuplicates:(Lscala/collection/Seq;)Lorg/apache/spark/sql/DataFrame;]
DataFrame.dropDuplicates ( String[ ] colNames ) : DataFrame
[mangled: org/apache/spark/sql/DataFrame.dropDuplicates:([Ljava/lang/String;)Lorg/apache/spark/sql/DataFrame;]
DataFrame.inputFiles ( ) : String[ ]
[mangled: org/apache/spark/sql/DataFrame.inputFiles:()[Ljava/lang/String;]
DataFrame.join ( DataFrame right, scala.collection.Seq<String> usingColumns ) : DataFrame
[mangled: org/apache/spark/sql/DataFrame.join:(Lorg/apache/spark/sql/DataFrame;Lscala/collection/Seq;)Lorg/apache/spark/sql/DataFrame;]
DataFrame.join ( DataFrame right, String usingColumn ) : DataFrame
[mangled: org/apache/spark/sql/DataFrame.join:(Lorg/apache/spark/sql/DataFrame;Ljava/lang/String;)Lorg/apache/spark/sql/DataFrame;]
DataFrame.na ( ) : DataFrameNaFunctions
[mangled: org/apache/spark/sql/DataFrame.na:()Lorg/apache/spark/sql/DataFrameNaFunctions;]
DataFrame.DataFrame..logicalPlanToDataFrame ( catalyst.plans.logical.LogicalPlan logicalPlan ) : DataFrame
[mangled: org/apache/spark/sql/DataFrame.org.apache.spark.sql.DataFrame..logicalPlanToDataFrame:(Lorg/apache/spark/sql/catalyst/plans/logical/LogicalPlan;)Lorg/apache/spark/sql/DataFrame;]
DataFrame.randomSplit ( double[ ] weights ) : DataFrame[ ]
[mangled: org/apache/spark/sql/DataFrame.randomSplit:([D)[Lorg/apache/spark/sql/DataFrame;]
DataFrame.randomSplit ( double[ ] weights, long seed ) : DataFrame[ ]
[mangled: org/apache/spark/sql/DataFrame.randomSplit:([DJ)[Lorg/apache/spark/sql/DataFrame;]
DataFrame.randomSplit ( scala.collection.immutable.List<Object> weights, long seed ) : DataFrame[ ]
[mangled: org/apache/spark/sql/DataFrame.randomSplit:(Lscala/collection/immutable/List;J)[Lorg/apache/spark/sql/DataFrame;]
DataFrame.rollup ( Column... cols ) : GroupedData
[mangled: org/apache/spark/sql/DataFrame.rollup:([Lorg/apache/spark/sql/Column;)Lorg/apache/spark/sql/GroupedData;]
DataFrame.rollup ( scala.collection.Seq<Column> cols ) : GroupedData
[mangled: org/apache/spark/sql/DataFrame.rollup:(Lscala/collection/Seq;)Lorg/apache/spark/sql/GroupedData;]
DataFrame.rollup ( String col1, scala.collection.Seq<String> cols ) : GroupedData
[mangled: org/apache/spark/sql/DataFrame.rollup:(Ljava/lang/String;Lscala/collection/Seq;)Lorg/apache/spark/sql/GroupedData;]
DataFrame.rollup ( String col1, String... cols ) : GroupedData
[mangled: org/apache/spark/sql/DataFrame.rollup:(Ljava/lang/String;[Ljava/lang/String;)Lorg/apache/spark/sql/GroupedData;]
DataFrame.show ( boolean truncate ) : void
[mangled: org/apache/spark/sql/DataFrame.show:(Z)V]
DataFrame.show ( int numRows, boolean truncate ) : void
[mangled: org/apache/spark/sql/DataFrame.show:(IZ)V]
DataFrame.showString ( int _numRows, boolean truncate ) : String
[mangled: org/apache/spark/sql/DataFrame.showString:(IZ)Ljava/lang/String;]
DataFrame.stat ( ) : DataFrameStatFunctions
[mangled: org/apache/spark/sql/DataFrame.stat:()Lorg/apache/spark/sql/DataFrameStatFunctions;]
DataFrame.where ( String conditionExpr ) : DataFrame
[mangled: org/apache/spark/sql/DataFrame.where:(Ljava/lang/String;)Lorg/apache/spark/sql/DataFrame;]
DataFrame.withNewExecutionId ( scala.Function0<T> body ) : T
[mangled: org/apache/spark/sql/DataFrame.withNewExecutionId:(Lscala/Function0;)Ljava/lang/Object;]
DataFrame.write ( ) : DataFrameWriter
[mangled: org/apache/spark/sql/DataFrame.write:()Lorg/apache/spark/sql/DataFrameWriter;]
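Any call site compiled against 1.5.0 that targets one of the methods listed above will fail to link on a 1.3.0 runtime. A minimal sketch (hypothetical client code, compiled against spark-sql 1.5.0 but run against 1.3.0):

```scala
import org.apache.spark.sql.DataFrame

object WriteExample {
  // df.write was introduced after 1.3.0. On a 1.3.0 runtime this call fails
  // with NoSuchMethodError before mode(...) or parquet(...) is ever reached,
  // because write:()Lorg/apache/spark/sql/DataFrameWriter; does not exist.
  def save(df: DataFrame, path: String): Unit =
    df.write.mode("overwrite").parquet(path)
}
```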
spark-sql_2.10-1.5.0.jar, DataFrameReader.class
package org.apache.spark.sql
DataFrameReader.DataFrameReader ( SQLContext sqlContext )
[mangled: org/apache/spark/sql/DataFrameReader."<init>":(Lorg/apache/spark/sql/SQLContext;)V]
DataFrameReader.format ( String source ) : DataFrameReader
[mangled: org/apache/spark/sql/DataFrameReader.format:(Ljava/lang/String;)Lorg/apache/spark/sql/DataFrameReader;]
DataFrameReader.isTraceEnabled ( ) : boolean
[mangled: org/apache/spark/sql/DataFrameReader.isTraceEnabled:()Z]
DataFrameReader.jdbc ( String url, String table, java.util.Properties properties ) : DataFrame
[mangled: org/apache/spark/sql/DataFrameReader.jdbc:(Ljava/lang/String;Ljava/lang/String;Ljava/util/Properties;)Lorg/apache/spark/sql/DataFrame;]
DataFrameReader.jdbc ( String url, String table, String columnName, long lowerBound, long upperBound, int numPartitions, java.util.Properties connectionProperties ) : DataFrame
[mangled: org/apache/spark/sql/DataFrameReader.jdbc:(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;JJILjava/util/Properties;)Lorg/apache/spark/sql/DataFrame;]
DataFrameReader.jdbc ( String url, String table, String[ ] predicates, java.util.Properties connectionProperties ) : DataFrame
[mangled: org/apache/spark/sql/DataFrameReader.jdbc:(Ljava/lang/String;Ljava/lang/String;[Ljava/lang/String;Ljava/util/Properties;)Lorg/apache/spark/sql/DataFrame;]
DataFrameReader.json ( org.apache.spark.api.java.JavaRDD<String> jsonRDD ) : DataFrame
[mangled: org/apache/spark/sql/DataFrameReader.json:(Lorg/apache/spark/api/java/JavaRDD;)Lorg/apache/spark/sql/DataFrame;]
DataFrameReader.json ( org.apache.spark.rdd.RDD<String> jsonRDD ) : DataFrame
[mangled: org/apache/spark/sql/DataFrameReader.json:(Lorg/apache/spark/rdd/RDD;)Lorg/apache/spark/sql/DataFrame;]
DataFrameReader.json ( String path ) : DataFrame
[mangled: org/apache/spark/sql/DataFrameReader.json:(Ljava/lang/String;)Lorg/apache/spark/sql/DataFrame;]
DataFrameReader.load ( ) : DataFrame
[mangled: org/apache/spark/sql/DataFrameReader.load:()Lorg/apache/spark/sql/DataFrame;]
DataFrameReader.load ( String path ) : DataFrame
[mangled: org/apache/spark/sql/DataFrameReader.load:(Ljava/lang/String;)Lorg/apache/spark/sql/DataFrame;]
DataFrameReader.log ( ) : org.slf4j.Logger
[mangled: org/apache/spark/sql/DataFrameReader.log:()Lorg/slf4j/Logger;]
DataFrameReader.logDebug ( scala.Function0<String> msg ) : void
[mangled: org/apache/spark/sql/DataFrameReader.logDebug:(Lscala/Function0;)V]
DataFrameReader.logDebug ( scala.Function0<String> msg, Throwable throwable ) : void
[mangled: org/apache/spark/sql/DataFrameReader.logDebug:(Lscala/Function0;Ljava/lang/Throwable;)V]
DataFrameReader.logError ( scala.Function0<String> msg ) : void
[mangled: org/apache/spark/sql/DataFrameReader.logError:(Lscala/Function0;)V]
DataFrameReader.logError ( scala.Function0<String> msg, Throwable throwable ) : void
[mangled: org/apache/spark/sql/DataFrameReader.logError:(Lscala/Function0;Ljava/lang/Throwable;)V]
DataFrameReader.logInfo ( scala.Function0<String> msg ) : void
[mangled: org/apache/spark/sql/DataFrameReader.logInfo:(Lscala/Function0;)V]
DataFrameReader.logInfo ( scala.Function0<String> msg, Throwable throwable ) : void
[mangled: org/apache/spark/sql/DataFrameReader.logInfo:(Lscala/Function0;Ljava/lang/Throwable;)V]
DataFrameReader.logName ( ) : String
[mangled: org/apache/spark/sql/DataFrameReader.logName:()Ljava/lang/String;]
DataFrameReader.logTrace ( scala.Function0<String> msg ) : void
[mangled: org/apache/spark/sql/DataFrameReader.logTrace:(Lscala/Function0;)V]
DataFrameReader.logTrace ( scala.Function0<String> msg, Throwable throwable ) : void
[mangled: org/apache/spark/sql/DataFrameReader.logTrace:(Lscala/Function0;Ljava/lang/Throwable;)V]
DataFrameReader.logWarning ( scala.Function0<String> msg ) : void
[mangled: org/apache/spark/sql/DataFrameReader.logWarning:(Lscala/Function0;)V]
DataFrameReader.logWarning ( scala.Function0<String> msg, Throwable throwable ) : void
[mangled: org/apache/spark/sql/DataFrameReader.logWarning:(Lscala/Function0;Ljava/lang/Throwable;)V]
DataFrameReader.option ( String key, String value ) : DataFrameReader
[mangled: org/apache/spark/sql/DataFrameReader.option:(Ljava/lang/String;Ljava/lang/String;)Lorg/apache/spark/sql/DataFrameReader;]
DataFrameReader.options ( java.util.Map<String,String> options ) : DataFrameReader
[mangled: org/apache/spark/sql/DataFrameReader.options:(Ljava/util/Map;)Lorg/apache/spark/sql/DataFrameReader;]
DataFrameReader.options ( scala.collection.Map<String,String> options ) : DataFrameReader
[mangled: org/apache/spark/sql/DataFrameReader.options:(Lscala/collection/Map;)Lorg/apache/spark/sql/DataFrameReader;]
DataFrameReader.orc ( String path ) : DataFrame
[mangled: org/apache/spark/sql/DataFrameReader.orc:(Ljava/lang/String;)Lorg/apache/spark/sql/DataFrame;]
DataFrameReader.org.apache.spark.Logging..log_ ( ) : org.slf4j.Logger
[mangled: org/apache/spark/sql/DataFrameReader.org.apache.spark.Logging..log_:()Lorg/slf4j/Logger;]
DataFrameReader.org.apache.spark.Logging..log__.eq ( org.slf4j.Logger p1 ) : void
[mangled: org/apache/spark/sql/DataFrameReader.org.apache.spark.Logging..log__.eq:(Lorg/slf4j/Logger;)V]
DataFrameReader.parquet ( scala.collection.Seq<String> paths ) : DataFrame
[mangled: org/apache/spark/sql/DataFrameReader.parquet:(Lscala/collection/Seq;)Lorg/apache/spark/sql/DataFrame;]
DataFrameReader.parquet ( String... paths ) : DataFrame
[mangled: org/apache/spark/sql/DataFrameReader.parquet:([Ljava/lang/String;)Lorg/apache/spark/sql/DataFrame;]
DataFrameReader.schema ( types.StructType schema ) : DataFrameReader
[mangled: org/apache/spark/sql/DataFrameReader.schema:(Lorg/apache/spark/sql/types/StructType;)Lorg/apache/spark/sql/DataFrameReader;]
DataFrameReader.table ( String tableName ) : DataFrame
[mangled: org/apache/spark/sql/DataFrameReader.table:(Ljava/lang/String;)Lorg/apache/spark/sql/DataFrame;]
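Because the whole DataFrameReader class is absent from 1.3.0 (see the data-type problems below), even loading a client class that references it can fail. A minimal sketch (hypothetical client code, compiled against spark-sql 1.5.0):

```scala
import org.apache.spark.sql.{DataFrame, SQLContext}

object ReadExample {
  // On 1.3.0, sqlContext.read itself is missing (NoSuchMethodError), and any
  // direct reference to DataFrameReader raises NoClassDefFoundError, since
  // the class is not present in the spark-sql 1.3.0 jar.
  def load(sqlContext: SQLContext, path: String): DataFrame =
    sqlContext.read.format("parquet").load(path)
}
```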
spark-sql_2.10-1.5.0.jar, DataFrameWriter.class
package org.apache.spark.sql
DataFrameWriter.DataFrameWriter ( DataFrame df )
[mangled: org/apache/spark/sql/DataFrameWriter."<init>":(Lorg/apache/spark/sql/DataFrame;)V]
DataFrameWriter.format ( String source ) : DataFrameWriter
[mangled: org/apache/spark/sql/DataFrameWriter.format:(Ljava/lang/String;)Lorg/apache/spark/sql/DataFrameWriter;]
DataFrameWriter.insertInto ( String tableName ) : void
[mangled: org/apache/spark/sql/DataFrameWriter.insertInto:(Ljava/lang/String;)V]
DataFrameWriter.jdbc ( String url, String table, java.util.Properties connectionProperties ) : void
[mangled: org/apache/spark/sql/DataFrameWriter.jdbc:(Ljava/lang/String;Ljava/lang/String;Ljava/util/Properties;)V]
DataFrameWriter.json ( String path ) : void
[mangled: org/apache/spark/sql/DataFrameWriter.json:(Ljava/lang/String;)V]
DataFrameWriter.mode ( SaveMode saveMode ) : DataFrameWriter
[mangled: org/apache/spark/sql/DataFrameWriter.mode:(Lorg/apache/spark/sql/SaveMode;)Lorg/apache/spark/sql/DataFrameWriter;]
DataFrameWriter.mode ( String saveMode ) : DataFrameWriter
[mangled: org/apache/spark/sql/DataFrameWriter.mode:(Ljava/lang/String;)Lorg/apache/spark/sql/DataFrameWriter;]
DataFrameWriter.option ( String key, String value ) : DataFrameWriter
[mangled: org/apache/spark/sql/DataFrameWriter.option:(Ljava/lang/String;Ljava/lang/String;)Lorg/apache/spark/sql/DataFrameWriter;]
DataFrameWriter.options ( java.util.Map<String,String> options ) : DataFrameWriter
[mangled: org/apache/spark/sql/DataFrameWriter.options:(Ljava/util/Map;)Lorg/apache/spark/sql/DataFrameWriter;]
DataFrameWriter.options ( scala.collection.Map<String,String> options ) : DataFrameWriter
[mangled: org/apache/spark/sql/DataFrameWriter.options:(Lscala/collection/Map;)Lorg/apache/spark/sql/DataFrameWriter;]
DataFrameWriter.orc ( String path ) : void
[mangled: org/apache/spark/sql/DataFrameWriter.orc:(Ljava/lang/String;)V]
DataFrameWriter.parquet ( String path ) : void
[mangled: org/apache/spark/sql/DataFrameWriter.parquet:(Ljava/lang/String;)V]
DataFrameWriter.partitionBy ( scala.collection.Seq<String> colNames ) : DataFrameWriter
[mangled: org/apache/spark/sql/DataFrameWriter.partitionBy:(Lscala/collection/Seq;)Lorg/apache/spark/sql/DataFrameWriter;]
DataFrameWriter.partitionBy ( String... colNames ) : DataFrameWriter
[mangled: org/apache/spark/sql/DataFrameWriter.partitionBy:([Ljava/lang/String;)Lorg/apache/spark/sql/DataFrameWriter;]
DataFrameWriter.save ( ) : void
[mangled: org/apache/spark/sql/DataFrameWriter.save:()V]
DataFrameWriter.save ( String path ) : void
[mangled: org/apache/spark/sql/DataFrameWriter.save:(Ljava/lang/String;)V]
DataFrameWriter.saveAsTable ( String tableName ) : void
[mangled: org/apache/spark/sql/DataFrameWriter.saveAsTable:(Ljava/lang/String;)V]
spark-sql_2.10-1.5.0.jar, HadoopFsRelation.class
package org.apache.spark.sql.sources
HadoopFsRelation.buildScan ( org.apache.hadoop.fs.FileStatus[ ] inputFiles ) : org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.buildScan:([Lorg/apache/hadoop/fs/FileStatus;)Lorg/apache/spark/rdd/RDD;]
HadoopFsRelation.buildScan ( String[ ] requiredColumns, org.apache.hadoop.fs.FileStatus[ ] inputFiles ) : org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.buildScan:([Ljava/lang/String;[Lorg/apache/hadoop/fs/FileStatus;)Lorg/apache/spark/rdd/RDD;]
HadoopFsRelation.buildScan ( String[ ] requiredColumns, Filter[ ] filters, org.apache.hadoop.fs.FileStatus[ ] inputFiles ) : org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.buildScan:([Ljava/lang/String;[Lorg/apache/spark/sql/sources/Filter;[Lorg/apache/hadoop/fs/FileStatus;)Lorg/apache/spark/rdd/RDD;]
HadoopFsRelation.buildScan ( String[ ] requiredColumns, Filter[ ] filters, org.apache.hadoop.fs.FileStatus[ ] inputFiles, org.apache.spark.broadcast.Broadcast<org.apache.spark.util.SerializableConfiguration> broadcastedConf ) : org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.buildScan:([Ljava/lang/String;[Lorg/apache/spark/sql/sources/Filter;[Lorg/apache/hadoop/fs/FileStatus;Lorg/apache/spark/broadcast/Broadcast;)Lorg/apache/spark/rdd/RDD;]
HadoopFsRelation.buildScan ( String[ ] requiredColumns, Filter[ ] filters, String[ ] inputPaths, org.apache.spark.broadcast.Broadcast<org.apache.spark.util.SerializableConfiguration> broadcastedConf ) : org.apache.spark.rdd.RDD<org.apache.spark.sql.Row>
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.buildScan:([Ljava/lang/String;[Lorg/apache/spark/sql/sources/Filter;[Ljava/lang/String;Lorg/apache/spark/broadcast/Broadcast;)Lorg/apache/spark/rdd/RDD;]
HadoopFsRelation.cachedLeafStatuses ( ) : scala.collection.immutable.Set<org.apache.hadoop.fs.FileStatus>
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.cachedLeafStatuses:()Lscala/collection/immutable/Set;]
HadoopFsRelation.dataSchema ( ) [abstract] : org.apache.spark.sql.types.StructType
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.dataSchema:()Lorg/apache/spark/sql/types/StructType;]
HadoopFsRelation.HadoopFsRelation ( )
[mangled: org/apache/spark/sql/sources/HadoopFsRelation."<init>":()V]
HadoopFsRelation.HadoopFsRelation ( scala.Option<org.apache.spark.sql.execution.datasources.PartitionSpec> maybePartitionSpec )
[mangled: org/apache/spark/sql/sources/HadoopFsRelation."<init>":(Lscala/Option;)V]
HadoopFsRelation.inputFiles ( ) : String[ ]
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.inputFiles:()[Ljava/lang/String;]
HadoopFsRelation.isTraceEnabled ( ) : boolean
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.isTraceEnabled:()Z]
HadoopFsRelation.listLeafFiles ( org.apache.hadoop.fs.FileSystem p1, org.apache.hadoop.fs.FileStatus p2 ) [static] : org.apache.hadoop.fs.FileStatus[ ]
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.listLeafFiles:(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/FileStatus;)[Lorg/apache/hadoop/fs/FileStatus;]
HadoopFsRelation.listLeafFilesInParallel ( String[ ] p1, org.apache.hadoop.conf.Configuration p2, org.apache.spark.SparkContext p3 ) [static] : scala.collection.immutable.Set<org.apache.hadoop.fs.FileStatus>
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.listLeafFilesInParallel:([Ljava/lang/String;Lorg/apache/hadoop/conf/Configuration;Lorg/apache/spark/SparkContext;)Lscala/collection/immutable/Set;]
HadoopFsRelation.log ( ) : org.slf4j.Logger
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.log:()Lorg/slf4j/Logger;]
HadoopFsRelation.logDebug ( scala.Function0<String> msg ) : void
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.logDebug:(Lscala/Function0;)V]
HadoopFsRelation.logDebug ( scala.Function0<String> msg, Throwable throwable ) : void
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.logDebug:(Lscala/Function0;Ljava/lang/Throwable;)V]
HadoopFsRelation.logError ( scala.Function0<String> msg ) : void
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.logError:(Lscala/Function0;)V]
HadoopFsRelation.logError ( scala.Function0<String> msg, Throwable throwable ) : void
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.logError:(Lscala/Function0;Ljava/lang/Throwable;)V]
HadoopFsRelation.logInfo ( scala.Function0<String> msg ) : void
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.logInfo:(Lscala/Function0;)V]
HadoopFsRelation.logInfo ( scala.Function0<String> msg, Throwable throwable ) : void
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.logInfo:(Lscala/Function0;Ljava/lang/Throwable;)V]
HadoopFsRelation.logName ( ) : String
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.logName:()Ljava/lang/String;]
HadoopFsRelation.logTrace ( scala.Function0<String> msg ) : void
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.logTrace:(Lscala/Function0;)V]
HadoopFsRelation.logTrace ( scala.Function0<String> msg, Throwable throwable ) : void
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.logTrace:(Lscala/Function0;Ljava/lang/Throwable;)V]
HadoopFsRelation.logWarning ( scala.Function0<String> msg ) : void
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.logWarning:(Lscala/Function0;)V]
HadoopFsRelation.logWarning ( scala.Function0<String> msg, Throwable throwable ) : void
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.logWarning:(Lscala/Function0;Ljava/lang/Throwable;)V]
HadoopFsRelation.org.apache.spark.Logging..log_ ( ) : org.slf4j.Logger
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.org.apache.spark.Logging..log_:()Lorg/slf4j/Logger;]
HadoopFsRelation.org.apache.spark.Logging..log__.eq ( org.slf4j.Logger p1 ) : void
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.org.apache.spark.Logging..log__.eq:(Lorg/slf4j/Logger;)V]
HadoopFsRelation.HadoopFsRelation..discoverPartitions ( ) : org.apache.spark.sql.execution.datasources.PartitionSpec
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.org.apache.spark.sql.sources.HadoopFsRelation..discoverPartitions:()Lorg/apache/spark/sql/execution/datasources/PartitionSpec;]
HadoopFsRelation.HadoopFsRelation..fileStatusCache ( ) : HadoopFsRelation.FileStatusCache
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.org.apache.spark.sql.sources.HadoopFsRelation..fileStatusCache:()Lorg/apache/spark/sql/sources/HadoopFsRelation$FileStatusCache;]
HadoopFsRelation.HadoopFsRelation..hadoopConf ( ) : org.apache.hadoop.conf.Configuration
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.org.apache.spark.sql.sources.HadoopFsRelation..hadoopConf:()Lorg/apache/hadoop/conf/Configuration;]
HadoopFsRelation.partitionColumns ( ) : org.apache.spark.sql.types.StructType
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.partitionColumns:()Lorg/apache/spark/sql/types/StructType;]
HadoopFsRelation.partitionSpec ( ) : org.apache.spark.sql.execution.datasources.PartitionSpec
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.partitionSpec:()Lorg/apache/spark/sql/execution/datasources/PartitionSpec;]
HadoopFsRelation.paths ( ) [abstract] : String[ ]
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.paths:()[Ljava/lang/String;]
HadoopFsRelation.prepareJobForWrite ( org.apache.hadoop.mapreduce.Job p1 ) [abstract] : OutputWriterFactory
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.prepareJobForWrite:(Lorg/apache/hadoop/mapreduce/Job;)Lorg/apache/spark/sql/sources/OutputWriterFactory;]
HadoopFsRelation.refresh ( ) : void
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.refresh:()V]
HadoopFsRelation.schema ( ) : org.apache.spark.sql.types.StructType
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.schema:()Lorg/apache/spark/sql/types/StructType;]
HadoopFsRelation.sizeInBytes ( ) : long
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.sizeInBytes:()J]
HadoopFsRelation.toString ( ) : String
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.toString:()Ljava/lang/String;]
HadoopFsRelation.userDefinedPartitionColumns ( ) : scala.Option<org.apache.spark.sql.types.StructType>
[mangled: org/apache/spark/sql/sources/HadoopFsRelation.userDefinedPartitionColumns:()Lscala/Option;]
spark-sql_2.10-1.5.0.jar, HadoopFsRelationProvider.class
package org.apache.spark.sql.sources
HadoopFsRelationProvider.createRelation ( org.apache.spark.sql.SQLContext p1, String[ ] p2, scala.Option<org.apache.spark.sql.types.StructType> p3, scala.Option<org.apache.spark.sql.types.StructType> p4, scala.collection.immutable.Map<String,String> p5 ) [abstract] : HadoopFsRelation
[mangled: org/apache/spark/sql/sources/HadoopFsRelationProvider.createRelation:(Lorg/apache/spark/sql/SQLContext;[Ljava/lang/String;Lscala/Option;Lscala/Option;Lscala/collection/immutable/Map;)Lorg/apache/spark/sql/sources/HadoopFsRelation;]
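These removals hit data sources built on the 1.4/1.5 external-datasource API particularly hard. Below is a minimal sketch of a 1.5.0-style file-based data source (hypothetical names, loosely in the style of spark-iqmulus, and assuming the usual BaseRelation.sqlContext member); merely loading either class on a 1.3.0 runtime raises NoClassDefFoundError for HadoopFsRelation / HadoopFsRelationProvider:

```scala
import org.apache.hadoop.fs.FileStatus
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.sql.sources.{HadoopFsRelation, HadoopFsRelationProvider, OutputWriterFactory}
import org.apache.spark.sql.types.{StringType, StructField, StructType}

// A read-only relation over some text-like format.
class SimpleRelation(val paths: Array[String])(@transient val sqlContext: SQLContext)
    extends HadoopFsRelation {
  override def dataSchema: StructType = StructType(Seq(StructField("value", StringType)))
  override def prepareJobForWrite(job: Job): OutputWriterFactory =
    sys.error("write path omitted in this sketch")
  override def buildScan(inputFiles: Array[FileStatus]): RDD[Row] =
    sqlContext.sparkContext.emptyRDD[Row]
}

// The provider class looked up by the data-source resolution mechanism;
// its signature matches createRelation as listed above.
class DefaultSource extends HadoopFsRelationProvider {
  override def createRelation(
      sqlContext: SQLContext,
      paths: Array[String],
      dataSchema: Option[StructType],
      partitionColumns: Option[StructType],
      parameters: Map[String, String]): HadoopFsRelation =
    new SimpleRelation(paths)(sqlContext)
}
```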
spark-sql_2.10-1.5.0.jar, OutputWriter.class
package org.apache.spark.sql.sources
OutputWriter.close ( ) [abstract] : void
[mangled: org/apache/spark/sql/sources/OutputWriter.close:()V]
OutputWriter.initConverter ( org.apache.spark.sql.types.StructType dataSchema ) : void
[mangled: org/apache/spark/sql/sources/OutputWriter.initConverter:(Lorg/apache/spark/sql/types/StructType;)V]
OutputWriter.OutputWriter ( )
[mangled: org/apache/spark/sql/sources/OutputWriter."<init>":()V]
OutputWriter.write ( org.apache.spark.sql.Row p1 ) [abstract] : void
[mangled: org/apache/spark/sql/sources/OutputWriter.write:(Lorg/apache/spark/sql/Row;)V]
OutputWriter.writeInternal ( org.apache.spark.sql.catalyst.InternalRow row ) : void
[mangled: org/apache/spark/sql/sources/OutputWriter.writeInternal:(Lorg/apache/spark/sql/catalyst/InternalRow;)V]
spark-sql_2.10-1.5.0.jar, OutputWriterFactory.class
package org.apache.spark.sql.sources
OutputWriterFactory.newInstance ( String p1, org.apache.spark.sql.types.StructType p2, org.apache.hadoop.mapreduce.TaskAttemptContext p3 ) [abstract] : OutputWriter
[mangled: org/apache/spark/sql/sources/OutputWriterFactory.newInstance:(Ljava/lang/String;Lorg/apache/spark/sql/types/StructType;Lorg/apache/hadoop/mapreduce/TaskAttemptContext;)Lorg/apache/spark/sql/sources/OutputWriter;]
OutputWriterFactory.OutputWriterFactory ( )
[mangled: org/apache/spark/sql/sources/OutputWriterFactory."<init>":()V]
spark-sql_2.10-1.5.0.jar, SparkPlan.class
package org.apache.spark.sql.execution
SparkPlan.canProcessSafeRows ( ) : boolean
[mangled: org/apache/spark/sql/execution/SparkPlan.canProcessSafeRows:()Z]
SparkPlan.canProcessUnsafeRows ( ) : boolean
[mangled: org/apache/spark/sql/execution/SparkPlan.canProcessUnsafeRows:()Z]
SparkPlan.doExecute ( ) [abstract] : org.apache.spark.rdd.RDD<org.apache.spark.sql.catalyst.InternalRow>
[mangled: org/apache/spark/sql/execution/SparkPlan.doExecute:()Lorg/apache/spark/rdd/RDD;]
SparkPlan.doPrepare ( ) : void
[mangled: org/apache/spark/sql/execution/SparkPlan.doPrepare:()V]
SparkPlan.longMetric ( String name ) : metric.LongSQLMetric
[mangled: org/apache/spark/sql/execution/SparkPlan.longMetric:(Ljava/lang/String;)Lorg/apache/spark/sql/execution/metric/LongSQLMetric;]
SparkPlan.metrics ( ) : scala.collection.immutable.Map<String,metric.SQLMetric<?,?>>
[mangled: org/apache/spark/sql/execution/SparkPlan.metrics:()Lscala/collection/immutable/Map;]
SparkPlan.newNaturalAscendingOrdering ( scala.collection.Seq<org.apache.spark.sql.types.DataType> dataTypes ) : scala.math.Ordering<org.apache.spark.sql.catalyst.InternalRow>
[mangled: org/apache/spark/sql/execution/SparkPlan.newNaturalAscendingOrdering:(Lscala/collection/Seq;)Lscala/math/Ordering;]
SparkPlan.outputOrdering ( ) : scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder>
[mangled: org/apache/spark/sql/execution/SparkPlan.outputOrdering:()Lscala/collection/Seq;]
SparkPlan.outputsUnsafeRows ( ) : boolean
[mangled: org/apache/spark/sql/execution/SparkPlan.outputsUnsafeRows:()Z]
SparkPlan.prepare ( ) : void
[mangled: org/apache/spark/sql/execution/SparkPlan.prepare:()V]
SparkPlan.requiredChildOrdering ( ) : scala.collection.Seq<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder>>
[mangled: org/apache/spark/sql/execution/SparkPlan.requiredChildOrdering:()Lscala/collection/Seq;]
SparkPlan.unsafeEnabled ( ) : boolean
[mangled: org/apache/spark/sql/execution/SparkPlan.unsafeEnabled:()Z]
spark-sql_2.10-1.5.0.jar, SQLContext.class
package org.apache.spark.sql
SQLContext.cacheManager ( ) : execution.CacheManager
[mangled: org/apache/spark/sql/SQLContext.cacheManager:()Lorg/apache/spark/sql/execution/CacheManager;]
SQLContext.createDataFrame ( org.apache.spark.rdd.RDD<Row> rowRDD, types.StructType schema, boolean needsConversion ) : DataFrame
[mangled: org/apache/spark/sql/SQLContext.createDataFrame:(Lorg/apache/spark/rdd/RDD;Lorg/apache/spark/sql/types/StructType;Z)Lorg/apache/spark/sql/DataFrame;]
SQLContext.createSession ( ) : SQLContext.SQLSession
[mangled: org/apache/spark/sql/SQLContext.createSession:()Lorg/apache/spark/sql/SQLContext$SQLSession;]
SQLContext.currentSession ( ) : SQLContext.SQLSession
[mangled: org/apache/spark/sql/SQLContext.currentSession:()Lorg/apache/spark/sql/SQLContext$SQLSession;]
SQLContext.ddlParser ( ) : execution.datasources.DDLParser
[mangled: org/apache/spark/sql/SQLContext.ddlParser:()Lorg/apache/spark/sql/execution/datasources/DDLParser;]
SQLContext.defaultSession ( ) : SQLContext.SQLSession
[mangled: org/apache/spark/sql/SQLContext.defaultSession:()Lorg/apache/spark/sql/SQLContext$SQLSession;]
SQLContext.detachSession ( ) : void
[mangled: org/apache/spark/sql/SQLContext.detachSession:()V]
SQLContext.dialectClassName ( ) : String
[mangled: org/apache/spark/sql/SQLContext.dialectClassName:()Ljava/lang/String;]
SQLContext.getConf ( SQLConf.SQLConfEntry<T> entry ) : T
[mangled: org/apache/spark/sql/SQLContext.getConf:(Lorg/apache/spark/sql/SQLConf$SQLConfEntry;)Ljava/lang/Object;]
SQLContext.getConf ( SQLConf.SQLConfEntry<T> entry, T defaultValue ) : T
[mangled: org/apache/spark/sql/SQLContext.getConf:(Lorg/apache/spark/sql/SQLConf$SQLConfEntry;Ljava/lang/Object;)Ljava/lang/Object;]
SQLContext.getOrCreate ( org.apache.spark.SparkContext p1 ) [static] : SQLContext
[mangled: org/apache/spark/sql/SQLContext.getOrCreate:(Lorg/apache/spark/SparkContext;)Lorg/apache/spark/sql/SQLContext;]
SQLContext.getSQLDialect ( ) : catalyst.ParserDialect
[mangled: org/apache/spark/sql/SQLContext.getSQLDialect:()Lorg/apache/spark/sql/catalyst/ParserDialect;]
SQLContext.internalCreateDataFrame ( org.apache.spark.rdd.RDD<catalyst.InternalRow> catalystRows, types.StructType schema ) : DataFrame
[mangled: org/apache/spark/sql/SQLContext.internalCreateDataFrame:(Lorg/apache/spark/rdd/RDD;Lorg/apache/spark/sql/types/StructType;)Lorg/apache/spark/sql/DataFrame;]
SQLContext.listener ( ) : execution.ui.SQLListener
[mangled: org/apache/spark/sql/SQLContext.listener:()Lorg/apache/spark/sql/execution/ui/SQLListener;]
SQLContext.openSession ( ) : SQLContext.SQLSession
[mangled: org/apache/spark/sql/SQLContext.openSession:()Lorg/apache/spark/sql/SQLContext$SQLSession;]
SQLContext.range ( long end ) : DataFrame
[mangled: org/apache/spark/sql/SQLContext.range:(J)Lorg/apache/spark/sql/DataFrame;]
SQLContext.range ( long start, long end ) : DataFrame
[mangled: org/apache/spark/sql/SQLContext.range:(JJ)Lorg/apache/spark/sql/DataFrame;]
SQLContext.range ( long start, long end, long step, int numPartitions ) : DataFrame
[mangled: org/apache/spark/sql/SQLContext.range:(JJJI)Lorg/apache/spark/sql/DataFrame;]
SQLContext.read ( ) : DataFrameReader
[mangled: org/apache/spark/sql/SQLContext.read:()Lorg/apache/spark/sql/DataFrameReader;]
SQLContext.setConf ( SQLConf.SQLConfEntry<T> entry, T value ) : void
[mangled: org/apache/spark/sql/SQLContext.setConf:(Lorg/apache/spark/sql/SQLConf$SQLConfEntry;Ljava/lang/Object;)V]
SQLContext.setSession ( SQLContext.SQLSession session ) : void
[mangled: org/apache/spark/sql/SQLContext.setSession:(Lorg/apache/spark/sql/SQLContext$SQLSession;)V]
SQLContext.tlSession ( ) : ThreadLocal<SQLContext.SQLSession>
[mangled: org/apache/spark/sql/SQLContext.tlSession:()Ljava/lang/ThreadLocal;]
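A minimal sketch for the SQLContext removals (hypothetical client code, compiled against spark-sql 1.5.0):

```scala
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext

object SQLContextExample {
  // Both calls link fine when compiled against 1.5.0, but each raises
  // NoSuchMethodError on a 1.3.0 runtime: the static getOrCreate factory and
  // range(long,long) are both listed among the removed methods above.
  def ids(sc: SparkContext) = {
    val sqlContext = SQLContext.getOrCreate(sc)
    sqlContext.range(0L, 100L)
  }
}
```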
Problems with Data Types, High Severity (10)
spark-sql_2.10-1.5.0.jar
package org.apache.spark.sql
[+] Column (1)
| | Change | Effect |
|---|---|---|
| 1 | Removed super-interface org.apache.spark.Logging. | A client program may be interrupted by a NoSuchMethodError exception (see the sketch after the list of affected methods). |
[+] affected methods (9)
agg ( Column, Column... ) - 1st parameter 'expr' of this method has type 'Column'.
agg ( Column, scala.collection.Seq<Column> ) - 1st parameter 'expr' of this method has type 'Column'.
apply ( java.lang.String ) - Return value of this method has type 'Column'.
col ( java.lang.String ) - Return value of this method has type 'Column'.
filter ( Column ) - 1st parameter 'condition' of this method has type 'Column'.
join ( DataFrame, Column ) - 2nd parameter 'joinExprs' of this method has type 'Column'.
join ( DataFrame, Column, java.lang.String ) - 2nd parameter 'joinExprs' of this method has type 'Column'.
where ( Column ) - 1st parameter 'condition' of this method has type 'Column'.
withColumn ( java.lang.String, Column ) - 2nd parameter 'col' of this method has type 'Column'.
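A minimal sketch of how this can surface (hypothetical client subclass, compiled against spark-sql 1.5.0):

```scala
import org.apache.spark.sql.Column

// In 1.5.0, Column mixes in org.apache.spark.Logging, so the protected
// logInfo forwarder is inherited here. On a 1.3.0 runtime that super-interface
// and its mixed-in forwarders are gone, so calling trace() can fail with
// a NoSuchMethodError.
class TracedColumn(name: String) extends Column(name) {
  def trace(): Unit = logInfo(s"using column $name")
}
```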
[+] DataFrameReader (1)
| | Change | Effect |
|---|---|---|
| 1 | This class has been removed. | A client program may be interrupted by a NoClassDefFoundError exception. |
[+] affected methods (33)
DataFrameReader ( SQLContext ) - This constructor is from 'DataFrameReader' class.
format ( java.lang.String ) - This method is from 'DataFrameReader' class.
isTraceEnabled ( ) - This method is from 'DataFrameReader' class.
jdbc ( java.lang.String, java.lang.String, java.lang.String, long, long, int, java.util.Properties ) - This method is from 'DataFrameReader' class.
jdbc ( java.lang.String, java.lang.String, java.lang.String[ ], java.util.Properties ) - This method is from 'DataFrameReader' class.
jdbc ( java.lang.String, java.lang.String, java.util.Properties ) - This method is from 'DataFrameReader' class.
json ( java.lang.String ) - This method is from 'DataFrameReader' class.
json ( org.apache.spark.api.java.JavaRDD<java.lang.String> ) - This method is from 'DataFrameReader' class.
json ( org.apache.spark.rdd.RDD<java.lang.String> ) - This method is from 'DataFrameReader' class.
load ( ) - This method is from 'DataFrameReader' class.
load ( java.lang.String ) - This method is from 'DataFrameReader' class.
log ( ) - This method is from 'DataFrameReader' class.
logDebug ( scala.Function0<java.lang.String> ) - This method is from 'DataFrameReader' class.
logDebug ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'DataFrameReader' class.
logError ( scala.Function0<java.lang.String> ) - This method is from 'DataFrameReader' class.
logError ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'DataFrameReader' class.
logInfo ( scala.Function0<java.lang.String> ) - This method is from 'DataFrameReader' class.
logInfo ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'DataFrameReader' class.
logName ( ) - This method is from 'DataFrameReader' class.
logTrace ( scala.Function0<java.lang.String> ) - This method is from 'DataFrameReader' class.
logTrace ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'DataFrameReader' class.
logWarning ( scala.Function0<java.lang.String> ) - This method is from 'DataFrameReader' class.
logWarning ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'DataFrameReader' class.
option ( java.lang.String, java.lang.String ) - This method is from 'DataFrameReader' class.
options ( java.util.Map<java.lang.String,java.lang.String> ) - This method is from 'DataFrameReader' class.
options ( scala.collection.Map<java.lang.String,java.lang.String> ) - This method is from 'DataFrameReader' class.
orc ( java.lang.String ) - This method is from 'DataFrameReader' class.
org.apache.spark.Logging..log_ ( ) - This method is from 'DataFrameReader' class.
org.apache.spark.Logging..log__.eq ( org.slf4j.Logger ) - This method is from 'DataFrameReader' class.
parquet ( java.lang.String... ) - This method is from 'DataFrameReader' class.
parquet ( scala.collection.Seq<java.lang.String> ) - This method is from 'DataFrameReader' class.
schema ( types.StructType ) - This method is from 'DataFrameReader' class.
table ( java.lang.String ) - This method is from 'DataFrameReader' class.
[+] DataFrameWriter (1)
| | Change | Effect |
|---|---|---|
| 1 | This class has been removed. | A client program may be interrupted by a NoClassDefFoundError exception. |
[+] affected methods (17)
DataFrameWriter ( DataFrame ) - This constructor is from 'DataFrameWriter' class.
format ( java.lang.String ) - This method is from 'DataFrameWriter' class.
insertInto ( java.lang.String ) - This method is from 'DataFrameWriter' class.
jdbc ( java.lang.String, java.lang.String, java.util.Properties ) - This method is from 'DataFrameWriter' class.
json ( java.lang.String ) - This method is from 'DataFrameWriter' class.
mode ( java.lang.String ) - This method is from 'DataFrameWriter' class.
mode ( SaveMode ) - This method is from 'DataFrameWriter' class.
option ( java.lang.String, java.lang.String ) - This method is from 'DataFrameWriter' class.
options ( java.util.Map<java.lang.String,java.lang.String> ) - This method is from 'DataFrameWriter' class.
options ( scala.collection.Map<java.lang.String,java.lang.String> ) - This method is from 'DataFrameWriter' class.
orc ( java.lang.String ) - This method is from 'DataFrameWriter' class.
parquet ( java.lang.String ) - This method is from 'DataFrameWriter' class.
partitionBy ( java.lang.String... ) - This method is from 'DataFrameWriter' class.
partitionBy ( scala.collection.Seq<java.lang.String> ) - This method is from 'DataFrameWriter' class.
save ( ) - This method is from 'DataFrameWriter' class.
save ( java.lang.String ) - This method is from 'DataFrameWriter' class.
saveAsTable ( java.lang.String ) - This method is from 'DataFrameWriter' class.
[+] SQLConf (1)
| | Change | Effect |
|---|---|---|
| 1 | Removed super-interface catalyst.CatalystConf. | A client program may be interrupted by a NoSuchMethodError exception. |
[+] affected methods (1)
conf ( ) - Return value of this method has type 'SQLConf'.
package org.apache.spark.sql.execution
[+] SparkPlan (2)
| | Change | Effect |
|---|---|---|
| 1 | Method execute ( ) became abstract. | A client program may be interrupted by an InstantiationError exception. |
| 2 | Abstract method doExecute ( ) has been removed from this class. | A client program may be interrupted by a NoSuchMethodError exception. |
[+] affected methods (30)
codegenEnabled ( ) - This method is from 'SparkPlan' abstract class.
execute ( ) - This method is from 'SparkPlan' abstract class.
executeCollect ( ) - This method is from 'SparkPlan' abstract class.
executeTake ( int ) - This method is from 'SparkPlan' abstract class.
isTraceEnabled ( ) - This method is from 'SparkPlan' abstract class.
log ( ) - This method is from 'SparkPlan' abstract class.
logDebug ( scala.Function0<java.lang.String> ) - This method is from 'SparkPlan' abstract class.
logDebug ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'SparkPlan' abstract class.
logError ( scala.Function0<java.lang.String> ) - This method is from 'SparkPlan' abstract class.
logError ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'SparkPlan' abstract class.
logInfo ( scala.Function0<java.lang.String> ) - This method is from 'SparkPlan' abstract class.
logInfo ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'SparkPlan' abstract class.
logName ( ) - This method is from 'SparkPlan' abstract class.
logTrace ( scala.Function0<java.lang.String> ) - This method is from 'SparkPlan' abstract class.
logTrace ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'SparkPlan' abstract class.
logWarning ( scala.Function0<java.lang.String> ) - This method is from 'SparkPlan' abstract class.
logWarning ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'SparkPlan' abstract class.
makeCopy ( java.lang.Object[ ] ) - This method is from 'SparkPlan' abstract class.
makeCopy ( java.lang.Object[ ] ) - Return value of this method has type 'SparkPlan'.
newMutableProjection ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> ) - This method is from 'SparkPlan' abstract class.
newOrdering ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder>, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> ) - This method is from 'SparkPlan' abstract class.
newPredicate ( org.apache.spark.sql.catalyst.expressions.Expression, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> ) - This method is from 'SparkPlan' abstract class.
newProjection ( scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> ) - This method is from 'SparkPlan' abstract class.
org.apache.spark.Logging..log_ ( ) - This method is from 'SparkPlan' abstract class.
org.apache.spark.Logging..log__.eq ( org.slf4j.Logger ) - This method is from 'SparkPlan' abstract class.
outputPartitioning ( ) - This method is from 'SparkPlan' abstract class.
requiredChildDistribution ( ) - This method is from 'SparkPlan' abstract class.
sparkContext ( ) - This method is from 'SparkPlan' abstract class.
SparkPlan ( ) - This constructor is from 'SparkPlan' abstract class.
sqlContext ( ) - This method is from 'SparkPlan' abstract class.
package org.apache.spark.sql.sources
[+] HadoopFsRelation (1)
| | Change | Effect |
|---|---|---|
| 1 | This class has been removed. | A client program may be interrupted by a NoClassDefFoundError exception. |
[+] affected methods (39)
buildScan ( java.lang.String[ ], org.apache.hadoop.fs.FileStatus[ ] ) - This method is from 'HadoopFsRelation' abstract class.
buildScan ( java.lang.String[ ], Filter[ ], java.lang.String[ ], org.apache.spark.broadcast.Broadcast<org.apache.spark.util.SerializableConfiguration> ) - This method is from 'HadoopFsRelation' abstract class.
buildScan ( java.lang.String[ ], Filter[ ], org.apache.hadoop.fs.FileStatus[ ] ) - This method is from 'HadoopFsRelation' abstract class.
buildScan ( java.lang.String[ ], Filter[ ], org.apache.hadoop.fs.FileStatus[ ], org.apache.spark.broadcast.Broadcast<org.apache.spark.util.SerializableConfiguration> ) - This method is from 'HadoopFsRelation' abstract class.
buildScan ( org.apache.hadoop.fs.FileStatus[ ] ) - This method is from 'HadoopFsRelation' abstract class.
cachedLeafStatuses ( ) - This method is from 'HadoopFsRelation' abstract class.
dataSchema ( ) - This abstract method is from 'HadoopFsRelation' abstract class.
HadoopFsRelation ( ) - This constructor is from 'HadoopFsRelation' abstract class.
HadoopFsRelation ( scala.Option<org.apache.spark.sql.execution.datasources.PartitionSpec> ) - This constructor is from 'HadoopFsRelation' abstract class.
inputFiles ( ) - This method is from 'HadoopFsRelation' abstract class.
isTraceEnabled ( ) - This method is from 'HadoopFsRelation' abstract class.
listLeafFiles ( org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.FileStatus ) - This method is from 'HadoopFsRelation' abstract class.
listLeafFilesInParallel ( java.lang.String[ ], org.apache.hadoop.conf.Configuration, org.apache.spark.SparkContext ) - This method is from 'HadoopFsRelation' abstract class.
log ( ) - This method is from 'HadoopFsRelation' abstract class.
logDebug ( scala.Function0<java.lang.String> ) - This method is from 'HadoopFsRelation' abstract class.
logDebug ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'HadoopFsRelation' abstract class.
logError ( scala.Function0<java.lang.String> ) - This method is from 'HadoopFsRelation' abstract class.
logError ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'HadoopFsRelation' abstract class.
logInfo ( scala.Function0<java.lang.String> ) - This method is from 'HadoopFsRelation' abstract class.
logInfo ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'HadoopFsRelation' abstract class.
logName ( ) - This method is from 'HadoopFsRelation' abstract class.
logTrace ( scala.Function0<java.lang.String> ) - This method is from 'HadoopFsRelation' abstract class.
logTrace ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'HadoopFsRelation' abstract class.
logWarning ( scala.Function0<java.lang.String> ) - This method is from 'HadoopFsRelation' abstract class.
logWarning ( scala.Function0<java.lang.String>, java.lang.Throwable ) - This method is from 'HadoopFsRelation' abstract class.
org.apache.spark.Logging..log_ ( ) - This method is from 'HadoopFsRelation' abstract class.
org.apache.spark.Logging..log__.eq ( org.slf4j.Logger ) - This method is from 'HadoopFsRelation' abstract class.
HadoopFsRelation..discoverPartitions ( ) - This method is from 'HadoopFsRelation' abstract class.
HadoopFsRelation..fileStatusCache ( ) - This method is from 'HadoopFsRelation' abstract class.
HadoopFsRelation..hadoopConf ( ) - This method is from 'HadoopFsRelation' abstract class.
partitionColumns ( ) - This method is from 'HadoopFsRelation' abstract class.
partitionSpec ( ) - This method is from 'HadoopFsRelation' abstract class.
paths ( ) - This abstract method is from 'HadoopFsRelation' abstract class.
prepareJobForWrite ( org.apache.hadoop.mapreduce.Job ) - This abstract method is from 'HadoopFsRelation' abstract class.
refresh ( ) - This method is from 'HadoopFsRelation' abstract class.
schema ( ) - This method is from 'HadoopFsRelation' abstract class.
sizeInBytes ( ) - This method is from 'HadoopFsRelation' abstract class.
toString ( ) - This method is from 'HadoopFsRelation' abstract class.
userDefinedPartitionColumns ( ) - This method is from 'HadoopFsRelation' abstract class.
[+] HadoopFsRelationProvider (1)
| | Change | Effect |
|---|---|---|
| 1 | This interface has been removed. | A client program may be interrupted by a NoClassDefFoundError exception. |
[+] affected methods (1)
createRelation ( org.apache.spark.sql.SQLContext, java.lang.String[ ], scala.Option<org.apache.spark.sql.types.StructType>, scala.Option<org.apache.spark.sql.types.StructType>, scala.collection.immutable.Map<java.lang.String,java.lang.String> ) - This abstract method is from 'HadoopFsRelationProvider' interface.
[+] OutputWriter (1)
| | Change | Effect |
|---|---|---|
| 1 | This class has been removed. | A client program may be interrupted by a NoClassDefFoundError exception. |
[+] affected methods (5)
close ( ) - This abstract method is from 'OutputWriter' abstract class.
initConverter ( org.apache.spark.sql.types.StructType ) - This method is from 'OutputWriter' abstract class.
OutputWriter ( ) - This constructor is from 'OutputWriter' abstract class.
write ( org.apache.spark.sql.Row ) - This abstract method is from 'OutputWriter' abstract class.
writeInternal ( org.apache.spark.sql.catalyst.InternalRow ) - This method is from 'OutputWriter' abstract class.
[+] OutputWriterFactory (1)
| | Change | Effect |
|---|---|---|
| 1 | This class has been removed. | A client program may be interrupted by a NoClassDefFoundError exception. |
[+] affected methods (2)
newInstance ( java.lang.String, org.apache.spark.sql.types.StructType, org.apache.hadoop.mapreduce.TaskAttemptContext ) - This abstract method is from 'OutputWriterFactory' abstract class.
OutputWriterFactory ( ) - This constructor is from 'OutputWriterFactory' abstract class.
Problems with Methods, High Severity (1)
spark-sql_2.10-1.5.0.jar, SparkPlan
package org.apache.spark.sql.execution
[+] SparkPlan.execute ( ) : org.apache.spark.rdd.RDD<org.apache.spark.sql.catalyst.InternalRow> (1)
[mangled: org/apache/spark/sql/execution/SparkPlan.execute:()Lorg/apache/spark/rdd/RDD;]
| | Change | Effect |
|---|---|---|
| 1 | Method became abstract. | A client program trying to create an instance of the method's class may be interrupted by an InstantiationError exception. |
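A minimal sketch of a client plan node that trips over this change (hypothetical; compiled against spark-sql 1.5.0):

```scala
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.execution.SparkPlan

// Against 1.5.0, execute() is concrete and subclasses implement doExecute().
// On 1.3.0, doExecute() does not exist and execute() is abstract, so this
// class no longer implements it: creating an instance may be interrupted by
// an InstantiationError (or an AbstractMethodError once execute() is called).
case class NoopPlan(child: SparkPlan) extends SparkPlan {
  override def output: Seq[Attribute] = child.output
  override def children: Seq[SparkPlan] = child :: Nil
  protected override def doExecute(): RDD[InternalRow] = child.execute()
}
```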
Problems with Data Types, Medium Severity (1)
spark-sql_2.10-1.5.0.jar
package org.apache.spark.sql
[+] SQLContext.implicits. (1)
| | Change | Effect |
|---|---|---|
| 1 | Removed super-class SQLImplicits. | Access by a client program to the fields or methods of the old super-class may be interrupted by NoSuchFieldError or NoSuchMethodError exceptions (see the sketch below). |
[+] affected methods (1)
implicits ( ) - Return value of this method has type 'SQLContext.implicits.'.
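A minimal sketch (hypothetical client code, compiled against spark-sql 1.5.0):

```scala
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext

object ImplicitsExample {
  // Against 1.5.0, rdd.toDF(...) resolves through members that
  // sqlContext.implicits inherits from the SQLImplicits super-class. With
  // that super-class removed in 1.3.0, any inherited member that 1.3.0 does
  // not redefine locally is gone, and the client may be interrupted by a
  // NoSuchMethodError or NoSuchFieldError at run time.
  def toDf(sqlContext: SQLContext, rdd: RDD[(Int, String)]) = {
    import sqlContext.implicits._
    rdd.toDF("id", "name")
  }
}
```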
Problems with Data Types, Low Severity (1)
spark-sql_2.10-1.5.0.jar
package org.apache.spark.sql
[+] DataFrame (1)
| | Change | Effect |
|---|---|---|
| 1 | Added super-class java.lang.Object. | A static field from a super-interface of a client class may hide a field (with the same name) inherited from the new super-class and cause an IncompatibleClassChangeError exception. |
[+] affected methods (110)
agg ( java.util.Map<java.lang.String,java.lang.String> ) - Return value of this method has type 'DataFrame'.
agg ( Column, Column... ) - Return value of this method has type 'DataFrame'.
agg ( Column, scala.collection.Seq<Column> ) - Return value of this method has type 'DataFrame'.
agg ( scala.collection.immutable.Map<java.lang.String,java.lang.String> ) - Return value of this method has type 'DataFrame'.
agg ( scala.Tuple2<java.lang.String,java.lang.String>, scala.collection.Seq<scala.Tuple2<java.lang.String,java.lang.String>> ) - Return value of this method has type 'DataFrame'.
apply ( java.lang.String ) - This method is from 'DataFrame' class.
as ( java.lang.String ) - Return value of this method has type 'DataFrame'.
as ( scala.Symbol ) - Return value of this method has type 'DataFrame'.
cache ( ) - Return value of this method has type 'DataFrame'.
col ( java.lang.String ) - This method is from 'DataFrame' class.
collect ( ) - This method is from 'DataFrame' class.
collectAsList ( ) - This method is from 'DataFrame' class.
columns ( ) - This method is from 'DataFrame' class.
count ( ) - This method is from 'DataFrame' class.
DataFrame ( SQLContext, catalyst.plans.logical.LogicalPlan ) - This constructor is from 'DataFrame' class.
DataFrame ( SQLContext, SQLContext.QueryExecution ) - This constructor is from 'DataFrame' class.
distinct ( ) - Return value of this method has type 'DataFrame'.
dtypes ( ) - This method is from 'DataFrame' class.
except ( DataFrame ) - 1st parameter 'other' of this method has type 'DataFrame'.
explain ( ) - This method is from 'DataFrame' class.
explain ( boolean ) - This method is from 'DataFrame' class.
explode ( java.lang.String, java.lang.String, scala.Function1<A,scala.collection.TraversableOnce<B>>, scala.reflect.api.TypeTags.TypeTag<B> ) - Return value of this method has type 'DataFrame'.
explode ( scala.collection.Seq<Column>, scala.Function1<Row,scala.collection.TraversableOnce<A>>, scala.reflect.api.TypeTags.TypeTag<A> ) - Return value of this method has type 'DataFrame'.
filter ( java.lang.String ) - Return value of this method has type 'DataFrame'.
filter ( Column ) - Return value of this method has type 'DataFrame'.
first ( ) - This method is from 'DataFrame' class.
flatMap ( scala.Function1<Row,scala.collection.TraversableOnce<R>>, scala.reflect.ClassTag<R> ) - This method is from 'DataFrame' class.
foreach ( scala.Function1<Row,scala.runtime.BoxedUnit> ) - This method is from 'DataFrame' class.
foreachPartition ( scala.Function1<scala.collection.Iterator<Row>,scala.runtime.BoxedUnit> ) - This method is from 'DataFrame' class.
groupBy ( java.lang.String, java.lang.String... ) - This method is from 'DataFrame' class.
groupBy ( java.lang.String, scala.collection.Seq<java.lang.String> ) - This method is from 'DataFrame' class.
groupBy ( Column... ) - This method is from 'DataFrame' class.
groupBy ( scala.collection.Seq<Column> ) - This method is from 'DataFrame' class.
head ( ) - This method is from 'DataFrame' class.
head ( int ) - This method is from 'DataFrame' class.
intersect ( DataFrame ) - 1st parameter 'other' of this method has type 'DataFrame'.
isLocal ( ) - This method is from 'DataFrame' class.
javaRDD ( ) - This method is from 'DataFrame' class.
javaToPython ( ) - This method is from 'DataFrame' class.
join ( DataFrame ) - 1st parameter 'right' of this method has type 'DataFrame'.
join ( DataFrame, Column ) - 1st parameter 'right' of this method has type 'DataFrame'.
join ( DataFrame, Column, java.lang.String ) - 1st parameter 'right' of this method has type 'DataFrame'.
limit ( int ) - Return value of this method has type 'DataFrame'.
logicalPlan ( ) - This method is from 'DataFrame' class.
map ( scala.Function1<Row,R>, scala.reflect.ClassTag<R> ) - This method is from 'DataFrame' class.
mapPartitions ( scala.Function1<scala.collection.Iterator<Row>,scala.collection.Iterator<R>>, scala.reflect.ClassTag<R> ) - This method is from 'DataFrame' class.
numericColumns ( ) - This method is from 'DataFrame' class.
orderBy ( java.lang.String, java.lang.String... ) - Return value of this method has type 'DataFrame'.
orderBy ( java.lang.String, scala.collection.Seq<java.lang.String> ) - Return value of this method has type 'DataFrame'.
orderBy ( Column... ) - Return value of this method has type 'DataFrame'.
orderBy ( scala.collection.Seq<Column> ) - Return value of this method has type 'DataFrame'.
persist ( ) - Return value of this method has type 'DataFrame'.
persist ( org.apache.spark.storage.StorageLevel ) - Return value of this method has type 'DataFrame'.
printSchema ( ) - This method is from 'DataFrame' class.
queryExecution ( ) - This method is from 'DataFrame' class.
rdd ( ) - This method is from 'DataFrame' class.
registerTempTable ( java.lang.String ) - This method is from 'DataFrame' class.
repartition ( int ) - Return value of this method has type 'DataFrame'.
resolve ( java.lang.String ) - This method is from 'DataFrame' class.
sample ( boolean, double ) - Return value of this method has type 'DataFrame'.
sample ( boolean, double, long ) - Return value of this method has type 'DataFrame'.
schema ( ) - This method is from 'DataFrame' class.
select ( java.lang.String, java.lang.String... ) - Return value of this method has type 'DataFrame'.
select ( java.lang.String, scala.collection.Seq<java.lang.String> ) - Return value of this method has type 'DataFrame'.
select ( Column... ) - Return value of this method has type 'DataFrame'.
select ( scala.collection.Seq<Column> ) - Return value of this method has type 'DataFrame'.
selectExpr ( java.lang.String... ) - Return value of this method has type 'DataFrame'.
selectExpr ( scala.collection.Seq<java.lang.String> ) - Return value of this method has type 'DataFrame'.
show ( ) - This method is from 'DataFrame' class.
show ( int ) - This method is from 'DataFrame' class.
sort ( java.lang.String, java.lang.String... ) - Return value of this method has type 'DataFrame'.
sort ( java.lang.String, scala.collection.Seq<java.lang.String> ) - Return value of this method has type 'DataFrame'.
sort ( Column... ) - Return value of this method has type 'DataFrame'.
sort ( scala.collection.Seq<Column> ) - Return value of this method has type 'DataFrame'.
sqlContext ( ) - This method is from 'DataFrame' class.
take ( int ) - This method is from 'DataFrame' class.
toDF ( ) - Return value of this method has type 'DataFrame'.
toDF ( java.lang.String... ) - Return value of this method has type 'DataFrame'.
toDF ( scala.collection.Seq<java.lang.String> ) - Return value of this method has type 'DataFrame'.
toJavaRDD ( ) - This method is from 'DataFrame' class.
toJSON ( ) - This method is from 'DataFrame' class.
toString ( ) - This method is from 'DataFrame' class.
unionAll ( DataFrame ) - 1st parameter 'other' of this method has type 'DataFrame'.
unpersist ( ) - Return value of this method has type 'DataFrame'.
unpersist ( boolean ) - Return value of this method has type 'DataFrame'.
where ( Column ) - Return value of this method has type 'DataFrame'.
withColumn ( java.lang.String, Column ) - Return value of this method has type 'DataFrame'.
withColumnRenamed ( java.lang.String, java.lang.String ) - Return value of this method has type 'DataFrame'.
applySchemaToPythonRDD ( org.apache.spark.rdd.RDD<java.lang.Object[ ]>, java.lang.String ) - Return value of this method has type 'DataFrame'.
applySchemaToPythonRDD ( org.apache.spark.rdd.RDD<java.lang.Object[ ]>, types.StructType ) - Return value of this method has type 'DataFrame'.
baseRelationToDataFrame ( sources.BaseRelation ) - Return value of this method has type 'DataFrame'.
createDataFrame ( org.apache.spark.api.java.JavaRDD<?>, java.lang.Class<?> ) - Return value of this method has type 'DataFrame'.
createDataFrame ( org.apache.spark.api.java.JavaRDD<Row>, types.StructType ) - Return value of this method has type 'DataFrame'.
createDataFrame ( org.apache.spark.rdd.RDD<?>, java.lang.Class<?> ) - Return value of this method has type 'DataFrame'.
createDataFrame ( org.apache.spark.rdd.RDD<A>, scala.reflect.api.TypeTags.TypeTag<A> ) - Return value of this method has type 'DataFrame'.
createDataFrame ( org.apache.spark.rdd.RDD<Row>, types.StructType ) - Return value of this method has type 'DataFrame'.
createDataFrame ( scala.collection.Seq<A>, scala.reflect.api.TypeTags.TypeTag<A> ) - Return value of this method has type 'DataFrame'.
createExternalTable ( java.lang.String, java.lang.String ) - Return value of this method has type 'DataFrame'.
createExternalTable ( java.lang.String, java.lang.String, java.lang.String ) - Return value of this method has type 'DataFrame'.
createExternalTable ( java.lang.String, java.lang.String, java.util.Map<java.lang.String,java.lang.String> ) - Return value of this method has type 'DataFrame'.
createExternalTable ( java.lang.String, java.lang.String, types.StructType, java.util.Map<java.lang.String,java.lang.String> ) - Return value of this method has type 'DataFrame'.
createExternalTable ( java.lang.String, java.lang.String, types.StructType, scala.collection.immutable.Map<java.lang.String,java.lang.String> ) - Return value of this method has type 'DataFrame'.
createExternalTable ( java.lang.String, java.lang.String, scala.collection.immutable.Map<java.lang.String,java.lang.String> ) - Return value of this method has type 'DataFrame'.
emptyDataFrame ( ) - Return value of this method has type 'DataFrame'.
parquetFile ( java.lang.String... ) - Return value of this method has type 'DataFrame'.
registerDataFrameAsTable ( DataFrame, java.lang.String ) - 1st parameter 'df' of this method has type 'DataFrame'.
sql ( java.lang.String ) - Return value of this method has type 'DataFrame'.
table ( java.lang.String ) - Return value of this method has type 'DataFrame'.
tables ( ) - Return value of this method has type 'DataFrame'.
tables ( java.lang.String ) - Return value of this method has type 'DataFrame'.
Java ARchives (2)
spark-core_2.10-1.5.0.jar
spark-sql_2.10-1.5.0.jar
Generated on Tue Dec 8 14:49:40 2015 for spark-iqmulus-0.1.0-s_2.10 by Java API Compliance Checker 1.4.1
A tool for checking backward compatibility of a Java library API