def _to_java(self):
    """
    Transfer this instance to a Java PipelineModel.  Used for ML persistence.

    :return: Java object equivalent to this instance.
    """
    # Build a java Transformer[] holding the Java twin of every Python stage.
    transformer_cls = SparkContext._jvm.org.apache.spark.ml.Transformer
    stage_array = SparkContext._gateway.new_array(transformer_cls, len(self.stages))
    for i, py_stage in enumerate(self.stages):
        stage_array[i] = py_stage._to_java()
    return JavaParams._new_java_obj(
        "org.apache.spark.ml.PipelineModel", self.uid, stage_array)
def _to_java(self):
    """
    Transfer this instance to a Java CrossValidator.  Used for ML persistence.

    :return: Java object equivalent to this instance.
    """
    java_estimator, java_epms, java_evaluator = \
        super(CrossValidator, self)._to_java_impl()
    java_cv = JavaParams._new_java_obj(
        "org.apache.spark.ml.tuning.CrossValidator", self.uid)
    # Copy the tuning configuration onto the Java object.
    java_cv.setEstimatorParamMaps(java_epms)
    java_cv.setEvaluator(java_evaluator)
    java_cv.setEstimator(java_estimator)
    java_cv.setSeed(self.getSeed())
    java_cv.setNumFolds(self.getNumFolds())
    return java_cv
def _to_java(self):
    """
    Transfer this instance to a Java CrossValidatorModel.  Used for ML
    persistence.

    :return: Java object equivalent to this instance.
    """
    java_model = JavaParams._new_java_obj(
        "org.apache.spark.ml.tuning.CrossValidatorModel",
        self.uid, self.bestModel._to_java(), self.avgMetrics)
    java_estimator, java_epms, java_evaluator = \
        super(CrossValidatorModel, self)._to_java_impl()
    # Attach the tuning params the Java model carries alongside bestModel.
    java_model.set("evaluator", java_evaluator)
    java_model.set("estimator", java_estimator)
    java_model.set("estimatorParamMaps", java_epms)
    return java_model
def _to_java(self):
    """
    Convert this instance to a dill dump, then to a list of strings with the
    integer value of each byte.  Use this list as a set of dummy stopwords
    and store it in a StopWordsRemover instance, so the pickled Python object
    can ride along through Java ML persistence.

    :return: Java object equivalent to this instance.
    """
    dmp = dill.dumps(self)
    # dill.dumps returns bytes on Python 3 (iterating yields ints) and str on
    # Python 2 (iterating yields 1-char strings); normalize both to the
    # decimal-string form of each byte value.
    pylist = [str(b if isinstance(b, int) else ord(b)) for b in dmp]
    # Add our id so PysparkPipelineWrapper can id us.
    pylist.append(PysparkObjId._getPyObjId())
    sc = SparkContext._active_spark_context
    java_class = sc._gateway.jvm.java.lang.String
    java_array = sc._gateway.new_array(java_class, len(pylist))
    # enumerate instead of the Python-2-only xrange (NameError on Python 3).
    for i, val in enumerate(pylist):
        java_array[i] = val
    _java_obj = JavaParams._new_java_obj(PysparkObjId._getCarrierClass(javaName=True), self.uid)
    _java_obj.setStopWords(java_array)
    return _java_obj
def _to_java(self):
    """
    Transfer this instance to a Java TrainValidationSplit.  Used for ML
    persistence.

    :return: Java object equivalent to this instance.
    """
    java_est, java_epms, java_eval = \
        super(TrainValidationSplit, self)._to_java_impl()
    java_tvs = JavaParams._new_java_obj(
        "org.apache.spark.ml.tuning.TrainValidationSplit", self.uid)
    # Mirror the Python-side configuration onto the Java object.
    java_tvs.setEstimatorParamMaps(java_epms)
    java_tvs.setEvaluator(java_eval)
    java_tvs.setEstimator(java_est)
    java_tvs.setTrainRatio(self.getTrainRatio())
    java_tvs.setSeed(self.getSeed())
    java_tvs.setParallelism(self.getParallelism())
    return java_tvs
def _to_java(self):
    """
    Transfer this instance to a Java TrainValidationSplitModel.  Used for ML
    persistence.

    :return: Java object equivalent to this instance.
    """
    sc = SparkContext._active_spark_context
    # TODO: persist validation metrics as well (an empty list is stored today)
    java_model = JavaParams._new_java_obj(
        "org.apache.spark.ml.tuning.TrainValidationSplitModel",
        self.uid, self.bestModel._to_java(), _py2java(sc, []))
    java_est, java_epms, java_eval = \
        super(TrainValidationSplitModel, self)._to_java_impl()
    java_model.set("evaluator", java_eval)
    java_model.set("estimator", java_est)
    java_model.set("estimatorParamMaps", java_epms)
    return java_model
def _to_java(self):
    """
    Transfer this instance to a Java RankingTrainValidationSplit.  Used for
    ML persistence.

    :return: Java object equivalent to this instance.
    """
    java_est, java_epms, java_eval = \
        super(RankingTrainValidationSplit, self)._to_java_impl()
    java_rtvs = JavaParams._new_java_obj(
        "com.microsoft.ml.spark.RankingTrainValidationSplit", self.uid)
    # Tuning configuration shared with the plain TrainValidationSplit.
    java_rtvs.setEstimatorParamMaps(java_epms)
    java_rtvs.setEvaluator(java_eval)
    java_rtvs.setEstimator(java_est)
    java_rtvs.setTrainRatio(self.getTrainRatio())
    java_rtvs.setSeed(self.getSeed())
    # Ranking-specific column bindings.
    java_rtvs.setItemCol(self.getItemCol())
    java_rtvs.setUserCol(self.getUserCol())
    java_rtvs.setRatingCol(self.getRatingCol())
    return java_rtvs
def _to_java(self):
    """
    Transfer this instance to a Java Pipeline.  Used for ML persistence.

    Returns
    -------
    py4j.java_gateway.JavaObject
        Java object equivalent to this instance.
    """
    py_stages = self.getStages()
    # Build a java PipelineStage[] holding the Java twin of every stage.
    stage_cls = SparkContext._jvm.org.apache.spark.ml.PipelineStage
    java_stages = SparkContext._gateway.new_array(stage_cls, len(py_stages))
    for i, py_stage in enumerate(py_stages):
        java_stages[i] = py_stage._to_java()
    java_pipeline = JavaParams._new_java_obj("org.apache.spark.ml.Pipeline", self.uid)
    java_pipeline.setStages(java_stages)
    return java_pipeline
def _to_java(self):
    """
    Transfer this instance to a Java TrainValidationSplitModel.  Used for ML
    persistence.

    :return: Java object equivalent to this instance.
    """
    sc = SparkContext._active_spark_context
    java_model = JavaParams._new_java_obj(
        "org.apache.spark.ml.tuning.TrainValidationSplitModel",
        self.uid, self.bestModel._to_java(),
        _py2java(sc, self.validationMetrics))
    java_est, java_epms, java_eval = \
        super(TrainValidationSplitModel, self)._to_java_impl()
    java_model.set("evaluator", java_eval)
    java_model.set("estimator", java_est)
    java_model.set("estimatorParamMaps", java_epms)
    # subModels are optional; convert each one only when they were collected.
    if self.subModels is not None:
        java_model.setSubModels([m._to_java() for m in self.subModels])
    return java_model
def _bucketize(df, input_cols):
    """
    Quantile-bucketize each of ``input_cols`` into 254 buckets and assemble
    the bucketized values into a single 'features' vector column.

    :param df: input pyspark DataFrame.
    :param input_cols: names of the numeric columns to discretize.
    :return: DataFrame where ``input_cols`` (and the intermediate bucketized
        columns) are replaced by one 'features' vector column.
    """

    def j_str_arr(arr):
        # Build a java String[] for the Scala-side multi-column setters.
        gateway = SparkContext._gateway
        j_str = gateway.jvm.java.lang.String
        j_arr = gateway.new_array(j_str, len(arr))
        for i, val in enumerate(arr):
            j_arr[i] = val
        return j_arr

    output_cols = ['{}-bucketed'.format(x) for x in input_cols]
    # Sadly the multi-col versions are only in scala, pyspark doesn't
    # have them yet, so drive the Java QuantileDiscretizer directly.
    j_bucketizer = (
        JavaParams._new_java_obj("org.apache.spark.ml.feature.QuantileDiscretizer")
        .setInputCols(j_str_arr(input_cols))
        .setOutputCols(j_str_arr(output_cols))
        .setNumBuckets(254)
        # Explicit float division: under Python 2, `1 / 2550` is integer
        # division and silently yields a relative error of 0 (exact
        # quantiles); 1.0 / 2550 is identical on Python 3 and correct on 2.
        .setRelativeError(1.0 / 2550)
        .setHandleInvalid('error')
        .fit(df._jdf))
    j_df_bucketized = j_bucketizer.transform(df._jdf)
    df_bucketized = DataFrame(j_df_bucketized, df.sql_ctx).drop(*input_cols)
    # Now we need to assemble the bucketized values into vector
    # form for the feature selector to work with.
    assembler = VectorAssembler(inputCols=output_cols, outputCol='features')
    return assembler.transform(df_bucketized).drop(*output_cols)
def _to_java(self):
    """
    Transfer this instance to a Java CrossValidatorModel.  Used for ML
    persistence.

    Returns
    -------
    py4j.java_gateway.JavaObject
        Java object equivalent to this instance.
    """
    sc = SparkContext._active_spark_context
    java_model = JavaParams._new_java_obj(
        "org.apache.spark.ml.tuning.CrossValidatorModel",
        self.uid, self.bestModel._to_java(), _py2java(sc, self.avgMetrics))
    java_est, java_epms, java_eval = \
        super(CrossValidatorModel, self)._to_java_impl()
    # Set each param via a Java ParamPair so the values are typed correctly.
    param_values = (
        ("evaluator", java_eval),
        ("estimator", java_est),
        ("estimatorParamMaps", java_epms),
        ("numFolds", self.getNumFolds()),
        ("foldCol", self.getFoldCol()),
        ("seed", self.getSeed()),
    )
    for param_name, param_val in param_values:
        java_model.set(java_model.getParam(param_name).w(param_val))
    # subModels is a list of per-fold lists; convert every entry when present.
    if self.subModels is not None:
        java_model.setSubModels(
            [[m._to_java() for m in fold] for fold in self.subModels])
    return java_model
def _to_java(self):
    """
    Transfer this instance to a Java CrossValidatorModel.  Used for ML
    persistence.

    :return: Java object equivalent to this instance.
    """
    sc = SparkContext._active_spark_context
    # TODO: persist average metrics as well (an empty list is stored today)
    java_model = JavaParams._new_java_obj(
        "org.apache.spark.ml.tuning.CrossValidatorModel",
        self.uid, self.bestModel._to_java(), _py2java(sc, []))
    java_est, java_epms, java_eval = \
        super(CrossValidatorModel, self)._to_java_impl()
    java_model.set("evaluator", java_eval)
    java_model.set("estimator", java_est)
    java_model.set("estimatorParamMaps", java_epms)
    # subModels is a list of per-fold lists; convert every entry when present.
    if self.subModels is not None:
        java_model.setSubModels(
            [[m._to_java() for m in fold] for fold in self.subModels])
    return java_model