Example #1
    def test_nested_pipeline_persistence(self):
        """
        Pipeline[HashingTF, Pipeline[PCA]]
        """
        temp_path = tempfile.mkdtemp()

        try:
            df = self.spark.createDataFrame([(["a", "b", "c"], ),
                                             (["c", "d", "e"], )], ["words"])
            tf = HashingTF(numFeatures=10,
                           inputCol="words",
                           outputCol="features")
            pca = PCA(k=2, inputCol="features", outputCol="pca_features")
            p0 = Pipeline(stages=[pca])
            pl = Pipeline(stages=[tf, p0])
            model = pl.fit(df)

            pipeline_path = temp_path + "/pipeline"
            pl.save(pipeline_path)
            loaded_pipeline = Pipeline.load(pipeline_path)
            self._compare_pipelines(pl, loaded_pipeline)

            model_path = temp_path + "/pipeline-model"
            model.save(model_path)
            loaded_model = PipelineModel.load(model_path)
            self._compare_pipelines(model, loaded_model)
        finally:
            try:
                rmtree(temp_path)
            except OSError:
                pass
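The helper _compare_pipelines above belongs to the surrounding test class and is not shown. A minimal sketch of what such a comparison might check, assuming it only verifies UIDs and stage counts (hypothetical, not the actual PySpark helper):

    def _compare_pipelines_sketch(instance1, instance2):
        # compare a Pipeline/PipelineModel pair stage by stage (UIDs only)
        stages1 = instance1.getStages() if hasattr(instance1, "getStages") else instance1.stages
        stages2 = instance2.getStages() if hasattr(instance2, "getStages") else instance2.stages
        assert instance1.uid == instance2.uid
        assert len(stages1) == len(stages2)
        for s1, s2 in zip(stages1, stages2):
            assert s1.uid == s2.uid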
Example #2
    def test_identity_pipeline(self):
        dataset = MockDataset()

        def doTransform(pipeline):
            pipeline_model = pipeline.fit(dataset)
            return pipeline_model.transform(dataset)

        # check that empty pipeline did not perform any transformation
        self.assertEqual(dataset.index, doTransform(Pipeline(stages=[])).index)
        # check that failure to set stages param will raise KeyError for missing param
        self.assertRaises(KeyError, lambda: doTransform(Pipeline()))
Example #3
def main(business_id_arg):
    concat_list = udf(lambda lst: ", ".join(lst), types.StringType())

    reviews_df = spark.read.format("org.apache.spark.sql.cassandra") \
        .options(table=TABLE_REVIEW, keyspace=KEY_SPACE) \
        .load()

    review_filter = reviews_df.filter(reviews_df.business_id == business_id_arg)
    review_concatenate = review_filter.groupby('business_id').agg(collect_list('review').alias("review"))
    review_concatenate.show()
    train_fin = review_concatenate.withColumn("review", concat_list("review"))
    train_fin = train_fin.withColumn("review", functions.regexp_replace(train_fin.review, "[^0-9A-Za-z ,]", ""))

    # Build a Spark NLP pipeline: DocumentAssembler, Tokenizer and Lemmatizer
    documentAssembler = DocumentAssembler().setInputCol("review").setOutputCol("document")
    tokenizer = Tokenizer().setInputCols(["document"]).setOutputCol("token")
    lemmatizer = Lemmatizer().setInputCols(["token"]).setOutputCol("lemma") \
        .setDictionary("lemmas001.txt", key_delimiter=" ", value_delimiter="\t")

    pipeline = Pipeline(stages=[documentAssembler, tokenizer, lemmatizer])
    pipelineFit = pipeline.fit(train_fin)

    train_df = pipelineFit.transform(train_fin)
    train_df.select('lemma').show(truncate=False)
    price_range_udf = functions.UserDefinedFunction(lambda attributes: get_attributes(attributes), types.StringType())
    train_df = train_df.withColumn('lemma', price_range_udf(train_df['lemma']))
    train_df = train_df.withColumn('lemma', functions.split(train_df['lemma'], ",").cast('array<string>'))

    # Create a new pipeline to remove the stop words
    test_review = train_df.select("lemma")
    stop_words_remover = StopWordsRemover(inputCol="lemma", outputCol="filtered")
    hash_tf = HashingTF(numFeatures=2 ** 16, inputCol="lemma", outputCol='tf')
    pipeline_to_remove_stop_words = Pipeline(stages=[hash_tf, stop_words_remover])
    pipeline_fit = pipeline_to_remove_stop_words.fit(train_df)
    test_df = pipeline_fit.transform(test_review)
    test_df.show()

    token_array = test_df.select('filtered').rdd.flatMap(lambda row: row).collect()

    counts = Counter(token_array[0])
    word_cloud = WordCloud(
        background_color='white',
        max_words=100,
        max_font_size=50,
        min_font_size=10,
        random_state=40
    ).fit_words(counts)

    plt.imshow(word_cloud)
    plt.axis('off')  # remove axis
    plt.show()
Example #4
def test_simple_csv_loader_pipeline(spark_session: SparkSession) -> None:
    # Arrange
    data_dir: Path = Path(__file__).parent.joinpath('./')
    flights_path: str = f"file://{data_dir.joinpath('flights.csv')}"

    schema = StructType([])

    df: DataFrame = spark_session.createDataFrame(
        spark_session.sparkContext.emptyRDD(), schema)

    # noinspection SqlDialectInspection,SqlNoDataSourceInspection
    spark_session.sql("DROP TABLE IF EXISTS default.flights")

    # Act
    # parameters = Dict[str, Any]({
    # })

    stages: List[Union[Estimator, Transformer]] = [
        FrameworkCsvLoader(
            view="flights",
            path_to_csv=flights_path
        ),
        SQLTransformer(statement="SELECT * FROM flights"),
    ]

    pipeline: Pipeline = Pipeline(stages=stages)

    transformer = pipeline.fit(df)
    result_df: DataFrame = transformer.transform(df)

    # Assert
    result_df.show()

    assert result_df.count() > 0
Example #5
    def test_python_transformer_pipeline_persistence(self):
        """
        Pipeline[MockUnaryTransformer, Binarizer]
        """
        temp_path = tempfile.mkdtemp()

        try:
            df = self.spark.range(0, 10).toDF("input")
            tf = MockUnaryTransformer(
                shiftVal=2).setInputCol("input").setOutputCol("shiftedInput")
            tf2 = Binarizer(threshold=6,
                            inputCol="shiftedInput",
                            outputCol="binarized")
            pl = Pipeline(stages=[tf, tf2])
            model = pl.fit(df)

            pipeline_path = temp_path + "/pipeline"
            pl.save(pipeline_path)
            loaded_pipeline = Pipeline.load(pipeline_path)
            self._compare_pipelines(pl, loaded_pipeline)

            model_path = temp_path + "/pipeline-model"
            model.save(model_path)
            loaded_model = PipelineModel.load(model_path)
            self._compare_pipelines(model, loaded_model)
        finally:
            try:
                rmtree(temp_path)
            except OSError:
                pass
Example #6
def test_simple_csv_and_sql_pipeline(spark_session: SparkSession) -> None:
    # Arrange
    data_dir: Path = Path(__file__).parent.joinpath("./")
    flights_path: str = f"file://{data_dir.joinpath('flights.csv')}"

    schema = StructType([])

    df: DataFrame = spark_session.createDataFrame(
        spark_session.sparkContext.emptyRDD(), schema)

    spark_session.sql("DROP TABLE IF EXISTS default.flights")

    # Act
    parameters: Dict[str, Any] = {}

    stages: List[Transformer] = create_steps([
        FrameworkCsvLoader(view="flights", filepath=flights_path),
        FeaturesCarriersV1(parameters=parameters),
    ])

    pipeline: Pipeline = Pipeline(stages=stages)  # type: ignore
    transformer = pipeline.fit(df)
    transformer.transform(df)

    # Assert
    result_df: DataFrame = spark_session.sql("SELECT * FROM flights2")
    result_df.show()

    assert result_df.count() > 0
Example #7
 def pipeline_dataframe(self, stages, dataframe):
     print(stages)
     dataframe.printSchema()
     pipeline = Pipeline(stages=stages)
     pipelineModel = pipeline.fit(dataframe)
     model = pipelineModel.transform(dataframe)
     return model
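A hedged usage sketch for pipeline_dataframe; the instance, stage list and DataFrame below are illustrative assumptions, not part of the original:

    from pyspark.ml.feature import StringIndexer, VectorAssembler

    # hypothetical stages: index a categorical column, then assemble the feature vector
    indexer = StringIndexer(inputCol="category", outputCol="category_idx")
    assembler = VectorAssembler(inputCols=["category_idx", "amount"], outputCol="features")
    transformed_df = obj.pipeline_dataframe([indexer, assembler], input_df)  # obj is the object that owns the method above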
Example #8
def buildModel(data, label):
    """
    Build a pipeline to classify `label` against the rest of the classes using binary logistic regression

    :param data: the training data as an RDD of LabeledPoint
    :param label: 0..C-1, where C is the number of classes
    :return: the fitted model (a CrossValidatorModel, usable as a Transformer)
    """
    logging.info('building model for label = %d, type = %s' %
                 (label, type(label)))
    lr = LogisticRegression()
    pipeline = Pipeline(stages=[lr])

    paramGrid = ParamGridBuilder()\
        .addGrid(lr.maxIter, [100])\
        .addGrid(lr.elasticNetParam, [0.0, 1.0])\
        .addGrid(lr.fitIntercept, [True, False])\
        .build()
    crossValidator = CrossValidator(estimator=pipeline,
                                    estimatorParamMaps=paramGrid,
                                    evaluator=BinaryClassificationEvaluator(),
                                    numFolds=15)

    dataDF = data.map(lambda point: LabeledPoint(
        0 if point.label == label else 1, point.features)).toDF()
    model = crossValidator.fit(dataDF)

    return model
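Because buildModel fits one binary classifier for `label` against the rest, a one-vs-rest driver is the natural caller; a minimal sketch, assuming trainingData is an RDD of LabeledPoint and numClasses is known:

    # hypothetical one-vs-rest loop: one binary CrossValidatorModel per class
    models = [buildModel(trainingData, label) for label in range(numClasses)]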
Example #9
def test_confusion_matrix(sdf):
    assem = VectorAssembler(inputCols=['Fare', 'Pclass', 'Age'],
                            outputCol='features')
    rf = RandomForestClassifier(featuresCol='features',
                                labelCol='Survived',
                                numTrees=20)
    pipeline = Pipeline(stages=[assem, rf])
    model = pipeline.fit(sdf.fillna(0.0))
    predictions = model.transform(sdf.fillna(0.0)).select(
        'probability', 'Survived')
    bcm = BinaryClassificationMetrics(predictions,
                                      scoreCol='probability',
                                      labelCol='Survived')

    predictions = predictions.toHandy().to_metrics_RDD('probability',
                                                       'Survived')
    predictions = np.array(predictions.collect())

    scm = bcm.confusionMatrix().toArray()
    pcm = confusion_matrix(predictions[:, 1], predictions[:, 0] > .5)
    npt.assert_array_almost_equal(scm, pcm)

    scm = bcm.confusionMatrix(.3).toArray()
    pcm = confusion_matrix(predictions[:, 1], predictions[:, 0] > .3)
    npt.assert_array_almost_equal(scm, pcm)
Example #10
def main(spark, logger, **kwargs):
    logger.info("Creating a simple DataFrame ...")
    schema_names = ["id", "german_text"]
    fields = [
        T.StructField(field_name, T.StringType(), True) for field_name in schema_names
    ]
    schema = T.StructType(fields)
    data = [
        ("abc", "Hallo Herr Mustermann"),
        ("xyz", "Deutsch ist das Ding!"),
    ]
    df = spark.createDataFrame(data, schema)
    df.show()

    logger.info("Building the ML pipeline ...")
    tokenizer = RegexTokenizer(
        inputCol="german_text", outputCol="tokens", pattern="\\s+"
    )
    stemmer = SnowballStemmer(
        inputCol="tokens", outputCol="stemmed_tokens", language="German"
    )
    stemming_pipeline = Pipeline(
        stages=[
            tokenizer,
            stemmer,
        ]
    )

    logger.info("Running the stemming ML pipeline ...")
    stemmed_df = stemming_pipeline.fit(df).transform(df)
    stemmed_df.show()
Example #11
def model_train(input,model_path):
    tmax_schema = types.StructType([
        types.StructField('station', types.StringType()),
        types.StructField('date', types.DateType()),
        types.StructField('latitude', types.FloatType()),
        types.StructField('longitude', types.FloatType()),
        types.StructField('elevation', types.FloatType()),
        types.StructField('tmax', types.FloatType()),
    ])
    data = spark.read.csv(input,schema= tmax_schema)
    train, validation = data.randomSplit([0.75,0.25])
    train = train.cache()
    validation = validation.cache()

    sql_query = """SELECT today.latitude, today.longitude, today.elevation, dayofyear(today.date) AS dy,yesterday.tmax AS yesterday_tmax, today.tmax
                     FROM __THIS__ as today
               INNER JOIN __THIS__ as yesterday
                       ON date_sub(today.date, 1) = yesterday.date
                      AND today.station = yesterday.station"""
    transformer = SQLTransformer(statement=sql_query)
    assemble_features = VectorAssembler(inputCols=['latitude','longitude','elevation','dy','yesterday_tmax'],outputCol='features')
    regressor = DecisionTreeRegressor(featuresCol='features',labelCol='tmax')
    weather_pipeline = Pipeline(stages=[transformer,assemble_features,regressor])
    model = weather_pipeline.fit(train)
    model.write().overwrite().save(model_path)

    prediction = model.transform(validation)
    #Scoring the model
    evaluator = RegressionEvaluator(predictionCol='prediction',labelCol='tmax',metricName='rmse')
    score = evaluator.evaluate(prediction)
    print("Score of the weather model is",score)
Example #12
 def test_pipeline(self):
     dataset = MockDataset()
     estimator0 = MockEstimator()
     transformer1 = MockTransformer()
     estimator2 = MockEstimator()
     transformer3 = MockTransformer()
     pipeline = Pipeline(
         stages=[estimator0, transformer1, estimator2, transformer3])
     pipeline_model = pipeline.fit(dataset, {
         estimator0.fake: 0,
         transformer1.fake: 1
     })
     model0, transformer1, model2, transformer3 = pipeline_model.stages
     self.assertEqual(0, model0.dataset_index)
     self.assertEqual(0, model0.getFake())
     self.assertEqual(1, transformer1.dataset_index)
     self.assertEqual(1, transformer1.getFake())
     self.assertEqual(2, dataset.index)
     self.assertIsNone(model2.dataset_index,
                       "The last model shouldn't be called in fit.")
     self.assertIsNone(transformer3.dataset_index,
                       "The last transformer shouldn't be called in fit.")
     dataset = pipeline_model.transform(dataset)
     self.assertEqual(2, model0.dataset_index)
     self.assertEqual(3, transformer1.dataset_index)
     self.assertEqual(4, model2.dataset_index)
     self.assertEqual(5, transformer3.dataset_index)
     self.assertEqual(6, dataset.index)
Example #13
def main():
    data = spark.range(100000)
    data = data.select(
        (functions.rand()*100).alias('length'),
        (functions.rand()*100).alias('width'),
        (functions.rand()*100).alias('height'),
    )
    data = data.withColumn('volume', data['length']*data['width']*data['height'])
    
    training, validation = data.randomSplit([0.75, 0.25], seed=42)
    
    assemble_features = VectorAssembler(
        inputCols=['length', 'width', 'height'],
        outputCol='features')
    classifier = GBTRegressor(
        featuresCol='features', labelCol='volume')
    pipeline = Pipeline(stages=[assemble_features, classifier])
    
    model = pipeline.fit(training)
    predictions = model.transform(validation)
    predictions.show()
    
    r2_evaluator = RegressionEvaluator(
        predictionCol='prediction', labelCol='volume',
        metricName='r2')
    r2 = r2_evaluator.evaluate(predictions)
    print(r2)
Example #14
def define_pipeline():
    temp_indexer = StringIndexer(inputCol="dtemp", outputCol="idtemp")
    nausea_indexer = StringIndexer(inputCol="nausea", outputCol="inausea")
    lumbar_indexer = StringIndexer(inputCol="lumbar", outputCol="ilumbar")
    urine_indexer = StringIndexer(inputCol="urine", outputCol="iurine")
    micturition_indexer = StringIndexer(inputCol="micturition",
                                        outputCol="imicturition")
    urethra_indexer = StringIndexer(inputCol="urethra", outputCol="iurethra")
    bladder_indexer = StringIndexer(inputCol="bladder", outputCol="label")
    vector_assembler = VectorAssembler(inputCols=[
        "idtemp", "inausea", "ilumbar", "iurine", "imicturition", "iurethra"
    ],
                                       outputCol="features")

    label_indexer = StringIndexer(inputCol="bladder",
                                  outputCol="label_bladder")

    rf = RandomForestClassifier(predictionCol="rf_prediction",
                                probabilityCol="rf_probability",
                                rawPredictionCol="rf_rawPrediction",
                                numTrees=10)
    dt = DecisionTreeClassifier(predictionCol="dt_prediction",
                                probabilityCol="dt_probability",
                                rawPredictionCol="dt_rawPrediction")

    res_pipeline = Pipeline(stages=[
        temp_indexer, nausea_indexer, lumbar_indexer, urine_indexer,
        micturition_indexer, urethra_indexer, bladder_indexer,
        vector_assembler, label_indexer, rf, dt
    ])

    return res_pipeline
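A hedged usage sketch for define_pipeline, assuming acute_df is a DataFrame with the expected input columns:

    # hypothetical usage: fit the full pipeline and inspect both classifiers' predictions
    pipeline = define_pipeline()
    model = pipeline.fit(acute_df)
    model.transform(acute_df).select("label", "rf_prediction", "dt_prediction").show()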
Example #15
 def test_pipeline(self):
     dataset = MockDataset()
     estimator0 = MockEstimator()
     transformer1 = MockTransformer()
     estimator2 = MockEstimator()
     transformer3 = MockTransformer()
     pipeline = Pipeline() \
         .setStages([estimator0, transformer1, estimator2, transformer3])
     pipeline_model = pipeline.fit(dataset, {
         estimator0.fake: 0,
         transformer1.fake: 1
     })
     self.assertEqual(0, estimator0.dataset_index)
     self.assertEqual(0, estimator0.fake_param_value)
     model0 = estimator0.model
     self.assertEqual(0, model0.dataset_index)
     self.assertEqual(1, transformer1.dataset_index)
     self.assertEqual(1, transformer1.fake_param_value)
     self.assertEqual(2, estimator2.dataset_index)
     model2 = estimator2.model
     self.assertIsNone(
         model2.dataset_index,
         "The model produced by the last estimator should "
         "not be called during fit.")
     dataset = pipeline_model.transform(dataset)
     self.assertEqual(2, model0.dataset_index)
     self.assertEqual(3, transformer1.dataset_index)
     self.assertEqual(4, model2.dataset_index)
     self.assertEqual(5, transformer3.dataset_index)
     self.assertEqual(6, dataset.index)
Example #16
def train_with_tune(input_df):
    # https://spark.apache.org/docs/latest/ml-tuning.html
    # build the model training pipeline
    lr = LogisticRegression()
    pipeline = Pipeline(stages=[lr])

    # build the hyperparameter search grid
    paramGrid = ParamGridBuilder() \
        .addGrid(lr.regParam, [0.1, 0.01]) \
        .build()

    # do only a single train/validation split
    # tvs = TrainValidationSplit(estimator=pipeline,
    #                            estimatorParamMaps=paramGrid,
    #                            evaluator=BinaryClassificationEvaluator(),
    #                            # 80% of the data will be used for training, 20% for validation.
    #                            trainRatio=0.8)

    # k-fold cross validation
    cross_val = CrossValidator(estimator=pipeline,
                               estimatorParamMaps=paramGrid,
                               evaluator=BinaryClassificationEvaluator(),
                               numFolds=3)

    # train and find the best
    cvModel = cross_val.fit(input_df)
    return cvModel.bestModel
Example #17
    def fit(self, sdf):
        """

        :param sdf:
        :return:
        """

        if self.weighter is None:
            raise NotImplementedError(
                "The weighter parameter has not been defined.")

        weights_arr = self.weighter.get_feature_importances(sdf)

        pipeline_lst = [
            VectorAssembler(inputCols=self.input_cols, outputCol="vec"),
            StandardScaler(inputCol="vec", outputCol="standard_vec"),
            ElementwiseProduct(scalingVec=weights_arr,
                               inputCol='standard_vec',
                               outputCol='scaled_vec')
        ]

        _model = Pipeline(stages=pipeline_lst)
        model = _model.fit(sdf)

        self.model = model

        return self
Example #18
def test_model_log(tmpdir):
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(conda_env, additional_pip_deps=["pyspark=={}".format(pyspark_version)])
    iris = datasets.load_iris()
    feature_names = ["0", "1", "2", "3"]
    pandas_df = pd.DataFrame(iris.data, columns=feature_names)  # to make spark_udf work
    pandas_df['label'] = pd.Series(iris.target)
    spark_session = pyspark.sql.SparkSession.builder \
        .config(key="spark_session.python.worker.reuse", value=True) \
        .master("local-cluster[2, 1, 1024]") \
        .getOrCreate()
    spark_df = spark_session.createDataFrame(pandas_df)
    assembler = VectorAssembler(inputCols=feature_names, outputCol="features")
    lr = LogisticRegression(maxIter=50, regParam=0.1, elasticNetParam=0.8)
    pipeline = Pipeline(stages=[assembler, lr])
    # Fit the model
    model = pipeline.fit(spark_df)
    # Print the coefficients and intercept for multinomial logistic regression
    preds_df = model.transform(spark_df)
    preds1 = [x.prediction for x in preds_df.select("prediction").collect()]
    old_tracking_uri = mlflow.get_tracking_uri()
    cnt = 0
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        for dfs_tmp_dir in [None, os.path.join(str(tmpdir), "test")]:
            print("should_start_run =", should_start_run, "dfs_tmp_dir =", dfs_tmp_dir)
            try:
                tracking_dir = os.path.abspath(str(tmpdir.mkdir("mlruns")))
                mlflow.set_tracking_uri("file://%s" % tracking_dir)
                if should_start_run:
                    mlflow.start_run()
                artifact_path = "model%d" % cnt
                cnt += 1
                sparkm.log_model(artifact_path=artifact_path, spark_model=model,
                                 dfs_tmpdir=dfs_tmp_dir)
                run_id = active_run().info.run_uuid
                # test pyfunc
                x = pyfunc.load_pyfunc(artifact_path, run_id=run_id)
                preds2 = x.predict(pandas_df)
                assert preds1 == preds2
                # test load model
                reloaded_model = sparkm.load_model(artifact_path, run_id=run_id,
                                                   dfs_tmpdir=dfs_tmp_dir)
                preds_df_1 = reloaded_model.transform(spark_df)
                preds3 = [x.prediction for x in preds_df_1.select("prediction").collect()]
                assert preds1 == preds3
                # test spark_udf
                preds4 = score_model_as_udf(artifact_path, run_id, pandas_df)
                assert preds1 == preds4
                # We expect the DFS tempdir not to be deleted here (we clean it up ourselves below).
                x = dfs_tmp_dir or sparkm.DFS_TMP
                assert os.path.exists(x)
                assert os.listdir(x)
                shutil.rmtree(x)
            finally:
                mlflow.end_run()
                mlflow.set_tracking_uri(old_tracking_uri)
                shutil.rmtree(tracking_dir)
Example #19
def MachineLearning2(df):
    file_dataSVM = "G:/Projects/Spark-Machine-Learning/Spark Machine Learning/Spark Machine Learning/svm/"
    data = df.select(['Summary','Sentiment']).withColumnRenamed('Sentiment','label')
    data = data.withColumn('length',length(data['Summary']))
    # Basic sentence tokenizer
    tokenizer = Tokenizer(inputCol="Summary", outputCol="words")
   
    #remove stop words
    remover = StopWordsRemover(inputCol="words", outputCol="filtered_features")
   
    # transform the dataset to vectors
    cv = CountVectorizer(inputCol="filtered_features", outputCol="features1",vocabSize=1000, minDF=1000.0)
  
    #calculate IDF for all dataset
    idf = IDF(inputCol= 'features1', outputCol = 'tf_idf')
   
    #prepare data for ML spark library
    cleanUp = VectorAssembler(inputCols =['tf_idf','length'],outputCol='features')
   
    train_data, test_data = data.randomSplit([0.7,0.3],1)


    #we chose naive bayes
    nb = NaiveBayes(smoothing=2.0,featuresCol="features",labelCol='label',predictionCol ="prediction")

    paramGrid = ParamGridBuilder().build()
    numFolds = 10
    evaluator = BinaryClassificationEvaluator(rawPredictionCol="prediction",labelCol="label") # + other params as in Scala    

    # build the pipeline
    pipeline = Pipeline(stages=[tokenizer, remover, cv, idf, cleanUp, nb])
    crossval = CrossValidator(
        estimator=pipeline,
        estimatorParamMaps=paramGrid,
        evaluator=evaluator,
        numFolds=numFolds)

   
    # Fit the pipeline to training documents.
    model = crossval.fit(train_data)
    #fit data to the model
    #model = nb.fit(train_data)
    model_path = 'C:/Users/Shehab/Source/Repos/Text-Classification-with-spark/Text Classifiation using Spark/Text Classifiation using Spark/model/'
    try:
        model.save(model_path)
    except Exception:
        print('Error: model folder already exists')

    #test data send to the final model
    test_results = model.transform(test_data)
    #test_results.show(10)
    results = test_results.select(['prediction', 'label'])
    df_schema = results.withColumn("prediction", results.prediction.cast("string"))
    # draw ROC curve
    DrawROC(df_schema)
    #show random 10 rows from results
    test_results.show(10)
Example #20
def main(inputs):
    data = spark.read.csv(inputs, schema=colour_schema)
    train, validation = data.randomSplit([0.75, 0.25])
    train = train.cache()
    validation = validation.cache()

    #To convert R,G,B to LabCIE
    rgb_to_lab_query = rgb2lab_query(passthrough_columns=['word'])
    sql_transformed = SQLTransformer(statement=rgb_to_lab_query)

    rgb_assembler = VectorAssembler(inputCols=['R', 'G', 'B'],
                                    outputCol='features')
    lab_assembler = VectorAssembler(inputCols=['labL', 'labA', 'labB'],
                                    outputCol='features')

    word_indexer = StringIndexer(inputCol='word', outputCol='indexed')
    classifier = MultilayerPerceptronClassifier(labelCol='indexed',
                                                layers=[3, 30, 11])

    rgb_pipeline = Pipeline(stages=[rgb_assembler, word_indexer, classifier])
    lab_pipeline = Pipeline(
        stages=[sql_transformed, lab_assembler, word_indexer, classifier])

    rgb_model = rgb_pipeline.fit(train)
    lab_model = lab_pipeline.fit(train)

    prediction = rgb_model.transform(validation)
    prediction_lab = lab_model.transform(validation)
    prediction.show()
    prediction_lab.show()

    #Testing the model
    evaluator = MulticlassClassificationEvaluator(predictionCol='prediction',
                                                  labelCol='indexed',
                                                  metricName='f1')
    lab_evaluator = MulticlassClassificationEvaluator(
        predictionCol='prediction', labelCol='indexed', metricName='f1')
    score = evaluator.evaluate(prediction)
    lab_score = lab_evaluator.evaluate(prediction_lab)
    plot_predictions(rgb_model, 'RGB', labelCol='word')
    plot_predictions(lab_model, 'LAB', labelCol='word')
    print('Validation score for RGB model: %g' % (score, ))
    print('Validation score for LAB model:', lab_score)
Example #21
def test_save_with_sample_input_containing_unsupported_data_type_raises_serialization_exception(
        spark_context, model_path):
    sql_context = SQLContext(spark_context)
    unsupported_df = sql_context.createDataFrame([(1, "2016-09-30"), (2, "2017-02-27")])
    unsupported_df = unsupported_df.withColumn("_2", unsupported_df._2.cast(DateType()))
    pipeline = Pipeline(stages=[])
    model = pipeline.fit(unsupported_df)
    # The Spark `DateType` is not supported by MLeap, so we expect serialization to fail.
    with pytest.raises(mleap.MLeapSerializationException):
        sparkm.save_model(spark_model=model, path=model_path, sample_input=unsupported_df)
Example #22
def train_model(training_size, mode):
    print('Training model with records: ' + str(training_size))
    spark = pyspark.sql.SparkSession.builder.appName(
        'Model Prep').getOrCreate()
    data_df = model_utils.get_player_df(spark, training_size, mode)

    pipeline = Pipeline().setStages(transform_stages())
    model = pipeline.fit(data_df)

    model.write().overwrite().save(model_constants.MODEL_LOCATION)
Example #23
def MachineLearning(df):
    file_dataSVM = "G:/Projects/Spark-Machine-Learning/Spark Machine Learning/Spark Machine Learning/svm/"
    data = df.select(['Summary','Sentiment']).withColumnRenamed('Sentiment','label')
    data = data.withColumn('length',length(data['Summary']))
    # Basic sentence tokenizer
    tokenizer = Tokenizer(inputCol="Summary", outputCol="words")
   
    #remove stop words
    remover = StopWordsRemover(inputCol="words", outputCol="filtered_features")
   
    # transform the dataset to vectors
    cv = HashingTF(inputCol="filtered_features", outputCol="features1", numFeatures=1000)
    
    #calculate IDF for all dataset
    idf = IDF(inputCol= 'features1', outputCol = 'tf_idf')
    
    normalizer = StandardScaler(inputCol="tf_idf", outputCol="normFeatures", withStd=True, withMean=False)
    selector = ChiSqSelector(numTopFeatures=150, featuresCol="normFeatures",
                         outputCol="selectedFeatures", labelCol="label")
    #prepare data for ML spark library
    cleanUp = VectorAssembler(inputCols =['selectedFeatures'],outputCol='features')
    # assemble the full feature-engineering pipeline
    pipeline = Pipeline(stages=[tokenizer, remover, cv, idf, normalizer, selector, cleanUp])
    pipelineModel = pipeline.fit(data)
    data = pipelineModel.transform(data)
    data.printSchema()
    train_data, test_data = data.randomSplit([0.7,0.3],seed=2018)

    lr = LogisticRegression(featuresCol="features", labelCol='label')
    lrModel = lr.fit(train_data)
    beta = np.sort(lrModel.coefficients)
    plt.plot(beta)
    plt.ylabel('Beta Coefficients')
    plt.show()

    trainingSummary = lrModel.summary
    roc = trainingSummary.roc.toPandas()
    plt.plot(roc['FPR'],roc['TPR'])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.title('ROC Curve')
    plt.show()
    print('Training set areaUnderROC: ' + str(trainingSummary.areaUnderROC))



    pr = trainingSummary.pr.toPandas()
    plt.plot(pr['recall'],pr['precision'])
    plt.ylabel('Precision')
    plt.xlabel('Recall')
    plt.show()
    predictions = lrModel.transform(test_data)
    evaluator = BinaryClassificationEvaluator()
    print('Test Area Under ROC', evaluator.evaluate(predictions))
Example #24
def strat_scatterplot(sdf, col1, col2, n=30):
    stages = []
    for col in [col1, col2]:
        splits = get_buckets(sdf.select(col).rdd.map(itemgetter(0)), n)
        stages.append(Bucketizer(splits=splits,
                                 inputCol=col,
                                 outputCol="__{}_bucket".format(col),
                                 handleInvalid="skip"))

    pipeline = Pipeline(stages=stages)
    model = pipeline.fit(sdf)
    return model, sdf.count()
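A hedged usage sketch for strat_scatterplot, assuming sdf has numeric 'Fare' and 'Age' columns:

    # hypothetical usage: bucket both columns into 30 bins, then attach the bucket ids
    model, total = strat_scatterplot(sdf, 'Fare', 'Age', n=30)
    bucketed = model.transform(sdf)
    bucketed.select('__Fare_bucket', '__Age_bucket').show(5)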
Example #25
def spark_model_iris(iris_df):
    feature_names, iris_pandas_df, iris_spark_df = iris_df
    assembler = VectorAssembler(inputCols=feature_names, outputCol="features")
    lr = LogisticRegression(maxIter=50, regParam=0.1, elasticNetParam=0.8)
    pipeline = Pipeline(stages=[assembler, lr])
    # Fit the model
    model = pipeline.fit(iris_spark_df)
    preds_df = model.transform(iris_spark_df)
    preds = [x.prediction for x in preds_df.select("prediction").collect()]
    return SparkModelWithData(
        model=model, spark_df=iris_spark_df, pandas_df=iris_pandas_df, predictions=preds
    )
Example #26
def test_mleap_module_model_save_with_unsupported_transformer_raises_serialization_exception(
        spark_model_iris, model_path):
    class CustomTransformer(JavaModel):
        def _transform(self, dataset):
            return dataset

    unsupported_pipeline = Pipeline(stages=[CustomTransformer()])
    unsupported_model = unsupported_pipeline.fit(spark_model_iris.spark_df)

    with pytest.raises(mlflow.mleap.MLeapSerializationException):
        mlflow.mleap.save_model(spark_model=unsupported_model,
                                path=model_path,
                                sample_input=spark_model_iris.spark_df)
Example #27
    def test_pipeline(self, bag):
        from pyspark.ml.pipeline import Pipeline
        # create and save and load
        pth = "/tmp/spatial-join"
        new_p = Pipeline().setStages([bag["transformer"]])
        new_p.write().overwrite().save(pth)
        saved_p = Pipeline.load(pth)

        # check transformations
        inp = bag["input"]
        exp = bag["expected"]
        check(new_p.fit(inp), inp, exp)
        check(saved_p.fit(inp), inp, exp)
Example #28
def test_spark_module_model_save_with_mleap_and_unsupported_transformer_raises_exception(
        spark_model_iris, model_path):
    class CustomTransformer(JavaModel):
        def _transform(self, dataset):
            return dataset

    unsupported_pipeline = Pipeline(stages=[CustomTransformer()])
    unsupported_model = unsupported_pipeline.fit(spark_model_iris.spark_df)

    with pytest.raises(ValueError):
        sparkm.save_model(spark_model=unsupported_model,
                          path=model_path,
                          sample_input=spark_model_iris.spark_df)
Example #29
def oneHotEncoding(clickDF  , columns):
    """
    ohe = OneHotEncoderEstimator
    """
    
    allStages = [StringIndexer(inputCol=column, outputCol=column+STRING_INDEXER_OUT_SUFFIX).setHandleInvalid("skip") for column in columns]
    oneHotEncodeInputOutputNames = [(column+STRING_INDEXER_OUT_SUFFIX , column+ONE_HOT_ENCODER_OUT_SUFFIX) for column in columns]
    oneHotEncodeInputOutputNames = list(zip(*oneHotEncodeInputOutputNames))
    ohe = OneHotEncoderEstimator(inputCols=oneHotEncodeInputOutputNames[0] , outputCols=oneHotEncodeInputOutputNames[1])
    allStages.append(ohe)
    pipeline = Pipeline(stages=allStages)
    clickDF = pipeline.fit(clickDF).transform(clickDF)
    # intermediate indexer columns plus the originals; computed here but not dropped
    deletedColumns = list(oneHotEncodeInputOutputNames[0]) + columns
    return clickDF
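A hedged usage sketch for oneHotEncoding, assuming clicks_df carries the listed categorical columns (note that OneHotEncoderEstimator is the Spark 2.3/2.4 name; in Spark 3.x it became OneHotEncoder):

    # hypothetical usage: one-hot encode two categorical columns of a clicks DataFrame
    encoded_df = oneHotEncoding(clicks_df, ["device_type", "country"])
    encoded_df.printSchema()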
Example #30
def dataToVectorForTree(clickDF,categoricalColumnsNames , numericColumnNames):
  print ("===== Imputing=======") 
  clickDF , imputedColumnNames = impute(clickDF,numericColumnNames)
  
  print ("===== String Indexer=======") 
  
  allStages = [StringIndexer(inputCol=column, outputCol=column+STRING_INDEXER_OUT_SUFFIX).setHandleInvalid("skip") for column in categoricalColumnsNames]
  stringIndexderColumnsNames = [(column+STRING_INDEXER_OUT_SUFFIX , column+ONE_HOT_ENCODER_OUT_SUFFIX) for column in categoricalColumnsNames] 
  stringIndexderColumnsNames = list(zip(*stringIndexderColumnsNames))
  pipeline = Pipeline(stages=allStages)
  clickDF = pipeline.fit(clickDF).transform(clickDF)
  all_feature_columns = imputedColumnNames + list(stringIndexderColumnsNames[0])
  print("===== Assembler =======")
  feature_assembler = VectorAssembler(inputCols=all_feature_columns, outputCol="features")
  return feature_assembler.transform(clickDF)