def test_pipeline_same_results(self):
        X, y, Z = self.make_classification(2, 10000, 2000)

        loc_clf = LogisticRegression()
        loc_filter = VarianceThreshold()
        loc_pipe = Pipeline([
            ('threshold', loc_filter),
            ('logistic', loc_clf)
        ])

        dist_clf = SparkLogisticRegression()
        dist_filter = SparkVarianceThreshold()
        dist_pipe = SparkPipeline([
            ('threshold', dist_filter),
            ('logistic', dist_clf)
        ])

        dist_filter.fit(Z)
        loc_pipe.fit(X, y)
        dist_pipe.fit(Z, logistic__classes=np.unique(y))

        assert_true(np.mean(np.abs(
            loc_pipe.predict(X) -
            np.concatenate(dist_pipe.predict(Z[:, 'X']).collect())
        )) < 0.1)
# Example 2 (scraped snippet separator — score: 0)
    def test_pipeline_same_results(self):
        """Verify the distributed pipeline matches the local one.

        Builds the same VarianceThreshold -> LogisticRegression pipeline
        twice — once with scikit-learn estimators, once with their Spark
        counterparts — fits both, and asserts the mean absolute difference
        between their predictions is below 0.1.
        """
        # 2 classes, 10000 samples, 2000 features; Z is the distributed
        # (RDD-backed) counterpart of (X, y) — see make_classification helper.
        X, y, Z = self.make_classification(2, 10000, 2000)

        loc_clf = LogisticRegression()
        loc_filter = VarianceThreshold()
        loc_pipe = Pipeline([
            ('threshold', loc_filter),
            ('logistic', loc_clf)
        ])

        dist_clf = SparkLogisticRegression()
        dist_filter = SparkVarianceThreshold()
        dist_pipe = SparkPipeline([
            ('threshold', dist_filter),
            ('logistic', dist_clf)
        ])

        # Fit the distributed filter standalone, then both full pipelines.
        # `logistic__classes` is passed explicitly because the distributed
        # fit cannot discover the label set globally.
        dist_filter.fit(Z)
        loc_pipe.fit(X, y)
        dist_pipe.fit(Z, logistic__classes=np.unique(y))

        # Z[:, 'X'] selects the feature column of the DictRDD; collect()
        # gathers per-partition prediction arrays, concatenated for compare.
        assert_true(np.mean(np.abs(
            loc_pipe.predict(X) -
            np.concatenate(dist_pipe.predict(Z[:, 'X']).collect())
        )) < 0.1)
# Example 3 (scraped snippet separator — score: 0)
    ('tfidf', SparkTfidfTransformer()),  # IDF
    ('clf', SparkMultinomialNB(alpha=0.05))  # NB
))

# fit — classes must be supplied up front: the distributed fit cannot
# discover the global label set on its own
dist_pipeline.fit(Z, clf__classes=np.array([0, 1]))

# test data to RDD
test_x = ArrayRDD(sc.parallelize(data_test))
test_y = ArrayRDD(sc.parallelize(target_test))
test_Z = DictRDD((test_x, test_y),
                 columns=('X', 'y'),
                 dtype=[np.ndarray, np.ndarray])

# predict test data (select only the feature column of the DictRDD)
predicts = dist_pipeline.predict(test_Z[:, 'X'])

# metrics (accuracy, precision, recall, f1)
# NOTE(review): the original read `len(test)`, but no `test` name appears in
# this fragment — `data_test` is the test feature set parallelized above;
# confirm this is the intended size source.
data_size = len(data_test)
# Fixed typo: was `traget_test`, which is undefined (NameError).
array_y = target_test
array_pred = predicts.toarray()
# Materialize the pairing so it can be iterated more than once (under
# Python 3, zip() is a single-use iterator).
y_and_pred = list(zip(array_y, array_pred))

# calculate accuracy
pos_size = sum(array_y)
neg_size = data_size - pos_size

pos_pred_size = sum(array_pred)
neg_pred_size = data_size - pos_pred_size

# True positives. Generator form works on both Python 2 and 3 —
# `len(filter(...))` raises TypeError on Python 3's lazy filter object.
pos_acc_size = sum(1 for actual, pred in y_and_pred
                   if actual == 1 and actual == pred)