def load_parameters(self):
        self.amount_prediction_method = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL,
                                                                 file_name='amount_method')
        self.trend_prediction_method = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL,
                                                                file_name='trend_method')
        self.data_features = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL, file_name='features')
        self.stock_symbol = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL, file_name='symbol')
        self.data_parser = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL, file_name='data_parser')
        amount_model_path = os.path.join(os.path.abspath(self.model_path), 'amount_model')
        trend_model_path = os.path.join(os.path.abspath(self.model_path), 'trend_model')

        if self.amount_prediction_method == self.RANDOM_FOREST:
            amount_model = RandomForestModel.load(sc=self.sc, path=amount_model_path)
        elif self.amount_prediction_method == self.LINEAR_REGRESSION:
            amount_model = LinearRegressionModel.load(sc=self.sc, path=amount_model_path)
        else:
            amount_model = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL, file_name='amount_model')

        if self.trend_prediction_method == self.RANDOM_FOREST:
            trend_model = RandomForestModel.load(sc=self.sc, path=trend_model_path)
        elif self.trend_prediction_method == self.LOGISTIC_REGRESSION:
            trend_model = LogisticRegressionModel.load(sc=self.sc, path=trend_model_path)
        elif self.trend_prediction_method == self.NAIVE_BAYES:
            trend_model = NaiveBayesModel.load(sc=self.sc, path=trend_model_path)
        elif self.trend_prediction_method == self.SVM:
            trend_model = SVMModel.load(sc=self.sc, path=trend_model_path)
        else:
            trend_model = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL, file_name='trend_model')

        return trend_model, amount_model
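
A minimal usage sketch of the loader above; the names predictor and feature_vector are placeholders for illustration, not part of the original class. Both returned objects are ordinary MLlib models, so scoring is a plain predict() call on a feature vector (or an RDD of vectors).

trend_model, amount_model = predictor.load_parameters()  # predictor: assumed instance of this class
trend = trend_model.predict(feature_vector)    # feature_vector: assumed, built by the saved data_parser
amount = amount_model.predict(feature_vector)
print(trend, amount)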
Example no. 2
def random_forest():
    conf = SparkConf().setAppName('RF')
    sc = SparkContext(conf=conf)
    # print("\npyspark version:" + str(sc.version) + "\n")

    data = MLUtils.loadLibSVMFile(sc, './data/sample_libsvm_data.txt')
    (trainingData, testData) = data.randomSplit([0.7, 0.3])

    model = RandomForest.trainClassifier(trainingData,
                                         numClasses=2,
                                         categoricalFeaturesInfo={},
                                         numTrees=3,
                                         featureSubsetStrategy="auto",
                                         impurity='gini',
                                         maxDepth=4,
                                         maxBins=32)

    predictions = model.predict(testData.map(lambda x: x.features))
    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
    testErr = labelsAndPredictions.filter(lambda lp: lp[0] != lp[1]).count() / float(
        testData.count())
    print('Test Error = ' + str(testErr))
    print('Learned classification forest model:')
    print(model.toDebugString())
    # Save and load model
    model.save(sc, ".model/myRandomForestClassificationModel")
    sameModel = RandomForestModel.load(
        sc, "./model/myRandomForestClassificationModel")
Example no. 3
def get_rfc_model(filename, bucket, client, sc):

    # get download_url
    objects_dict = client.list_objects(Bucket=BUCKET_NAME)
    filenames = map(lambda x: x["Key"], objects_dict["Contents"])
    filenames = filter(
        lambda x: True
        if re.match(filename + '-v' + "[0-9]+", x) else False, filenames)
    if len(filenames) == 0:
        print "NO RFC MODELS FOUND"
        return False
    else:
        versions = map(lambda x: int(re.search("[0-9]+", x).group()),
                       filenames)
        download_url = filename + "-v" + str(max(versions)) + '.zip'

    # download zip and unzip it
    local_url = '/tmp/' + download_url
    os.system("rm -r " + local_url)  # remove zip just in case
    os.system("rm -r " + local_url[:-4])  # remove unzipped folder just in case
    if download_file(download_url, bucket):
        with zipfile.ZipFile(local_url, "r") as z:
            z.extractall(local_url[:-4])
        return RandomForestModel.load(sc, local_url[:-4])  #-4 for zip
    else:
        print "ZIP DOWNLOAD FAILED"
        return False
def train():
    data = MLUtils.loadLibSVMFile(sc, TEST_DATA_PATH)
    print("[INFO] load complete.")
    # Split off a subset of the data for training
    data = data.randomSplit([0.2, 0.8])[0]
    (trainingData, testData) = data.randomSplit([0.7, 0.3])

    # Train a RandomForest model.
    #  Empty categoricalFeaturesInfo indicates all features are continuous.
    #  Note: Use larger numTrees in practice.
    #  Setting featureSubsetStrategy="auto" lets the algorithm choose.
    model = RandomForest.trainClassifier(trainingData,
                                         numClasses=NUM_OF_CLASSES,
                                         categoricalFeaturesInfo={},
                                         numTrees=NUM_OF_TREES,
                                         featureSubsetStrategy="auto",
                                         impurity='gini',
                                         maxDepth=MAXDEPTH,
                                         maxBins=MAXBINS)

    # Evaluate model on test instances and compute test error
    predictions = model.predict(testData.map(lambda x: x.features))
    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
    testErr = labelsAndPredictions.filter(
        lambda lp: lp[0] != lp[1]).count() / float(testData.count())
    print('[INFO] Test Error = ' + str(testErr))
    print('[INFO] Learned classification forest model:')
    print(model.toDebugString())

    # Save and load model
    model.save(sc, TEST_MODEL_PATH)
    sameModel = RandomForestModel.load(sc, TEST_MODEL_PATH)
Example no. 5
def main():
    options = parse_args()

    sc = SparkContext(appName="PythonRandomForestClassificationExample")
    # $example on$
    # Load and parse the data file into an RDD of LabeledPoint.
    data = MLUtils.loadLibSVMFile(sc, options.data_file)
    # Split the data into training and test sets (30% held out for testing)
    (trainingData, testData) = data.randomSplit([0.7, 0.3])

    # Train a RandomForest model.
    #  Empty categoricalFeaturesInfo indicates all features are continuous.
    #  Note: Use larger numTrees in practice.
    #  Setting featureSubsetStrategy="auto" lets the algorithm choose.
    model = RandomForest.trainClassifier(trainingData,
                                         numClasses=2,
                                         categoricalFeaturesInfo={},
                                         numTrees=3,
                                         featureSubsetStrategy="auto",
                                         impurity='gini',
                                         maxDepth=4,
                                         maxBins=32)

    # Evaluate model on test instances and compute test error
    predictions = model.predict(testData.map(lambda x: x.features))
    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
    testErr = labelsAndPredictions.filter(
        lambda lp: lp[0] != lp[1]).count() / float(testData.count())
    print('Test Error = ' + str(testErr))
    print('Learned classification forest model:')
    print(model.toDebugString())

    # Save and load model
    model.save(sc, options.output_model)
    sameModel = RandomForestModel.load(sc, options.output_model)
Example no. 6
 def Prediction(self, modelType):
     data_point = self.Features
     if modelType == 'RF':
         model = RandomForestModel.load(
             self.sc, self.baseDir + '/fraudModel/Model/' + modelType)
         result = np.array(
             model.predict(self.sc.parallelize(data_point)).collect())
         self.df_PD.insert(len(list(self.df_PD.columns)), 'result', result)
     elif modelType == 'GBDT':
         model = GradientBoostedTreesModel.load(
             self.sc, self.baseDir + '/fraudModel/Model/' + modelType)
         result = np.array(
             model.predict(self.sc.parallelize(data_point)).collect())
         self.df_PD.insert(len(list(self.df_PD.columns)), 'result', result)
     elif modelType == 'LRsgd':
         model = LogisticRegressionModel.load(
             self.sc, self.baseDir + '/fraudModel/Model/' + modelType)
         result = np.array(
             model.predict(self.sc.parallelize(data_point)).collect())
         self.df_PD.insert(len(list(self.df_PD.columns)), 'result', result)
     elif modelType == 'LRlbfgs':
         model = LogisticRegressionModel.load(
             self.sc, self.baseDir + '/fraudModel/Model/' + modelType)
         result = np.array(
             model.predict(self.sc.parallelize(data_point)).collect())
         self.df_PD.insert(len(list(self.df_PD.columns)), 'result', result)
     elif modelType == 'SVM':
         model = SVMModel.load(
             self.sc, self.baseDir + '/fraudModel/Model/' + modelType)
         result = np.array(
             model.predict(self.sc.parallelize(data_point)).collect())
         self.df_PD.insert(len(list(self.df_PD.columns)), 'result', result)
     else:
         pass
Example no. 7
def evaluate_model(type):
    if type == 'logistic':
        model = LogisticRegressionModel.load(sc, "logit_model.model")
    elif type == 'tree':
        model = DecisionTreeModel.load(sc, "dt_model.model")
    elif type == 'rf':
        model = RandomForestModel.load(sc, "rf_model.model")
Example no. 8
def predict():

	testData = MLUtils.loadLibSVMFile(sc,INPUT_DATA_PATH)
	print("[INFO] load complete.")

	model = RandomForestModel.load(sc,TEST_MODEL_PATH)

	# Evaluate model on test instances and compute test error
	predictions = model.predict(testData.map(lambda x: x.features))

	lst = predictions.collect()
	with open(TEST_PREDICT_PATH+"/"+time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())+".txt",'w') as f:
		for k in lst:
			f.write(str(k)+"\n")

	labelsAndPredictions = testData.map(lambda lp: tobin(lp.label)).zip(predictions.map(lambda lp: tobin(lp)))

	#print(labelsAndPredictions.collect())

	metrics = BinaryClassificationMetrics(labelsAndPredictions)

	# Area under precision-recall curve
	print("Area under PR = %s" % metrics.areaUnderPR)

	# Area under ROC curve
	print("Area under ROC = %s" % metrics.areaUnderROC)
	#print(labelsAndPredictions.collect())

	testErr = labelsAndPredictions.filter(lambda lp: lp[0] != lp[1]).count() / float(testData.count())
	print('[INFO] Test Error = ' + str(testErr))
def prediction(parameter_list):
    #function
    sameModel = RandomForestModel.load(sc, filename)#"path/myrfModel"
    print "read model success"
    prediction = sameModel.predict(parameter_list)
    #example
    #[60.0,37.307741,-121.89593,17108.0,0.0,8400.0,0.0,1938.0,1320.0,6.0,2.0,1.0,95125.0,713.0,753543.2])
    return prediction
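
A hypothetical call to the helper above, reusing the sample feature vector from the comment; the loaded RandomForestModel accepts a single feature vector directly.

price = prediction([60.0, 37.307741, -121.89593, 17108.0, 0.0, 8400.0, 0.0,
                    1938.0, 1320.0, 6.0, 2.0, 1.0, 95125.0, 713.0, 753543.2])
print(price)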
Example no. 10
def main(sc, filename):
    '''
    The driver for the spark scoring application, it generates predictions for
    a given file of features and target variables
    '''

    rawDataRdd = sc.textFile(filename)
    print "Data Size: {}".format(rawDataRdd.count())

    labeledPointsRdd = rawDataRdd.map(parse_lines)

    #load models
    logit_model = LogisticRegressionModel.load(sc, "logit_model.model")
    dt_model = DecisionTreeModel.load(sc, "dt_model.model")
    rf_model = RandomForestModel.load(sc, "rf_model.model")

    #logistic predictions
    labels_and_preds = labeledPointsRdd.map(lambda p: (float(logit_model.predict(p.features)), p.label  ))
    labels_and_preds_collected = labels_and_preds.collect()
    print "\n"
    print "Predictions: Logistic Regression"
    y_true = []
    y_pred = []
    for row in labels_and_preds_collected:
        y_true.append(row[1])
        y_pred.append(row[0])
        # print "predicted: {0} - actual: {1}\n".format(row[0], row[1])


    accuracy = labels_and_preds.filter(lambda (v,p): v == p).count() / float(labeledPointsRdd.count())

    print_box()
    print "Prediction Accuracy (Logistic): {}".format(round(accuracy, 4))
    print_box()
    print "\n"

    #decision tree predictions
    predictions = dt_model.predict(labeledPointsRdd.map(lambda p: p.features))
    labels_and_preds_dt = labeledPointsRdd.map(lambda p: p.label).zip(predictions)
    labels_and_preds_dt_collected = labels_and_preds_dt.collect()


    accuracy_dt = labels_and_preds_dt.filter(lambda (v, p): v == p).count() / float(labeledPointsRdd.count())

    print_box()
    print "Prediction Accuracy (Decision Tree): {}".format(round(accuracy_dt, 4))
    print_box()
    print "\n"

    #random forest predictions
    predictions_rf = rf_model.predict(labeledPointsRdd.map(lambda p: p.features))
    labels_and_preds_rf = labeledPointsRdd.map(lambda p: p.label).zip(predictions_rf)
    accuracy_rf = labels_and_preds_rf.filter(lambda (v, p): v == p).count() / float(labeledPointsRdd.count())
    print_box()
    print "Prediction Accuracy (Random Forest): {}".format(round(accuracy_rf, 4))
    print_box()
Example no. 11
 def __init__(self, path):
     conf = SparkConf() \
         .setAppName("crankshaw-pyspark") \
         .set("spark.executor.memory", "2g") \
         .set("spark.kryoserializer.buffer.mb", "128") \
         .set("master", "local")
     sc = SparkContext(conf=conf, batchSize=10)
     self.model = RandomForestModel.load(sc, path)
     self.path = path
     print("started spark")
def get_best_classifier(sc, data_dir):
    # model_randfor_class_t04_d04_b024_s7593
    # model_randfor_class_t04_d04_b024_s7593_params.pckl
    from os import listdir
    from os.path import isfile, join
    models = [(f, int(f[-4:])) for f in listdir(data_dir)
              if f[:6] == 'model_' and len(f) == 39]
    best_model = max(models, key=lambda item: item[1])
    print('best classifier found is |{0}| score = {1} '.format(*best_model))
    params = load_pickle_file(data_dir + best_model[0] + '_params.pckl')
    print(params)
    #return (RandomForestClassificationModel.load(data_dir + best_model[0]), params)
    return (RandomForestModel.load(sc=sc,
                                   path=data_dir + best_model[0]), params)
def main():
    parser = OptionParser()
    parser.add_option('', '--enriched_data_path', action='store', dest='enriched_data_path', help='path to write enriched data')
    parser.add_option('', '--model_path', action='store', dest='model_path', help='path for model data')
    parser.add_option('', '--kafka_zookeeper_hosts', action='store', dest='kafka_zookeeper_hosts', help='list of Zookeeper hosts (host:port)')
    parser.add_option('', '--kafka_broker_list', action='store', dest='kafka_broker_list', help='list of Kafka brokers (host:port)')
    parser.add_option('', '--kafka_message_topic', action='store', dest='kafka_message_topic', help='topic to consume input messages from')
    parser.add_option('', '--kafka_alert_topic', action='store', dest='kafka_alert_topic', help='topic to produce alert messages to')
    parser.add_option('', '--kafka_enriched_data_topic', action='store', dest='kafka_enriched_data_topic', help='topic to produce enriched data to')
    parser.add_option('', '--streaming_batch_duration_sec', type='float', default=15.0,
        action='store', dest='streaming_batch_duration_sec', help='Streaming batch duration in seconds')
    parser.add_option('', '--max_batches', type='int', default=0,
        action='store', dest='max_batches', help='Number of batches to process (0 means forever)')
    options, args = parser.parse_args()

    sc = SparkContext()
    ssc = StreamingContext(sc, options.streaming_batch_duration_sec)
    sqlContext = getSqlContextInstance(sc)

    # Load saved model.
    model = None
    if options.model_path:
        model = RandomForestModel.load(sc, options.model_path)
    else:
        print('No model loaded.')

    # Create Kafka stream to receive new messages.
    kvs = KafkaUtils.createDirectStream(ssc, [options.kafka_message_topic], {
        'metadata.broker.list': options.kafka_broker_list,
        'group.id': 'spark_streaming_processor.py'})

    # Take only the 2nd element of the tuple.
    messages = kvs.map(lambda x: x[1])

    # Convert RDD of JSON strings to RDD of Rows.
    rows = messages.map(json_to_row)

    # Process messages.
    rows.foreachRDD(lambda time, rdd: 
        process_messages(time, rdd,
            ssc=ssc,
            model=model,
            enriched_data_path=options.enriched_data_path,
            zookeeper_hosts=options.kafka_zookeeper_hosts,
            kafka_alert_topic=options.kafka_alert_topic,
            kafka_enriched_data_topic=options.kafka_enriched_data_topic,
            max_batches=options.max_batches))

    ssc.start()
    ssc.awaitTermination()
Example no. 14
def get_rf_model(sc, train=None):
    model_path = 'rf.model'
    if train is None:
        model = RandomForestModel.load(sc, model_path)
    else:
        model = RandomForest.trainClassifier(train,
                                             numClasses=2,
                                             numTrees=10,
                                             categoricalFeaturesInfo={},
                                             featureSubsetStrategy="auto",
                                             impurity='gini',
                                             maxDepth=10,
                                             maxBins=100)
        model.save(sc, model_path)

    return model
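
A quick usage note for get_rf_model above (training_rdd is an assumed RDD of LabeledPoint): passing a training set trains and persists a fresh model at 'rf.model', while omitting it reloads the previously saved one.

model = get_rf_model(sc, train=training_rdd)  # train and save
model = get_rf_model(sc)                      # load the saved model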
Example no. 15
def predict():
    conf = SparkConf().setMaster("local").setAppName("My App")
    sc = SparkContext(conf=conf)

    testData = MLUtils.loadLibSVMFile(sc, TEST_DATA_PATH)

    model = RandomForestModel.load(sc, TEST_MODEL_PATH)

    predictions = model.predict(testData.map(lambda x: x.features))

    predictlabel_list = predictions.collect()

    rateOFeachSort_dict = analyse_result(predictlabel_list)

    save(predictlabel_list)

    return rateOFeachSort_dict
Example no. 16
def test(sc):
    files = ["sounds/flushing/20150227_193109-flushing-04.wav",
             "sounds/bike/20150227_193806-bici-14.wav",
             "sounds/blender/20150227_193606-licuadora-14.wav"
             ]

    rfmodel = RandomForestModel.load(sc, RF_PATH)
    dtmodel = DecisionTreeModel.load(sc, DT_PATH)

    print(dtmodel.toDebugString())
    for f in files:
        vec = audio.showFeatures(f)
        testfeatures = Vectors.dense([float(x) for x in vec.split(' ')])
        print(vec)
        pred = dtmodel.predict(testfeatures)
        print("DT Prediction is " + str(pred), classes[int(pred)])
        pred = rfmodel.predict(testfeatures)
        print("RF Prediction is " + str(pred), classes[int(pred)])
Example no. 17
def predict(sc, data):

    #sc = SparkContext(appName="PythonRandomForestClassificationExample")
    # $example on$
    # Load and parse the data file into an RDD of LabeledPoint.
    #data = MLUtils.loadLibSVMFile(sc, 'deathproject/test1.txt')
    #data = [1, 0, 5, 1, 1, 65, 1, 2, 0, 1, 0, 6, 3450]
    # Split the data into training and test sets (30% held out for testing)
    # (trainingData, testData) = data.randomSplit([0.7, 0.3])

    # Train a RandomForest model.
    #  Empty categoricalFeaturesInfo indicates all features are continuous.
    #  Note: Use larger numTrees in practice.
    #  Setting featureSubsetStrategy="auto" lets the algorithm choose.
    print('Starting...')

    #model = RandomForest.trainClassifier(trainingData, numClasses=8, categoricalFeaturesInfo={},
    #                                     numTrees=5, featureSubsetStrategy="auto",
    #                                     impurity='gini', maxDepth=4, maxBins=32)



    sameModel = RandomForestModel.load(sc, "target/tmp/myRandomForestClassificationModel2")

    print('Predicting...')
    # Evaluate model on test instances and compute test error
   
    #predictions = sameModel.predict(data.map(lambda x: x.features))
    predictions = sameModel.predict(data)

    #labelsAndPredictions = data.map(lambda lp: lp.label).zip(predictions)
    #testErr = labelsAndPredictions.filter(lambda (v, p): v != p).count() / float(testData.count())
    #print('Test Error = ' + str(testErr))
    #print('Learned classification forest model:')
    #print(sameModel.toDebugString())
    print(predictions)
    return int(predictions)
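
A hypothetical call to the predict helper above, reusing the sample vector from the commented-out line; RandomForestModel.predict accepts a single feature vector as well as an RDD of vectors.

label = predict(sc, [1, 0, 5, 1, 1, 65, 1, 2, 0, 1, 0, 6, 3450])
print(label)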
Example no. 18
def random_forest():
    """
    使用mllib对Spark安装包mllib的测试数据集做随机森林测试
    80%数据作为训练数据  20%数据作为测试数据
    :return:
    """
    data_rdd = MLUtils.loadLibSVMFile(
        sc, '{}/mllib/sample_libsvm_data.txt'.format(current_dir))

    train_data_rdd, test_data_rdd = data_rdd.randomSplit([0.8, 0.2])
    model = RandomForest.trainClassifier(train_data_rdd,
                                         numClasses=2,
                                         categoricalFeaturesInfo={},
                                         numTrees=3)

    # Predict whether the label is 0 or 1 from the test-set features
    predict_rdd = model.predict(test_data_rdd.map(lambda x: x.features))

    # Actual labels of the test set, zipped with the predictions
    labels_rdd = test_data_rdd.map(lambda lp: lp.label).zip(predict_rdd)

    # Fraction of test samples whose prediction differs from the actual label (error rate)
    print(labels_rdd.filter(lambda x: x[0] != x[1]).count())
    test_err = labels_rdd.filter(lambda x: x[0] != x[1]).count() / float(
        test_data_rdd.count())
    print("test error rate:{}".format(test_err))

    # Save the trained model
    model_path = "{}/my_random_forest_model".format(current_dir)
    if not os.path.exists(model_path):
        model.save(sc, model_path)

    trained_model = RandomForestModel.load(
        sc, "{}/my_random_forest_model".format(current_dir))
    print(trained_model.toDebugString())
    return trained_model
Example no. 20
from pyspark.mllib.regression import LabeledPoint
from pyspark import SparkContext, SparkConf
from pyspark.sql.session import SparkSession	
from pyspark.ml.classification import RandomForestClassifier
from pyspark.mllib.tree import RandomForestModel
from pyspark.mllib.tree import RandomForest
from pyspark.mllib.evaluation import MulticlassMetrics
from pyspark.mllib.linalg import Vectors
from prettytable import PrettyTable

sc = SparkContext()
spark = SparkSession(sc)
inputDF = spark.read.csv('s3://assignmentcs643/TrainingDataset.csv',header='true', inferSchema='true', sep=';')


datadf= inputDF.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[0:-1])))
model = RandomForestModel.load(sc,"s3://assignmentcs643/randomforestmodel.model")

predictions = model.predict(datadf.map(lambda x: x.features))

labels_and_predictions = datadf.map(lambda x: x.label).zip(predictions)
acc = labels_and_predictions.filter(lambda x: x[0] == x[1]).count() / float(datadf.count())


metrics = MulticlassMetrics(labels_and_predictions)
f1 = metrics.fMeasure()
recall = metrics.recall()
precision = metrics.precision()

#evaluation values 
print("Model accuracy: %.3f%%" % (acc * 100))
print("Recall Value = %s" % recall)
    # Split the data into training and test sets (30% held out for testing)
    (trainingData, testData) = data.randomSplit([0.7, 0.3])

    # Train a RandomForest model.
    #  Empty categoricalFeaturesInfo indicates all features are continuous.
    #  Note: Use larger numTrees in practice.
    #  Setting featureSubsetStrategy="auto" lets the algorithm choose.
    model = RandomForest.trainClassifier(trainingData,
                                         numClasses=2,
                                         categoricalFeaturesInfo={},
                                         numTrees=3,
                                         featureSubsetStrategy="auto",
                                         impurity='gini',
                                         maxDepth=4,
                                         maxBins=32)

    # Evaluate model on test instances and compute test error
    predictions = model.predict(testData.map(lambda x: x.features))
    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
    testErr = labelsAndPredictions.filter(
        lambda lp: lp[0] != lp[1]).count() / float(testData.count())
    print('Test Error = ' + str(testErr))
    print('Learned classification forest model:')
    print(model.toDebugString())

    # Save and load model
    model.save(sc, "target/tmp/myRandomForestClassificationModel")
    sameModel = RandomForestModel.load(
        sc, "target/tmp/myRandomForestClassificationModel")
    # $example off$
def writeLumbarTrainingReadings(time, rddTraining):
	try:
		# Convert RDDs of the words DStream to DataFrame and run SQL query
		connectionProperties = MySQLConnection.getDBConnectionProps('/home/erik/mysql_credentials.txt')
		sqlContext = SQLContext(rddTraining.context)
		if rddTraining.isEmpty() == False:
			lumbarTrainingReading = sqlContext.jsonRDD(rddTraining)
			lumbarTrainingReadingFinal = lumbarTrainingReading.selectExpr("deviceID","metricTypeID","uomID","positionID","actual.y AS actualYaw","actual.p AS actualPitch","actual.r AS actualRoll","setPoints.y AS setPointYaw","setPoints.p AS setPointPitch","setPoints.r AS setPointRoll")
			lumbarTrainingReadingFinal.write.jdbc("jdbc:mysql://localhost/biosensor", "SensorTrainingReadings", properties=connectionProperties)
	except:
		pass
		
if __name__ == "__main__":
	sc = SparkContext(appName="Process Lumbar Sensor Readings")
	ssc = StreamingContext(sc, 2) # 2 second batches
	loadedModel = RandomForestModel.load(sc, "../machine_learning/models/IoTBackBraceRandomForest.model")

	#Process Readings
	streamLumbarSensor = KafkaUtils.createDirectStream(ssc, ["LumbarSensorReadings"], {"metadata.broker.list": "localhost:9092"})
	lineSensorReading = streamLumbarSensor.map(lambda x: x[1])
	lineSensorReading.foreachRDD(writeLumbarReadings)
	
	#Process Training Readings
	streamLumbarSensorTraining = KafkaUtils.createDirectStream(ssc, ["LumbarSensorTrainingReadings"], {"metadata.broker.list": "localhost:9092"})
	lineSensorTrainingReading = streamLumbarSensorTraining.map(lambda x: x[1])
	lineSensorTrainingReading.foreachRDD(writeLumbarTrainingReadings)

	# Run and then wait for termination signal
	ssc.start()
	ssc.awaitTermination()
 
Example no. 23
def setup_model(sc, model_uri):
    model = RandomForestModel.load(sc, model_uri)
    return model
    res.append({'name': 'Root' + head, 'children': parse(data[1:], 0)})
    outstr += json.dumps(res[0])
    with open('../vis/data/structure.json', 'w') as outfile:
        outfile.write(outstr)

    print('Conversion Success !')


if __name__ == "__main__":

    # debug
    # treeStr = open("../vis/data/modelString",'r').read()
    # python3 vis.py ../model 7 3

    try:
        MAX_DEPTH = int(sys.argv[1])
    except:
        print("default depth is 7.")

    try:
        USE_TREE = int(sys.argv[2])
    except:
        print("default tree to convert is num 3")

    sc = SparkContext(appName="RandomForest2Json")
    trees = RandomForestModel.load(sc, TEST_MODEL_PATH)
    treeStr = trees.toDebugString()
    with open("../vis/data/modelString", 'w') as f:
        f.write(treeStr)

    tree2json(treeStr)
Example no. 25
            paths = paths\
                    .replace('.0','')\
                    .replace('feature ','')\
                    .replace('Predict: ','#')\
                    .replace('not in','5')\
                    .replace(' ','|')\
                    .replace('in','4')\
                    .replace('>=','0')\
                    .replace('<=','1')\
                    .replace('>','2')\
                    .replace('<','3')\
                    .replace('Root:','')
            print >> f2, paths.decode('utf8') 

        else:
            walk(dic['children'], path+dic['name']+':')
            
           

    f2.close()
    
if __name__ == "__main__":
    
    dtModelFile = "output/RFModel"
    dtModelResults = "randomForestModel.txt"

    sc = SparkContext("local[20]","RFClassification")
    dtModel = RandomForestModel.load(sc, dtModelFile)
    dtree = dtModel.toDebugString() 
    print dtree
    tree_json(dtree,dtModelResults)

if __name__ == "__main__":
    sc = SparkContext(appName="PythonRandomForestClassificationExample")
    # $example on$
    # Load and parse the data file into an RDD of LabeledPoint.
    data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
    # Split the data into training and test sets (30% held out for testing)
    (trainingData, testData) = data.randomSplit([0.7, 0.3])

    # Train a RandomForest model.
    #  Empty categoricalFeaturesInfo indicates all features are continuous.
    #  Note: Use larger numTrees in practice.
    #  Setting featureSubsetStrategy="auto" lets the algorithm choose.
    model = RandomForest.trainClassifier(trainingData, numClasses=2, categoricalFeaturesInfo={},
                                         numTrees=3, featureSubsetStrategy="auto",
                                         impurity='gini', maxDepth=4, maxBins=32)

    # Evaluate model on test instances and compute test error
    predictions = model.predict(testData.map(lambda x: x.features))
    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
    testErr = labelsAndPredictions.filter(lambda lp: lp[0] != lp[1]).count() / float(testData.count())
    print('Test Error = ' + str(testErr))
    print('Learned classification forest model:')
    print(model.toDebugString())

    # Save and load model
    model.save(sc, "target/tmp/myRandomForestClassificationModel")
    sameModel = RandomForestModel.load(sc, "target/tmp/myRandomForestClassificationModel")
    # $example off$
Example no. 27
    def test_classification(self):
        from pyspark.mllib.classification import LogisticRegressionWithSGD, SVMWithSGD, NaiveBayes
        from pyspark.mllib.tree import DecisionTree, DecisionTreeModel, RandomForest,\
            RandomForestModel, GradientBoostedTrees, GradientBoostedTreesModel
        data = [
            LabeledPoint(0.0, [1, 0, 0]),
            LabeledPoint(1.0, [0, 1, 1]),
            LabeledPoint(0.0, [2, 0, 0]),
            LabeledPoint(1.0, [0, 2, 1])
        ]
        rdd = self.sc.parallelize(data)
        features = [p.features.tolist() for p in data]

        temp_dir = tempfile.mkdtemp()

        lr_model = LogisticRegressionWithSGD.train(rdd, iterations=10)
        self.assertTrue(lr_model.predict(features[0]) <= 0)
        self.assertTrue(lr_model.predict(features[1]) > 0)
        self.assertTrue(lr_model.predict(features[2]) <= 0)
        self.assertTrue(lr_model.predict(features[3]) > 0)

        svm_model = SVMWithSGD.train(rdd, iterations=10)
        self.assertTrue(svm_model.predict(features[0]) <= 0)
        self.assertTrue(svm_model.predict(features[1]) > 0)
        self.assertTrue(svm_model.predict(features[2]) <= 0)
        self.assertTrue(svm_model.predict(features[3]) > 0)

        nb_model = NaiveBayes.train(rdd)
        self.assertTrue(nb_model.predict(features[0]) <= 0)
        self.assertTrue(nb_model.predict(features[1]) > 0)
        self.assertTrue(nb_model.predict(features[2]) <= 0)
        self.assertTrue(nb_model.predict(features[3]) > 0)

        categoricalFeaturesInfo = {0: 3}  # feature 0 has 3 categories
        dt_model = DecisionTree.trainClassifier(
            rdd, numClasses=2, categoricalFeaturesInfo=categoricalFeaturesInfo, maxBins=4)
        self.assertTrue(dt_model.predict(features[0]) <= 0)
        self.assertTrue(dt_model.predict(features[1]) > 0)
        self.assertTrue(dt_model.predict(features[2]) <= 0)
        self.assertTrue(dt_model.predict(features[3]) > 0)

        dt_model_dir = os.path.join(temp_dir, "dt")
        dt_model.save(self.sc, dt_model_dir)
        same_dt_model = DecisionTreeModel.load(self.sc, dt_model_dir)
        self.assertEqual(same_dt_model.toDebugString(), dt_model.toDebugString())

        rf_model = RandomForest.trainClassifier(
            rdd, numClasses=2, categoricalFeaturesInfo=categoricalFeaturesInfo, numTrees=10,
            maxBins=4, seed=1)
        self.assertTrue(rf_model.predict(features[0]) <= 0)
        self.assertTrue(rf_model.predict(features[1]) > 0)
        self.assertTrue(rf_model.predict(features[2]) <= 0)
        self.assertTrue(rf_model.predict(features[3]) > 0)

        rf_model_dir = os.path.join(temp_dir, "rf")
        rf_model.save(self.sc, rf_model_dir)
        same_rf_model = RandomForestModel.load(self.sc, rf_model_dir)
        self.assertEqual(same_rf_model.toDebugString(), rf_model.toDebugString())

        gbt_model = GradientBoostedTrees.trainClassifier(
            rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numIterations=4)
        self.assertTrue(gbt_model.predict(features[0]) <= 0)
        self.assertTrue(gbt_model.predict(features[1]) > 0)
        self.assertTrue(gbt_model.predict(features[2]) <= 0)
        self.assertTrue(gbt_model.predict(features[3]) > 0)

        gbt_model_dir = os.path.join(temp_dir, "gbt")
        gbt_model.save(self.sc, gbt_model_dir)
        same_gbt_model = GradientBoostedTreesModel.load(self.sc, gbt_model_dir)
        self.assertEqual(same_gbt_model.toDebugString(), gbt_model.toDebugString())

        try:
            rmtree(temp_dir)
        except OSError:
            pass
Example no. 28
if __name__ == "__main__":
    sc = SparkContext(appName="PythonRandomForestRegressionExample")
    # $example on$
    # Load and parse the data file into an RDD of LabeledPoint.
    data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
    # Split the data into training and test sets (30% held out for testing)
    (trainingData, testData) = data.randomSplit([0.7, 0.3])

    # Train a RandomForest model.
    #  Empty categoricalFeaturesInfo indicates all features are continuous.
    #  Note: Use larger numTrees in practice.
    #  Setting featureSubsetStrategy="auto" lets the algorithm choose.
    model = RandomForest.trainRegressor(trainingData, categoricalFeaturesInfo={},
                                        numTrees=3, featureSubsetStrategy="auto",
                                        impurity='variance', maxDepth=4, maxBins=32)

    # Evaluate model on test instances and compute test error
    predictions = model.predict(testData.map(lambda x: x.features))
    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
    testMSE = labelsAndPredictions.map(lambda lp: (lp[0] - lp[1]) * (lp[0] - lp[1])).sum() /\
        float(testData.count())
    print('Test Mean Squared Error = ' + str(testMSE))
    print('Learned regression forest model:')
    print(model.toDebugString())

    # Save and load model
    model.save(sc, "target/tmp/myRandomForestRegressionModel")
    sameModel = RandomForestModel.load(sc, "target/tmp/myRandomForestRegressionModel")
    # $example off$
Example no. 29
from pyspark import SparkContext
from pyspark.mllib.tree import RandomForest, RandomForestModel
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.util import MLUtils

sc = SparkContext("local", "Amadeus Random Forest Run", pyFiles=['run_model.py'])

data = sc.textFile("data_timbre.csv")
zipped_data = data.zipWithIndex()
keyed_data = zipped_data.map(lambda line: (line[-1], line[:-1]))

target = sc.textFile("target_timbre.csv")
zipped_target = target.zipWithIndex()
keyed_target = zipped_target.map(lambda line: (line[-1], line[:-1]))

target_data = keyed_data.join(keyed_target)
labled_point_data = target_data.map(lambda tup: LabeledPoint(tup[1][1][0], tup[1][0][0].split(',')))

# Split the data into training and test sets (30% held out for testing)
print("Creating Training and Test Data Split")
(trainingData, testData) = labled_point_data.randomSplit([0.7, 0.3])

model = RandomForestModel.load(sc, "myRFModel")

predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testMSE = labelsAndPredictions.map(lambda vp: (vp[0] - vp[1]) * (vp[0] - vp[1])).sum() / float(testData.count())
print('Test Mean Squared Error = ' + str(testMSE))

testAccuracy = labelsAndPredictions.map(lambda vp: 1 if (abs(vp[0] - vp[1]) < 10) else 0).sum() / float(testData.count())
print('Total Accuracy = ' + str(testAccuracy))
Example no. 30
def load(filename):
    model = RandomForestModel.load(sc, filename)
    return model
Example no. 31
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.mllib.feature import HashingTF, IDF
from pyspark.mllib.tree import RandomForest, RandomForestModel
from pyspark.mllib.util import MLUtils
from pyspark.mllib.regression import LabeledPoint

from gensim.models.doc2vec import Doc2Vec
from math import exp
from threading import Thread, Event

sc = SparkContext()
sqlContext = SQLContext(sc)

# this is a large object we cache it on each worker node
gmod_broadcast = sc.broadcast( Doc2Vec.load("/data/_hndata/doc2vec_model/hn") ) 

tfidf_model = RandomForestModel.load(sc, "hdfs:///hndata/hnrrmodel_tfidf")

doc2vec_model = RandomForestModel.load(sc, "hdfs:///hndata/rrscoremodel")
doc2vec_model2 = RandomForestModel.load(sc, "hdfs:///hndata/rrscoremodel2")

tf = sc.pickleFile("hdfs:///hndata/tf_pickle")
idf = IDF().fit(tf)
hashingTF = HashingTF(1000)



def pred_tfidf(docs):
    sents = sc.parallelize(docs).map(lambda d: d.strip().split())
    new_tf = hashingTF.transform(sents)
    tfidf = idf.transform(new_tf)
    return tfidf_model.predict(tfidf)
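
A brief usage sketch for pred_tfidf above (the document strings are invented for illustration): predict() on an RDD of TF-IDF vectors returns an RDD of scores, so collect() brings them back to the driver.

scores = pred_tfidf(["show hn a spark based recommender",
                     "ask hn how do you test streaming jobs"]).collect()
print(scores)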
Example no. 32
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

#figPath = os.path.join('static', 'skillradar')

######################## LOAD MODEL ###########################
# import pre-trained models
import pyspark
from pyspark import SparkContext, SparkConf
from pyspark.mllib.tree import RandomForest, RandomForestModel

conf = SparkConf().setAppName('GameAnalysis').setMaster('local')
sc = SparkContext(conf=conf)
# player skill analyzing model
model_player = RandomForestModel.load(
    sc, 'model/RandomForestModel/myRandomForestRegressionModel')
# match result predicting model
model_match = RandomForestModel.load(sc, "model/Dota2RandomForestModel")

######################## WEB SERVER ###########################
from flask import Flask, render_template, request, redirect, flash, url_for, make_response
from flask_wtf import FlaskForm
from wtforms import StringField, validators, SelectMultipleField, Form, widgets
from wtforms.validators import DataRequired, InputRequired

from flask_bootstrap import Bootstrap

app = Flask(__name__)
Bootstrap(app)
app.secret_key = 'secret-for-dev'
PORT = os.getenv('PORT', '40555')
Example no. 33
from pyspark.mllib.tree import RandomForest, RandomForestModel
from pyspark import SparkConf, SparkContext
from pyspark.sql.session import SparkSession
from pyspark.sql import SQLContext, HiveContext
from pyspark.mllib.regression import LabeledPoint
import pickle
import pandas as pd

sc = SparkContext('local')
model = RandomForestModel.load(sc, "myFirstWorkModel")

data = pd.read_csv('/home/hadoop/hadooptest/test.csv')

Nattr = ['PassengerId', 'Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
data['Age'].fillna(int(data.Age.mean()), inplace=True)
data['Cabin'] = data['Cabin'].fillna('missing').map(lambda x: x[0]).map(
    lambda x: x if x != 'm' else 'missing')

data['Embarked'].fillna("missing", inplace=True)
for attr in ['Cabin', 'Sex', 'Embarked']:
    dummy = pd.get_dummies(data[attr], prefix=attr)
    data = pd.concat([data, dummy], axis=1)
    data.pop(attr)
'''
[  u'passengerid',      u'survived',        u'pclass',
                 u'age',         u'sibsp',         u'parch',
                u'fare',       u'cabin_A',       u'cabin_B',       u'cabin_C',
             u'cabin_D',       u'cabin_E',       u'cabin_F',       u'cabin_G',
             u'cabin_T', u'cabin_missing',    u'sex_female',      u'sex_male',
           u'embarked_',    u'embarked_C',    u'embarked_Q',    u'embarked_S']
'''
Example no. 34
    # 7: solr collection/index name
    # 8: number of json doc/item to index per batch
    if len(sys.argv) != 8:
        exit(-1)

    ## initiate spark context
    sc = SparkContext(appName="PythonStreamingDirectKafkaTweetSentiments")
    brokers, topic, solrHost, solrPort, confDirHDFS, solrCollection, solrBatchSize = sys.argv[1:]

    ## load libraries & model ###########
    affinListEng=sc.textFile(confDirHDFS+'/AFINN-111_eng.txt')
    affinListMalay=sc.textFile(confDirHDFS+'/AFINN-111_malay.txt')
    affinEnglish=loadDict(affinListEng.collect(), ',')
    affinMalay=loadDict(affinListMalay.collect(), ',')

    twitterSentimentRF = RandomForestModel.load(sc, confDirHDFS+"/twitterSentimentRF.model")

    ## initiate spark streaming context
    ssc = StreamingContext(sc, 2)

    # parse tweets and collect text
    # brokers, topic, solrHost, solrPort = sys.argv[1:]
    kvs = KafkaUtils.createDirectStream(ssc, [topic], {"metadata.broker.list": brokers, "auto.offset.reset": "smallest"}, keyDecoder=utf8_decoder_ignore_error, valueDecoder=utf8_decoder_ignore_error )
    lines = kvs.map(lambda x: x[1])
    # keep all the texts between widest curly braces
    lines = lines.map(lambda x: "{"+re.findall(r'\{(.+)\}',x)[0]+"}")
    # convert to ascii	
    #texts = lines.map(lambda x: json.loads(x)['text'].encode('ascii', "ignore"))
    tweets = lines.map(lambda x :(\
                          json.loads(x)['id'], 
                          json.loads(x)['created_at'].encode('ascii', "ignore"),\
                "deviceID", "metricTypeID", "uomID", "positionID",
                "actual.y AS actualYaw", "actual.p AS actualPitch",
                "actual.r AS actualRoll", "setPoints.y AS setPointYaw",
                "setPoints.p AS setPointPitch", "setPoints.r AS setPointRoll")
            lumbarTrainingReadingFinal.write.jdbc(
                "jdbc:mysql://localhost/biosensor",
                "SensorTrainingReadings",
                properties=connectionProperties)
    except:
        pass


if __name__ == "__main__":
    sc = SparkContext(appName="Process Lumbar Sensor Readings")
    ssc = StreamingContext(sc, 2)  # 2 second batches
    loadedModel = RandomForestModel.load(
        sc, "../machine_learning/models/IoTBackBraceRandomForest.model")

    #Process Readings
    streamLumbarSensor = KafkaUtils.createDirectStream(
        ssc, ["LumbarSensorReadings"],
        {"metadata.broker.list": "localhost:9092"})
    lineSensorReading = streamLumbarSensor.map(lambda x: x[1])
    lineSensorReading.foreachRDD(writeLumbarReadings)

    #Process Training Readings
    streamLumbarSensorTraining = KafkaUtils.createDirectStream(
        ssc, ["LumbarSensorTrainingReadings"],
        {"metadata.broker.list": "localhost:9092"})
    lineSensorTrainingReading = streamLumbarSensorTraining.map(lambda x: x[1])
    lineSensorTrainingReading.foreachRDD(writeLumbarTrainingReadings)
Example no. 36
    return prediction


@app.route("/spark_rf", methods=["GET"])
def spark_rest_info():
    return "hello, spark random forest! please post with json data.", 200


@app.route("/spark_rf", methods=["POST"])
def rest_prediction():
    req_post = json.loads(request.data)
    resp = dict()
    for i in range(max_para_number):
        if standard_info_headers[i] in req_post:
            parameter_list[i] = req_post[standard_info_headers[i]]
            resp[standard_info_headers[i]] = parameter_list[i]

    resp['price'] = prediction(parameter_list)
    return jsonify(resp), 200


if __name__ == '__main__':
    conf = SparkConf().setAppName(APP_NAME)
    conf = conf.setMaster("local[*]")
    sc = SparkContext(conf=conf)
    if len(sys.argv) > 1:
        filename = sys.argv[1]  # pass model path
    global sameModel
    sameModel = RandomForestModel.load(sc, filename)
    app.run(host='0.0.0.0', port=10003)
try:
    from pyspark.mllib.linalg import Vectors

    print ("Successfully imported Spark Modules")
except ImportError as e:
    print ("Can not import Spark Modules", e)
    sys.exit(1)

import functools
import itertools
from kafka import KafkaConsumer

def parseData(line):
    splittedLine = line.split(",")
    values = [float(s) for s in splittedLine[4:-1]]
    label = splittedLine[-1]
    featuresVector = Vectors.dense(values)
    return LabeledPoint(label, featuresVector)

if __name__ == "__main__":
    conf = SparkConf().setAppName("RandomForest_Anomaly_Detection_Kafka_Consumer")
    sc = SparkContext(conf=conf)
    savedModel = RandomForestModel.load(sc, "../train_model/model")
    consumer = KafkaConsumer('test', group_id='my_group', bootstrap_servers=['localhost:9092'])
    print("Waiting for messages...")
    for message in consumer:
    	print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition, message.offset, message.key, message.value))
        data = sc.parallelize([message.value])
        testData = data.map(parseData)
        predictions = savedModel.predict(testData.map(lambda x: x.features))
        print("Prediction: ")
        print(predictions.first())
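
For context, a minimal companion producer sketch (assumed, not part of the original snippet) that feeds the consumer above. Each message is one CSV line in the layout parseData() expects: four leading id fields, the numeric features, and the label last.

from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
# the first four fields are skipped by parseData(); the trailing value is the label
line = "dev-01,2024-01-01,sensor,eu-west,0.12,3.4,7.8,0.99,1"
producer.send('test', line.encode('utf-8'))
producer.flush()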
Example no. 38
    'week_start', date_add(col('week_start_origin'),
                           7 * (num_weeks_ahead + 1)))
## only do optimization for stores in the treatment group
df_test = df_test.filter(col('group_val') == 'treatment').drop('group_val')
df_test = df_test.select([
    'store_id', 'product_id', 'department_id', 'brand_id', 'msrp', 'cost',
    'avg_hhi', 'avg_traffic', 'week_start'
]).distinct()

# In[21]:

################################################## 3: load the model built last time
dirfilename_load = spark.read.parquet(modelDir + "model_name").filter(
    '_1 == "model_name"').rdd.map(lambda p: p[1]).collect()
dirfilename_load = dirfilename_load[0]
rfModel = RandomForestModel.load(sc, dirfilename_load)

# In[34]:

################################################## 4: optimization
##define categorical features, which used in modeling
features_categorical_train_and_test = ["department_id", "brand_id"]
features_numerical_train_and_test = [
    "price", "avg_hhi", "avg_traffic", "rl_price", "discount"
]
##feature index the categorical features
for s in features_categorical_train_and_test:
    s_StringIndexed = s + "_StringIndexed"
    indexer = StringIndexer(inputCol=s, outputCol=s_StringIndexed)
    indexer_trans = indexer.fit(df_train)
    df_test = indexer_trans.transform(df_test)
Example no. 39
  dict_add = pickle.load(handle)

with open('dict_city.pickle', 'rb') as handle:
  dict_city = pickle.load(handle)

with open('dict_fac.pickle', 'rb') as handle:
  dict_fac = pickle.load(handle)

with open('dict_status.pickle', 'rb') as handle:
  dict_status = pickle.load(handle)

with open('dict_bor.pickle', 'rb') as handle:
  dict_bor = pickle.load(handle)


model = RandomForestModel.load(sc, "modelCritical")

predictList = []

f = open("outFile3.csv", 'rU')

for line in csv.reader(f, delimiter=","):
	predictRow = list(line)
'''
agency = predictRow[agency_index]
complaint = predictRow[comp_index]
location = predictRow[loc_index]
incident = predictRow[incident_index]
address = predictRow[add_index]
city = predictRow[city_index]
facility = predictRow[fac_index]
if __name__ == "__main__":
    sc = SparkContext(appName="PythonRandomForestRegressionExample")
    # $example on$
    # Load and parse the data file into an RDD of LabeledPoint.
    data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
    # Split the data into training and test sets (30% held out for testing)
    (trainingData, testData) = data.randomSplit([0.7, 0.3])

    # Train a RandomForest model.
    #  Empty categoricalFeaturesInfo indicates all features are continuous.
    #  Note: Use larger numTrees in practice.
    #  Setting featureSubsetStrategy="auto" lets the algorithm choose.
    model = RandomForest.trainRegressor(trainingData, categoricalFeaturesInfo={},
                                        numTrees=3, featureSubsetStrategy="auto",
                                        impurity='variance', maxDepth=4, maxBins=32)

    # Evaluate model on test instances and compute test error
    predictions = model.predict(testData.map(lambda x: x.features))
    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
    testMSE = labelsAndPredictions.map(lambda lp: (lp[0] - lp[1]) * (lp[0] - lp[1])).sum() /\
        float(testData.count())
    print('Test Mean Squared Error = ' + str(testMSE))
    print('Learned regression forest model:')
    print(model.toDebugString())

    # Save and load model
    model.save(sc, "target/tmp/myRandomForestRegressionModel")
    sameModel = RandomForestModel.load(sc, "target/tmp/myRandomForestRegressionModel")
    # $example off$
Example no. 41
def main():
    sc = SparkContext(conf=SparkConf().setAppName("Random Forest"))
    bytePath = "/Users/priyanka/Desktop/project2files/train"
    byteTestPath = "/Users/priyanka/Desktop/project2files/test"
    namePath = "/Users/priyanka/Desktop/X_train_small.txt"
    nameTestPath = "/Users/priyanka/Desktop/X_test_small.txt"
    classPath = "/Users/priyanka/Desktop/y_train_small.txt"
    classTestPath = "/Users/priyanka/Desktop/y_test_small.txt"
    docData = sc.wholeTextFiles(
        bytePath,
        25).map(lambda (x, y): (x.encode("utf-8"), y.encode("utf-8")))
    print("docData frankie")
    docData.take(1)
    #clean docData here - remove 1st word from line and remove
    cleanDocData = docData.map(lambda (x, y): (x, clean(y.split())))
    nameData = sc.textFile(
        namePath, 25).map(lambda x: "file:" + bytePath + "/" + x + ".bytes"
                          ).zipWithIndex().map(lambda (x, y): (y, x))
    #nameData.take(5)
    labelData = sc.textFile(
        classPath, 25).zipWithIndex().map(lambda (x, y): (y, str(int(x) - 1)))
    joinNameLabel = nameData.join(labelData).map(lambda (x, y): y)
    #joinNameLabel.take(5)
    joinCleanDocLabel = joinNameLabel.join(cleanDocData).map(lambda (x, y): y)
    print("starting hashingTF rosie")
    #joinCleanDocLabel.persist()
    #tfFormat = joinDocLabel.map(lambda (x,y): (x,y.split()))
    hashingTF = HashingTF()
    hashData = joinCleanDocLabel.map(
        lambda (label, text): LabeledPoint(label, hashingTF.transform(text)))
    hashData.persist()
    print "hashing TF done"
    #    model = NaiveBayes.train(hashData)

    print("generating model fliss")
    model1 = RandomForest.trainClassifier(hashData,
                                          numClasses=10,
                                          categoricalFeaturesInfo={},
                                          numTrees=50,
                                          featureSubsetStrategy="auto",
                                          impurity='gini',
                                          maxDepth=4,
                                          maxBins=32)

    #==============================================================================
    # Testing starts here
    #==============================================================================
    docTestData = sc.wholeTextFiles(
        byteTestPath,
        25).map(lambda (x, y): (x.encode("utf-8"), y.encode("utf-8")))
    #docTestData.take(1)

    cleanDocTestData = docTestData.map(lambda (x, y): (x, clean(y.split())))
    nameTestData = sc.textFile(
        nameTestPath,
        25).map(lambda x: "file:" + byteTestPath + "/" + x + ".bytes"
                ).zipWithIndex().map(lambda (x, y): (y, x))
    labelTestData = sc.textFile(
        classTestPath,
        25).zipWithIndex().map(lambda (x, y): (y, str(int(x) - 1)))
    joinTestNameLabel = nameTestData.join(labelTestData).map(lambda (x, y): y).cache()

    joinTestDocLabel = joinTestNameLabel.join(cleanDocTestData).map(lambda
                                                                    (x, y): y)
    #joinTestDocLabel.persist()
    #tfTestFormat = joinTestDocLabel.map(lambda (x,y): (x,y.split()))
    print("hashing test kenny")
    hashTestData = joinTestDocLabel.map(
        lambda (label, text): LabeledPoint(label, hashingTF.transform(text)))
    sc.broadcast(hashTestData)

    #    #NB prediction and labels and accuracy
    #    prediction_and_labels = hashTestData.map(lambda point: (model.predict(point.features), point.label))
    #    correct = prediction_and_labels.filter(lambda (predicted, actual): predicted == actual)
    #    accuracy = correct.count() / float(hashTestData.count())
    #    print "Classifier correctly predicted category " + str(accuracy * 100) + " percent of the time"

    #Random forest prediction and labels and accuracy
    print "prediction part lyndz"
    prediction1 = model1.predict(hashTestData.map(lambda x: x.features))
    labelsAndPredictions1 = hashTestData.map(lambda lp: lp.label).zip(
        prediction1)
    testErr1 = labelsAndPredictions1.filter(
        lambda (v, p): v != p).count() / float(hashTestData.count())
    print('Test Error = ' + str(testErr1))
    print('Learned classification forest model:')
    print(model1.toDebugString())

    # Save and load random forest model
    model1.save(
        sc,
        "/Users/priyanka/Desktop/project2files/myRandomForestClassificationModel"
    )
    sameModel1 = RandomForestModel.load(
        sc,
        "/Users/priyanka/Desktop/project2files/myRandomForestClassificationModel"
    )
              file=sys.stderr)
        exit(-1)

    global totalTime, startTime

    conf = (
        SparkConf().setMaster(
            "local").setAppName("SparkStreamingFaultDetection").set(
                "spark.executor.memory",
                "16g").set("spark.driver.memory",
                           "16g").set("spark.executor.instances", "4")
        #.set("spark.executor.cores", "4")
    )

    sc = SparkContext(conf=conf)
    model = RandomForestModel.load(sc, '/RandomForest1.model')

    ssc = StreamingContext(sc, 1)

    brokers, topic = sys.argv[1:]
    kvs = KafkaUtils.createDirectStream(ssc, [topic],
                                        {"metadata.broker.list": brokers})

    testErr = 0
    totalData = 0
    Order = 10

    #testing Data
    data = kvs.filter(lambda row: checkCondition(row[1]) != 'sign')

    if (data.count() > 0):
Example no. 43
import numpy as np
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql import functions as func
from pyspark.sql.types import IntegerType
from pyspark.ml.feature import StringIndexer, VectorAssembler


def applyModel(fileName, loadModelName, outlierPercentile = 100):

    sc = SparkContext( 'local', 'pyspark')
    sqlContext = SQLContext(sc)

    #########
    # load data
    #########

    data = sc.textFile(fileName)
    #extract header and remove it
    header = data.first()
    data = data.filter(lambda x:x !=header).cache()
    header = header.split('\t')
    #parse data
    data = data.map(lambda x : x.split('\t'))

    #########
    # prepare features
    #########

    df = sqlContext.createDataFrame(data, header)
    df = (df.withColumn("ADLOADINGTIME",func.regexp_replace('ADLOADINGTIME', 'null', '0').cast('float'))
         .withColumn("TIMESTAMP",func.regexp_replace('TIMESTAMP', 'null', '0').cast('int'))
         .withColumn("GEOIP_LAT",func.regexp_replace('GEOIP_LAT', 'null', '0').cast('int'))
          .withColumn("GEOIP_LNG",func.regexp_replace('GEOIP_LNG', 'null', '0').cast('int'))
          .withColumn("HOSTWINDOWHEIGHT",func.regexp_replace('HOSTWINDOWHEIGHT', 'null', '0').cast('int'))
          .withColumn("HOSTWINDOWWIDTH",func.regexp_replace('HOSTWINDOWWIDTH', 'null', '0').cast('int'))
          .withColumn("TOPMOSTREACHABLEWINDOWHEIGHT",func.regexp_replace('TOPMOSTREACHABLEWINDOWHEIGHT', 'null', '0').cast('int'))
          .withColumn("TOPMOSTREACHABLEWINDOWWIDTH",func.regexp_replace('TOPMOSTREACHABLEWINDOWWIDTH', 'null', '0').cast('int'))
         )
    thr = np.percentile(df.select("ADLOADINGTIME").rdd.collect(), outlierPercentile)
    df = df.filter(func.col('ADLOADINGTIME') < thr)
    df = df.withColumn("TOPMOSTREACHABLEWINDOWAREA", func.col("TOPMOSTREACHABLEWINDOWHEIGHT")*func.col("TOPMOSTREACHABLEWINDOWWIDTH"))
    df = df.withColumn("INTENDENTISACTUALDEVICETYPE", (func.col("ACTUALDEVICETYPE")==func.col("INTENDEDDEVICETYPE")).cast('int'))
    df = df.withColumn("COMBINEDID", 
            func.concat(
                func.col('ACCOUNTID'), 
                func.col('CAMPAIGNID'), 
                func.col('CREATIVEID'), 
                func.col('SDK')) )

    #df = df.withColumn("COMBINEDID", func.regexp_replace("COMBINEDID", '^$', 'NA'))

    df = df.withColumn("COMBINEDEXTERNALID", 
            func.concat( 
                func.regexp_replace('EXTERNALADSERVER', 'null', ''), 
                func.regexp_replace('EXTERNALPLACEMENTID', 'null', ''), 
                func.regexp_replace('EXTERNALSITEID', 'null', ''), 
                func.regexp_replace('EXTERNALSUPPLIERID', 'null', '') ))

    #df = df.withColumn("COMBINEDEXTERNALID", func.regexp_replace("COMBINEDEXTERNALID", '^$', 'NA'))

    df = df.withColumn("PLATFORMCOMBINED", 
            func.concat( 
                func.regexp_replace('PLATFORM', 'null', ''), 
                func.regexp_replace('PLATFORMVERSION', 'null', '') ))

    #df = df.withColumn("PLATFORMCOMBINED", func.regexp_replace("PLATFORMCOMBINED", '^$', 'NA'))

    df = df.withColumn("UA_OSCOMB", 
            func.concat( 
                func.regexp_replace('UA_OS', 'null', ''), 
                func.regexp_replace('UA_OSVERSION', 'null', '') ))

    #df = df.withColumn("UA_OSCOMB", func.regexp_replace("UA_OSCOMB", '^$', 'NA'))
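    # FILESJSON_SIZE: keep only digits and commas from the raw FILESJSON string,
    # then sum the remaining comma-separated numbers with the UDF below.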
    df = df.withColumn("FILESJSON_SIZE", 
                func.regexp_replace('FILESJSON', '[^,\d]', '') )

    df = df.withColumn("FILESJSON_SIZE", 
                func.regexp_replace('FILESJSON_SIZE', '^,', '') )

    df = df.withColumn("FILESJSON_SIZE", 
                func.regexp_replace('FILESJSON_SIZE', ',,', ',') )

    udf = func.udf(lambda x: int(np.fromstring(x,dtype=int, sep=',').sum()), IntegerType())
    df = df.withColumn("FILESJSON_SIZE", udf("FILESJSON_SIZE"))

    print('Loaded and prepared %d entries' % df.count())

    #########
    # keep only needed features
    #########   

    features = ['ADLOADINGTIME',
     'PLACEMENTID',
     'TIMESTAMP',
     'CREATIVETYPE',
     'UA_HARDWARETYPE',
     'UA_VENDOR',
     'UA_MODEL',
     'UA_BROWSER',
     'UA_BROWSERVERSION',
     'FILESJSON',
     'ERRORSJSON',
     'TOPMOSTREACHABLEWINDOWAREA',
     'FILESJSON_SIZE',
     'COMBINEDID',
     'COMBINEDEXTERNALID',
     'PLATFORMCOMBINED',
     'UA_OSCOMB',
     'SDK',
     'EXTERNALADSERVER'
       ]

    df = df.select(features)

    #########
    # Convert categorical features to numerical
    #########   


    featuresCat = [
     'PLACEMENTID',
     'CREATIVETYPE',
     'UA_HARDWARETYPE',
     'UA_VENDOR',
     'UA_MODEL',
     'UA_BROWSER',
     'UA_BROWSERVERSION',
     'FILESJSON',
     'ERRORSJSON',
     'COMBINEDID',
     'COMBINEDEXTERNALID',
     'PLATFORMCOMBINED',
     'UA_OSCOMB',
     'SDK',
     'EXTERNALADSERVER'
       ]

    for i in range(len(featuresCat)):

        indexer = StringIndexer(inputCol=featuresCat[i], outputCol='_'+featuresCat[i]).setHandleInvalid("skip").fit(df)
        df = indexer.transform(df).drop(featuresCat[i])
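        # Persist the fitted indexer through its underlying Java writer so the
        # same category-to-index mapping can be reapplied at prediction time
        # (a workaround for pyspark.ml versions without a public save() here).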
        writer = indexer._call_java("write")
        writer.overwrite().save("indexer_" + featuresCat[i])    

    featuresCat = [ '_' + featuresCat[i] for i in range(len(featuresCat))]    

    features = featuresCat[:]
    features.append('TIMESTAMP')    
    features.append('FILESJSON_SIZE')
    features.append('TOPMOSTREACHABLEWINDOWAREA')


    #########
    # Assemble features
    #########   


    assembler = VectorAssembler(
        inputCols=features,
        outputCol="features")

    df = assembler.transform(df)

    #########
    # Convert to labeled point
    #########   


    lp = (df.select(func.col("ADLOADINGTIME").alias("label"), func.col("features"))
      .map(lambda row: LabeledPoint(row.label, row.features)))
    lp.cache()


    #########
    # Load trained model
    #########
    
    model = RandomForestModel.load(sc, loadModelName)
    
    print('Model loaded!')
    
    predictions = model.predict(lp.map(lambda x: x.features)).collect()
    
    return predictions
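
# A minimal usage sketch for applyModel(); the input path, saved-model directory
# and percentile below are illustrative assumptions, not values taken from the
# original job.
if __name__ == '__main__':
    predictions = applyModel('/data/adloading_sample.tsv',       # assumed TSV input
                             'myRandomForestRegressionModel',    # assumed model dir
                             outlierPercentile=99)
    print('Scored %d rows' % len(predictions))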
Example 44
def main():
    sc = SparkContext(conf=SparkConf().setAppName("Random Forest"))
    sqlContext = SQLContext(sc)
    bytePath = "/Users/priyanka/Desktop/project2files/train"
    byteTestPath = "/Users/priyanka/Desktop/project2files/test"
    namePath = "/Users/priyanka/Desktop/X_train_small.txt"
    nameTestPath = "/Users/priyanka/Desktop/X_test_small.txt"
    classPath = "/Users/priyanka/Desktop/y_train_small.txt"
    classTestPath = "/Users/priyanka/Desktop/y_test_small.txt"

    #docData Output: ('file:/Users/priyanka/Desktop/project2files/train/04mcPSei852tgIKUwTJr.bytes', '00401000 20 FF 58 C0 20 FE 5C 01 F8 00 0F 8B 50 FC 06 01\r\n00401010 8C 01 FF")
    docData = sc.wholeTextFiles(
        bytePath,
        25).map(lambda (x, y): (x.encode("utf-8"), y.encode("utf-8")))
    print("docData frankie")
    docData.take(1)

    #clean docData here - remove 1st word from line and remove /r/n
    cleanDocData = docData.map(lambda (x, y): (x, clean(y.split())))

    #    #Extract bigrams
    #    print "Bigram extract priyanka"
    #    dfCleanDocData = sqlContext.createDataFrame(cleanDocData, ["label", "words"])
    #
    #
    #    ngram = NGram(inputCol="words", outputCol="ngrams")
    #    ngramDataFrame = ngram.transform(dfCleanDocData)
    #
    #    for ngrams_label in ngramDataFrame.select("ngrams", "label").take(3):
    #        print(ngrams_label)

    #try calculating tf here
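    # 257 = 16**2 + 1 hashing buckets: presumably one per possible hex byte
    # value (00-FF) plus one spare, so byte tokens rarely collide.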
    x = 16**2 + 1
    hashingTF = HashingTF(x)
    tfDocData = cleanDocData.map(lambda (x, y): (x, hashingTF.transform(y)))
    tfDocData.take(1)
    #Output format : (0, u'file:/Users/priyanka/Desktop/project2files/train/c2hn9edSNJKmw0OukrBv.bytes')
    nameData = sc.textFile(
        namePath, 25).map(lambda x: "file:" + bytePath + "/" + x + ".bytes"
                          ).zipWithIndex().map(lambda (x, y): (y, x))
    #nameData.take(5)

    #Output format: [(0, '2'), (1, '3'), (2, '2'), (3, '6'), (4, '2')]
    labelData = sc.textFile(
        classPath, 25).zipWithIndex().map(lambda (x, y): (y, str(int(x) - 1)))

    #Output format: (u'file:/Users/priyanka/Desktop/project2files/train/c2hn9edSNJKmw0OukrBv.bytes', '2'),
    joinNameLabel = nameData.join(labelData).map(lambda (x, y): y)
    #joinNameLabel.take(5)
    joinCleanDocLabel = joinNameLabel.join(tfDocData).map(lambda (x, y): y)
    hashData = joinCleanDocLabel.map(lambda
                                     (label, text): LabeledPoint(label, text))
    print "hashing TF done"
    #    model = NaiveBayes.train(hashData)

    print("generating model fliss")
    model1 = RandomForest.trainClassifier(hashData,
                                          numClasses=9,
                                          categoricalFeaturesInfo={},
                                          numTrees=50,
                                          featureSubsetStrategy="auto",
                                          impurity='gini',
                                          maxDepth=8,
                                          maxBins=32)
    # observed test error with these settings: 31.36%
    #==============================================================================
    # Testing starts here
    #==============================================================================
    docTestData = sc.wholeTextFiles(
        byteTestPath,
        25).map(lambda (x, y): (x.encode("utf-8"), y.encode("utf-8")))
    #docTestData.take(1)

    cleanDocTestData = docTestData.map(lambda (x, y): (x, clean(y.split())))
    tfDocTestData = cleanDocTestData.map(lambda (x, y):
                                         (x, hashingTF.transform(y)))

    nameTestData = sc.textFile(
        nameTestPath,
        25).map(lambda x: "file:" + byteTestPath + "/" + x + ".bytes"
                ).zipWithIndex().map(lambda (x, y): (y, x))
    labelTestData = sc.textFile(
        classTestPath,
        25).zipWithIndex().map(lambda (x, y): (y, str(int(x) - 1)))
    joinTestNameLabel = nameTestData.join(labelTestData).map(lambda (x, y): y)

    joinTestDocLabel = joinTestNameLabel.join(tfDocTestData).map(lambda
                                                                 (x, y): y)

    print("hashing test kenny")
    hashTestData = joinTestDocLabel.map(
        lambda (label, text): LabeledPoint(label, text))
    hashTestData.persist()

    #Random forest prediction and labels and accuracy
    print "prediction part lyndz"
    prediction1 = model1.predict(hashTestData.map(lambda x: x.features))
    labelsAndPredictions1 = hashTestData.map(lambda lp: lp.label).zip(
        prediction1)
    testErr1 = labelsAndPredictions1.filter(
        lambda (v, p): v != p).count() / float(hashTestData.count())
    print('Test Error = ' + str(testErr1))
    print('Learned classification forest model:')
    print(model1.toDebugString())

    # Save and load random forest model
    model1.save(
        sc,
        "/Users/priyanka/Desktop/project2files/myRandomForestClassificationModel1"
    )
    sameModel1 = RandomForestModel.load(
        sc,
        "/Users/priyanka/Desktop/project2files/myRandomForestClassificationModel1"
    )
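
# The clean() helper used above is not part of this snippet. A minimal sketch of
# what it might do, inferred from the inline comment "remove 1st word from line
# and remove /r/n" -- an assumption, not the original implementation:
def clean(tokens):
    # keep the 2-character hex byte tokens; the 8-character line-address tokens
    # are dropped, and \r\n is already gone after split()
    return [t for t in tokens if len(t) == 2]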
Example 45
def run(jobNm, sc, sqlContext, inputFile, dictFile,
        bByDate=False,
        inputPartitions=-1,
        sNum=30,
        modelPath=None,
        bWriteMonitor=False,
        writeFileOutput=False):

    # import monitoring if needed
    if bWriteMonitor==True:
        import plotting

    #Create monitoring plot and associated vectors
    mPX = range(8)
    mPY = [0.]*8
    mSL = ["Create Feature Map", "Read in Data", "Aggregate for M.L.", "Read in Model", "Apply Model", "Output Results"]
    mInd = 0

    t0 = time.time()
    #Find the word document frequency for the corpus
    #this is used for an idf score used in feature vector formation
    t1 = time.time()
    revLookup = []
    fDict = None
    if dictFile[:3] == 's3:' or dictFile[:5] == 'hdfs:':
        # read dict file from hdfs
        fDict = sc.textFile(dictFile).collect()
    else:
        # read from local file
        fDict = open(dictFile,"r")
    for line in fDict:
        terms = line.split("\t")
        revLookup.append(terms[0])

    nVecLen = len(revLookup)
    t2 = time.time()
    diff = t2-t1
    print "Time to read dict", diff

    if bWriteMonitor:
        mPY[mInd] = diff
        mInd = mInd+1
        plotting.updateMonitorPlot(mPX, mPY, mSL, jobNm)

    #Read in data and filter out entries with no valid words
    t1 = time.time()
    print 'inputFile ',inputFile
    print 'inputPartitions ',inputPartitions
    records = aggregatedComparison.loadPoint(sc, sqlContext, inputFile, inputPartitions)
    nGoodTweets = records.count()
    t2 = time.time()
    print "Number of good tweets:",nGoodTweets
    diff = t2-t1
    print "Time to read in data", diff
    if bWriteMonitor:
        mPY[mInd] = diff
        mInd = mInd+1
        plotting.updateMonitorPlot(mPX, mPY, mSL, jobNm)

    #Format data for ML input
    t1 = time.time()
    mlApply = None
    if bByDate:
        mlApply = records.map(lambda x: (x.key, [LabeledPoint(1.0, x.vector), x.lat, x.lon, x.size, x.binSize, x.dt])).cache()
    else:
        mlApply = records.map(lambda x: (x.key, [LabeledPoint(1.0, x.vector), x.lat, x.lon, x.size, x.binSize])).cache()
    nApp = mlApply.count()
    t2 = time.time()
    print "Number of collapsed points:", nApp
    diff = t2-t1
    print "Time to map points", diff
    if bWriteMonitor:
        mPY[mInd] = diff
        mInd = mInd+1
        plotting.updateMonitorPlot(mPX, mPY, mSL, jobNm)

    # Read in Model
    t1 = time.time()
    model_Tree = RandomForestModel.load(sc, modelPath)
    t2 = time.time()
    diff = t2-t1
    print "Time to read in model", diff
    if bWriteMonitor:
        mPY[mInd] = diff
        mInd = mInd+1
        plotting.updateMonitorPlot(mPX, mPY, mSL, jobNm)

    # apply model
    t1 = time.time()
    predictions_Tree = model_Tree.predict(mlApply.map(lambda x: x[1][0].features))
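    # Pair each aggregated record (key plus metadata) with its predicted label
    # so the location-based output step below can use both.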
    vecAndPredictions = mlApply.zip(predictions_Tree)
    vecAndPredictions.cache()
    vecAndPredictions.count()
    t2 = time.time()
    diff = t2-t1
    print "Time to apply model: ", diff
    if bWriteMonitor:
        mPY[mInd] = diff
        mInd = mInd+1
        plotting.updateMonitorPlot(mPX, mPY, mSL, jobNm)

    #Get the results
    t1 = time.time()
    resultSet = clustering.locationBasedOutputV2(bByDate, jobNm, vecAndPredictions, sNum, revLookup, writeFileOutput, [])
    t2 = time.time()
    diff = t2-t1
    print "Time to create json objects for output: ", diff
    if bWriteMonitor:
        mPY[mInd] = diff
        mInd = mInd+1
        plotting.updateMonitorPlot(mPX, mPY, mSL, jobNm)

    diff = time.time() - t0
    print "<----------BOOM GOES THE DYNOMITE!---------->"
    print "< total number of tweets:,", nGoodTweets
    print "< total process Time:", diff
    print "< total idf vector length:", nVecLen
    print "<------------------------------------------->"
    return resultSet
Example 46
    def test_classification(self):
        from pyspark.mllib.classification import LogisticRegressionWithSGD, SVMWithSGD, NaiveBayes
        from pyspark.mllib.tree import DecisionTree, DecisionTreeModel, RandomForest, \
            RandomForestModel, GradientBoostedTrees, GradientBoostedTreesModel
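        # A tiny, linearly separable dataset: the label is 1.0 exactly when
        # features 1 and 2 are non-zero, so every classifier below should
        # separate it easily.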
        data = [
            LabeledPoint(0.0, [1, 0, 0]),
            LabeledPoint(1.0, [0, 1, 1]),
            LabeledPoint(0.0, [2, 0, 0]),
            LabeledPoint(1.0, [0, 2, 1])
        ]
        rdd = self.sc.parallelize(data)
        features = [p.features.tolist() for p in data]

        temp_dir = tempfile.mkdtemp()

        lr_model = LogisticRegressionWithSGD.train(rdd, iterations=10)
        self.assertTrue(lr_model.predict(features[0]) <= 0)
        self.assertTrue(lr_model.predict(features[1]) > 0)
        self.assertTrue(lr_model.predict(features[2]) <= 0)
        self.assertTrue(lr_model.predict(features[3]) > 0)

        svm_model = SVMWithSGD.train(rdd, iterations=10)
        self.assertTrue(svm_model.predict(features[0]) <= 0)
        self.assertTrue(svm_model.predict(features[1]) > 0)
        self.assertTrue(svm_model.predict(features[2]) <= 0)
        self.assertTrue(svm_model.predict(features[3]) > 0)

        nb_model = NaiveBayes.train(rdd)
        self.assertTrue(nb_model.predict(features[0]) <= 0)
        self.assertTrue(nb_model.predict(features[1]) > 0)
        self.assertTrue(nb_model.predict(features[2]) <= 0)
        self.assertTrue(nb_model.predict(features[3]) > 0)

        categoricalFeaturesInfo = {0: 3}  # feature 0 has 3 categories
        dt_model = DecisionTree.trainClassifier(
            rdd,
            numClasses=2,
            categoricalFeaturesInfo=categoricalFeaturesInfo,
            maxBins=4)
        self.assertTrue(dt_model.predict(features[0]) <= 0)
        self.assertTrue(dt_model.predict(features[1]) > 0)
        self.assertTrue(dt_model.predict(features[2]) <= 0)
        self.assertTrue(dt_model.predict(features[3]) > 0)

        dt_model_dir = os.path.join(temp_dir, "dt")
        dt_model.save(self.sc, dt_model_dir)
        same_dt_model = DecisionTreeModel.load(self.sc, dt_model_dir)
        self.assertEqual(same_dt_model.toDebugString(),
                         dt_model.toDebugString())

        rf_model = RandomForest.trainClassifier(
            rdd,
            numClasses=2,
            categoricalFeaturesInfo=categoricalFeaturesInfo,
            numTrees=10,
            maxBins=4,
            seed=1)
        self.assertTrue(rf_model.predict(features[0]) <= 0)
        self.assertTrue(rf_model.predict(features[1]) > 0)
        self.assertTrue(rf_model.predict(features[2]) <= 0)
        self.assertTrue(rf_model.predict(features[3]) > 0)

        rf_model_dir = os.path.join(temp_dir, "rf")
        rf_model.save(self.sc, rf_model_dir)
        same_rf_model = RandomForestModel.load(self.sc, rf_model_dir)
        self.assertEqual(same_rf_model.toDebugString(),
                         rf_model.toDebugString())

        gbt_model = GradientBoostedTrees.trainClassifier(
            rdd,
            categoricalFeaturesInfo=categoricalFeaturesInfo,
            numIterations=4)
        self.assertTrue(gbt_model.predict(features[0]) <= 0)
        self.assertTrue(gbt_model.predict(features[1]) > 0)
        self.assertTrue(gbt_model.predict(features[2]) <= 0)
        self.assertTrue(gbt_model.predict(features[3]) > 0)

        gbt_model_dir = os.path.join(temp_dir, "gbt")
        gbt_model.save(self.sc, gbt_model_dir)
        same_gbt_model = GradientBoostedTreesModel.load(self.sc, gbt_model_dir)
        self.assertEqual(same_gbt_model.toDebugString(),
                         gbt_model.toDebugString())

        try:
            rmtree(temp_dir)
        except OSError:
            pass
# Train the classifier; numClasses is assumed to be 3, matching the three
# posture labels used further below (upright / back bent / stooped).
model = RandomForest.trainClassifier(trainingData,
                                     numClasses=3,
                                     categoricalFeaturesInfo={},
                                     numTrees=6,
                                     featureSubsetStrategy="auto",
                                     impurity='gini',
                                     maxDepth=4,
                                     maxBins=32)

elapsedTime = time() - startTime

print "Classifier trained in {} seconds".format(round(elapsedTime,3))
# Save the model for use in evaluating readings
model.save(sc, "models/IoTBackBraceRandomForest.model")

# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testErr = labelsAndPredictions.filter(lambda (v, p): v != p).count() / float(testData.count())
print('Test Error = ' + str(testErr))
print('Learned classification forest model:')
print(model.toDebugString())

loadedModel = RandomForestModel.load(sc, "models/IoTBackBraceRandomForest.model")

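# Sweep a range of single-feature sensor readings through the reloaded model
# and map each predicted class index to a posture label.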
positions = {0: "upright",
             1: "back bent",
             2: "stooped"}

for i in range(-50, 10):
    prediction = loadedModel.predict([i])
    print str(i) + " => " + str(positions[prediction])