def test_mllib_model(spark_context):
    # model, x_train, y_train, x_test, y_test and nb_classes are assumed to be
    # defined at module scope in the file this test was excerpted from.

    # Build RDD from numpy features and labels
    lp_rdd = to_labeled_point(spark_context, x_train, y_train, categorical=True)

    # Initialize SparkModel from Keras model and Spark context
    spark_model = SparkMLlibModel(model=model, frequency='epoch', mode='synchronous')

    # Train Spark model
    spark_model.fit(lp_rdd, epochs=5, batch_size=32, verbose=0,
                    validation_split=0.1, categorical=True, nb_classes=nb_classes)

    # Evaluate Spark model by evaluating the underlying model
    score = spark_model.master_network.evaluate(x_test, y_test, verbose=2)
    print('Test accuracy:', score[1])
from tensorflow.keras.optimizers import RMSprop

from elephas.spark_model import SparkMLlibModel
from elephas.utils.rdd_utils import to_labeled_point


def test_mllib_model(spark_context, classification_model, mnist_data):
    rms = RMSprop()
    classification_model.compile(rms, 'categorical_crossentropy', ['acc'])
    x_train, y_train, x_test, y_test = mnist_data
    x_train = x_train[:1000]
    y_train = y_train[:1000]
    nb_classes = y_train.shape[1]  # number of classes from the one-hot labels

    # Build RDD from numpy features and labels
    lp_rdd = to_labeled_point(spark_context, x_train, y_train, categorical=True)

    # Initialize SparkModel from tensorflow.keras model and Spark context
    spark_model = SparkMLlibModel(
        model=classification_model, frequency='epoch', mode='synchronous')

    # Train Spark model
    spark_model.fit(lp_rdd, epochs=5, batch_size=32, verbose=0,
                    validation_split=0.1, categorical=True, nb_classes=nb_classes)

    # Evaluate Spark model by evaluating the underlying model
    score = spark_model.master_network.evaluate(x_test, y_test, verbose=2)
    assert score
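The test above relies on pytest fixtures that are not part of this excerpt. A minimal sketch of what spark_context, classification_model and mnist_data could look like follows; only the fixture names come from the test signature, their bodies are illustrative assumptions.

import pytest
from pyspark import SparkConf, SparkContext
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical


@pytest.fixture(scope='session')
def spark_context():
    # Local Spark context shared by the test session (hypothetical configuration)
    conf = SparkConf().setAppName('elephas_tests').setMaster('local[2]')
    sc = SparkContext(conf=conf)
    yield sc
    sc.stop()


@pytest.fixture
def classification_model():
    # Small MLP over flattened 28x28 MNIST images with 10 output classes
    model = Sequential()
    model.add(Dense(128, input_dim=784, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    return model


@pytest.fixture
def mnist_data():
    # Flattened, normalized MNIST with one-hot encoded labels
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
    x_test = x_test.reshape(-1, 784).astype('float32') / 255.0
    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)
    return x_train, y_train, x_test, y_test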
model.add(Activation('softmax'))

# Compile model
rms = RMSprop()
model.compile(rms, "categorical_crossentropy", ['acc'])

# Create Spark context
conf = SparkConf().setAppName('Mnist_Spark_MLP').setMaster('local[8]')
sc = SparkContext(conf=conf)

# Build RDD from numpy features and labels
lp_rdd = to_labeled_point(sc, x_train, y_train, categorical=True)

# Initialize SparkModel from tensorflow.keras model and Spark context
spark_model = SparkMLlibModel(model=model, frequency='epoch', mode='synchronous')

# Train Spark model
spark_model.fit(lp_rdd, epochs=5, batch_size=32, verbose=0,
                validation_split=0.1, categorical=True, nb_classes=nb_classes)

# Evaluate Spark model by evaluating the underlying model
score = spark_model.master_network.evaluate(x_test, y_test, verbose=2)
print('Test accuracy:', score[1])
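The example above starts partway through the original script. The sketch below is an assumed reconstruction of the omitted preamble (imports, MNIST preprocessing and the earlier MLP layers); the exact layer sizes and preprocessing in the original may differ.

from pyspark import SparkConf, SparkContext
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Activation, Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import to_categorical

from elephas.spark_model import SparkMLlibModel
from elephas.utils.rdd_utils import to_labeled_point

# Load and preprocess MNIST (illustrative)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0
nb_classes = 10
y_train = to_categorical(y_train, nb_classes)
y_test = to_categorical(y_test, nb_classes)

# MLP whose final softmax activation is added in the fragment above
model = Sequential()
model.add(Dense(128, input_dim=784))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(nb_classes))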
# `cv` is a CountVectorizer; its earlier arguments (e.g. inputCol) are truncated in this excerpt
cv = sf.CountVectorizer(outputCol="tf_features", vocabSize=input_dim)

# IDF
idf = sf.IDF(inputCol="tf_features", outputCol="features")
label_string = sf.StringIndexer(inputCol="first_label", outputCol="label")
pipeline_dl = Pipeline(stages=[cv, idf, label_string])
df = pipeline_dl.fit(training_set).transform(training_set)
df = df.rdd.map(lambda x: LabeledPoint(
    x['label'], MLLibVectors.fromML(x['features'])))
logger.info("Pipeline created ...")
logger.info("Transforms the text into tf idf RDD ...")

model = create_keras_model(input_dim, output_dim)
logger.info("Starts Training ...")
spark_model = SparkMLlibModel(model=model, frequency='epoch', mode='asynchronous',
                              parameter_server_mode='socket')
spark_model.fit(df, epochs=epochs, batch_size=132, verbose=1,
                validation_split=0.2, categorical=True, nb_classes=output_dim)
logger.info("Training done")
spark_model._master_network.save(save_dir + model_dir + "/" + filename)
logger.info("Program ended successfully! Find the model at: " +
            save_dir + model_dir + "/" + filename)
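The network saved at the end of the script above is a regular Keras model file, so it can be reloaded outside Spark. A minimal sketch of doing so is shown below; `save_dir`, `model_dir` and `filename` are the variables used in the script, while `tfidf_features` is a hypothetical name for new inputs.

from tensorflow.keras.models import load_model

# Reload the network written by spark_model._master_network.save(...) above
model_path = save_dir + model_dir + "/" + filename
restored_model = load_model(model_path)

# `tfidf_features` stands for a numpy array produced by the same
# CountVectorizer/IDF pipeline that was fitted during training
predictions = restored_model.predict(tfidf_features)
predicted_labels = predictions.argmax(axis=-1)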