# Assumed imports for these snippets (the source is a set of excerpts and
# omits the module headers):
import numpy as np
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Embedding
from pyspark.ml import Pipeline
from pyspark.sql.types import DoubleType
from pyspark.mllib.evaluation import MulticlassMetrics
from elephas.ml_model import ElephasEstimator
from elephas.ml.adapter import to_data_frame
from elephas.spark_model import SparkModel


def test_functional_model(spark_context, classification_model_functional, mnist_data):
    batch_size = 64
    epochs = 1

    x_train, y_train, x_test, y_test = mnist_data
    x_train = x_train[:1000]
    y_train = y_train[:1000]
    df = to_data_frame(spark_context, x_train, y_train, categorical=True)
    test_df = to_data_frame(spark_context, x_test, y_test, categorical=True)

    sgd = optimizers.SGD()
    sgd_conf = optimizers.serialize(sgd)
    estimator = ElephasEstimator()
    estimator.set_keras_model_config(classification_model_functional.to_yaml())
    estimator.set_optimizer_config(sgd_conf)
    estimator.set_mode("synchronous")
    estimator.set_loss("categorical_crossentropy")
    estimator.set_metrics(['acc'])
    estimator.set_epochs(epochs)
    estimator.set_batch_size(batch_size)
    estimator.set_validation_split(0.1)
    estimator.set_categorical_labels(True)
    estimator.set_nb_classes(10)

    pipeline = Pipeline(stages=[estimator])
    fitted_pipeline = pipeline.fit(df)

    prediction = fitted_pipeline.transform(test_df)
    pnl = prediction.select("label", "prediction")
    pnl.show(100)

    prediction_and_label = pnl.rdd.map(lambda row: (row.label, row.prediction))
    metrics = MulticlassMetrics(prediction_and_label)
    print(metrics.accuracy)
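# The fixtures above (spark_context, classification_model_functional,
# mnist_data) are not shown in this excerpt. A minimal sketch of what the
# functional-API model fixture might look like (an assumption, not the
# project's actual conftest.py):
import pytest
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model


@pytest.fixture
def classification_model_functional():
    inputs = Input(shape=(784,))
    x = Dense(128, activation='relu')(inputs)
    outputs = Dense(10, activation='softmax')(x)
    return Model(inputs=inputs, outputs=outputs)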
def test_spark_ml_model(spark_context):
    # NOTE: x_train, y_train, x_test, y_test, model, epochs, batch_size and
    # nb_classes are not defined in this excerpt; they presumably come from
    # module-level setup (see the sketch after this function).
    df = to_data_frame(spark_context, x_train, y_train, categorical=True)
    test_df = to_data_frame(spark_context, x_test, y_test, categorical=True)

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    sgd_conf = optimizers.serialize(sgd)

    # Initialize Spark ML Estimator
    estimator = ElephasEstimator()
    estimator.set_keras_model_config(model.to_yaml())
    estimator.set_optimizer_config(sgd_conf)
    estimator.set_mode("synchronous")
    estimator.set_loss("categorical_crossentropy")
    estimator.set_metrics(['acc'])
    estimator.set_epochs(epochs)
    estimator.set_batch_size(batch_size)
    estimator.set_validation_split(0.1)
    estimator.set_categorical_labels(True)
    estimator.set_nb_classes(nb_classes)

    # Fitting a model returns a Transformer
    pipeline = Pipeline(stages=[estimator])
    fitted_pipeline = pipeline.fit(df)

    # Evaluate Spark model by evaluating the underlying model
    prediction = fitted_pipeline.transform(test_df)
    pnl = prediction.select("label", "prediction")
    pnl.show(100)

    prediction_and_label = pnl.rdd.map(lambda row: (row.label, row.prediction))
    metrics = MulticlassMetrics(prediction_and_label)
    # precision()/recall() without a label argument were removed in Spark 3;
    # the weighted variants are the closest drop-in replacements.
    print(metrics.weightedPrecision)
    print(metrics.weightedRecall)
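# A minimal sketch of the setup the excerpt above assumes (names and shapes
# are assumptions, not part of the original source): load MNIST, flatten the
# images, and define a small Sequential classifier.
from tensorflow.keras.datasets import mnist

batch_size = 64
epochs = 1
nb_classes = 10

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0

model = Sequential([
    Dense(128, activation='relu', input_shape=(784,)),
    Dense(nb_classes, activation='softmax'),
])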
def make_model(data):
    data.show()
    data = data.dropna()
    nb_classes = data.select("label").distinct().count()
    input_dim = len(data.select("features").first()[0])
    print(nb_classes, input_dim)

    model = Sequential()
    model.add(Embedding(input_dim=input_dim, output_dim=100))
    # NOTE: Embedding outputs a 3-D tensor; without the LSTM below (or some
    # pooling/flattening layer) the Dense/softmax stack never reduces to one
    # prediction per sample. See the pooling sketch after this function.
    # model.add(LSTM(64, return_sequences=False, dropout=0.1, recurrent_dropout=0.1))
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))

    # sgd = optimizers.SGD(lr=0.1)
    # model.compile(sgd, 'categorical_crossentropy', ['acc'])
    model.compile(loss='binary_crossentropy', optimizer='adam')
    # model.compile(loss='categorical_crossentropy', optimizer='adam')

    # Unused here: the ElephasEstimator below drives training instead.
    spark_model = SparkModel(model, frequency='epoch', mode='asynchronous')

    adam = optimizers.Adam(lr=0.01)
    opt_conf = optimizers.serialize(adam)

    estimator = ElephasEstimator()
    estimator.setFeaturesCol("features")
    estimator.setLabelCol("label")
    estimator.set_keras_model_config(model.to_yaml())
    estimator.set_categorical_labels(True)
    estimator.set_nb_classes(nb_classes)
    estimator.set_num_workers(1)
    estimator.set_epochs(20)
    estimator.set_batch_size(128)
    estimator.set_verbosity(1)
    estimator.set_validation_split(0.15)
    estimator.set_optimizer_config(opt_conf)
    estimator.set_mode("synchronous")
    estimator.set_loss("categorical_crossentropy")
    estimator.set_metrics(['acc'])
    # estimator = ElephasEstimator(model, epochs=20, batch_size=32,
    #                              frequency='batch', mode='asynchronous', nb_classes=1)

    pipeline = Pipeline(stages=[estimator])
    # fitted_model = estimator.fit(data)
    # prediction = fitted_model.transform(data)
    fitted_pipeline = pipeline.fit(data)  # Fit model to data

    prediction = fitted_pipeline.transform(data)  # Evaluate on train data.
    # prediction = fitted_pipeline.transform(test_df)  # <-- The same code evaluates test data.
    pnl = prediction.select("text", "prediction")
    pnl.show(100)

    # MulticlassMetrics needs numeric (label, prediction) pairs, so evaluate
    # against the label column, not the raw text. DataFrames also have no
    # .map in Spark 2+; go through .rdd.
    pnl = prediction.select("label", "prediction")
    pnl.show(100)
    prediction_and_label = pnl.rdd.map(lambda row: (row.label, row.prediction))
    metrics = MulticlassMetrics(prediction_and_label)
    print(metrics.weightedPrecision)  # precision() without a label is gone in Spark 3
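# A minimal sketch of the 3-D -> 2-D fix mentioned above, assuming average
# pooling over the sequence dimension is acceptable (GlobalAveragePooling1D
# is stock Keras; the choice of pooling layer is an assumption):
from tensorflow.keras.layers import GlobalAveragePooling1D


def make_pooled_model(input_dim, nb_classes):
    model = Sequential()
    model.add(Embedding(input_dim=input_dim, output_dim=100))
    model.add(GlobalAveragePooling1D())  # collapse (batch, seq, 100) to (batch, 100)
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    return model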
def test_spark_ml_model_classification(spark_context, classification_model, mnist_data):
    batch_size = 64
    nb_classes = 10
    epochs = 1

    x_train, y_train, x_test, y_test = mnist_data
    x_train = x_train[:1000]
    y_train = y_train[:1000]
    df = to_data_frame(spark_context, x_train, y_train, categorical=True)
    test_df = to_data_frame(spark_context, x_test, y_test, categorical=True)

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    sgd_conf = optimizers.serialize(sgd)

    # Initialize Spark ML Estimator
    estimator = ElephasEstimator()
    estimator.set_keras_model_config(classification_model.to_yaml())
    estimator.set_optimizer_config(sgd_conf)
    estimator.set_mode("synchronous")
    estimator.set_loss("categorical_crossentropy")
    estimator.set_metrics(['acc'])
    estimator.set_epochs(epochs)
    estimator.set_batch_size(batch_size)
    estimator.set_validation_split(0.1)
    estimator.set_categorical_labels(True)
    estimator.set_nb_classes(nb_classes)

    # Fitting a model returns a Transformer
    pipeline = Pipeline(stages=[estimator])
    fitted_pipeline = pipeline.fit(df)

    # Evaluate Spark model by evaluating the underlying model
    prediction = fitted_pipeline.transform(test_df)
    pnl = prediction.select("label", "prediction")
    pnl.show(100)

    # Since the prediction in a multiclass problem is a vector of class
    # probabilities, we need to compute the argmax; the cast to double is
    # only necessary for MulticlassMetrics. (argmax is a UDF assumed to be
    # defined elsewhere; see the sketch after this test.)
    pnl = pnl.select(
        'label', argmax('prediction').astype(DoubleType()).alias('prediction'))
    prediction_and_label = pnl.rdd.map(lambda row: (row.label, row.prediction))
    metrics = MulticlassMetrics(prediction_and_label)
    print(metrics.accuracy)
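# A minimal sketch of an argmax column helper as used above, assuming the
# prediction column holds an array/vector of per-class probabilities (the
# original project may define this helper differently):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType

argmax = udf(lambda v: int(np.argmax(list(v))), IntegerType())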
def test_batch_predict_classes_probability(spark_context, classification_model, mnist_data):
    batch_size = 64
    nb_classes = 10
    epochs = 1

    x_train, y_train, x_test, y_test = mnist_data
    x_train = x_train[:1000]
    y_train = y_train[:1000]
    df = to_data_frame(spark_context, x_train, y_train, categorical=True)
    test_df = to_data_frame(spark_context, x_test, y_test, categorical=True)

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    sgd_conf = optimizers.serialize(sgd)

    # Initialize Spark ML Estimator
    estimator = ElephasEstimator()
    estimator.set_keras_model_config(classification_model.to_yaml())
    estimator.set_optimizer_config(sgd_conf)
    estimator.set_mode("synchronous")
    estimator.set_loss("categorical_crossentropy")
    estimator.set_metrics(['acc'])
    estimator.set_epochs(epochs)
    estimator.set_batch_size(batch_size)
    estimator.set_validation_split(0.1)
    estimator.set_categorical_labels(True)
    estimator.set_nb_classes(nb_classes)

    # Fitting the estimator returns a Transformer
    fitted_model = estimator.fit(df)
    results = fitted_model.transform(test_df)

    # Set inference batch size and transform again on the same test_df
    inference_batch_size = int(len(y_test) / 10)
    fitted_model.set_params(inference_batch_size=inference_batch_size)
    fitted_model.set_params(outputCol="prediction_via_batch_inference")
    results_with_batch_prediction = fitted_model.transform(results)

    # We should have an array of 10 elements in the prediction column,
    # since we have 10 classes and therefore 10 probabilities.
    results_np = results_with_batch_prediction.take(1)[0]
    assert len(results_np.prediction) == 10
    assert len(results_np.prediction_via_batch_inference) == 10
    assert np.array_equal(results_np.prediction, results_np.prediction_via_batch_inference)
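# An optional sanity check one could add to the test above, assuming each
# prediction row is a softmax probability distribution (sums to ~1).
# `results_df` stands for any DataFrame produced by fitted_model.transform(...):
def assert_valid_probabilities(results_df, n_classes=10):
    for row in results_df.take(5):
        assert len(row.prediction) == n_classes
        assert np.isclose(sum(row.prediction), 1.0)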
def test_save_pipeline(spark_context, classification_model):
    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    sgd_conf = optimizers.serialize(sgd)

    # Initialize Spark ML Estimator
    estimator = ElephasEstimator()
    estimator.set_keras_model_config(classification_model.to_yaml())
    estimator.set_optimizer_config(sgd_conf)
    estimator.set_mode("synchronous")
    estimator.set_loss("categorical_crossentropy")
    estimator.set_metrics(['acc'])
    estimator.set_epochs(10)
    estimator.set_batch_size(10)
    estimator.set_validation_split(0.1)
    estimator.set_categorical_labels(True)
    estimator.set_nb_classes(10)

    # Persist the (unfitted) pipeline to disk
    pipeline = Pipeline(stages=[estimator])
    pipeline.save('tmp')
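# A round-trip sketch: a saved pipeline can presumably be restored with the
# standard PySpark ML reader (Pipeline.load is stock PySpark; the path is the
# one used in the test above):
loaded_pipeline = Pipeline.load('tmp')
assert loaded_pipeline.getStages()  # the ElephasEstimator stage survives the round trip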
def test_predict_classes_probability(spark_context, classification_model, mnist_data):
    batch_size = 64
    nb_classes = 10
    epochs = 1

    x_train, y_train, x_test, y_test = mnist_data
    x_train = x_train[:1000]
    y_train = y_train[:1000]
    df = to_data_frame(spark_context, x_train, y_train, categorical=True)
    test_df = to_data_frame(spark_context, x_test, y_test, categorical=True)

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    sgd_conf = optimizers.serialize(sgd)

    # Initialize Spark ML Estimator
    estimator = ElephasEstimator()
    estimator.set_keras_model_config(classification_model.to_yaml())
    estimator.set_optimizer_config(sgd_conf)
    estimator.set_mode("synchronous")
    estimator.set_loss("categorical_crossentropy")
    estimator.set_metrics(['acc'])
    # Return the full probability vector instead of a single class
    estimator.set_predict_classes(False)
    estimator.set_epochs(epochs)
    estimator.set_batch_size(batch_size)
    estimator.set_validation_split(0.1)
    estimator.set_categorical_labels(True)
    estimator.set_nb_classes(nb_classes)

    # Fitting a model returns a Transformer
    pipeline = Pipeline(stages=[estimator])
    fitted_pipeline = pipeline.fit(df)
    results = fitted_pipeline.transform(test_df)

    # We should have an array of 10 elements in the prediction column,
    # since we have 10 classes and therefore 10 probabilities.
    assert len(results.take(1)[0].prediction) == 10
# NOTE: this fragment has no enclosing function in the source; model, epochs,
# batch_size, nb_classes, df and test_df are assumed to be defined earlier
# (as in the MNIST setup sketched above).
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
sgd_conf = optimizers.serialize(sgd)

# Initialize Spark ML Estimator
estimator = ElephasEstimator()
estimator.set_keras_model_config(model.to_yaml())
estimator.set_optimizer_config(sgd_conf)
estimator.set_mode("synchronous")
estimator.set_loss("categorical_crossentropy")
estimator.set_metrics(['acc'])
estimator.set_epochs(epochs)
estimator.set_batch_size(batch_size)
estimator.set_validation_split(0.1)
estimator.set_categorical_labels(True)
estimator.set_nb_classes(nb_classes)

# Fitting a model returns a Transformer
pipeline = Pipeline(stages=[estimator])
fitted_pipeline = pipeline.fit(df)

# Evaluate Spark model by evaluating the underlying model
prediction = fitted_pipeline.transform(test_df)
pnl = prediction.select("label", "prediction")
pnl.show(100)

prediction_and_label = pnl.rdd.map(lambda row: (row.label, row.prediction))
metrics = MulticlassMetrics(prediction_and_label)
# As above: precision()/recall() without a label were removed in Spark 3.
print(metrics.weightedPrecision)
print(metrics.weightedRecall)
model.compile(loss='categorical_crossentropy', optimizer='adam')

# Initialize Elephas Spark ML Estimator (elephas_optimizers is the legacy
# elephas.optimizers module; newer versions pass a serialized Keras
# optimizer config instead)
adagrad = elephas_optimizers.Adagrad()
estimator = ElephasEstimator()
estimator.setFeaturesCol("scaled_features")
estimator.setLabelCol("index_category")
estimator.set_keras_model_config(model.to_yaml())
estimator.set_optimizer_config(adagrad.get_config())
estimator.set_nb_epoch(10)  # legacy API; newer versions use set_epochs
estimator.set_batch_size(128)
estimator.set_num_workers(4)
estimator.set_verbosity(0)
estimator.set_validation_split(0.15)
estimator.set_categorical_labels(True)
estimator.set_nb_classes(nb_classes)

# Fitting a model returns a Transformer; string_indexer and scaler are
# preprocessing stages assumed to be defined earlier (see the sketch below).
pipeline = Pipeline(stages=[string_indexer, scaler, estimator])
fitted_pipeline = pipeline.fit(train_df)

from pyspark.mllib.evaluation import MulticlassMetrics

# Evaluate Spark model
prediction = fitted_pipeline.transform(train_df)
pnl = prediction.select("index_category", "prediction")
pnl.show(100)
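# A minimal sketch of the two preprocessing stages assumed above, using stock
# PySpark ML (the output column names match the estimator configuration; the
# input column names and scaler parameters are assumptions):
from pyspark.ml.feature import StringIndexer, StandardScaler

string_indexer = StringIndexer(inputCol="category", outputCol="index_category")
scaler = StandardScaler(inputCol="features", outputCol="scaled_features",
                        withMean=True, withStd=True)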
# NOTE: this fragment begins partway through a model.compile(...) call; the
# preceding arguments are elided in the source.
              optimizer='rmsprop',
              metrics=['accuracy'])

# Define the Elephas optimizer (which tells the model how to aggregate
# updates on the Spark master)
adadelta = elephas_optimizers.Adadelta()

# Initialize SparkML Estimator and set all relevant properties
estimator = ElephasEstimator()
estimator.setFeaturesCol("features")  # These two come directly from pyspark,
estimator.setLabelCol("label")        # hence the camel case. Sorry :)
estimator.set_keras_model_config(model.to_yaml())  # Provide serialized Keras model
estimator.set_optimizer_config(adadelta.get_config())  # Provide serialized Elephas optimizer
estimator.set_categorical_labels(True)
estimator.set_nb_classes(2)
estimator.set_num_workers(1)  # We just use one worker here. Feel free to adapt it.
estimator.set_nb_epoch(20)
estimator.set_batch_size(128)
estimator.set_verbosity(1)
estimator.set_validation_split(0.15)

fitted_model = estimator.fit(train_df)
prediction = fitted_model.transform(test_df)
pnl = prediction.select("label", "prediction")
pnl.show(100)

# from pyspark.ml import Pipeline
# pipeline = Pipeline(stages=[estimator])
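# A minimal sketch of the train_df/test_df the fragment above assumes: a
# binary-labeled DataFrame with a dense feature-vector column (the column
# names match the estimator defaults; the data itself is made up purely for
# illustration):
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
train_df = spark.createDataFrame(
    [(Vectors.dense([0.1, 0.2, 0.3]), 0.0),
     (Vectors.dense([0.9, 0.8, 0.7]), 1.0)],
    ["features", "label"])
test_df = train_df  # stand-in; in practice, a held-out split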