from pyspark.ml import Pipeline
from pyspark.mllib.evaluation import MulticlassMetrics
from elephas.ml_model import ElephasEstimator
from elephas import optimizers as elephas_optimizers

# Define the Elephas optimizer used to aggregate worker updates
adagrad = elephas_optimizers.Adagrad()

# Initialize the Spark ML Estimator and set its training parameters
estimator = ElephasEstimator()
estimator.set_keras_model_config(model.to_yaml())
estimator.set_optimizer_config(adagrad.get_config())
estimator.set_nb_epoch(nb_epoch)
estimator.set_batch_size(batch_size)
estimator.set_num_workers(4)
estimator.set_verbosity(2)
estimator.set_validation_split(0.1)
estimator.set_categorical_labels(True)
estimator.set_nb_classes(nb_classes)
estimator.set_frequency('batch')

# Fitting a pipeline that contains the estimator returns a Transformer
pipeline = Pipeline(stages=[estimator])
fitted_pipeline = pipeline.fit(df)

# Evaluate the Spark model by evaluating the underlying Keras model
prediction = fitted_pipeline.transform(test_df)
pnl = prediction.select("label", "prediction")
pnl.show(100)

# Convert the DataFrame to an RDD of (label, prediction) pairs for MulticlassMetrics
prediction_and_label = pnl.rdd.map(lambda row: (row.label, row.prediction))
metrics = MulticlassMetrics(prediction_and_label)
print("Precision:", metrics.precision())
print("Recall:", metrics.recall())