Example #1

# Add an evaluator
training_pipeline.add_evaluator(
    TFMAEvaluator(
        slices=[['has_diabetes']],
        metrics={'has_diabetes': ['binary_crossentropy', 'binary_accuracy']}))

# Run the pipeline locally
training_pipeline.run()

######################
# DO SOME EVALUATION #
######################
# Sample data
df = training_pipeline.sample_transformed_data()
print(df.shape)
print(df.describe())
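
# Illustrative extra checks (an assumption for illustration: the sampled data is a
# pandas DataFrame and still contains the 'has_diabetes' label column after transformation):
print(df.isnull().sum())                                # missing values per column
print(df['has_diabetes'].value_counts(normalize=True))  # label balance in the sample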

# See schema of data and detect drift
print(training_pipeline.view_schema())

##########################
# CREATE SECOND PIPELINE #
##########################
training_pipeline_2 = training_pipeline.copy('Experiment 2')
training_pipeline_2.add_trainer(
    FeedForwardTrainer(loss='binary_crossentropy',
                       last_activation='sigmoid',
                       output_units=1,
                       metrics=['accuracy'],
                       epochs=15))
training_pipeline_2.run()
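
# Optional: inspect the second run with the same helpers used in Example #2
# (view_statistics() and evaluate()) to compare it against the first experiment.
training_pipeline_2.view_statistics()
training_pipeline_2.evaluate()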

Example #2
                                     'parameters': {}
                                 }]
                             }
                         }))

# Add a trainer
training_pipeline.add_trainer(
    FeedForwardTrainer(loss='binary_crossentropy',
                       last_activation='sigmoid',
                       output_units=1,
                       metrics=['accuracy'],
                       epochs=20))

# Add an evaluator
training_pipeline.add_evaluator(
    TFMAEvaluator(
        slices=[['has_diabetes']],
        metrics={'has_diabetes': ['binary_crossentropy', 'binary_accuracy']}))

# Run the pipeline locally
training_pipeline.run()

# See schema of data
training_pipeline.view_schema()

# See statistics of train and eval
training_pipeline.view_statistics()

# Creates a notebook for evaluation
training_pipeline.evaluate()
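
# Optional follow-up, mirroring Example #1: pull a sample of the transformed
# data into a DataFrame for ad-hoc inspection.
df = training_pipeline.sample_transformed_data()
print(df.shape)
print(df.describe())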