# line plot
r.plot_line()

# kernel density estimate (up to two dimensions)
r.plot_kde('val_acc')

# a simple histogram
r.plot_hist(bins=50)

# correlation heatmap
r.plot_corr()

# a four dimensional bar grid
r.plot_bars('batch_size', 'val_acc', 'first_neuron', 'lr')
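
# The plot calls above assume a Reporting object built from a completed Scan.
# A minimal sketch of that setup, reusing the Scan object `h` that Evaluate
# and Deploy work with below (names assumed from the surrounding snippets):
r = ta.Reporting(h)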

e = ta.Evaluate(h)
e.evaluate(x, y, folds=10, average='macro')

ta.Deploy(h, 'iris')

iris = ta.Restore('iris.zip')

# make predictions with the model
iris.model.predict(x)

# get the meta-data for the experiment
print(iris.details)
# get the hyperparameter space boundary
print(iris.params)
# sample of x and y data
print(iris.x)
print(iris.y)
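
# Quick sanity check (sketch): score the bundled sample data with the restored
# model, using only the `iris.model` and `iris.x` attributes shown above.
sample_preds = iris.model.predict(iris.x)
print(sample_preds[:5])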
scan_object = ta.Scan(x_train,
                      y_train,
                      model=lstm_model,
                      params=p,
                      experiment_name=experiment_name)
r = ta.Reporting(scan_object)

print(r.data[[
    'val_accuracy', 'epochs', 'batch_size', 'learning_rate', 'dense_neurons',
    'lstm_hspace'
]].sort_values('val_accuracy', ascending=False))
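
# A small follow-up sketch using only the r.data DataFrame from above: pull out
# the single best round by validation accuracy for a one-line summary.
best_round = r.data.sort_values('val_accuracy', ascending=False).iloc[0]
print(best_round[['val_accuracy', 'epochs', 'batch_size', 'learning_rate']])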

# number of folds for k-fold cross-validation
folds = 5
# Predict and Evaluate operate on the best model found by the scan
p = ta.Predict(scan_object, task='multi_class')
e = ta.Evaluate(scan_object)
accuracy_scores = e.evaluate(X_test,
                             y_test,
                             folds=folds,
                             task='multi_label',
                             metric='val_accuracy')
predictions = p.predict(X_test, metric='val_accuracy')
print('F1: ', np.mean(accuracy_scores))


def get_actual_class(row):
    # decode a one-hot encoded row back to its integer class label
    if row.loc[0] == 1:
        return 0
    elif row.loc[1] == 1:
        return 1
    elif row.loc[2] == 1:
        return 2
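

# Example sketch of how the helper above might be applied: decode a one-hot
# encoded label array back to integer classes with pandas, assuming y_test is a
# three-column one-hot array as in the evaluation snippet earlier.
import pandas as pd

actual_classes = pd.DataFrame(y_test).apply(get_actual_class, axis=1)
print(actual_classes.value_counts())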
del tempX, tempY

#dummyX, dummyY = train_generator.__getitem__(0)
#testX, testY = valid_generator.__getitem__(0)
#valid_generator.on_epoch_end()

t = talos.Scan(
    x=trainX,
    y=trainY,
    x_val=testX,
    y_val=testY,
    model=emotions_model,
    params=p,
    experiment_name='emotional_classification',
    round_limit=2000
)  # round_limit caps the experiment at 2000 rounds (parameter configurations)
# fraction_limit=.005)  # alternative: sample 0.5% of all parameter configurations

results = talos.Evaluate(t)
results_df = results.data
results_df = results_df.sort_values(by='val_accuracy', ascending=True)
results_df.to_csv(
    r'/home/ubuntu/Desktop-Sync-Folder/Check2/tuning_results_3.csv')
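
# A possible next step (sketch): package the best-performing model with Deploy,
# mirroring the ta.Deploy / ta.Restore pattern shown earlier. The package name
# 'emotions_best' and the metric argument are assumptions for illustration.
talos.Deploy(t, 'emotions_best', metric='val_accuracy')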

# %% ------------------------------------------ Validate Best Model ----------------------------------------------------
# Get the best model from the results and validate it below:

# %%
# STEP_SIZE_TEST = test_generator.n // test_generator.batch_size
# history = model.evaluate_generator(generator=valid_generator, steps=STEP_SIZE_TEST)
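
# A sketch of the validation step hinted at above, reusing talos.Evaluate in the
# same way as the X_test / y_test example earlier. testX / testY come from the
# generators above; the task and fold count are assumptions for illustration.
import numpy as np

ev = talos.Evaluate(t)
fold_scores = ev.evaluate(testX,
                          testY,
                          folds=5,
                          task='multi_class',
                          metric='val_accuracy')
print('mean score across folds:', np.mean(fold_scores))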