Ejemplo n.º 1
0
def test_predict():
    """Smoke-test talos.Predict on a truncated iris dataset.

    Runs a 2-round Scan over the first 50 iris samples, then exercises
    both Predict.predict and Predict.predict_classes on the scan result.
    Prints progress markers; returns None.
    """

    print("\n >>> start Predict()...")

    import sys
    # NOTE(review): hard-coded developer path — assumes a local talos
    # checkout at this location; harmless if talos is already installed.
    sys.path.insert(0, '/Users/mikko/Documents/GitHub/talos')
    import talos

    x, y = talos.templates.datasets.iris()
    p = talos.templates.params.iris()
    model = talos.templates.models.iris

    # Truncate so the scan stays fast; 50 samples suffice for a smoke test.
    x = x[:50]
    y = y[:50]

    scan_object = talos.Scan(x=x,
                             y=y,
                             params=p,
                             model=model,
                             experiment_name='test_iris',
                             round_limit=2)

    predict = talos.Predict(scan_object)

    # metric='val_acc', asc=False -> select the highest-val_acc model.
    _preds = predict.predict(x, 'val_acc', False)
    _preds = predict.predict_classes(x, 'val_acc', False)

    # Fix: original message read "finised".
    print('finished Predict() \n')
Ejemplo n.º 2
0
# Hyperparameter scan of the LSTM model over the parameter dict `p`.
scan_object = ta.Scan(X_train,
                      y_train,
                      model=lstm_model,
                      params=p,
                      experiment_name=experiment_name)
# Reporting gives tabular access to the scan results.
r = ta.Reporting(scan_object)

# Show the tried hyperparameter combinations, best validation accuracy first.
print(r.data[[
    'val_accuracy', 'epochs', 'batch_size', 'learning_rate', 'dense_neurons',
    "lstm_hspace"
]].sort_values('val_accuracy', ascending=False))

# number of iterations in k-fold validation
folds = 5
# talos calls using the best model
# NOTE(review): this rebinds `p` (previously the params dict passed to Scan).
p = ta.Predict(scan_object, task='multi_class')
e = ta.Evaluate(scan_object)
# k-fold evaluation of the best model (picked by val_accuracy) on the test set.
accuracy_scores = e.evaluate(X_test,
                             y_test,
                             folds=folds,
                             task='multi_label',
                             metric='val_accuracy')
predictions = p.predict(X_test, metric='val_accuracy')
# NOTE(review): label says "F1" but this prints the mean of the e.evaluate
# scores — confirm which metric e.evaluate actually returns here.
print('F1: ', np.mean(accuracy_scores))


def get_actual_class(row):
    """Map a one-hot encoded row back to its class index (0 or 1).

    Checks position 0 first, then position 1; returns None when neither
    position holds a 1.
    """
    for class_index in (0, 1):
        if row.loc[class_index] == 1:
            return class_index
    return None
Ejemplo n.º 3
0
# Final (narrowed) hyperparameter space for the last experiment round.
# NOTE(review): most values are set literals ({...}) while 'losses' is a
# list — talos examples normally use lists throughout; confirm this talos
# version accepts sets.
params_final = {'lr': {0.0001},
                'l1': {0},
                'l2': {0},
                'first_neuron': {32, 128},
                'hidden_layers': {1, 2},
                'batch_size': {32},
                'epochs': {1000000},
                'dropout': {0},
                'optimizer': {Adam},
                'losses': [mse],
                'activation': {relu}}

# Run the experiment
# Results are written relative to the data directory.
os.chdir(path + "/Data/")

t = ta.Scan(x=x_train,
            y=y_train,
            model=build_model,
            grid_downsample=1,
            val_split=0.3,
            params=params_final,
            dataset_name='POL',
            experiment_no='2_final')

# Prediction

# Predict on the test set with the best model by (lowest) val_loss,
# then report mean squared error against the ground truth.
p = ta.Predict(t)
pred = p.predict(x_test, metric='val_loss')
MSE = np.mean((y_test - pred)**2)
print(MSE)
Ejemplo n.º 4
0
from talos.utils.gpu_utils import force_cpu


if __name__ == '__main__':

    '''NOTE: test/core_tests/test_scan.py needs to be edited as well!'''

    # Scan
    scan_object = test_scan_object()

    # Reporting
    test_reporting_object(scan_object)

    # Timestamp used as the Deploy package name (seconds since epoch).
    start_time = str(time.strftime("%s"))

    # Exercise Predict against the completed scan.
    p = ta.Predict(scan_object)
    p.predict(scan_object.x)
    p.predict_classes(scan_object.x)

    # Smoke-test the remaining top-level talos commands.
    ta.Autom8(scan_object, scan_object.x, scan_object.y)
    ta.Evaluate(scan_object)
    ta.Deploy(scan_object, start_time)
    # Round-trip the package Deploy just wrote.
    ta.Restore(start_time + '.zip')

    test_random_methods()

    # Build a data generator, then force CPU-only execution.
    fit_generator = generator(scan_object.x, scan_object.y, 20)
    force_cpu()

    # Cancer-dataset scans covering metric- and loss-based reduction.
    TestCancer().test_scan_cancer_metric_reduction()
    TestCancer().test_scan_cancer_loss_reduction()
Ejemplo n.º 5
0
    'epoch': [5, 10, 15],
    'hidden_layers': [100, 500]
}
# Randomized CNN hyperparameter search: at most 10 rounds drawn from a 5%
# sample (fraction_limit) of the full parameter grid.
cnn_scan = talos.Scan(x=X,
                      y=d_train_array,
                      model=cnn_optimization,
                      params=cnn_params,
                      experiment_name='CNN_Optimization',
                      round_limit=10,
                      fraction_limit=0.05)
cnn_analyze = talos.Analyze(cnn_scan)
# Log the best hyperparameters (excluding the listed metric columns) and the
# highest accuracy reached during the scan.
documentation_file_parameteropt.write(
    "CNN: Best parameters {}, reached score: {} \n".format(
        cnn_analyze.best_params('accuracy', ['accuracy', 'loss', 'val_loss']),
        cnn_analyze.high('accuracy')))
# NOTE(review): asc=True selects the LOWEST val_f1score model — for an
# f1-style metric that looks inverted; confirm intent.
pred_cnn = talos.Predict(cnn_scan).predict(x_t, metric='val_f1score', asc=True)
#evaluate the model
cnn_evaluation_scores, cnn_cm = evaluation.multilabel_evaluation(
    d_test_array, label_binarizer.inverse_transform(pred_cnn), "CNN")
documentation_file_modelopt.write(cnn_evaluation_scores)
#deploy best model
model_cnn = talos.Deploy(cnn_scan, "model_cnn_scibert", metric='val_accuracy')

#build LSTM model and evaluate the model
print("LSTM model evaluation")


def lstm_optimization(x_train, y_train, x_test, y_test, params):
    """Randomized search to optimize parameters of Neural Network."""
    optimization_model = models.Sequential()
    optimization_model.add(layers.LSTM(params['units'], return_sequences=True))
################################
# Generate a report
r = ta.Reporting(h)
# return the highest value for the validation accuracy and the best hyperparams to achieve it
print(r.high('val_acc'))
print(r.best_params('val_acc',3))

################################
# compute confusion matrixes

from sklearn.metrics import confusion_matrix
import numpy as np
np.set_printoptions(precision=3, suppress=True)

# Predictor backed by the best model from scan `h`.
p = ta.Predict(h)

#Confusion matrix and class distribution for train data
# NOTE(review): arguments are (predictions, true labels), whereas sklearn's
# signature is confusion_matrix(y_true, y_pred) — the axis sums below are
# consistent with this swapped order, but confirm it is intentional.
train_classes = p.predict_classes(train_features)
train_matrix = confusion_matrix(train_classes,train_labels_int)
print("\nConfusion matrix for classifying train data:")
print(train_matrix)
print("\nDistribution of true labels for train data:")
# Column sums = counts per true label (given the argument order above).
train_dist = np.sum(train_matrix,axis=0)
print(train_dist/sum(train_dist))
print("\nDistribution of predicted labels for train data:")
# Row sums = counts per predicted label.
train_dist_pred = np.sum(train_matrix,axis=1)
print(train_dist_pred/sum(train_dist_pred))


#Confusion matrix for validation data