Example no. 1
def test_system(test_list: list):
    """Run the trained model on the test set and collect per-player prediction errors."""
    K.set_learning_phase(False)  # switch Keras to inference mode for testing
    num_test_batches = int(len(test_list) / BATCH_SIZE)
    predicted = []
    result = []
    error = []
    for seed in range(num_test_batches):
        batch = get_test_batch(seed, test_list)
        predicted_result = SESSION.run(
            predicted_player_result,
            feed_dict={predicted_player_skills: batch["player_skills"]})
        predicted_result = np.swapaxes(predicted_result, 0, 1)
        for i in range(len(predicted_result)):
            for player in range(len(predicted_result[i])):
                predicted.append(predicted_result[i][player])
                result.append(batch["player_results"][i][player])
                error.append(batch["player_results"][i][player] -
                             predicted_result[i][player])
    prediction = np.array(predicted)
    results = np.array(result)
    prediction_error = np.array(error)
    if DEBUG > 2:
        print("Error std:    {}".format(np.std(prediction_error, 0)))
        print("Original std: {}".format(np.std(results, 0)))
    accuracy = np.mean(1 - np.var(prediction_error, 0) / np.var(results, 0))
    K.set_learning_phase(True)
    data = {
        "predicted": prediction,
        "error": prediction_error,
        "result": results,
        "accuracy": accuracy
    }
    return data
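The accuracy above is the mean, across output dimensions, of 1 - Var(error)/Var(result), i.e. a per-dimension explained-variance (R^2) score. A minimal self-contained illustration of that metric on toy data (the arrays are made up for the example):

import numpy as np

# Two output dimensions, four samples; predictions close to the targets.
results = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0], [4.0, 40.0]])
predicted = results + np.random.normal(0, 0.1, results.shape)
error = results - predicted

# Mean over dimensions of 1 - Var(error)/Var(result): near 1 for good
# predictions, near 0 when the error is as spread out as the targets.
accuracy = np.mean(1 - np.var(error, 0) / np.var(results, 0))
print(accuracy)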
Example no. 2
    def __init__(self, restore, session=None):
        self.num_channels = 1
        self.image_size = 16
        self.num_labels = 2
        max_features = 20000
        maxlen = 256  # cut texts after this number of words (among top max_features most common words)

        K.set_learning_phase(True)

        # model = Sequential()
        # model.add(Reshape((256,), input_shape=(16, 16, 1)))
        # model.add(Embedding(max_features, 50))
        # model.add(LSTM(64, dropout=0.2, recurrent_dropout=0.2))
        # model.add(Dense(2, activation='sigmoid'))
        # try using different optimizers and different optimizer configs
        # model.compile(loss='binary_crossentropy',
        #               optimizer='adam',
        #               metrics=['accuracy'])

        model = Sequential()
        model.add(Reshape((256,), input_shape=(16, 16, 1)))
        model.add(Embedding(max_features, 128))
        model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(2, activation='softmax'))
        model.load_weights("models/imdb_model_new.h5")

        self.model = model
Example no. 3
    def __init__(self, restore, session=None):
        self.num_channels = 128
        self.image_size = 16
        self.num_labels = 2
        max_features = 20000
        maxlen = 256  # cut texts after this number of words (among top max_features most common words)

        K.set_learning_phase(True)

        print('Build model...')
        model = Sequential()
        model.add(Reshape((256, ), input_shape=(16, 16, 1)))
        model.add(Embedding(max_features, 128))
        model.add(LSTM(128))
        model.add(Dense(2))  # no activation: the softmax was commented out

        model.load_weights(restore)
        self.model = model
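A hedged sketch of how such a wrapper might be used for inference; the class name IMDBWrapper is an assumption (the snippet shows only detached __init__ methods), and the input is shaped (16, 16, 1) so the Reshape layer can flatten it into a 256-token sequence for the Embedding/LSTM:

import numpy as np

wrapper = IMDBWrapper("models/imdb_model_new.h5")  # hypothetical class name
tokens = np.random.randint(0, 20000, size=(1, 16, 16, 1))  # word indices
logits = wrapper.model.predict(tokens)
print(logits.shape)  # (1, 2): one score per class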
Example no. 4
from bottle import route, run, static_file, request, BaseRequest
import base64
import re
import numpy as np
import tensorflow as tf
import cv2
from keras.layers.core import K
K.set_learning_phase(0)
import time
import random
import os
import datetime
from keras.models import Model,load_model
from keras.layers import Input, Conv2D, MaxPooling2D
import threading

seed = random.randint(0, 2**31 - 1)
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
BaseRequest.MEMFILE_MAX = 10000 * 1000

import chainer
import chainer.links as L
import chainer.functions as F
class GoogLeNet(chainer.Chain):
    def __init__(self):
        super(GoogLeNet, self).__init__(
            conv1=L.Convolution2D(3, 64, 7, stride=2, pad=3, nobias=True),
            norm1=L.BatchNormalization(64),
            conv2=L.Convolution2D(64, 192, 3, pad=1, nobias=True),
            # ... the rest of the GoogLeNet constructor is truncated in the original snippet
        )
Example no. 5
def build_and_train(hype_space, save_best_weights=True, log_for_tensorboard=False):
    """Build the model and train it."""
    K.set_learning_phase(1)

    # if log_for_tensorboard:
    #     # We need a smaller batch size to not blow memory with tensorboard
    #     hype_space["lr_rate_mult"] = hype_space["lr_rate_mult"] / 10.0
    #     hype_space["batch_size"] = hype_space["batch_size"] / 10.0

    model = build_model(hype_space)

    # K.set_learning_phase(1)
    time_str = datetime.now().strftime("%Y_%m_%d-%H_%M")
    model_weight_name = MODEL_NAME + "-" + time_str

    callbacks = []

    # Weight saving callback:
    if save_best_weights:
        weights_save_path = os.path.join(
            WEIGHTS_DIR, '{}.hdf5'.format(model_weight_name))
        print("Model's weights will be saved to: {}".format(weights_save_path))
        if not os.path.exists(WEIGHTS_DIR):
            os.makedirs(WEIGHTS_DIR)

        callbacks.append(keras.callbacks.ModelCheckpoint(
            weights_save_path,
            monitor='val_accuracy',
            save_best_only=True, mode='max'))

        callbacks.append(keras.callbacks.EarlyStopping(
            monitor='val_accuracy',
            patience=10, verbose=1, mode='max'))

    # TensorBoard logging callback:
    log_path = None
    if log_for_tensorboard:
        log_path = os.path.join(TENSORBOARD_DIR, model_weight_name)
        print("Tensorboard log files will be saved to: {}".format(log_path))
        if not os.path.exists(log_path):
            os.makedirs(log_path)

        # Right now Keras's TensorBoard callback and TensorBoard itself are not
        # properly documented, so we do not save embeddings (e.g. for t-SNE).

        # embeddings_metadata = {
        #     # Dense layers only:
        #     l.name: "../10000_test_classes_labels_on_1_row_in_plain_text.tsv"
        #     for l in model.layers if 'dense' in l.name.lower()
        # }

        tb_callback = keras.callbacks.TensorBoard(
            log_dir=log_path,
            histogram_freq=2,
            # write_images=True, # Enabling this line would require more than 5 GB at each `histogram_freq` epoch.
            write_graph=True
            # embeddings_freq=3,
            # embeddings_layer_names=list(embeddings_metadata.keys()),
            # embeddings_metadata=embeddings_metadata
        )
        tb_callback.set_model(model)
        callbacks.append(tb_callback)

    # Train net:
    history = model.fit(
        X_train_aug,
        y_train,
        batch_size=int(hype_space['batch_size']),
        epochs=EPOCHS,
        shuffle=True,
        verbose=2,
        callbacks=callbacks,
        validation_data=(X_val_aug, y_val)
    ).history

    # Test net:
    K.set_learning_phase(0)
    score = evaluate_model(model, weights_save_path)  # assumes save_best_weights=True so weights_save_path is defined
    print("\n\n")
    max_acc = max(history['val_accuracy'])

    model_name = MODEL_NAME+"_{}_{}".format(str(max_acc), time_str)
    print("Model name: {}".format(model_name))

    # Note: to restore the model, you'll need to have a keras callback to
    # save the best weights and not the final weights. Only the result is
    # saved.
    print(history.keys())
    print(history)
    print('Score: ', score)
    result = {
        # We plug "-val_accuracy" as a
        # minimizing metric named 'loss' by Hyperopt.
        'loss': -max_acc,
        # Misc:
        'model_name': model_name,
        'space': hype_space,
        'status': STATUS_OK
    }

    print("RESULT:")
    print_json(result)

    f = open("/nosave/lange/cu-ssp/model_neu/optimized/logs/test_results_mod6.txt", "a+")
    res = ""
    for k, v in score.items():
        res += str(k)+": "+str(v)+"\t"
    f.write("\n"+str(model_weight_name)+"\t"+ res)
    f.close()

    return model, model_name, result, log_path
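Since the returned result dict carries a minimized 'loss' and a STATUS_OK status, this function is evidently meant to be driven by Hyperopt. A minimal sketch of that wiring, assuming a hypothetical search space (the real keys depend on what build_model expects):

from hyperopt import Trials, fmin, hp, tpe

# Hypothetical search space; the key names are assumptions.
space = {
    'batch_size': hp.quniform('batch_size', 32, 128, 32),
    'lr_rate_mult': hp.loguniform('lr_rate_mult', -0.5, 0.5),
}

def objective(hype_space):
    # Hyperopt only needs the result dict with 'loss' and 'status'.
    _, _, result, _ = build_and_train(hype_space)
    return result

trials = Trials()
best = fmin(objective, space, algo=tpe.suggest, max_evals=20, trials=trials)
print(best)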
Example no. 6
def classify_api(request):
    data = {"success": False}
    clean_directory()

    if request.method == "POST":
        model = request.POST.get("model", None)
        if model == 'imagenet':
            tmp_f = NamedTemporaryFile()
            tmp_adver = NamedTemporaryFile()

            if request.FILES.get("image", None) is not None:
                image_request = request.FILES["image"]
                image_bytes = image_request.read()
                image.save(tmp_f, image.format)

            elif request.POST.get("image64", None) is not None:
                base64_data = request.POST.get("image64", None).split(',',
                                                                      1)[1]
                plain_data = base64.b64decode(base64_data)
                image = Image.open(io.BytesIO(plain_data))
                image.save(
                    os.path.join(current_dir,
                                 'imagenet/dataset/images/testtest.png'))
                tmp_f.write(plain_data)

            tmp_f.close()

            # Backend session for attack
            print('Building Backend Session.')
            K.set_learning_phase(0)
            sess = tf.Session()
            backend.set_session(sess)

            # Image preprocess
            print('Modifying image')
            x = np.expand_dims(preprocess(image.resize((299, 299))), axis=0)
            img_shape = [1, 299, 299, 3]
            x_input = tf.placeholder(tf.float32, shape=img_shape)

            # Define model
            d = discriminator()

            # Prediction of original image
            print('prediction of original image')
            classify_result = get_predictions(d, x, 10)

            # Select attack algorithm and iteration

            attack_algorithm = request.POST.get("attack", None)
            n = int(request.POST.get("iterate", None))

            # Start attack
            result, attack_speed = attack(attack_algorithm, n, d, x_input, x,
                                          sess)
            print("attack speed: %s seconds" % (round(attack_speed, 5)))
            print('original image:', classify_result[0][1])
            print('adversarial example is classified by', result[0][1])

            # Print image to web site
            with open(
                    os.path.join(current_dir, 'imagenet/output/testtest.png'),
                    'rb') as img_file:
                img_str = base64.b64encode(img_file.read())
            tmp_adver.write(base64.b64decode(img_str))
            tmp_adver.close()
        elif model == 'mnist':
            tmp_adver = NamedTemporaryFile()
            tmp_f = NamedTemporaryFile()
            mnist_sample = int(request.POST.get("sample", None))
            mnist_target = int(request.POST.get("target", None))
            mnist_algorithm = request.POST.get("mnist_algorithm", None)
            result, attack_speed = mnist_attack_func(mnist_sample,
                                                     mnist_target,
                                                     mnist_algorithm)
            print("attack speed: %s seconds" % (round(attack_speed, 5)))
            print('original class:', mnist_sample, 'target class:',
                  mnist_target)
            print('adversarial example is classified by', np.argmax(result))

            result = result.tolist()
            with open(
                    os.path.join(current_dir,
                                 'mnist/dataset/images/testtest.png'),
                    'rb') as input_file:
                input_str = base64.b64encode(input_file.read())
            tmp_f.write(base64.b64decode(input_str))
            tmp_f.close()
            with open(os.path.join(current_dir, 'mnist/output/testtest.png'),
                      'rb') as img_file:
                img_str = base64.b64encode(img_file.read())
            tmp_adver.write(base64.b64decode(img_str))
            tmp_adver.close()

        # Make Graph
        data["attack_speed"] = attack_speed
        data["success"] = True
        data["confidence"] = {}
        if model == 'imagenet':
            data["model"] = 'imagenet'
            for i in range(len(classify_result)):
                data["confidence"][classify_result[i][1]] = float(
                    classify_result[i][2])
            data["adverimage"] = 'data:image/png;base64,' + img_str.decode(
                'utf-8')
            data["adversarial"] = {}
            for i in range(len(result)):
                data["adversarial"][result[i][1]] = float(result[i][2])
                #print('iter:', i, 'name:', result[i][1], 'pred:', result[i][2])

            sess.close()

        elif model == 'mnist':
            data["model"] = 'mnist'
            for i in range(10):
                if i == mnist_sample:
                    data["confidence"][str(i)] = float(1)
                else:
                    data["confidence"][str(i)] = float(0)
            data["input_image"] = 'data:image/png;base64,' + input_str.decode(
                'utf-8')
            data["adverimage"] = 'data:image/png;base64,' + img_str.decode(
                'utf-8')
            data["adversarial"] = {}
            for i in range(len(result[0])):
                data["adversarial"][str(i)] = float(result[0][i])

        # Close the session
        # sess.close()
    return JsonResponse(data)
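classify_api reads request.POST/request.FILES and returns a JsonResponse, so it is a Django view. A hedged sketch of the URL wiring it would need (the module layout and route are assumptions):

# urls.py (hypothetical module layout)
from django.urls import path
from . import views

urlpatterns = [
    path('api/classify/', views.classify_api, name='classify_api'),
]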
Example no. 7
def build_and_train(hype_space,
                    save_best_weights=False,
                    log_for_tensorboard=False):
    """Build the deep CNN model and train it."""
    tf.logging.info("start build and train\n")

    K.set_learning_phase(1)
    K.set_image_data_format('channels_last')

    # if log_for_tensorboard:
    #     # We need a smaller batch size to not blow memory with tensorboard
    #     hype_space["lr_rate_mult"] = hype_space["lr_rate_mult"] / 10.0
    #     hype_space["batch_size"] = hype_space["batch_size"] / 10.0

    model = build_model(hype_space)
    tf.logging.info("After build model")
    # K.set_learning_phase(1)

    model_uuid = str(uuid.uuid4())[:5]

    callbacks = []

    # Weight saving callback:
    if save_best_weights:
        weights_save_path = os.path.join(WEIGHTS_DIR,
                                         '{}.hdf5'.format(model_uuid))
        tf.logging.info(
            "Model's weights will be saved to: {}".format(weights_save_path))
        if not os.path.exists(WEIGHTS_DIR):
            os.makedirs(WEIGHTS_DIR)

        callbacks.append(
            keras.callbacks.ModelCheckpoint(weights_save_path,
                                            monitor='val_fine_outputs_acc',
                                            save_best_only=True,
                                            mode='max'))

    # TensorBoard logging callback:
    log_path = None
    if log_for_tensorboard:
        log_path = os.path.join(TENSORBOARD_DIR, model_uuid)
        tf.logging.info(
            "Tensorboard log files will be saved to: {}".format(log_path))
        if not os.path.exists(log_path):
            os.makedirs(log_path)

        # Right now Keras's TensorBoard callback and TensorBoard itself are not
        # properly documented, so we do not save embeddings (e.g. for t-SNE).

        # embeddings_metadata = {
        #     # Dense layers only:
        #     l.name: "../10000_test_classes_labels_on_1_row_in_plain_text.tsv"
        #     for l in model.layers if 'dense' in l.name.lower()
        # }

        tb_callback = keras.callbacks.TensorBoard(
            log_dir=log_path,
            histogram_freq=2,
            # write_images=True, # Enabling this line would require more than 5 GB at each `histogram_freq` epoch.
            write_graph=True
            # embeddings_freq=3,
            # embeddings_layer_names=list(embeddings_metadata.keys()),
            # embeddings_metadata=embeddings_metadata
        )
        tb_callback.set_model(model)
        callbacks.append(tb_callback)

    # Train net:
    history = model.fit(
        [x_train],
        [y_train, y_train_c],
        batch_size=int(hype_space['batch_size']),
        epochs=EPOCHS,
        shuffle=True,
        verbose=1,
        callbacks=callbacks,
        validation_data=([x_test], [y_test, y_test_coarse]),
    ).history

    # Test net:
    K.set_learning_phase(0)
    score = model.evaluate([x_test], [y_test, y_test_coarse], verbose=0)
    # score: [total_loss, fine_loss, coarse_loss, fine_acc, coarse_acc], per the result dict below
    max_acc = max(history['val_fine_outputs_acc'])

    model_name = "model_{}_{}".format(str(max_acc), str(uuid.uuid4())[:5])
    tf.logging.info("Model name: {}".format(model_name))

    # Note: to restore the model, you'll need to have a keras callback to
    # save the best weights and not the final weights. Only the result is
    # saved.
    tf.logging.debug(history.keys())
    tf.logging.debug(history)
    tf.logging.info(score)
    result = {
        # We plug "-val_fine_outputs_acc" as a
        # minimizing metric named 'loss' by Hyperopt.
        'loss': -max_acc,
        'real_loss': score[0],
        # Fine stats:
        'fine_best_loss': min(history['val_fine_outputs_loss']),
        'fine_best_accuracy': max(history['val_fine_outputs_acc']),
        'fine_end_loss': score[1],
        'fine_end_accuracy': score[3],
        # Coarse stats:
        'coarse_best_loss': min(history['val_coarse_outputs_loss']),
        'coarse_best_accuracy': max(history['val_coarse_outputs_acc']),
        'coarse_end_loss': score[2],
        'coarse_end_accuracy': score[4],
        # Misc:
        'model_name': model_name,
        'space': hype_space,
        'history': history,
        'status': STATUS_OK
    }

    tf.logging.info("RESULT:")
    print_json(result)

    return model, model_name, result, log_path
Example no. 8
def build_and_train(hype_space,
                    save_best_weights=False,
                    log_for_tensorboard=False):
    """Build the deep CNN model and train it."""
    tf.logging.info("start build and train\n")

    K.set_learning_phase(1)
    K.set_image_data_format('channels_last')

    if log_for_tensorboard:
        # We need a smaller batch size to not blow memory with tensorboard
        hype_space["lr_rate_mult"] = hype_space["lr_rate_mult"] / 10.0
        hype_space["batch_size"] = hype_space["batch_size"] / 10.0

    model = build_model(hype_space)
    tf.logging.info("After build model")

    model_uuid = str(uuid.uuid4())[:5]

    callbacks = []

    # Weight saving callback:
    if save_best_weights:
        weights_save_path = os.path.join(WEIGHTS_DIR,
                                         '{}.hdf5'.format(model_uuid))
        tf.logging.info(
            "Model's weights will be saved to: {}".format(weights_save_path))
        if not os.path.exists(WEIGHTS_DIR):
            os.makedirs(WEIGHTS_DIR)

        callbacks.append(
            keras.callbacks.ModelCheckpoint(weights_save_path,
                                            monitor='val_fine_outputs_acc',
                                            save_best_only=True,
                                            mode='max'))

    # TensorBoard logging callback:
    log_path = None
    if log_for_tensorboard:
        log_path = os.path.join(TENSORBOARD_DIR, model_uuid)
        tf.logging.info(
            "Tensorboard log files will be saved to: {}".format(log_path))
        if not os.path.exists(log_path):
            os.makedirs(log_path)

        tb_callback = keras.callbacks.TensorBoard(log_dir=log_path,
                                                  histogram_freq=2,
                                                  write_graph=True)
        tb_callback.set_model(model)
        callbacks.append(tb_callback)

    # Train net:
    history = model.fit(
        [x_train],
        [y_train, y_train_c],
        batch_size=int(hype_space['batch_size']),
        epochs=EPOCHS,
        shuffle=True,
        verbose=1,
        callbacks=callbacks,
        validation_data=([x_test], [y_test, y_test_coarse]),
    ).history

    # Test net:
    K.set_learning_phase(0)
    score = model.evaluate([x_test], [y_test, y_test_coarse], verbose=0)
    # score: [total_loss, fine_loss, coarse_loss, fine_acc, coarse_acc], per the result dict below
    max_acc = max(history['val_fine_outputs_acc'])

    model_name = "model_{}_{}".format(str(max_acc), str(uuid.uuid4())[:5])
    tf.logging.info("Model name: {}".format(model_name))

    # Note: to restore the model, you'll need to have a keras callback to
    # save the best weights and not the final weights. Only the result is
    # saved.
    tf.logging.debug(history.keys())
    tf.logging.debug(history)
    tf.logging.info(score)
    result = {
        # We plug "-val_fine_outputs_acc" as a
        # minimizing metric named 'loss' by Hyperopt.
        'loss': -max_acc,
        'real_loss': score[0],
        # Fine stats:
        'fine_best_loss': min(history['val_fine_outputs_loss']),
        'fine_best_accuracy': max(history['val_fine_outputs_acc']),
        'fine_end_loss': score[1],
        'fine_end_accuracy': score[3],
        # Coarse stats:
        'coarse_best_loss': min(history['val_coarse_outputs_loss']),
        'coarse_best_accuracy': max(history['val_coarse_outputs_acc']),
        'coarse_end_loss': score[2],
        'coarse_end_accuracy': score[4],
        # Misc:
        'model_name': model_name,
        'space': hype_space,
        'history': history,
        'status': STATUS_OK
    }

    tf.logging.info("RESULT:")
    print_json(result)

    return model, model_name, result, log_path
Example no. 9
#from keras import backend as K
from keras.layers.core import K
# set up some things
K.set_image_dim_ordering('th')
TRAIN_MODEL = False
if not TRAIN_MODEL: K.set_learning_phase(0)
print('using ', K.image_dim_ordering())
import tensorflow as tf
import theano.tensor as T
import numpy as np  # imported here because np is used on the next line
SEED = 428
np.random.seed(SEED)  # for reproducibility
from keras.datasets import mnist
from keras.layers.noise import GaussianNoise
import keras.models as models
from keras.layers import Layer, InputLayer, Dense, Dropout, Activation, Flatten, Reshape, Merge, Permute
from keras.layers.convolutional import Convolution2D, MaxPooling2D, UpSampling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
#from keras.regularizers import ActivityRegularizer
#from keras.utils.visualize_util import plot
import os
import pylab as pl
import cv2
import glob
from collections import Counter
import itertools
from functools import partial

# set class weights from some file - gt_path
n_classes = 14 #len(np.unique(gt_prior))
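The commented hints above suggest the class weights come from label frequencies in some ground-truth file. A hedged sketch of one common recipe, median-frequency balancing; gt_prior (an integer label array) is an assumption taken from the commented-out code:

import numpy as np

def median_frequency_weights(gt_prior, n_classes=14):
    # weight_c = median(freq) / freq_c: rare classes get weights > 1.
    counts = np.bincount(gt_prior.flatten(), minlength=n_classes).astype(np.float64)
    freq = counts / counts.sum()
    return np.median(freq[freq > 0]) / np.maximum(freq, 1e-12)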
Example no. 10
def build_and_train(hype_space, model_uuid, save_best_weights=True):
    """Build the deep CNN model and train it."""
    # setup Keras to learning phase - learn
    K.set_learning_phase(1)
    K.set_image_data_format('channels_last')

    # Build the model according to the hyper-parameter space passed.
    model = build_model(hype_space)

    # Create callbacks list to add to as according to constructor parameters
    callbacks = []

    # Weight saving callback:
    if save_best_weights:
        weights_save_path = os.path.join(WEIGHTS_DIR,
                                         '{}.hdf5'.format(model_uuid))
        print("Model's weights will be saved to: {}".format(weights_save_path))
        if not os.path.exists(WEIGHTS_DIR):
            os.makedirs(WEIGHTS_DIR)

        # Add weights saving callback to model's callbacks
        callbacks.append(
            keras.callbacks.ModelCheckpoint(weights_save_path,
                                            monitor='val_accuracy',
                                            save_best_only=True,
                                            mode='max'))

    # Train net:
    print("\nBegin training of model:")
    history = model.fit_generator(
        train_it,
        validation_data=val_it,
        epochs=EPOCHS,
        shuffle=True,
        verbose=1,
        callbacks=callbacks,
    ).history

    # Test net:
    print("\nBegin evaluation of model:")
    K.set_learning_phase(0)
    score = model.evaluate_generator(test_it, verbose=1)
    max_acc = max(history['accuracy'])

    euclidean_distance = euclidean_distance_metric(model)

    # Define model name
    model_name = "retrained_model_{}_{}".format(str(max_acc), model_uuid)
    print("Model name: {}".format(model_name))

    print(history.keys())
    print(history)
    print(score)
    result = {
        # We plug "-accuracy" as a
        # minimizing metric named 'loss' by Hyperopt.
        'loss': -history['val_accuracy'][-1],
        'real_loss': score[0],
        # Stats:
        'best_loss': min(history['loss']),
        'best_accuracy': max(history['accuracy']),
        'end_loss': score[0],
        'end_accuracy': score[1],
        'euclidean_distance_error': euclidean_distance,
        # Misc:
        'model_name': model_name,
        'model_uuid': model_uuid,
        'space': hype_space,
        'history': history,
        'status': STATUS_OK
    }
    print("\nRESULT:")
    print_json(result)

    return model, model_name, result, model_uuid
Example no. 11
PLAYER_SKILLS = {}
PLAYER_SKILLS_MU = {}
PLAYER_PERFORMANCES = {}

PP = pprint.PrettyPrinter(indent=2)

# Print options for numpy arrays
np.set_printoptions(precision=3, suppress=True)

# Make an array with games for each player
PLAYER_GAMES = {}

# Initialise TensorFlow session
SESSION = tf.Session()

K.set_learning_phase(True)

LOSS_MULTIPLIER = [1] * PLAYER_RESULT_DIM
# LOSS_MULTIPLIER[0] = 10

VARIABLE_ORDER = [Stats.GPM, Stats.XPM, Stats.CREEPS, Stats.DENIES, Stats.KILLS, Stats.DEATHS, Stats.ASSISTS,
                  Stats.LEVEL]

# Standardiser settings
SCALER = {}

# pyplot settings
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('font', size=16)
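A hedged sketch of how the per-stat SCALER dict might be populated, assuming scikit-learn and a raw_data array whose columns follow VARIABLE_ORDER (both are assumptions, not part of the original snippet):

from sklearn.preprocessing import StandardScaler

def fit_scalers(raw_data):
    # One StandardScaler per stat, keyed the same way as VARIABLE_ORDER.
    for i, stat in enumerate(VARIABLE_ORDER):
        SCALER[stat] = StandardScaler().fit(raw_data[:, i:i + 1])
    return SCALER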
Example no. 12
def build_and_train(hype_space, save_best_weights=True):
    """Build the model and train it."""
    start_time = time.time()
    K.set_learning_phase(1)
    model = build_model(hype_space)

    time_str = datetime.now().strftime("%Y_%m_%d-%H_%M")
    model_weight_name = MODEL_NAME + "-" + time_str

    callbacks = []

    # Weight saving callback:
    if save_best_weights:
        weights_save_path = os.path.join(WEIGHTS_DIR,
                                         '{}.hdf5'.format(model_weight_name))
        print("Model's weights will be saved to: {}".format(weights_save_path))
        if not os.path.exists(WEIGHTS_DIR):
            os.makedirs(WEIGHTS_DIR)

        callbacks.append(
            ModelCheckpoint(filepath=weights_save_path,
                            monitor='val_accuracy',
                            verbose=1,
                            save_best_only=True,
                            mode='max'))

    callbacks.append(
        EarlyStopping(monitor='val_accuracy',
                      patience=10,
                      verbose=1,
                      mode='max'))

    callbacks.append(
        ReduceLROnPlateau(monitor='val_accuracy',
                          factor=0.5,
                          patience=10,
                          verbose=1,
                          mode='max',
                          cooldown=2))

    # standardize train and validation profiles
    X_train, y_train, X_test, y_test = get_data()

    # Train net:
    history = model.fit(X_train,
                        y_train,
                        batch_size=int(hype_space['batch_size']),
                        epochs=EPOCHS,
                        shuffle=True,
                        verbose=2,
                        callbacks=callbacks,
                        validation_split=0.1).history

    end_time = time.time() - start_time

    # evaluate on cb513:
    score = evaluate_model(model, weights_save_path, hype_space, X_test,
                           y_test)
    K.set_learning_phase(0)

    print("\n\n")

    min_loss = min(history['val_loss'])
    max_acc = max(history['val_accuracy'])
    number_of_epochs_it_ran = len(history['loss'])

    model_name = MODEL_NAME + "_{}_{}".format(str(score['cb513']), time_str)
    print("Model name: {}".format(model_name))

    result = {
        # We plug "-val_accuracy" as a minimizing metric named 'loss' by Hyperopt.
        'loss': -max_acc,
        'real_loss': min_loss,
        'cb513': score['cb513'],
        'nb_epochs': number_of_epochs_it_ran,
        'accuracy_history': history['val_accuracy'],
        'time_in_sec': end_time,
        # Misc:
        'model_name': model_name,
        'weight_path': weights_save_path,
        'space': hype_space,
        'status': STATUS_OK
    }

    print("RESULT:")
    print_json(result)

    return model, model_name, result