def build_and_train(hype_space,
                    save_best_weights=False,
                    log_for_tensorboard=False):
    """Build the deep CNN model and train it."""
    tf.logging.info("start build and train\n")

    K.set_learning_phase(1)
    K.set_image_data_format('channels_last')

    # if log_for_tensorboard:
    #     # We need a smaller batch size to not blow memory with tensorboard
    #     hype_space["lr_rate_mult"] = hype_space["lr_rate_mult"] / 10.0
    #     hype_space["batch_size"] = hype_space["batch_size"] / 10.0

    model = build_model(hype_space)
    tf.logging.info("After build model")
    # K.set_learning_phase(1)

    model_uuid = str(uuid.uuid4())[:5]

    callbacks = []

    # Weight saving callback:
    if save_best_weights:
        weights_save_path = os.path.join(WEIGHTS_DIR,
                                         '{}.hdf5'.format(model_uuid))
        tf.logging.info(
            "Model's weights will be saved to: {}".format(weights_save_path))
        if not os.path.exists(WEIGHTS_DIR):
            os.makedirs(WEIGHTS_DIR)

        callbacks.append(
            keras.callbacks.ModelCheckpoint(weights_save_path,
                                            monitor='val_fine_outputs_acc',
                                            save_best_only=True,
                                            mode='max'))
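        # With save_best_only=True and mode='max', only the weights from the
        # epoch with the highest fine validation accuracy are kept on disk.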

    # TensorBoard logging callback:
    log_path = None
    if log_for_tensorboard:
        log_path = os.path.join(TENSORBOARD_DIR, model_uuid)
        tf.logging.info(
            "TensorBoard log files will be saved to: {}".format(log_path))
        if not os.path.exists(log_path):
            os.makedirs(log_path)

        # The Keras TensorBoard callback and TensorBoard itself are not yet
        # well documented, so we do not save embeddings (e.g., for t-SNE).

        # embeddings_metadata = {
        #     # Dense layers only:
        #     l.name: "../10000_test_classes_labels_on_1_row_in_plain_text.tsv"
        #     for l in model.layers if 'dense' in l.name.lower()
        # }

        tb_callback = keras.callbacks.TensorBoard(
            log_dir=log_path,
            histogram_freq=2,
            # write_images=True, # Enabling this line would require more than 5 GB at each `histogram_freq` epoch.
            write_graph=True
            # embeddings_freq=3,
            # embeddings_layer_names=list(embeddings_metadata.keys()),
            # embeddings_metadata=embeddings_metadata
        )
        tb_callback.set_model(model)
        callbacks.append(tb_callback)
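        # histogram_freq=2 writes weight and activation histograms every two
        # epochs; Keras requires validation data to be provided for this.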

    # Train net:
    history = model.fit(
        [x_train],
        [y_train, y_train_c],
        batch_size=int(hype_space['batch_size']),
        epochs=EPOCHS,
        shuffle=True,
        verbose=1,
        callbacks=callbacks,
        validation_data=([x_test], [y_test, y_test_coarse]),
    ).history
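    # fit() returns a History object; `.history` keeps only its dict of
    # per-epoch training and validation metrics.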

    # Test net:
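    # Switch Keras to the test phase so that dropout and batch normalization
    # run in inference mode during evaluation.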
    K.set_learning_phase(0)
    score = model.evaluate([x_test], [y_test, y_test_coarse], verbose=0)
    max_acc = max(history['val_fine_outputs_acc'])

    model_name = "model_{}_{}".format(str(max_acc), str(uuid.uuid4())[:5])
    tf.logging.info("Model name: {}".format(model_name))

    # Note: to restore the model later, you'll need the ModelCheckpoint
    # callback above to have saved the best weights (not the final ones);
    # otherwise only the `result` dict below is kept.
    tf.logging.debug(history.keys())
    tf.logging.debug(history)
    tf.logging.info(score)
    result = {
        # We plug "-val_fine_outputs_acc" as a
        # minimizing metric named 'loss' by Hyperopt.
        'loss': -max_acc,
        'real_loss': score[0],
        # Fine stats:
        'fine_best_loss': min(history['val_fine_outputs_loss']),
        'fine_best_accuracy': max(history['val_fine_outputs_acc']),
        'fine_end_loss': score[1],
        'fine_end_accuracy': score[3],
        # Coarse stats:
        'coarse_best_loss': min(history['val_coarse_outputs_loss']),
        'coarse_best_accuracy': max(history['val_coarse_outputs_acc']),
        'coarse_end_loss': score[2],
        'coarse_end_accuracy': score[4],
        # Misc:
        'model_name': model_name,
        'space': hype_space,
        'history': history,
        'status': STATUS_OK
    }

    tf.logging.info("RESULT:")
    print_json(result)

    return model, model_name, result, log_path
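

# Usage sketch (added illustration, not from the original source): the
# `result` dict above follows Hyperopt's objective-function contract
# ('loss' to minimize, plus 'status': STATUS_OK), so `build_and_train`
# can be wrapped and handed to `hyperopt.fmin`. Here, `HYPE_SPACE` is a
# hypothetical search space of `hyperopt.hp` distributions assumed to be
# defined elsewhere in the project.
def optimize_hyperparameters_sketch(max_evals=100):
    from hyperopt import Trials, fmin, tpe

    def objective(hype_space):
        _model, _model_name, result, _log_path = build_and_train(hype_space)
        return result

    trials = Trials()
    best = fmin(objective, HYPE_SPACE, algo=tpe.suggest,
                trials=trials, max_evals=max_evals)
    return best, trials
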
def build_and_train(hype_space, model_uuid, save_best_weights=True):
    """Build the deep CNN model and train it."""
    # Set Keras to the learning phase (training mode).
    K.set_learning_phase(1)
    K.set_image_data_format('channels_last')

    # Build the model according to the hyper-parameter space passed.
    model = build_model(hype_space)

    # Create the callbacks list, filled in according to the function's parameters.
    callbacks = []

    # Weight saving callback:
    if save_best_weights:
        weights_save_path = os.path.join(WEIGHTS_DIR,
                                         '{}.hdf5'.format(model_uuid))
        print("Model's weights will be saved to: {}".format(weights_save_path))
        if not os.path.exists(WEIGHTS_DIR):
            os.makedirs(WEIGHTS_DIR)

        # Add the weight-saving callback to the model's callbacks.
        callbacks.append(
            keras.callbacks.ModelCheckpoint(weights_save_path,
                                            monitor='val_accuracy',
                                            save_best_only=True,
                                            mode='max'))

    # Train net:
    print("\nBegin training of model:")
    history = model.fit_generator(
        train_it,
        validation_data=val_it,
        epochs=EPOCHS,
        shuffle=True,
        verbose=1,
        callbacks=callbacks,
    ).history

    # Test net:
    print("\nBegin evaluation of model:")
    K.set_learning_phase(0)
    score = model.evaluate_generator(test_it, verbose=1)
    max_acc = max(history['accuracy'])

    euclidean_distance = euclidean_distance_metric(model)
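    # (`euclidean_distance_metric` is a project-specific helper assumed to be
    # defined elsewhere.)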

    # Define model name
    model_name = "retrained_model_{}_{}".format(str(max_acc), model_uuid)
    print("Model name: {}".format(model_name))

    print(history.keys())
    print(history)
    print(score)
    result = {
        # We plug "-accuracy" as a
        # minimizing metric named 'loss' by Hyperopt.
        'loss': -history['val_accuracy'][-1],
        'real_loss': score[0],
        # Stats:
        'best_loss': min(history['loss']),
        'best_accuracy': max(history['accuracy']),
        'end_loss': score[0],
        'end_accuracy': score[1],
        'euclidean_distance_error': euclidean_distance,
        # Misc:
        'model_name': model_name,
        'model_uuid': model_uuid,
        'space': hype_space,
        'history': history,
        'status': STATUS_OK
    }
    print("\nRESULT:")
    print_json(result)

    return model, model_name, result, model_uuid
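

# Restore sketch (added illustration, not from the original source): because
# the ModelCheckpoint callback above saves only the best weights under
# WEIGHTS_DIR, a trained model can be rebuilt from its hyperparameter space
# and its weights reloaded before scoring. `test_it` is the same test
# generator used above.
def evaluate_best_weights_sketch(hype_space, model_uuid):
    K.set_learning_phase(0)  # inference mode for dropout / batch norm
    model = build_model(hype_space)
    weights_path = os.path.join(WEIGHTS_DIR, '{}.hdf5'.format(model_uuid))
    model.load_weights(weights_path)
    return model.evaluate_generator(test_it, verbose=1)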