Example #1
from keras.callbacks import ModelCheckpoint

# Save only the weights that improve the monitored one-shot accuracy metric.
checkpointer = ModelCheckpoint(filepath=CHECKPOINTED_WEIGHTS, verbose=1, save_best_only=True, monitor='oneshot_acc')


# In[33]:


# Add one extra step so the final partial batch of triplets is still consumed.
STEPS_PER_EPOCH = (NUM_TRAIN_TRIPLETS // BATCH_SIZE) + 1
VALIDATION_STEPS = (NUM_VAL_TRIPLETS // BATCH_SIZE) + 1


# In[34]:


from importlib import reload  # reload is not a builtin on Python 3

import custom_callbacks; reload(custom_callbacks)
from custom_callbacks import LossHistory
# Likely the source of the 'oneshot_acc' metric monitored by the checkpointer.
loss_history = LossHistory(X_val, y_val, test_alphabet_to_index)


# In[35]:


from keras.optimizers import Adam

# Compile with the custom triplet loss and warm-start from the initial weights.
adam = Adam(1e-3)
triplet_net.compile(loss=triplet_loss, optimizer=adam)
triplet_net.load_weights(INIT_WEIGHTS)


# In[36]:


# Uncomment to resume training from the best checkpoint saved by `checkpointer`.
# triplet_net.load_weights(CHECKPOINTED_WEIGHTS)
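
Example #1 compiles the network with a custom triplet_loss that is not shown here. For context, below is a minimal sketch of a standard margin-based triplet loss; the embedding layout (anchor/positive/negative slices concatenated along the last axis) and the 0.2 margin are assumptions, not the definition used by this project.

import keras.backend as K

def triplet_loss(y_true, y_pred, margin=0.2):
    # Assumed layout: anchor, positive and negative embeddings concatenated
    # along the last axis in three equal-sized slices.
    emb_size = K.int_shape(y_pred)[-1] // 3
    anchor = y_pred[:, :emb_size]
    positive = y_pred[:, emb_size:2 * emb_size]
    negative = y_pred[:, 2 * emb_size:]

    pos_dist = K.sum(K.square(anchor - positive), axis=-1)
    neg_dist = K.sum(K.square(anchor - negative), axis=-1)

    # Hinge on the distance gap: negatives should end up at least `margin`
    # farther from the anchor than positives.
    return K.mean(K.maximum(pos_dist - neg_dist + margin, 0.0))

A function with this (y_true, y_pred) signature can be passed directly as the loss argument of compile, as in the cell above.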
Example #2
    def train(self,
              X_train,
              y_train,
              X_valid,
              y_valid,
              batch_size=128,
              epochs=50,
              class_weight=False,
              loss_per_batch=True,
              early_stopping_metric='val_loss',
              value_cutoff=2.1,
              patience=5,
              min_delta=0.00001,
              deadrelu_filepath=None,
              only_first_epoch=True,
              best_model_filepath=None,
              log_filepath=None,
              epochtime_filepath=None):

        """Fit the model with the requested callbacks and return the Keras
        training history together with the per-batch loss history callback
        (None when loss_per_batch is False)."""

        callbacks = []

        early_stopping = EarlyStoppingCustomed(value_cutoff=value_cutoff,
                                               monitor=early_stopping_metric,
                                               min_delta=min_delta,
                                               patience=patience,
                                               verbose=1)
        callbacks.append(early_stopping)

        batch_loss_history = None  # returned even when per-batch logging is off
        if loss_per_batch:
            batch_loss_history = LossHistory()
            callbacks.append(batch_loss_history)

        if deadrelu_filepath:
            dead_relu = DeadReluDetector(X_train,
                                         deadrelu_filepath,
                                         only_first_epoch,
                                         verbose=True)
            callbacks.append(dead_relu)

        if best_model_filepath:
            best_model = ModelCheckpoint(filepath=best_model_filepath,
                                         monitor=early_stopping_metric,
                                         verbose=0,
                                         save_best_only=True,
                                         save_weights_only=False,
                                         mode='auto')
            callbacks.append(best_model)

        if log_filepath:
            csv_logger = CSVLoggerCustomed(log_filepath)
            callbacks.append(csv_logger)

        if epochtime_filepath:
            time_callback = TimeHistory(epochtime_filepath)
            callbacks.append(time_callback)

        fit_kwargs = dict(batch_size=batch_size,
                          epochs=epochs,
                          verbose=2,
                          validation_data=(X_valid, y_valid),
                          shuffle=True,
                          callbacks=callbacks)

        if class_weight:
            # Weight each class inversely to its frequency so the minority
            # class contributes proportionally to the loss.
            num_pos = y_train.sum()
            num_seq = len(y_train)
            num_neg = num_seq - num_pos
            fit_kwargs['class_weight'] = {
                True: num_seq / num_pos,
                False: num_seq / num_neg
            }

        self.hist = self.model.fit(X_train, y_train, **fit_kwargs)

        return self.hist, batch_loss_history
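
Example #2 relies on several custom callbacks (EarlyStoppingCustomed, CSVLoggerCustomed, DeadReluDetector, TimeHistory) whose definitions are not included. As a rough illustration of the pattern, here is a minimal sketch of what a per-epoch timing callback like TimeHistory could look like; the constructor argument and the "epoch,seconds" output format are assumptions, not the project's actual implementation.

import time

from keras.callbacks import Callback


class TimeHistory(Callback):
    """Minimal sketch: record wall-clock time per epoch and append it to a file."""

    def __init__(self, filepath):
        super(TimeHistory, self).__init__()
        self.filepath = filepath
        self.epoch_times = []

    def on_epoch_begin(self, epoch, logs=None):
        self._epoch_start = time.time()

    def on_epoch_end(self, epoch, logs=None):
        elapsed = time.time() - self._epoch_start
        self.epoch_times.append(elapsed)
        # One "epoch,seconds" line per epoch (assumed format).
        with open(self.filepath, 'a') as f:
            f.write("{},{:.3f}\n".format(epoch, elapsed))

Any object with these Callback hooks can simply be appended to the callbacks list that train() passes to fit, which is exactly how the example wires it up.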
Example #3
def question_1():
    """Train the baseline, 2-conv and 2-conv + 1-dense models, then save their
    learning curves, accuracy plots and sample predictions."""
    global input_shape, X_train, y_train_labels, y_train, X_test, y_test_labels, y_test

    print(
        "------------------------------------------------------------------------"
    )
    print("Baseline Model")
    print(
        "------------------------------------------------------------------------"
    )
    model1 = baseline_model(input_shape, num_classes)
    loss_callback_1 = LossHistory((X_test, y_test))
    model1.fit(X_train,
               y_train,
               batch_size=batch_size,
               epochs=epochs,
               verbose=1,
               validation_data=(X_test, y_test),
               callbacks=[loss_callback_1])
    model1.save('model1.h5')
    plot_learning_curve(
        [loss_callback_1.train_indices, loss_callback_1.test_indices],
        [loss_callback_1.train_losses, loss_callback_1.test_losses],
        colors=['g-', 'm-'],
        labels=['Train loss', 'Test loss'],
        title="Loss evolution for Baseline Model",
        path="../outputs/q1/plots/train_test_loss_baseline.png",
        axlabels=["Iterations", "Loss"])
    plot_learning_curve([loss_callback_1.test_indices],
                        [loss_callback_1.test_acc],
                        colors=['c-'],
                        labels=['Test Accuracy'],
                        title="Accuracy evolution for Baseline Model",
                        path="../outputs/q1/plots/test_acc_baseline.png",
                        axlabels=["Iterations", "Accuracy"])

    print(
        "------------------------------------------------------------------------"
    )
    print("2 conv layer model")
    print(
        "------------------------------------------------------------------------"
    )
    model2 = two_conv_layer_model(input_shape, num_classes)
    loss_callback_2 = LossHistory((X_test, y_test))
    model2.fit(X_train,
               y_train,
               batch_size=batch_size,
               epochs=epochs,
               verbose=1,
               validation_data=(X_test, y_test),
               callbacks=[loss_callback_2])
    model2.save('model2.h5')
    plot_learning_curve(
        [loss_callback_2.train_indices, loss_callback_2.test_indices],
        [loss_callback_2.train_losses, loss_callback_2.test_losses],
        colors=['g-', 'm-'],
        labels=['Train loss', 'Test loss'],
        title="Loss evolution for 2 conv layered Model",
        path="../outputs/q1/plots/train_test_loss_2_conv.png",
        axlabels=["Iterations", "Loss"])
    plot_learning_curve([loss_callback_2.test_indices],
                        [loss_callback_2.test_acc],
                        colors=['c-'],
                        labels=['Test Accuracy'],
                        title="Accuracy evolution for 2 conv layered Model",
                        path="../outputs/q1/plots/test_acc_2_conv.png",
                        axlabels=["Iterations", "Accuracy"])

    print(
        "------------------------------------------------------------------------"
    )
    print("2 conv layer + 1 hidden dense layer model")
    print(
        "------------------------------------------------------------------------"
    )
    model3 = two_conv_one_dense_layer_model(input_shape, num_classes)
    loss_callback_3 = LossHistory((X_test, y_test))
    model3.fit(X_train,
               y_train,
               batch_size=batch_size,
               epochs=epochs,
               verbose=1,
               validation_data=(X_test, y_test),
               callbacks=[loss_callback_3])
    model3.save('model3.h5')
    plot_learning_curve(
        [loss_callback_3.train_indices, loss_callback_3.test_indices],
        [loss_callback_3.train_losses, loss_callback_3.test_losses],
        colors=['g-', 'm-'],
        labels=['Train loss', 'Test loss'],
        title="Loss evolution for 2 Conv + 1 Dense layer config",
        path="../outputs/q1/plots/train_test_loss_2_conv_1_dense.png",
        axlabels=["Iterations", "Loss"])
    plot_learning_curve([loss_callback_3.test_indices],
                        [loss_callback_3.test_acc],
                        colors=['c-'],
                        labels=['Test Accuracy'],
                        title="Accuracy evolution for 2 conv + 1 dense config",
                        path="../outputs/q1/plots/test_acc_2_conv_1_dense.png",
                        axlabels=["Iterations", "Accuracy"])

    # Visualise each model's predictions on 20 randomly chosen test images.
    ids = np.random.choice(X_test.shape[0], 20)
    X_samples = X_test[ids]
    pred_samples_1 = model1.predict(X_samples)
    generate_image_outputs(X_samples,
                           np.argmax(pred_samples_1, axis=1),
                           path="../outputs/q1/predictions/baseline")
    pred_samples_2 = model2.predict(X_samples)
    generate_image_outputs(X_samples,
                           np.argmax(pred_samples_2, axis=1),
                           path="../outputs/q1/predictions/2_conv")
    pred_samples_3 = model3.predict(X_samples)
    generate_image_outputs(X_samples,
                           np.argmax(pred_samples_3, axis=1),
                           path="../outputs/q1/predictions/2_conv_1_dense")