from tensorflow.keras import callbacks, optimizers


def train_discriminator(X_train, Y_train, model):
    sgd = optimizers.SGD(learning_rate=0.01)

    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,  # use the configured optimizer, not the string default
                  metrics=['accuracy'])
    earlyStopping = callbacks.EarlyStopping(monitor='val_loss',
                                            patience=10,
                                            verbose=0,
                                            mode='min')
    mcp_save = callbacks.ModelCheckpoint('bestmodel.hdf5',
                                         save_best_only=True,
                                         monitor='val_loss',
                                         mode='min')
    reduce_lr_loss = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                 factor=0.1,
                                                 patience=10,
                                                 verbose=1,
                                                 min_delta=1e-4,
                                                 mode='min')
    csv_logger = callbacks.CSVLogger('Discriminator_stats_cnn.csv')
    history = model.fit(X_train,
                        Y_train,
                        callbacks=[earlyStopping, mcp_save, reduce_lr_loss, csv_logger],
                        validation_split=0.1,
                        epochs=100,
                        batch_size=16)
    return history
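
For reference, a minimal call sketch. The two-class discriminator model and the random data below are illustrative assumptions, not part of the original snippet:

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Hypothetical discriminator: 32 input features, 2 one-hot classes.
disc = Sequential([
    Dense(64, activation='relu', input_shape=(32,)),
    Dense(2, activation='softmax'),
])

X_train = np.random.rand(256, 32).astype('float32')
Y_train = np.eye(2)[np.random.randint(0, 2, size=256)]  # one-hot labels

history = train_discriminator(X_train, Y_train, disc)
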
from tensorflow.keras import callbacks
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import normalize  # one plausible source; the original import is not shown


def create_simple_model(x, y, num_epochs, batch_size, shape):
    """Create a simple three-layer sequential model from the given input and
       output data, fitting it with the given batch size and number of epochs."""

    # Normalize the data (row-wise L2 normalization)
    nx = normalize(x)
    ny = normalize(y)

    # Model creation:
    model = Sequential()

    # Input layer
    model.add(Dense(units=100, activation='sigmoid', input_shape=shape))

    # Hidden layers, used as a way to capture complexity beyond
    # simple linear relationships
    model.add(Dense(units=50, activation='relu'))
    model.add(Dense(units=20, activation='relu'))

    # Output layer
    model.add(Dense(units=1, activation='tanh'))

    # Model compilation. The optimizer, loss, and other arguments
    # here can be tweaked to get a model that better relates the
    # input and output
    model.compile(optimizer=SGD(learning_rate=0.01,
                                decay=1e-6,
                                momentum=0.9,
                                nesterov=True),
                  loss='mean_squared_error',
                  metrics=['mae'])  # 'accuracy' is not meaningful for a regression loss

    # This is where the magic happens:
    m_callbacks = [callbacks.EarlyStopping(patience=3)]
    model.fit(x=nx,
              y=ny,
              epochs=num_epochs,
              validation_data=(nx, ny),  # validates on the training data itself
              verbose=0,
              shuffle=False,
              batch_size=batch_size,
              use_multiprocessing=True,
              callbacks=m_callbacks)

    return model
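
A possible invocation, assuming a 10-feature regression problem; the arrays and sizes here are made up for illustration:

import numpy as np

x = np.random.rand(200, 10)
y = np.random.rand(200, 1)
model = create_simple_model(x, y, num_epochs=20, batch_size=8, shape=(10,))

# Inputs must be normalized the same way the training data was.
predictions = model.predict(normalize(x))
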
Example 3
import gc
import os
import time

import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras import backend
from tensorflow.keras import callbacks as cb

# Save_Manager and data_gen are project-specific helpers assumed to be
# available from the surrounding codebase.


def run(train_params,
        model_params,
        data_params,
        v_params=None,
        plot=False,
        path=os.path.join(os.getcwd(), "Saves"),
        model=None):
    if model is None:
        backend.clear_session()
        gc.collect()
        print("Generating Model")
        model = model_params["model"](model_params)
        model.summary()  # summary() prints itself; wrapping it in print() shows "None"
    data_manager = Save_Manager(path)

    if train_params["patience"] >= 0:
        callbacks = [
            cb.EarlyStopping(
                monitor='val_loss',
                patience=train_params["patience"],
                restore_best_weights=train_params["restore_best_weights"])
        ]
    else:
        callbacks = []
    try:
        callbacks += train_params["callbacks"]
    except KeyError:
        print("No Custom Callbacks")

    print("Generating Data")
    ret = data_manager.load_data(data_params)
    if ret is not None:
        data, labels, key = ret
        shapes, _ = data_manager.load_shapes(data_params)
    else:
        shapes, data, labels = data_gen(data_params)
        key = data_manager.save_data(data, labels, data_params)
        data_manager.save_shapes(shapes, data_params)

    if train_params["verification"]:
        ret = data_manager.load_data(v_params)
        if ret is not None:
            v_data, v_labels, _ = ret
            v_shapes, _ = data_manager.load_shapes(v_params)
        else:
            v_shapes, v_data, v_labels = data_gen(v_params)
            data_manager.save_data(v_data, v_labels, v_params)
            data_manager.save_shapes(v_shapes, v_params)
        print("Begining fit")
        start = time.time()
        history = model.fit(data,
                            labels,
                            epochs=train_params["epochs"],
                            validation_data=(v_data, v_labels),
                            shuffle=True,
                            callbacks=callbacks)
    else:
        print("Begining fit")
        start = time.time()
        history = model.fit(data,
                            labels,
                            epochs=train_params["epochs"],
                            shuffle=True,
                            callbacks=callbacks)

    end = time.time()
    path = os.path.join(path, str(key) + "_models")
    model_manager = Save_Manager(path)
    model_key, train_key, model_n = model_manager.save_model(
        model, model_params, train_params)

    if plot:
        # Plot training & validation accuracy values
        plt.figure()
        try:
            plt.plot(history.history["accuracy"])
            plt.plot(history.history["val_accuracy"])
            acc1 = True
        except KeyError:
            plt.plot(history.history["output_main_accuracy"])
            plt.plot(history.history["output_assister_accuracy"])
            plt.plot(history.history["val_output_main_accuracy"])
            plt.plot(history.history["val_output_assister_accuracy"])
            acc1 = False

        plt.title("Network accuracy")
        plt.ylabel("Accuracy")
        #plt.yscale("log")
        plt.xlabel("Epoch")
        if acc1:
            plt.legend(["Train", "Validation"], loc="upper left")
        else:
            plt.legend(
                ["Train main", "Train assist", "Test main", "Test assist"],
                loc="upper left")
        plt.xlim(0)
        plt.tight_layout()

        # Plot training & validation loss values
        plt.figure()
        plt.plot(history.history["loss"])
        plt.plot(history.history["val_loss"])
        plt.title("Network loss")
        plt.ylabel("Loss")
        #plt.yscale("log")
        plt.xlabel("Epoch")
        plt.legend(["Train", "Validation"], loc="upper left")
        plt.xlim(0)
        plt.tight_layout()
        plt.show()

        print()
        try:
            data_params["h_range"]  # membership check; raises KeyError if absent
            print("Prediction with hole:\t", model.predict(v_data)[0])
            print("Prediction without hole:\t", model.predict(v_data)[-1])
        except KeyError:
            print("Prediction with clockwise: [0,1]\n",
                  model.predict(data)[0:10])
            print("Prediction with anticlockwise: [1,0]\n",
                  model.predict(data)[-11:-1])
            print()
            print("Prediction with clockwise: [0,1]\n",
                  model.predict(v_data)[0:10])
            print("Prediction with anticlockwise: [1,0]\n",
                  model.predict(v_data)[-11:-1])
        print()

        fig, axs = plt.subplots(2, 2)
        i = 1
        j = -1
        axs[0, 0].set_xlim(data_params["lower"], data_params["upper"])
        axs[0, 1].set_xlim(data_params["lower"], data_params["upper"])
        v_shapes[i].rotate_coords(about=data_params["about"])
        v_shapes[i].plot(axs[0, 0])
        v_shapes[j].rotate_coords(about=data_params["about"])
        v_shapes[j].plot(axs[0, 1])
        n = data_params["data_points"]
        xs = [
            i * (data_params["upper"] - data_params["lower"]) / n +
            (data_params["lower"] - data_params["upper"]) / 2 for i in range(n)
        ]
        ax = axs[1, 0]
        ax.plot(xs, v_data[0][i])
        ax.set_xlim(data_params["lower"], data_params["upper"])
        ax.set_ylim(0, 1)
        ax.set_aspect('equal', 'box')
        ax.set_ylabel("Intensity")
        ax.set_xlabel("Position")
        ax = axs[1, 1]
        ax.plot(xs, v_data[0][j])
        ax.set_xlim(data_params["lower"], data_params["upper"])
        ax.set_ylim(0, 1)
        ax.set_aspect('equal', 'box')
        ax.set_ylabel("Intensity")
        ax.set_xlabel("Position")
        plt.tight_layout()

    # Note: "val_loss" only exists in the history when validation data was
    # supplied (i.e. a verified run), so this assumes verification=True.
    epoch = np.argmin(history.history["val_loss"])
    try:
        accuracy = history.history["val_accuracy"][epoch]
    except KeyError:
        accuracy = (history.history["val_output_main_accuracy"][epoch],
                    history.history["val_output_assister_accuracy"][epoch])
    keys = [key, model_key, train_key, model_n]
    return model, accuracy, epoch, end - start, keys
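
A sketch of how run() might be driven, inferred from the dictionary keys the function reads. build_model and the data_params contents are placeholders for the project's own model factory and whatever its data_gen expects:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten

def build_model(model_params):
    # Stand-in factory; the real project supplies its own via model_params["model"].
    net = Sequential([
        Flatten(input_shape=model_params["input_shape"]),
        Dense(64, activation='relu'),
        Dense(2, activation='softmax'),
    ])
    net.compile(optimizer='adam', loss='categorical_crossentropy',
                metrics=['accuracy'])
    return net

# "verification" should be truthy: the function indexes
# history.history["val_loss"] afterwards, which only exists when
# validation data was supplied.
train_params = {"patience": 5, "restore_best_weights": True,
                "verification": True, "epochs": 50}
model_params = {"model": build_model, "input_shape": (100, 2)}
data_params = {"data_points": 100, "lower": -1, "upper": 1}  # plus whatever data_gen needs
v_params = dict(data_params)

model, accuracy, epoch, duration, keys = run(
    train_params, model_params, data_params, v_params=v_params)
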
Example 4
import keras
import numpy as np
from keras import callbacks as kcallbacks
from keras.callbacks import TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from sklearn.utils import shuffle
from tensorflow.examples.tutorials.mnist import input_data  # TF1-era MNIST loader

# DATA_DIR and TRAIN_EPOCH are module-level constants defined elsewhere
# in the original project.


def train(model, msc):
    # Load training and eval data
    print(keras.__version__)
    datagen = ImageDataGenerator(
        ##  featurewise_center=True,
        featurewise_std_normalization=True,
        rotation_range=20,
        width_shift_range=0.10,
        height_shift_range=0.10,
        horizontal_flip=True)

    mnist = input_data.read_data_sets(DATA_DIR,
                                      one_hot=True,
                                      validation_size=0)
    train_data = mnist.train.images  # Returns np.array
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    train_data, train_labels = shuffle(train_data, train_labels)
    eval_data = mnist.test.images  # Returns np.array
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    eval_data, eval_labels = shuffle(eval_data, eval_labels)
    train_data = train_data.reshape(-1, 28, 28, 1)  # Returns np.array
    eval_data = eval_data.reshape(-1, 28, 28, 1)  # Returns np.array

    input_shape = (28, 28, 1)

    last_acc = msc.lastacc
    if msc.lastacc == 0:
        model.compile(
            loss=keras.losses.categorical_crossentropy,  # a loss, not a metric
            optimizer=keras.optimizers.Adamax(),  # Adam is the usual alternative
            metrics=['accuracy'])

    datagen.fit(train_data)
    best_weights_filepath = './best_weights.hdf5'
    earlyStopping = kcallbacks.EarlyStopping(monitor='accuracy',
                                             patience=8,
                                             verbose=1,
                                             mode='max')
    saveBestModel = kcallbacks.ModelCheckpoint(best_weights_filepath,
                                               monitor='accuracy',
                                               verbose=1,
                                               save_best_only=True,
                                               mode='auto')

    history = model.fit(datagen.flow(train_data,
                                     train_labels,
                                     batch_size=32),
                        steps_per_epoch=len(train_data) // 32,
                        epochs=TRAIN_EPOCH,
                        workers=4,
                        callbacks=[
                            earlyStopping, saveBestModel,
                            TensorBoard(log_dir='./tmp/log')
                        ])

    loss, accuracy = model.evaluate(eval_data, eval_labels)

    print('Test loss:', loss)
    print('Accuracy:', accuracy)

    return loss, accuracy
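
Calling this only requires a Keras model and any object exposing a lastacc attribute, assuming the DATA_DIR and TRAIN_EPOCH constants are set; a sketch, with a made-up stand-in CNN:

from types import SimpleNamespace
import keras

cnn = keras.Sequential([
    keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
    keras.layers.Flatten(),
    keras.layers.Dense(10, activation='softmax'),
])

msc = SimpleNamespace(lastacc=0)  # lastacc == 0 makes train() compile the model
loss, accuracy = train(cnn, msc)
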
Example 5
from tensorflow.keras import callbacks
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.optimizers import Adam

INITIAL_LEARN_RATE = 1e-4
optimizer = Adam(learning_rate=INITIAL_LEARN_RATE)
# optimizer = RMSprop(learning_rate=3e-5)
model.compile(optimizer,
              loss=CategoricalCrossentropy(label_smoothing=0.005),
              metrics=['accuracy'])

cbks = [
    callbacks.TerminateOnNaN(),
    callbacks.ReduceLROnPlateau(monitor='val_loss',
                                factor=0.1,
                                patience=5,
                                verbose=1),
    callbacks.EarlyStopping(monitor='loss',
                            min_delta=0,
                            patience=10,
                            verbose=1,
                            baseline=None,
                            restore_best_weights=True)
]

model.summary()

test_set = (test_x, test_y)
# Can't have validation split because too many intents.
# Val accuracy of 0.94 is good
model.fit(x=embed_xvals,
          y=cat_yval,
          epochs=100,
          verbose=1,
          validation_split=0.0,
          batch_size=25,
          validation_data=test_set,  # gives the val_loss the callbacks monitor
          callbacks=cbks)
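
With EarlyStopping restoring the best weights, a final check on the held-out pair might look like:

loss, acc = model.evaluate(test_x, test_y, verbose=0)
print(f"Test loss: {loss:.4f}, test accuracy: {acc:.4f}")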