Example 1
    def train(self,
              x_train,
              batch_size,
              epochs,
              run_folder,
              print_every_n_batches=100,
              initial_epoch=0,
              lr_decay=1):

        custom_callback = CustomCallback(run_folder, print_every_n_batches,
                                         initial_epoch, self)
        lr_schedule = step_decay_schedule(initial_lr=self.learning_rate,
                                          decay_factor=lr_decay,
                                          step_size=1)
        checkpoint_filepath = os.path.join(
            run_folder, "weights/weights-{epoch:03d}-{loss:.2f}.h5")
        checkpoint1 = ModelCheckpoint(checkpoint_filepath,
                                      save_weights_only=True,
                                      verbose=1)
        checkpoint2 = ModelCheckpoint(os.path.join(run_folder,
                                                   'weights/weights.h5'),
                                      save_weights_only=True,
                                      verbose=1)

        callbacks_list = [
            checkpoint1, checkpoint2, custom_callback, lr_schedule
        ]

        self.model.fit(x_train,
                       x_train,
                       batch_size=batch_size,
                       shuffle=True,
                       epochs=epochs,
                       initial_epoch=initial_epoch,
                       callbacks=callbacks_list)
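
Note: Example 1 depends on two helpers defined elsewhere in its project, CustomCallback and step_decay_schedule. A minimal sketch of the latter, assuming it simply wraps Keras's LearningRateScheduler with exponential step decay:

import numpy as np
from tensorflow.keras.callbacks import LearningRateScheduler

def step_decay_schedule(initial_lr, decay_factor=0.5, step_size=1):
    # drop the learning rate by decay_factor every step_size epochs
    def schedule(epoch):
        return initial_lr * (decay_factor ** np.floor(epoch / step_size))
    return LearningRateScheduler(schedule)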
Example 2
def train_model(x, y, epoch_num, save, maxlen, char_indices):
	#model = load_model('bot.h5')

	model = Sequential()
	model.add(LSTM(256, input_shape=(maxlen, len(char_indices)), return_sequences=True))
	model.add(Dropout(0.2))
	model.add(LSTM(128, return_sequences=True))
	model.add(Dropout(0.2))
	model.add(LSTM(128, return_sequences=True))
	model.add(Dropout(0.2))
	model.add(LSTM(64))
	model.add(Dropout(0.2))
	model.add(Dense(len(char_indices), activation='softmax'))

	model.compile(loss='categorical_crossentropy', optimizer='adam')

	if save:
		# save a checkpoint after every epoch; fit is called without
		# validation data, so there is no val_accuracy to monitor here
		mc = ModelCheckpoint('bot.h5')
		callbacks_list = [mc]

		model.fit(x, y, batch_size = 32, epochs = epoch_num, callbacks=callbacks_list, verbose = 1)
	else:
		model.fit(x, y, batch_size = 32, epochs = epoch_num, verbose = 1)

	return model
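
Note: since fit is called without validation data here, only training metrics exist, so the checkpoint simply saves every epoch. If a held-out split is acceptable, a sketch of a configuration that keeps only the best model:

# sketch: carve out a validation split so 'val_loss' exists to monitor
mc = ModelCheckpoint('bot.h5', monitor='val_loss', mode='min', save_best_only=True)
model.fit(x, y, batch_size=32, epochs=epoch_num, validation_split=0.1, callbacks=[mc], verbose=1)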
Example 3
    def train(self, pretrained_model=None, model_path=None):
        '''
        Runs the training.
        '''
        if pretrained_model is not None:
            lstm_model = load_model(pretrained_model)
        else:
            lstm_model = self.get_lstm_model()

        if model_path is None:
            model_path = "CONV2D_LSTM_E{epoch:02d}.hdf5"
        model_path = os.path.join(self.training_version, model_path)

        callbacks = [
            ModelCheckpoint(model_path,
                            monitor='val_accuracy',
                            save_best_only=True,
                            mode='max')
        ]

        opt = Adam(learning_rate=0.01)
        lstm_model.compile(optimizer=opt,
                           loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])
        lstm_model.fit(self.X_train,
                       self.y_train,
                       epochs=self.epochs,
                       batch_size=self.batch_size,
                       validation_split=self.validation_split,
                       shuffle=True,
                       verbose=2,
                       callbacks=callbacks)
Example 4
def trainModel(model, x_train, y_train, x_val, y_val, epochs, batch_size):
    earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')
    mcp_save = ModelCheckpoint('temp/.mdl_wts.hdf5', save_best_only=True, monitor='val_loss', mode='min')
    reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, min_delta=1e-4, mode='min')

    history = model.fit(x_train, y_train,epochs = epochs, batch_size = batch_size, validation_data = (x_val, y_val),
                        callbacks = [earlyStopping, mcp_save, reduce_lr_loss])
    return model
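
Note: the checkpoint writes the best weights to temp/.mdl_wts.hdf5, but the function returns the last-epoch model. A common follow-up (sketch) just before the return statement:

    # restore the best-val_loss weights so callers get the checkpointed model
    model.load_weights('temp/.mdl_wts.hdf5')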
Example 5
    def train(self, pretrained_model=None, model_path=None):
        '''
        Runs the training.
        '''
        if pretrained_model is not None:
            self.c3d_model = load_model(pretrained_model)
        else:
            self.c3d_model = c3d_model(resolution=self.operating_resolution, n_frames=16, channels=3, nb_classes=3)

        if model_path is None:
            model_path = "C3D_E{epoch:02d}_VA{val_accuracy:.2f}.hdf5"
        model_path = os.path.join(self.training_version, model_path)

        callbacks = [ModelCheckpoint(model_path, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')]

        # compile with an accuracy metric so val_accuracy exists for both the
        # checkpoint monitor and the filename template
        self.c3d_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        self.c3d_model.fit(self.X_train, self.y_train, epochs=self.epochs, batch_size=self.batch_size, validation_split=self.validation_split, shuffle=True, verbose=2, callbacks=callbacks)
Example 6
    def useNetwork(self, train_base, val_base, epochs=10000, patience=1000):
        if os.path.isfile(self.name + '.hf5'):
            self.network.load_weights(self.name + ".hf5")
        else:
            # avoid shadowing keras.models.Model with the callback variable
            checkpoint = ModelCheckpoint(self.name + ".hf5")
            # use the patience argument instead of a hard-coded 1000
            early = EarlyStopping(patience=patience)
            # validation_data takes precedence over validation_split,
            # so pass only the explicit validation set
            self.network.fit(x=train_base[0],
                             y=train_base[1],
                             batch_size=20,
                             epochs=epochs,
                             callbacks=[early, checkpoint],
                             validation_data=val_base,
                             shuffle=True,
                             use_multiprocessing=True)
Example 7
def train_model(model_name, month, day, bp_type, epochs, X_train, y_train, X_test, y_test):

    model = Sequential()
    # model.add(Dense(units=100, input_shape=(1, X_train.shape[2])))
    # model.add(Dense(units=64))
    # model.add(Dense(units=32))
    # model.add(Bidirectional(LSTM(units=50, input_shape=(1, X_train.shape[2]), return_sequences=True)))
    model.add(LSTM(units=128, dropout=0.5, recurrent_dropout=0.5))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))

    # sgd = SGD(learning_rate=0.00002, momentum=0.9, nesterov=True)
    model.compile(loss='mse', optimizer='adam', metrics=['mean_absolute_error'])


    earlyStopping = EarlyStopping(monitor='val_mean_absolute_error', patience=15, verbose=0, mode='min')
    # save into ./Models/ so the load_model call below finds the file
    mcp_save = ModelCheckpoint('./Models/{}_{}_{}.h5'.format(model_name, month, day), save_best_only=True, monitor='val_mean_absolute_error', mode='min')
    history = model.fit(X_train, y_train, epochs=epochs, batch_size=32, validation_data=(X_test, y_test), callbacks=[mcp_save, earlyStopping])


    # Save model to HDF5 for later use
    # model.save("model.h5")
    print("Saved model to disk")


    plt.plot(history.history['mean_absolute_error'])
    plt.plot(history.history['val_mean_absolute_error'])
    plt.title('{} Blood Pressure Model Error (RNN)'.format(bp_type))
    plt.ylabel('Mean Absolute Error (mmHg)')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Testing'], loc='best')
    plt.savefig('./Models/{}_{}_{}.png'.format(model_name, month, day), dpi=600)
    print('Saved graph to disk')

    plt.close('all')
    del model
    model = load_model('./Models/{}_{}_{}.h5'.format(model_name, month, day))
    plt.plot(y_test.reshape(-1, 1))
    # X_train = X_train.reshape(-1, 2100, 1)
    plt.plot(model.predict(X_test).reshape(-1, 1))
    plt.title('{} Blood Pressure'.format(bp_type))
    plt.xlabel('Data point')
    plt.ylabel('{} BP (mmHg)'.format(bp_type))
    plt.legend(['Ground Truth', 'Prediction'])
    plt.show()
    return model, history
Example 8
    def setup_model_checkpoint(self, params):

        path = self.get_model_path()
        if self.load_epoch:
            path /= "epoch_{}_".format(self.load_epoch)
        else:
            path /= "epoch_"

        cb = ModelCheckpoint(
            str(path) + "{epoch:02d}.h5",
            monitor=params["monitor"],
            mode=params["mode"],
            save_best_only=params["save_best_only"],
            verbose=params["verbose"],
        )

        return cb
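
A hypothetical call, with key names matching what setup_model_checkpoint reads (the trainer instance and its get_model_path method come from the surrounding class):

params = {"monitor": "val_loss", "mode": "min", "save_best_only": True, "verbose": 1}
checkpoint_cb = trainer.setup_model_checkpoint(params)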
Example 9
    def __init__(self, params):
        video_features = Input(shape=(1024,))
        _layers = [video_features]
        _layers.append(Dropout(params['input_dropout'])(_layers[-1]))

        for layer in params['layers']:
            _layers.append(Dense(layer, activation='relu')(_layers[-1]))
            _layers.append(Dropout(params['dense_dropout'])(_layers[-1]))

        output = Dense(params['n_classes'], activation='softmax')(_layers[-1])

        self.model = Model(inputs=video_features, outputs=output)
        self.model.compile(Adam(learning_rate=params['lr']),
                           loss=sparse_categorical_crossentropy,
                           metrics=['accuracy'])
        self.model.summary()
        self.callback = ModelCheckpoint(params['savefile'], monitor='val_loss', verbose=1,
                                        save_best_only=True, save_weights_only=True, period=1)
        self.val_split = params['val_split']
        self.batch_size = params['batch_size']
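
Note: several snippets on this page pass period= to ModelCheckpoint. Recent tf.keras releases deprecate that argument in favor of save_freq; a sketch of the modern equivalent of period=1:

checkpoint = ModelCheckpoint('weights-{epoch:02d}.h5',
                             save_weights_only=True,
                             save_freq='epoch')  # or an integer to save every N batches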
Example 10
    def train_model(self):
        # callbacks
        es = EarlyStopping(monitor='val_loss',
                           patience=5,
                           restore_best_weights=True)
        check_point = ModelCheckpoint('Log/best_sequencial.h5',
                                      monitor='val_loss',
                                      save_best_only=True,
                                      mode='min')
        # train
        hist = self.model.fit(self.data[0],
                              self.data[1],
                              validation_data=(self.data[2], self.data[3]),
                              batch_size=128,
                              epochs=50,
                              callbacks=[es, check_point])
        self.model.summary()  # summary() prints itself; wrapping it in print() would print None
        return hist
Example 11
    def train_generator(self,
                        gen_train,
                        steps_per_epoch,
                        epochs,
                        save_path,
                        valid_gen=None,
                        valid_steps=None,
                        weights_path=None,
                        initial_epoch=0):

        checkpoint = ModelCheckpoint(save_path, period=1)
        self.model.fit_generator(gen_train,
                                 steps_per_epoch=steps_per_epoch,
                                 epochs=epochs,
                                 validation_data=valid_gen,
                                 validation_steps=valid_steps,
                                 initial_epoch=initial_epoch,
                                 callbacks=[checkpoint])
Example 12
def train(args):
    data_path = args.data
    epochs = args.epochs
    early_stop = args.early_stop
    batch_size = args.batch_size
    weights = args.weights

    model = model_architecture()
    model.summary()

    data_gen = ImageDataGenerator(preprocessing_function=preprocess_input)
    train_it = data_gen.flow_from_directory(data_path + '/train', target_size = (image_shape,image_shape), batch_size = batch_size)
    val_it = data_gen.flow_from_directory(data_path + '/val', target_size = (image_shape,image_shape), batch_size = 20)
    test_it = data_gen.flow_from_directory(data_path + '/test', target_size = (image_shape,image_shape), batch_size = 20)

    model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])

    if weights is not None:
        model.load_weights(weights)

    os.makedirs('logs', exist_ok=True)
    filepath="logs/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, period=2)
    callbacks = [checkpoint]
    if early_stop:
        early_stopping = EarlyStopping(patience = 2, restore_best_weights = True)
        callbacks.append(early_stopping)

    model.fit_generator(train_it, epochs=epochs, callbacks=callbacks, validation_data=val_it)

    os.makedirs('model', exist_ok=True)
    model_json = model.to_json()
    with open("model/model_final.json", "w") as json_file:
        json_file.write(model_json)

    model.save_weights('model/model_final.hdf5')

    print("MODEL TRAINED")

    test_loss, test_acc = model.evaluate_generator(test_it)
    print("Test Results:")
    print("Loss: " + str(test_loss))
    print("Accuracy: " + str(test_acc))
Example 13
def lstm_model(batch_size, time_steps, sym, lr, epochs, dropout=.3):
    cols = ['open', 'high', 'low', 'close', 'volume']
    mat = get_data(sym).loc[:, cols].values
    x_train_ts, y_train_ts, x_test_ts, y_test_ts, scaler = data(
        mat, batch_size, time_steps)
    lstm = Sequential()
    lstm.add(
        LSTM(70,
             batch_input_shape=(batch_size, time_steps, x_train_ts.shape[2]),
             dropout=0.1,
             recurrent_dropout=dropout,
             stateful=True,
             kernel_initializer='random_uniform'))
    lstm.add(Flatten())
    lstm.add(Dense(1, activation='sigmoid'))
    opt = op.RMSprop(learning_rate=lr)
    lstm.compile(loss='mean_squared_error',
                 optimizer=opt,
                 metrics=['accuracy'])

    csv_log = CSVLogger(sym + "_log.log", append=True)
    early_stp = EarlyStopping(monitor='val_loss',
                              mode='min',
                              verbose=1,
                              patience=15,
                              min_delta=1)
    checkpoint = ModelCheckpoint(str(sym) + '_best_model.h5',
                                 monitor='val_loss',
                                 mode='min',
                                 save_best_only=True,
                                 verbose=1)

    history = lstm.fit(x_train_ts,
                       y_train_ts,
                       epochs=epochs,
                       verbose=2,
                       batch_size=batch_size,
                       validation_data=(x_test_ts, y_test_ts),
                       callbacks=[csv_log, early_stp, checkpoint])

    return lstm, history, scaler, x_test_ts, y_test_ts
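
Note: because the first LSTM layer is stateful with a fixed batch_input_shape, every array passed to fit or evaluate must contain a whole number of batches. A small guard (sketch, helper name assumed):

def trim_to_batch(arr, batch_size):
    # drop the trailing partial batch so len(arr) is a multiple of batch_size
    extra = len(arr) % batch_size
    return arr[:-extra] if extra else arr

x_train_ts = trim_to_batch(x_train_ts, batch_size)
y_train_ts = trim_to_batch(y_train_ts, batch_size)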
Example 14
    def train(self, X_train, Y_train, X_test, Y_test, batch_size, epochs,
              filepath):
        # X_train = X_train.reshape(-1, 1)
        # Y_train = Y_train.reshape(-1, 1)
        # X_test = X_test.reshape(-1, 1)
        # Y_test = Y_test.reshape(-1, 1)
        if not self.model:
            # raising a bare string is invalid in Python 3
            raise RuntimeError("You must call build_model first!")
        checkpoint = ModelCheckpoint(filepath,
                                     monitor='loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min',
                                     period=10)
        callbacks_list = [checkpoint]
        self.model.fit(X_train,
                       Y_train,
                       epochs=epochs,
                       batch_size=batch_size,
                       callbacks=callbacks_list,
                       validation_data=(X_test, Y_test))
Example 15
def train():
    checkpoint = ModelCheckpoint('data/model/model.{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=5)

    X_buckets, ORF_buckets, y_buckets = load_bucket_dataset('data/train/49000-train.h5')
    X_val, ORF_val, y_val = load_dataset('data/train/49000-validation.h5')
    history = []
    model = build_rnn()
    bucket_num = len(X_buckets)
    epochs = 200
    for epoch in range(epochs):
        print('===Epoch', epoch)
        for i in range(bucket_num):
            if len(y_buckets[i]) > 0:
                model.fit([X_buckets[i], ORF_buckets[i]], y_buckets[i], callbacks=[checkpoint], verbose=1)
        loss, acc, sensibility, specificity = model.evaluate([X_val, ORF_val], y_val)
        print('epoch: {}, acc: {}'.format(epoch, acc))
        history.append({'loss': loss, 'acc': acc, 'sensibility': sensibility, 'specificity': specificity})
    
    import pickle
    with open('history.pickle', 'wb') as f:
        pickle.dump(history, f)
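
Note: the checkpoint above monitors val_loss with save_best_only=True, but the per-bucket fit calls pass no validation data, so Keras skips saving with a warning. One fix (sketch), reusing the validation set that is already loaded:

model.fit([X_buckets[i], ORF_buckets[i]], y_buckets[i],
          validation_data=([X_val, ORF_val], y_val),
          callbacks=[checkpoint], verbose=1)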
Example 16
def run_test_harness():
    # define model
    model = define_model()
    # create data generator (rescale expects a numeric factor such as 1./255;
    # the original rescale=True multiplied pixels by 1 and is dropped here)
    datagen = ImageDataGenerator(featurewise_center=True,
                                 horizontal_flip=True,
                                 shear_range=0.2,
                                 zoom_range=0.2)
    # specify imagenet mean values for centering
    datagen.mean = [123.68, 116.779, 103.939]
    # prepare iterator
    train_it = datagen.flow_from_directory(f'{folder_name}/train/',
                                           class_mode='categorical',
                                           batch_size=64,
                                           target_size=(224, 224))
    test_it = datagen.flow_from_directory(f'{folder_name}/test/',
                                          class_mode='categorical',
                                          batch_size=64,
                                          target_size=(224, 224))
    # fit model
    callback = ModelCheckpoint('cards_model.hdf5',
                               monitor='val_loss',
                               verbose=0,
                               save_best_only=True,
                               save_weights_only=False,
                               mode='auto',
                               period=1)
    history = model.fit_generator(train_it,
                                  steps_per_epoch=len(train_it),
                                  validation_data=test_it,
                                  validation_steps=len(test_it),
                                  epochs=3,
                                  verbose=1,
                                  callbacks=[callback])
    # evaluate model
    _, acc = model.evaluate_generator(test_it, steps=len(test_it), verbose=0)
    print('> %.3f' % (acc * 100.0))
    # learning curves
    summarize_diagnostics(history)
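
Note: the harness above feeds both iterators from the same augmenting generator, so evaluation sees sheared and flipped images. A sketch of the usual separation, keeping only the ImageNet centering in the test pipeline:

test_datagen = ImageDataGenerator(featurewise_center=True)
test_datagen.mean = [123.68, 116.779, 103.939]
test_it = test_datagen.flow_from_directory(f'{folder_name}/test/',
                                           class_mode='categorical',
                                           batch_size=64,
                                           target_size=(224, 224))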
Example 17
    if is_seasonal_circle:
        sc = np.linspace(0, 11, 12, dtype='int32')
        sc = np.tile(sc, int((training_num - training_start) / 12 + 1))
        data_sc = sc[:(training_num - training_start - time_step + 1 - prediction_month + 1)]
    # print(data_x.shape, data_y.shape)

    model_num = 0
    models_summary = {}
    # random_state only has an effect (and recent sklearn only accepts it) with shuffle=True
    kf = KFold(n_splits=10, shuffle=True, random_state=0)
    for train_index, test_index in kf.split(data_x):
        # print('train_index', train_index, 'test_index', test_index)

        model = convlstm.ConvlstmModel(model_name, time_step, prediction_month, is_seasonal_circle,
                                       is_nino_output).get_model()
        save_best = ModelCheckpoint(os.path.join('..', 'model', 'cross_validation', model_name + ' ' + str(test_index[0]) + '.h5'),
                                    monitor='val_loss',
                                    verbose=1, save_best_only=True, mode='min', period=1)
        train_x, train_y = data_x[train_index], data_y[train_index]
        test_x, test_y = data_x[test_index], data_y[test_index]
        if is_seasonal_circle:
            train_sc, test_sc = data_sc[train_index], data_sc[test_index]
        if is_nino_output:
            train_nino, test_nino = data_y_nino[train_index], data_y_nino[test_index]
        if not is_seasonal_circle and not is_nino_output:
            train_hist = model.fit(train_x, train_y, batch_size=batch_size, epochs=epochs,
                                   verbose=2,
                                   callbacks=[save_best],
                                   validation_data=(test_x, test_y))
        elif is_seasonal_circle and not is_nino_output:
            train_hist = model.fit([train_x, train_sc], train_y, batch_size=batch_size, epochs=epochs,
                                   verbose=2,
Example 18
    test_fraction=args.test_fraction)

# Label data comes in the form [visitors score, home score].
# Condense to just a spread.
y_train = np.asarray([points[0] - points[1] for points in y_train], dtype=int)
y_validate = np.asarray([points[0] - points[1] for points in y_validate],
                        dtype=int)
y_test = np.asarray([points[0] - points[1] for points in y_test], dtype=int)

# Early stopping with patience
early_stopper = EarlyStopping(monitor='val_loss',
                              verbose=1,
                              patience=args.patience)
model_checkpoint = ModelCheckpoint(args.model_path,
                                   monitor='val_loss',
                                   mode='min',
                                   save_best_only=True,
                                   verbose=1)
callbacks = [early_stopper, model_checkpoint]
if args.roster_shuffle:
    print('Roster shuffling (data augmentation) enabled.')
    callbacks.append(ShuffleCallback(x_train))

# Define and train the model
input_shape = x_train[0].shape
model = Sequential()
model.add(
    Bidirectional(
        GRU(args.rnn_layer_size,
            return_sequences=True,
            input_shape=input_shape,
Example 19
def create_train_save_model(x_train, y_train, x_test, y_test):

    # Hyperparameter values were found with Keras Tuner
    model = Sequential()
    model.add(
        Conv2D(64, (3, 3),
               activation='relu',
               input_shape=(28, 28, 1),
               kernel_constraint=max_norm(3),
               bias_constraint=max_norm(3),
               padding='same'))
    model.add(MaxPooling2D((2, 2), padding='same'))
    model.add(Dropout(0.1))

    model.add(
        Conv2D(256, (3, 3),
               activation='relu',
               kernel_constraint=max_norm(3),
               bias_constraint=max_norm(3),
               padding='same'))
    model.add(MaxPooling2D((2, 2), padding='same'))
    model.add(Dropout(0.15))

    model.add(
        Conv2D(256, (3, 3),
               activation='relu',
               kernel_constraint=max_norm(3),
               bias_constraint=max_norm(3),
               padding='same'))
    model.add(MaxPooling2D((2, 2), padding='same'))
    model.add(Dropout(0.2))

    model.add(
        Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               kernel_constraint=max_norm(3),
               bias_constraint=max_norm(3)))
    model.add(MaxPooling2D((2, 2), padding='same'))
    # model.add(Dropout(0.05))

    model.add(Flatten())
    # model.add(Dropout(0.25))  # Dropout for regularization
    model.add(Dense(768, activation='relu', kernel_regularizer=l2(l=0.001)))
    model.add(Dense(len(labels), activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])

    # compute the split index once; slicing x_train first and then taking
    # x_val from the already-truncated array would leave the validation set
    # overlapping the training set (and likewise for y)
    split = math.ceil(0.8 * len(x_train))
    x_train, x_val = x_train[:split], x_train[split:]
    y_train, y_val = y_train[:split], y_train[split:]

    # tuner = define_random_tuner(num_classes=len(labels))
    # tuner.search(x_train, y_train, epochs=20, validation_data=(x_val, y_val))
    # tuner.results_summary()
    # print('-------------------------------------')
    # best_hp = tuner.get_best_hyperparameters()[0]
    # model = tuner.hypermodel.build(best_hp)
    # print(model.get_config())
    #
    # quit()

    train_data_gen = ImageDataGenerator()
    val_data_gen = ImageDataGenerator()
    train_generator = train_data_gen.flow(x_train,
                                          y_train,
                                          batch_size=batch_size)
    val_generator = val_data_gen.flow(x_val, y_val, batch_size=batch_size)

    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=7,
                                   verbose=0,
                                   mode='min')
    file_to_save_to = ''
    if number_of_images_per_label == 100000:
        file_to_save_to = 'vanilla_cnn_model_100k.h5'
    elif number_of_images_per_label == 10000:
        file_to_save_to = 'vanilla_cnn_model_10k.h5'
    else:
        file_to_save_to = 'vanilla_cnn_model.h5'
    mcp_save = ModelCheckpoint(file_to_save_to,
                               save_best_only=True,
                               monitor='val_loss',
                               mode='min')
    reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.1,
                                       patience=5,
                                       verbose=1,
                                       min_delta=1e-4,
                                       mode='min')

    # fit_generator is necessary for 100k, where using batches is required due to memory size limitations
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=len(x_train) // batch_size,
        epochs=64,
        validation_data=val_generator,
        validation_steps=len(x_val) // batch_size,
        callbacks=[early_stopping, mcp_save, reduce_lr_loss],
        verbose=2)

    # model.save(file_to_save_to) # not using this because of mcp_save
    print(model.evaluate(x_test, y_test))
    return history
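
Note: the final model.evaluate runs with the last-epoch weights, while mcp_save kept the best-val_loss model on disk. A sketch of evaluating the checkpointed model instead:

from keras.models import load_model
best_model = load_model(file_to_save_to)
print(best_model.evaluate(x_test, y_test))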
Example 20
model = get_model()

data = np.load(TRAIN_DATA_PATH, allow_pickle=True)

train_size = int(len(data) * 80 / 100)

train = data[:train_size]
test = data[train_size:]

X_train = np.array([i[0] for i in train]).reshape(-1, WIDTH, HEIGHT,
                                                  NUMBER_OF_CHANNELS)
y_train = np.array([i[1] for i in train])

X_test = np.array([i[0] for i in test]).reshape(-1, WIDTH, HEIGHT,
                                                NUMBER_OF_CHANNELS)
y_test = np.array([i[1] for i in test])

if os.path.exists(MODEL_CHECKPOINT_PATH):
    model.load_weights(MODEL_CHECKPOINT_PATH)

checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_PATH, verbose=1)

model.fit(X_train,
          y_train,
          epochs=EPOCHS,
          validation_data=(X_test, y_test),
          callbacks=[checkpoint])

model.save('./' + MODEL_NAME)
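
Note: Example 20 reloads the checkpoint weights when resuming but restarts epoch numbering at 0. A sketch that keeps the counter going, assuming the caller persists a LAST_EPOCH value alongside the checkpoint:

model.fit(X_train,
          y_train,
          epochs=EPOCHS,
          initial_epoch=LAST_EPOCH,  # hypothetical; tracked by the caller
          validation_data=(X_test, y_test),
          callbacks=[checkpoint])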
Example 21
    files = [i for i in os.listdir("cleaned_data_npz")]
    files.sort()
    number_of_samples = int(0.85 * len(files))

    partition["train"] = [i for i in files[:number_of_samples]]
    partition["validation"] = [i for i in files[number_of_samples:]]

    for i in range(number_of_samples - 1):
        train_labels[files[i]] = files[i + 1]
    for i in range(number_of_samples, len(files) - 1):
        val_labels[files[i]] = files[i + 1]

    modelcheckpoint = ModelCheckpoint("weights/",
                                      monitor="val_acc",
                                      save_best_only=False,
                                      verbose=1,
                                      save_weights_only=True)
    earlystopping = EarlyStopping(monitor="val_loss",
                                  min_delta=0.000001,
                                  patience=1)
    tensorboard = TensorBoard(log_dir=".logdir/")
    reduce_plateau = ReduceLROnPlateau(monitor="val_loss",
                                       factor=0.3,
                                       patience=1,
                                       min_lr=0.00001)
    val_generator = DataGenerator(partition['validation'],
                                  val_labels,
                                  batch_size=12)
    train_generator = DataGenerator(partition['train'],
                                    train_labels,
Example 22
           padding="same",
           activation="linear",
           kernel_initializer="he_normal"))
model.add(TimeDistributed(Dense(4, activation="softmax")))

np.random.seed(1000)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=['accuracy'])
stop_criteria = EarlyStopping(monitor="val_loss",
                              mode="min",
                              verbose=1,
                              patience=10)
best_model_path = "./my_model" + ".h5"
best_model = ModelCheckpoint(best_model_path,
                             monitor="val_loss",
                             verbose=2,
                             save_best_only=True)
my_model = load_model("./my_model.h5")
my_model.fit(all_one_hot_x_train,
             all_one_hot_y_train,
             epochs=20,
             batch_size=20,
             validation_split=0.05,
             # include the checkpoint so the best model is actually written to disk
             callbacks=[stop_criteria, best_model])

evaluation = model.evaluate(all_one_hot_x_train, all_one_hot_y_train)
print(evaluation)

predictions = my_model.predict(all_one_hot_x_test)

tp = 0
Example 23
# Optional additional model layer to make a deep network. If you want to use this, uncomment the return_sequences param in the previous add.
"""
model.add(LSTM(units = 25, activation = 'relu'))
model.add(Dropout(0.2))
"""

# Output layer
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')

# Save models
filepath = 'saved_models/model_epoch_{epoch:02d}.hdf5'

checkpoint = ModelCheckpoint(filepath=filepath,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min')

history = model.fit(X_train,
                    Y_train,
                    epochs=100,
                    batch_size=20,
                    validation_data=(X_test, Y_test),
                    callbacks=[checkpoint],
                    verbose=1,
                    shuffle=False)
"""
Loading the best model and predicting
"""
from keras.models import load_model
Example 24
def train(args):
    data_path = args.data
    epochs = args.epochs
    early_stop = args.early_stop
    batch_size = args.batch_size
    weights = args.weights

    model = model_architecture()
    model.summary()

    data_gen = ImageDataGenerator(rescale=1. / 255)
    train_it = data_gen.flow_from_directory(data_path + '/train',
                                            target_size=(image_shape,
                                                         image_shape),
                                            batch_size=batch_size,
                                            class_mode='binary')
    val_it = data_gen.flow_from_directory(data_path + '/val',
                                          target_size=(image_shape,
                                                       image_shape),
                                          batch_size=20,
                                          class_mode='binary')
    test_it = data_gen.flow_from_directory(data_path + '/test',
                                           target_size=(image_shape,
                                                        image_shape),
                                           batch_size=1,
                                           class_mode='binary',
                                           shuffle=False)

    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=0.0001),
                  metrics=['accuracy'])

    if weights is not None:
        model.load_weights(weights)

    os.makedirs('logs', exist_ok=True)
    filepath = "logs/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, period=2)
    callbacks = [checkpoint]
    if early_stop:
        early_stopping = EarlyStopping(patience=2, restore_best_weights=True)
        callbacks.append(early_stopping)

    steps = 375 // batch_size

    model.fit_generator(train_it,
                        steps_per_epoch=steps,
                        epochs=epochs,
                        callbacks=callbacks,
                        validation_data=val_it,
                        validation_steps=3)

    os.makedirs('model', exist_ok=True)
    model_json = model.to_json()
    with open("model/model_final.json", "w") as json_file:
        json_file.write(model_json)

    model.save_weights('model/model_final.hdf5')

    print("MODEL TRAINED")

    test_loss, test_acc = model.evaluate_generator(test_it)
    print("Test Results:")
    print("Loss: " + str(test_loss))
    print("Accuracy: " + str(test_acc))
Example 25
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

# add checkpoint to save the model with the lowest val loss
filepath = 'tf1_mnist_cnn.hdf5'
save_checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=2,
                                  save_best_only=True, save_weights_only=False,
                                  mode='auto', period=1)

# verbose=2 is important; it writes one line per epoch
# verbose=1 is the 'live monitoring' version, which doesn't work with slurm
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=2,
          validation_data=(x_test, y_test),
          callbacks=[save_checkpoint])

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
Example 26
valid_x_series = np.load('./data/valid_series.npy')

train_y = to_categorical(pd.read_csv(train_y_path)['roas'].values)
valid_y = to_categorical(pd.read_csv('./data/validset.csv')['roas'].values)

# build model and train
seq_shape = train_x_series.shape[1:]

model = build_lstm(seq_shape, n_classes, hidden_units, lr)

model_json = model.to_json()
with open(model_json_path, 'w') as json_file:
    json_file.write(model_json)

ckpt = ModelCheckpoint(filepath=model_weights_path,
                       verbose=1,
                       save_best_only=True)

history = model.fit(train_x_series,
                    train_y,
                    batch_size=batch_size,
                    epochs=epochs,
                    validation_data=(valid_x_series, valid_y),
                    callbacks=[ckpt])

# plot learning curve
f = plt.figure()
ax = f.add_subplot(111)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
Example 27
    # compile with the adam optimizer and categorical_crossentropy loss
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model

#load model
model = build_model()

# use data augmentation to generate more data
datagen = ImageDataGenerator(
    rotation_range=30,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=1.2)

# save the model with the best validation accuracy
checkpointer = ModelCheckpoint(filepath='trained_model.hdf5', verbose=1, save_best_only=True, monitor='val_accuracy')

# fitting the model on batches with real-time data augmentation:
history = model.fit_generator(datagen.flow(train_data, train_labels_cat, batch_size=batch_size),
                    steps_per_epoch=len(train_data) / batch_size,
                    epochs=num_epochs,
                    validation_data=(val_data,val_labels_cat),
                    callbacks=[checkpointer])

val_loss, val_accuracy = model.evaluate(val_data, val_labels_cat, batch_size=batch_size)
print('Val loss: %.3f accuracy: %.3f' % (val_loss, val_accuracy))

#plotting confusion matrix
val_pred = model.predict(val_data)
y_predicted = np.argmax(val_pred, axis=1)
y_true = np.argmax(val_labels_cat,axis=1)
Example 28
model.compile(loss='binary_crossentropy',
              optimizer=Adam(learning_rate=100e-6),
              metrics=['acc'])

history = model.fit(
    x=train_X,
    y=train_data[:, -1],
    epochs=150,
    batch_size=1024,
    validation_data=(validation_X, validation_data[:, -1]),
    callbacks=[
        ModelCheckpoint(
            "mobileNetV2Top_is_lungs_{val_acc:.4f}_{val_loss:.4f}.h5",
            save_best_only=False,
            monitor='val_acc',
            verbose=0,
            mode='auto',
            period=1)
    ],
    verbose=2)

plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
# plt.title('Model accuracy')
plt.ylabel('Model accuracy')
plt.xlabel('Epoch')
plt.legend(['Training set', 'Validation set'], loc='upper left')
plt.show()

plt.plot(history.history['loss'])
Example 29
    # the data for training is ssta and ha
    training_data, testing_data = file_helper_unformatted.load_sstha_for_conv2d(
        training_start, training_num)
    training_data = data_preprocess.data_preprocess(training_data, 0,
                                                    data_preprocess_method)
    data_x, data_y = data_preprocess.sequence_data(training_data,
                                                   input_length=time_step)
    print(data_x.shape, data_y.shape)

    # sc = np.linspace(0, 11, 12, dtype='int32')
    # sc = np.tile(sc, int((training_num-training_start)/12+1))
    # data_sc = sc[:(training_num-training_start)]
    # tesorboard = TensorBoard('..\..\model\\tensorboard\\' + model_name)
    save_best = ModelCheckpoint(os.path.join('..', '..', 'model', 'best', model_name + '.h5'),
                                monitor='val_root_mean_squared_error',
                                verbose=1,
                                save_best_only=True,
                                mode='min',
                                period=1)
    train_hist = model.fit(data_x,
                           data_y,
                           batch_size=batch_size,
                           epochs=epochs,
                           verbose=2,
                           callbacks=[save_best],
                           validation_split=0.1)

    # To save the model and logs
    # model.save('..\..\model\\' + model_name + '.h5')
    with open(file_helper_unformatted.find_logs_final(model_name + '_train'),
              'w') as f:
        f.write(str(train_hist.history))
Example 30
def model_selector(model_name, experiment_name, augment, augment_level,
                   batch_size, original_dataset_dir):

    run_directory = original_dataset_dir + "/" + experiment_name
    model = None
    if model_name == "base":
        model = generate_xBD_baseline_model(original_dataset_dir)
    elif model_name == "small_cnn":
        model = generate_small_cnn()
    elif model_name == "small_siamese_cnn":
        model = generate_small_siamese_cnn()
    elif model_name == "siamese":
        model = generate_siamese_model()
    elif model_name == "siamese_2":
        model = generate_small_weight_siamese_cnn()
    if model is None:
        return "Error: unknown model name"
    model.summary()  # summary() prints itself and returns None
    train_datagen = None
    if augment:
        train_datagen = ImageDataGenerator(rescale=1. / 255,
                                           rotation_range=40,
                                           width_shift_range=0.2,
                                           height_shift_range=0.2,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           horizontal_flip=True)
    else:
        train_datagen = ImageDataGenerator(rescale=1. / 255)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    # model = multi_gpu_model(model, gpus=1)
    # multi_gpu_model fails on single-GPU hosts; fall back to the plain model
    try:
        model = multi_gpu_model(model)
    except Exception:
        pass
    f1 = tfa.metrics.F1Score(num_classes=4, average="micro")
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(),
                  metrics=['acc', f1_score, precision_1, recall_1])
    csv_callback = CSVLogger(run_directory + "/log.csv",
                             separator=',',
                             append=False)
    chk_callback = ModelCheckpoint(run_directory + "/model",
                                   monitor='val_acc',
                                   verbose=1,
                                   save_best_only=True,
                                   save_weights_only=False,
                                   mode='auto',
                                   period=1)

    train_csv = pd.read_csv(original_dataset_dir +
                            "/polygon_csv/new_all_balanced_train.csv")
    valid_csv = pd.read_csv(original_dataset_dir +
                            "/polygon_csv/new_all_balance_valid.csv")
    test_csv = pd.read_csv(original_dataset_dir +
                           "/polygon_csv/new_all_balanced_test.csv")

    df_train = generate_csv(train_csv)
    df_valid = generate_csv(valid_csv)
    df_test = generate_csv(test_csv)
    #print(df_valid["labels"])
    #df_train=df_train[(df_train["labels"]==str(0) )| (df_train["labels"]==str(3))]
    #df_valid=df_valid[(df_valid["labels"]==str(0))|(df_valid["labels"]==str(3))]
    #df_test=df_test[(df_test["labels"]==str(0))|(df_test["labels"]==str(3))]

    print("\n\n\n\n\n")
    print(df_train.shape)
    train_generator = None
    valid_generator = None
    test_generator = None
    full_test_generator = None
    if model_name == "siamese" or model_name == "small_siamese_cnn" or model_name == "siamese_2":
        train_generator = generator_siamese(
            train_datagen, df_train, batch_size, 128, 128,
            original_dataset_dir + "/balanced_data/")
        valid_generator = generator_siamese(
            test_datagen, df_valid, batch_size, 128, 128,
            original_dataset_dir + "/balanced_data/")
        test_generator = generator_siamese(
            test_datagen, df_test, batch_size, 128, 128,
            original_dataset_dir + "/balanced_data")

        print("\n\nsiamese generator\n\n")

        history = model.fit(train_generator,
                            steps_per_epoch=df_train.shape[0] //
                            (batch_size * 1) * augment_level,
                            epochs=100,
                            validation_data=valid_generator,
                            validation_steps=df_valid.shape[0] // (batch_size),
                            callbacks=[csv_callback, chk_callback],
                            verbose=1)

        evaluation = model.evaluate(test_generator,
                                    steps=df_test.shape[0] // (batch_size * 1),
                                    verbose=1)
        print(dict(zip(model.metrics_names, evaluation)))
    if model_name == "base" or model_name == "small_cnn":
        history = model.fit(train_datagen.flow_from_dataframe(
            class_mode="categorical",
            batch_size=batch_size,
            dataframe=df_train,
            directory=original_dataset_dir + "/balanced_data/",
            x_col="uuid_post",
            y_col="labels",
            target_size=(128, 128),
            seed=47,
            shuffle=False,
            validate_filenames=False),
                            steps_per_epoch=df_train.shape[0] //
                            (batch_size * 1) * augment_level,
                            epochs=100,
                            validation_data=test_datagen.flow_from_dataframe(
                                class_mode="categorical",
                                batch_size=batch_size,
                                dataframe=df_valid,
                                directory=original_dataset_dir +
                                "/balanced_data/",
                                x_col="uuid_post",
                                y_col="labels",
                                target_size=(128, 128),
                                seed=47,
                                shuffle=False,
                                validate_filenames=False),
                            validation_steps=df_valid.shape[0] // batch_size,
                            callbacks=[csv_callback, chk_callback],
                            verbose=1)

        csv_callback_evaluate = CSVLogger(run_directory + "/log.csv",
                                          separator=',',
                                          append=False)
        evaluation = model.evaluate(test_datagen.flow_from_dataframe(
            class_mode="categorical",
            batch_size=batch_size,
            dataframe=df_test,
            directory=original_dataset_dir + "/balanced_data/",
            x_col="uuid_post",
            y_col="labels",
            target_size=(128, 128),
            seed=47,
            shuffle=False),
                                    steps=df_test.shape[0] // (batch_size * 1),
                                    verbose=1)
    # open for writing and serialise the dict; open() defaults to read mode,
    # and write() requires a string
    with open(run_directory + "/test.txt", "w") as f:
        f.write(str(dict(zip(model.metrics_names, evaluation))))

    return "Program completed"