Example #1
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.optimizers import Adam
from keras.utils import multi_gpu_model


def train_val(model, base_model):

    train_gen = sample_gen(file_id_mapping_train)
    # print(next(gen(train_gen, batch_size)))
    test_gen = sample_gen(file_id_mapping_test)

    checkpoint = ModelCheckpoint(file_path,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    early = EarlyStopping(monitor="val_loss", mode="min", patience=5)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3)
    callbacks_list = [checkpoint, early, reduce_lr]  # early

    history = model.fit_generator(gen(train_gen, batch_size),
                                  validation_data=gen(test_gen, batch_size),
                                  epochs=60,
                                  verbose=1,
                                  workers=4,
                                  use_multiprocessing=True,
                                  callbacks=callbacks_list,
                                  steps_per_epoch=500,
                                  validation_steps=30)

    # model.compile(loss=identity_loss, optimizer=SGD(0.000001))
    # history = model.fit_generator(gen(train_gen, batch_size), validation_data=gen(test_gen, batch_size), epochs=60, verbose=1, workers=4, use_multiprocessing=True,
    #                               callbacks=callbacks_list, steps_per_epoch=500, validation_steps=30)

    # return
    file_name = file_path
    # Hard-example mining rounds: rebuild the model, reload the previous best
    # weights, then continue training on progressively harder samples.
    for i in range(1, 10):
        train_file_distance = gen_distance(base_model, file_id_mapping_train,
                                           i)
        test_file_distance = gen_distance(base_model, file_id_mapping_test, i)
        train_gen = hard_sample_gen(train_file_distance)
        test_gen = hard_sample_gen(test_file_distance)

        model, base_model = build_model()
        model = multi_gpu_model(model, gpus=4)
        model.compile(loss=identity_loss, optimizer=Adam(0.000001))
        model.load_weights(file_name)
        file_name = 'hard_{}.h5'.format(i)

        checkpoint = ModelCheckpoint(file_name,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min')
        early = EarlyStopping(monitor="val_loss", mode="min", patience=15)
        history = model.fit_generator(gen(train_gen, batch_size),
                                      validation_data=gen(
                                          test_gen, batch_size),
                                      epochs=60,
                                      verbose=1,
                                      workers=4,
                                      use_multiprocessing=True,
                                      steps_per_epoch=500,
                                      validation_steps=30,
                                      callbacks=[checkpoint, early])
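
Example #1 relies on helpers defined elsewhere in the project (sample_gen, hard_sample_gen, gen_distance, build_model, gen, identity_loss) whose originals are not shown. Below is only a minimal sketch of what the batch wrapper and the loss could look like for a triplet-style setup; the triplet layout and the dummy targets are assumptions, not the author's code.

import numpy as np
from keras import backend as K

def identity_loss(y_true, y_pred):
    # The model is assumed to output its own loss value, so the Keras loss
    # simply averages that output and ignores y_true.
    return K.mean(y_pred)

def gen(sample_generator, batch_size):
    # Collect single (anchor, positive, negative) samples into batches that
    # fit_generator can consume; identity_loss ignores the dummy targets.
    while True:
        anchors, positives, negatives = [], [], []
        for _ in range(batch_size):
            a, p, n = next(sample_generator)
            anchors.append(a)
            positives.append(p)
            negatives.append(n)
        yield ([np.array(anchors), np.array(positives), np.array(negatives)],
               np.zeros((batch_size, 1)))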
Example #2
from keras.callbacks import ReduceLROnPlateau
from keras.optimizers import SGD


def train_all():

    reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=3)

    callbacks_list = [reduce_lr]  # early

    history = model.fit_generator(gen(all_gen, batch_size),
                                  epochs=60,
                                  verbose=1,
                                  workers=4,
                                  use_multiprocessing=True,
                                  callbacks=callbacks_list,
                                  steps_per_epoch=500)

    model.compile(loss=identity_loss, optimizer=SGD(0.000001))
    history = model.fit_generator(gen(all_gen, batch_size),
                                  epochs=60,
                                  verbose=1,
                                  workers=4,
                                  use_multiprocessing=True,
                                  callbacks=callbacks_list,
                                  steps_per_epoch=500)

    model.save('vgg_all.h5')
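
Because the model is compiled with the custom identity_loss, reading 'vgg_all.h5' back with load_model needs that loss passed through custom_objects. A short usage sketch, assuming identity_loss is importable in the loading script:

from keras.models import load_model

# Custom losses are not serialized with the model, so they have to be
# supplied again when the file is read back.
restored = load_model('vgg_all.h5',
                      custom_objects={'identity_loss': identity_loss})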
Example #3
    emb_matrix = data_utils.load_glove_embedding(glove_path, vocab)

    K.clear_session()
    model = models.build_glove_model(p,
                                     len(vocab),
                                     emb_matrix,
                                     glove_trainable=args.ft,
                                     drop_rate=args.drop)

    if last_weight:
        print('Loading weight :', last_weight)
        model.load_weights(last_weight)

    train_gen = data_utils.gen(MODEL_NAME,
                               p,
                               train_df,
                               vocab,
                               batch_size=BATCH_SIZE)
    val_gen = data_utils.gen(MODEL_NAME,
                             p,
                             val_df,
                             vocab,
                             batch_size=BATCH_SIZE,
                             shuffle=False)

    train_steps = int(np.ceil(len(train_df) / BATCH_SIZE))
    val_steps = int(np.ceil(len(val_df) / BATCH_SIZE))

    print(train_steps, val_steps)

    callbacks = [
Example #4
# reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3)

# callbacks_list = [checkpoint, early, reduce_lr]  # early

# history = model.fit_generator(gen(train_gen, batch_size), validation_data=gen(test_gen, batch_size), epochs=60, verbose=1, workers=4, use_multiprocessing=True,
#                               callbacks=callbacks_list, steps_per_epoch=500, validation_steps=30)

# model.compile(loss=identity_loss, optimizer=SGD(0.000001))
# history = model.fit_generator(gen(train_gen, batch_size), validation_data=gen(test_gen, batch_size), epochs=60, verbose=1, workers=4, use_multiprocessing=True,
#                               callbacks=callbacks_list, steps_per_epoch=500, validation_steps=30)

reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=3)

callbacks_list = [reduce_lr]  # early

history = model.fit_generator(gen(all_gen, batch_size),
                              epochs=60,
                              verbose=1,
                              workers=4,
                              use_multiprocessing=True,
                              callbacks=callbacks_list,
                              steps_per_epoch=500)

model.compile(loss=identity_loss, optimizer=SGD(0.000001))
history = model.fit_generator(gen(all_gen, batch_size),
                              epochs=60,
                              verbose=1,
                              workers=4,
                              use_multiprocessing=True,
                              callbacks=callbacks_list,
                              steps_per_epoch=500)
Example #5
        glove_path = 'glove/glove.6B.50d.txt'
        emb_matrix = data_utils.load_glove_embedding(glove_path, vocab)
        model = models.build_glove_model(p,
                                         len(vocab),
                                         emb_matrix,
                                         glove_trainable=args.ft,
                                         drop_rate=args.drop,
                                         summary=False)

    print('Loading weight :', weight)
    model.load_weights(weight)

    test_gen = data_utils.gen(MODEL_NAME,
                              p,
                              test_df,
                              vocab,
                              batch_size=BATCH_SIZE,
                              test=True,
                              shuffle=False)

    test_steps = int(np.ceil(len(test_df) / BATCH_SIZE))

    print(test_gen, test_steps)

    y_true = data_utils.prepare_features(MODEL_NAME,
                                         df=test_df,
                                         prompt=p,
                                         vocab=vocab,
                                         y_only=True,
                                         norm=True)
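
The generate_qwk helper used to score these predictions is not shown in the example. A minimal sketch of a quadratic weighted kappa computation with scikit-learn, assuming predictions and targets have first been mapped back to the original integer score scale (y_true above is normalized, so it would need rescaling):

import numpy as np
from sklearn.metrics import cohen_kappa_score

def quadratic_weighted_kappa(y_true, y_pred):
    # Round both arrays to integer scores, then score with quadratic weights.
    y_true = np.rint(np.asarray(y_true)).astype(int)
    y_pred = np.rint(np.asarray(y_pred)).astype(int)
    return cohen_kappa_score(y_true, y_pred, weights='quadratic')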
Example #6
    def on_epoch_end(self, epoch, logs=None):
        # Predict on the validation set and log the epoch's QWK score.
        val_gen = gen(self.model_name, self.prompt, self.val_data, self.vocab,
                      self.batch_size, test=True, shuffle=False)
        y_pred = self.model.predict_generator(val_gen,
                                               steps=self.steps,
                                               verbose=1)

        generate_qwk(self.prompt, self.model_name,
                     self.y_true, y_pred, epoch + 1, 'val')
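
The on_epoch_end method above belongs to a custom Keras callback whose constructor is not shown. A minimal skeleton it could live in, where the class name is hypothetical and the attributes simply mirror what the method uses:

from keras.callbacks import Callback

class QWKEvaluation(Callback):
    # Stores everything on_epoch_end needs; self.model is set by Keras itself.
    def __init__(self, model_name, prompt, val_data, vocab,
                 batch_size, steps, y_true):
        super(QWKEvaluation, self).__init__()
        self.model_name = model_name
        self.prompt = prompt
        self.val_data = val_data
        self.vocab = vocab
        self.batch_size = batch_size
        self.steps = steps
        self.y_true = y_true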