Example #1
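# Context assumed from earlier in the source file (these examples are excerpts):
# model, img_w, img_h, batch_size, val_batch_size and downsample_factor are
# defined above, along with imports for Adadelta, EarlyStopping,
# ModelCheckpoint and the project's TextImageGenerator.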
# resume from a previous checkpoint if one exists
try:
    model.load_weights('LSTM+BN4--26--0.011.hdf5')
    print("...Previous weight data...")
except (IOError, OSError):  # narrowed from a bare except: load_weights raises OSError when no checkpoint exists
    print("...New weight data...")

train_file_path = './DB/train/'  # this directory contains only the prepared images, no other folders
tiger_train = TextImageGenerator(train_file_path, img_w, img_h, batch_size, downsample_factor)
tiger_train.build_data()

valid_file_path = './DB/test/'  # this directory contains only the prepared images, no other folders
tiger_val = TextImageGenerator(valid_file_path, img_w, img_h, val_batch_size, downsample_factor)
tiger_val.build_data()
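
# For the dummy CTC loss used below, next_batch() is assumed to yield
# CTC-style batches, roughly like this sketch (names are illustrative;
# the actual TextImageGenerator implementation may differ):
#
#   def next_batch(self):
#       while True:
#           inputs = {
#               'the_input': X,             # (batch, img_w, img_h, 1) images
#               'the_labels': labels,       # (batch, max_text_len) int labels
#               'input_length': input_len,  # (batch, 1) downsampled widths
#               'label_length': label_len,  # (batch, 1) true label lengths
#           }
#           outputs = {'ctc': np.zeros([batch_size])}  # dummy target for the dummy loss
#           yield (inputs, outputs)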

ada = Adadelta()

early_stop = EarlyStopping(monitor='loss', min_delta=0.001, patience=4, mode='min', verbose=1)
checkpoint = ModelCheckpoint(filepath='LSTM+BN5--{epoch:02d}--{val_loss:.3f}.hdf5', monitor='loss', verbose=1, mode='min', period=1)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=ada)
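
# The 'ctc' key above refers to a model output assumed to follow the standard
# Keras CRNN pattern: a Lambda layer wrapping K.ctc_batch_cost computes the
# real loss inside the graph, so compile() only needs a pass-through lambda.
# A sketch (layer and tensor names are assumptions, not from this file):
#
#   from keras import backend as K
#   from keras.layers import Lambda
#
#   def ctc_lambda_func(args):
#       y_pred, labels, input_length, label_length = args
#       return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
#
#   loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')(
#       [y_pred, labels, input_length, label_length])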

# captures output of softmax so we can decode the output during visualization
model.fit_generator(generator=tiger_train.next_batch(),
                    steps_per_epoch=int(tiger_train.n / batch_size),
                    epochs=30,
                    callbacks=[checkpoint, early_stop],  # early_stop was defined above but never passed in
                    validation_data=tiger_val.next_batch(),
                    validation_steps=int(tiger_val.n / val_batch_size))
Example #2
import matplotlib.pyplot as plt  # used for the loss curves at the end of this example

early_stop = EarlyStopping(monitor='loss',  # reconstructed opening; the snippet began mid-call
                           min_delta=0.001,
                           patience=4,
                           mode='min',
                           verbose=1)
checkpoint = ModelCheckpoint(
    filepath='LSTM+BN7--{epoch:02d}--{val_loss:.3f}.hdf5',
    monitor='loss',
    verbose=1,
    mode='min',
    period=1)

# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=ada)

# captures output of softmax so we can decode the output during visualization
seqModel = model.fit_generator(
    generator=tiger_train.next_batch(),
    steps_per_epoch=int(num_train_images / batch_size),
    epochs=15,
    callbacks=[checkpoint, early_stop],  # early_stop was defined above but never passed in
    validation_data=tiger_val.next_batch(),
    validation_steps=int(num_validation_images / val_batch_size))

train_loss = seqModel.history['loss']
val_loss = seqModel.history['val_loss']

ep = range(1, len(train_loss) + 1)  # size the axis from the history, robust if early stopping cuts training short

plt.figure()
plt.plot(ep, train_loss, color='blue', label='train_loss')
plt.plot(ep, val_loss, color='orange', label='val_loss')  # val_loss was recorded above but never plotted
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
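
# Follow-up sketch: report the best epoch from the recorded history
# (the numpy import is an addition here, not from the original snippet)
import numpy as np
best_epoch = int(np.argmin(val_loss)) + 1
print('lowest val_loss %.3f at epoch %d' % (min(val_loss), best_epoch))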
Example #3
# valid_file_path = './DB/test/'
# tiger_val = TextImageGenerator(valid_file_path, img_w, img_h, val_batch_size, downsample_factor)
# tiger_val.build_data()

ada = tf.keras.optimizers.Adam()  # note: the name 'ada' is kept from the Adadelta variants above, but this is Adam

early_stop = EarlyStopping(monitor='loss',
                           min_delta=0.001,
                           patience=4,
                           mode='min',
                           verbose=1)
checkpoint = ModelCheckpoint(
    filepath='LSTM+BN5--{epoch:02d}--{val_loss:.3f}.hdf5',
    monitor='loss',
    verbose=1,
    mode='min',
    period=1)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=ada)
model.summary()

# captures output of softmax so we can decode the output during visualization
model.fit_generator(
    generator=tiger_train.next_batch(),
    steps_per_epoch=100,  #int(tiger_train.n / batch_size),
    epochs=60,
    callbacks=[checkpoint, early_stop],  # early_stop was defined above but never passed in
    validation_data=tiger_train.next_batch_val(),
    validation_steps=20,
)  #int(tiger_train.n / val_batch_size))
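
# Decoding sketch for the "visualization" mentioned above: run a prediction
# model whose output is the softmax tensor (not the CTC loss output), then
# greedy-decode it with keras.backend.ctc_decode. Names are assumptions:
#
#   import numpy as np
#   from tensorflow.keras import backend as K
#
#   y_pred = base_model.predict(X_batch)        # (batch, T, num_classes) softmax
#   input_len = np.ones(y_pred.shape[0]) * y_pred.shape[1]
#   decoded, _ = K.ctc_decode(y_pred, input_length=input_len, greedy=True)
#   label_seqs = K.get_value(decoded[0])        # int label sequences, -1 padded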
Example #4
# resume from a previous checkpoint if one exists
try:
    model.load_weights('new_model.hdf5')  # assumption: the snippet's opening line was cut off; reusing the checkpoint name saved below
    print("...Previous weight data...")
except (IOError, OSError):  # narrowed from a bare except
    print("...New weight data...")

train_file_path = './DB/synt_new/'
tiger_train = TextImageGenerator(train_file_path, img_w, img_h, batch_size, downsample_factor)
tiger_train.build_data()

#valid_file_path = './DB/test/'
#tiger_val = TextImageGenerator(valid_file_path, img_w, img_h, val_batch_size, downsample_factor)
#tiger_val.build_data()

ada = Adadelta()

early_stop = EarlyStopping(monitor='loss', min_delta=0.001, patience=4, mode='min', verbose=1)
#checkpoint = ModelCheckpoint(filepath='LSTM+BN5--{epoch:02d}--{val_loss:.3f}.hdf5', monitor='loss', verbose=1, mode='min', period=1)
checkpoint = ModelCheckpoint(filepath='new_model.hdf5', monitor='loss', verbose=1, mode='min', period=1)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=ada)

# captures output of softmax so we can decode the output during visualization
model.fit_generator(generator=tiger_train.next_batch(),
                    steps_per_epoch=int(tiger_train.n / batch_size),
                    epochs=30,
                    callbacks=[checkpoint, early_stop])  # early_stop was defined above but never passed in
                    #validation_data=tiger_val.next_batch(),
                    #validation_steps=2)
                    #validation_steps=int(tiger_val.n / val_batch_size))
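
# Reuse sketch: the single-file checkpoint written above can be restored for
# inference later, assuming the same architecture is rebuilt first:
#
#   model.load_weights('new_model.hdf5')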