# Example 1
# Example 1: train the CRNN text-recognition model with a CTC loss.
# Hyperparameters (img_w, img_h, batch_size, val_batch_size,
# downsample_factor, ...) come from the project's `parameter` module.
from parameter import *

# Inference-mode flag for layers such as BatchNormalization/Dropout.
# NOTE(review): 0 means "test phase", which is unusual for a training
# script — confirm this is intentional.
K.set_learning_phase(0)

# # Model description and training

model = get_Model(training=True)

# Warm-start from a previous checkpoint when available; otherwise train
# from scratch. Narrowed from a bare `except:` so that SystemExit and
# KeyboardInterrupt still propagate.
try:
    model.load_weights('LSTM+BN4--26--0.011.hdf5')
    print("...Previous weight data...")
except Exception:
    print("...New weight data...")

# This directory must contain only the prepared images (no sub-folders).
train_file_path = './DB/train/'
tiger_train = TextImageGenerator(train_file_path, img_w, img_h, batch_size, downsample_factor)
tiger_train.build_data()

# This directory must contain only the prepared images (no sub-folders).
valid_file_path = './DB/test/'
tiger_val = TextImageGenerator(valid_file_path, img_w, img_h, val_batch_size, downsample_factor)
tiger_val.build_data()

ada = Adadelta()

# Stop when the training loss stops improving by at least min_delta for
# `patience` consecutive epochs; checkpoint the model every epoch.
early_stop = EarlyStopping(monitor='loss', min_delta=0.001, patience=4, mode='min', verbose=1)
checkpoint = ModelCheckpoint(filepath='LSTM+BN5--{epoch:02d}--{val_loss:.3f}.hdf5', monitor='loss', verbose=1, mode='min', period=1)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=ada)

# captures output of softmax so we can decode the output during visualization
model.fit_generator(generator=tiger_train.next_batch(),
# Example 2
# # Model description and training
# Example 2: build the CRNN model and configure early stopping.

model = get_Model(training=True)

model.summary()

# Warm-start from an earlier checkpoint when present; otherwise start
# fresh. Narrowed from a bare `except:` so that SystemExit and
# KeyboardInterrupt still propagate.
try:
    model.load_weights('LSTM+BN4--50--0.056.hdf5')
    print("...Previous weight data...")
except Exception:
    print("...New weight data...")

# (width, height) expected by this TextImageGenerator variant.
input_shape = (img_w, img_h)

tiger_train = TextImageGenerator(batch_size, input_shape)

tiger_val = TextImageGenerator(batch_size, input_shape)

ada = Adadelta()

# Stop once the training loss fails to improve by min_delta for
# `patience` consecutive epochs.
early_stop = EarlyStopping(monitor='loss',
                           min_delta=0.001,
                           patience=4,
                           mode='min',
                           verbose=1)
checkpoint = ModelCheckpoint(
    filepath='LSTM+BN7--{epoch:02d}--{val_loss:.3f}.hdf5',
    monitor='loss',
    verbose=1,
    mode='min',
# Example 3
# Example 3: train the CRNN model on './DB' with the Adam optimizer.
# Inference-mode flag for layers such as BatchNormalization/Dropout.
# NOTE(review): 0 means "test phase", which is unusual for a training
# script — confirm this is intentional.
K.set_learning_phase(0)

# # Model description and training

model = get_Model(training=True)

# Warm-start from a previous checkpoint when available; otherwise train
# from scratch. Narrowed from a bare `except:` so that SystemExit and
# KeyboardInterrupt still propagate.
try:
    model.load_weights('LSTM+BN5--01--33.762.hdf5')
    print("...Previous weight data...")
except Exception:
    print("...New weight data...")

train_file_path = './DB'
tiger_train = TextImageGenerator(train_file_path, img_w, img_h, batch_size,
                                 downsample_factor)
tiger_train.build_data()

# valid_file_path = './DB/test/'
# tiger_val = TextImageGenerator(valid_file_path, img_w, img_h, val_batch_size, downsample_factor)
# tiger_val.build_data()

# NOTE(review): despite the name `ada`, this is the Adam optimizer, not
# Adadelta as in the sibling examples. Name kept for compatibility.
ada = tf.keras.optimizers.Adam()

# Stop once the training loss fails to improve by min_delta for
# `patience` consecutive epochs.
early_stop = EarlyStopping(monitor='loss',
                           min_delta=0.001,
                           patience=4,
                           mode='min',
                           verbose=1)
checkpoint = ModelCheckpoint(
    filepath='LSTM+BN5--{epoch:02d}--{val_loss:.3f}.hdf5',
# Example 4
# Example 4: train the CRNN model from JSON-listed train/val datasets.
model = get_Model(training=True)

# Warm-start from a previous checkpoint when available; otherwise train
# from scratch. Narrowed from a bare `except:` so that SystemExit and
# KeyboardInterrupt still propagate.
try:
    model.load_weights('./model_OCR/LSTM+BN5--08--1.671.hdf5')
    print("...Previous weight data...")
except Exception:
    print("...New weight data...")
print(model.summary())

# Training set described by a JSON manifest; the 'train' mode string
# selects the generator's training behaviour.
json_train_path = './train_all.json'
tiger_train = TextImageGenerator(json_train_path,
                                 img_w,
                                 img_h,
                                 batch_size,
                                 'train',
                                 downsample_factor,
                                 max_text_len=max_text_len)
# NOTE(review): build_data() is intentionally disabled here, unlike the
# sibling examples — confirm the generator lazy-loads in this variant.
#tiger_train.build_data()

# Validation set described by a JSON manifest ('val' mode).
json_val_path = './val_all.json'
tiger_val = TextImageGenerator(json_val_path,
                               img_w,
                               img_h,
                               val_batch_size,
                               'val',
                               downsample_factor,
                               max_text_len=max_text_len)
#tiger_val.build_data()
print('>>>>>>>>>>>>>>>>>', val_batch_size)