    model.layers[i].trainable = False

if True:
    #--------------------------------------------#
    #   BATCH_SIZE should not be too small,
    #   otherwise the training results will be poor.
    #--------------------------------------------#
    BATCH_SIZE   = 4
    Lr           = 1e-3
    Init_Epoch   = 0
    Freeze_Epoch = 50

    gen = Generator(bbox_util, BATCH_SIZE, lines[:num_train], lines[num_train:],
                    (image_sizes[phi], image_sizes[phi]), NUM_CLASSES)

    model.compile(loss={'regression': smooth_l1(), 'classification': focal()},
                  optimizer=keras.optimizers.Adam(Lr))

    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, BATCH_SIZE))
    model.fit(gen.generate(True, eager=False),
              steps_per_epoch=max(1, num_train // BATCH_SIZE),
              validation_data=gen.generate(False, eager=False),
              validation_steps=max(1, num_val // BATCH_SIZE),
              epochs=Freeze_Epoch,
              verbose=1,
              initial_epoch=Init_Epoch,
              callbacks=[logging, checkpoint, reduce_lr, early_stopping])

for i in range(freeze_layers[phi]):
    model.layers[i].trainable = True
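The fit call above refers to four callbacks (logging, checkpoint, reduce_lr, early_stopping) that are created earlier in the script and not shown here. For reference, they are typically built from standard Keras callbacks along the lines of the sketch below; the arguments are illustrative assumptions, not the script's actual settings:

# Assumed callback setup: the names match the fit() call above, the values are illustrative.
logging        = keras.callbacks.TensorBoard(log_dir='logs')
checkpoint     = keras.callbacks.ModelCheckpoint('logs/ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                                                 monitor='val_loss', save_weights_only=True, save_best_only=False)
reduce_lr      = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, verbose=1)
early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=6, verbose=1)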
epoch_size     = num_train // BATCH_SIZE
epoch_size_val = num_val // BATCH_SIZE

lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=Lr, decay_steps=epoch_size, decay_rate=0.95, staircase=True)
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, BATCH_SIZE))
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

for epoch in range(Init_Epoch, Freeze_Epoch):
    fit_one_epoch(model, focal(), smooth_l1(), optimizer, epoch,
                  epoch_size, epoch_size_val, gen, gen_val, Freeze_Epoch,
                  get_train_step_fn())

for i in range(freeze_layers[phi]):
    model.layers[i].trainable = True

if True:
    #--------------------------------------------#
    #   BATCH_SIZE should not be too small,
    #   otherwise the training results will be poor.
    #--------------------------------------------#
    BATCH_SIZE   = 4
    Lr           = 5e-5
    Freeze_Epoch = 50
    Epoch        = 100
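In the eager-mode loop above, the gradient update itself comes from get_train_step_fn() and the per-epoch loop over batches lives in fit_one_epoch, both defined elsewhere in the repository. A minimal sketch of what the returned train step typically looks like follows; the argument names and the way the two losses are combined are assumptions, not the repository's exact code:

import tensorflow as tf

def get_train_step_fn():
    @tf.function
    def train_step(imgs, targets0, targets1, net, optimizer, regression_loss, classification_loss):
        with tf.GradientTape() as tape:
            # Forward pass: the detector returns box regressions and class scores.
            regression, classification = net(imgs, training=True)
            # Combine the two detection losses (smooth L1 for boxes, focal loss for classes).
            loss_value = regression_loss(targets0, regression) + classification_loss(targets1, classification)
        # Backpropagate and apply one optimizer step.
        grads = tape.gradient(loss_value, net.trainable_variables)
        optimizer.apply_gradients(zip(grads, net.trainable_variables))
        return loss_value
    return train_step

Wrapping the step in tf.function compiles it into a graph, which usually runs noticeably faster than executing the same loop purely eagerly.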