# --- Fine-tuning entry: build the network, resume (or migrate) weights, train. ---
# NOTE(review): this span arrived whitespace-mangled (newlines stripped); the
# statement order below is exactly the original's, re-formatted.

# Build the encoder-decoder backbone and attach the refinement head on top of it.
model = build_encoder_decoder()
final = build_refinement(model)

# Resume from the newest checkpoint when one exists; otherwise start fresh
# from migrated pretrained weights at epoch 0.
if os.listdir(checkpoint_dir):
    latest = tf.train.latest_checkpoint(checkpoint_dir)
    final.load_weights(latest)
    # Recover the epoch counter from the checkpoint name so training resumes
    # where it left off instead of restarting the LR/early-stop schedules.
    initial_epoch = get_initial_epoch(latest)
else:
    migrate_model_2(final)
    initial_epoch = 0

final.compile(optimizer='nadam', loss=overall_loss)
# summary() prints the table itself and returns None; wrapping it in print()
# (as the original did) emitted a stray "None" line after the table.
final.summary()

# Final callbacks
callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

# Start Fine-tuning.
# FIX: the original also passed batch_size=4 here, but tf.keras forbids
# batch_size together with a generator/Sequence input (ValueError) — the
# generator itself determines the batch size.
final.fit(train_gen(),
          validation_data=valid_gen(),
          epochs=epochs,
          verbose=1,
          callbacks=callbacks,
          initial_epoch=initial_epoch,
          use_multiprocessing=True,
          workers=2,
          )
# NOTE(review): this line is a whitespace-mangled fragment that begins INSIDE a
# conditional: the leading statements belong to the multi-GPU branch of an
# `if` whose header (presumably something like `if num_gpu >= 2:`) is not
# visible here, and the `else:` below is that conditional's other branch —
# do not treat this as a standalone top-level script. Left byte-identical.
#
# NOTE(review): `multi_gpu_model(model, ...)` wraps the encoder-decoder
# backbone `model`, yet it is `final` (backbone + refinement) that gets
# compiled and trained below — looks like the refinement head would not be
# replicated across GPUs; confirm whether `multi_gpu_model(final, ...)` was
# intended.
#
# NOTE(review): `MyCbk(model)` appears to be a checkpoint callback that saves
# through the original single-GPU model rather than the multi-GPU wrapper
# (per the inline comment) — verify against MyCbk's definition.
#
# NOTE(review): `fit_generator` is the legacy Keras API (deprecated in
# tf.keras 2.1+, removed later, where `fit` accepts generators directly);
# the sibling code path in this file already uses `fit`.
final = multi_gpu_model(model, gpus=num_gpu) # rewrite the callback: saving through the original model and not the multi-gpu model. model_checkpoint = MyCbk(model) else: model = build_encoder_decoder() final = build_refinement(model) # if pretrained_path is not None: # final.load_weights(pretrained_path) if len(os.listdir(checkpoint_dir)) > 0: latest = tf.train.latest_checkpoint(checkpoint_dir) final.load_weights(latest) final.compile(optimizer='nadam', loss=overall_loss) print(final.summary()) # Final callbacks callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr] # Start Fine-tuning final.fit_generator(train_gen(), steps_per_epoch=num_train_samples // batch_size, validation_data=valid_gen(), validation_steps=num_valid_samples // batch_size, epochs=epochs, verbose=1, callbacks=callbacks, # use_multiprocessing=True, # workers=2 )