import tensorflow as tf
from keras.callbacks import (Callback, EarlyStopping, ModelCheckpoint,
                             ReduceLROnPlateau, TensorBoard)
from keras.utils import multi_gpu_model

# Project-local imports (module paths assumed from the identifiers used below):
from model import build_encoder_decoder, build_refinement
from migrate import migrate_model
from utils import get_available_gpus
import migrate

# checkpoint_models_path and pretrained_path are assumed to be defined
# earlier in the script (e.g. from a config module or CLI arguments).


class MyCbk(Callback):
    """Checkpoint callback that saves the template model rather than the
    multi-GPU wrapper, so checkpoints stay loadable on a single device."""

    def __init__(self, model):
        super(MyCbk, self).__init__()
        self.model_to_save = model

    def on_epoch_end(self, epoch, logs=None):
        fmt = checkpoint_models_path + 'final.%02d-%.4f.hdf5'
        self.model_to_save.save(fmt % (epoch, logs['val_loss']))


# Load our model, with added support for multiple GPUs.
num_gpu = len(get_available_gpus())
if num_gpu >= 2:
    # Build the template model on the CPU so its weights live in host
    # memory and can be shared by every GPU replica.
    with tf.device("/cpu:0"):
        model = build_encoder_decoder()
        model = build_refinement(model)
        if pretrained_path is not None:
            model.load_weights(pretrained_path)
        else:
            migrate_model(model)

    final = multi_gpu_model(model, gpus=num_gpu)
    # Rewrite the callback: save through the original template model,
    # not the multi-GPU wrapper.
    model_checkpoint = MyCbk(model)
else:
    model = build_encoder_decoder()
    final = build_refinement(model)
    if pretrained_path is not None:
        final.load_weights(pretrained_path)
    else:
        migrate_model(final)

decoder_target = tf.placeholder(dtype='float32', shape=(None, None, None, None))
# The compile call is truncated in the source; the loss function name and
# the target_tensors binding below are assumptions.
final.compile(optimizer='nadam', loss=overall_loss,
              target_tensors=[decoder_target])
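# get_available_gpus() is imported above but not defined in this fragment.
# A minimal sketch of the commonly used TensorFlow implementation, assumed
# rather than taken from the source:
from tensorflow.python.client import device_lib

def get_available_gpus():
    """Return the device names of all GPUs visible to TensorFlow."""
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']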
# Training callbacks. model_names and tensor_board are not defined in this
# fragment; the two definitions below follow the usual Keras patterns and
# are assumptions.
model_names = checkpoint_models_path + 'model.{epoch:02d}-{val_loss:.4f}.hdf5'
tensor_board = TensorBoard(log_dir='./logs')

model_checkpoint = ModelCheckpoint(model_names, monitor='val_loss',
                                   verbose=1, save_best_only=True)
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
                              patience=int(patience / 4), verbose=1)

# Build the model once, then either load pretrained weights or migrate them.
model = build_encoder_decoder()
if pretrained_path is not None:
    model.load_weights(pretrained_path)
else:
    migrate.migrate_model(model)

# depth_loss is the project's loss function, assumed to be defined elsewhere.
model.compile(optimizer='nadam', loss=depth_loss)
model.summary()  # summary() prints the architecture itself and returns None

# Final callbacks
callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

batch_size = 14

# Start fine-tuning. The fit_generator call is truncated in the source;
# the epochs, verbose, and callbacks arguments are assumptions.
model.fit_generator(train_gen(batch_size),
                    steps_per_epoch=num_train_samples // batch_size,
                    validation_data=valid_gen(batch_size),
                    validation_steps=num_valid_samples // batch_size,
                    epochs=epochs,
                    verbose=1,
                    callbacks=callbacks)
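# train_gen()/valid_gen() above are assumed to be endless generators of
# (inputs, targets) batch pairs, which is the contract fit_generator
# expects. A hypothetical skeleton for illustration; the name, image
# shapes, and loading logic are placeholders, not from the source:
import numpy as np

def example_gen(batch_size, img_rows=320, img_cols=320):
    while True:
        x_batch = np.zeros((batch_size, img_rows, img_cols, 3), dtype=np.float32)
        y_batch = np.zeros((batch_size, img_rows, img_cols, 1), dtype=np.float32)
        # load and preprocess a real batch here before yielding
        yield x_batch, y_batch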