# Multi-gpu setup: keep a handle to the template (single-GPU) model so its
# weights can be saved portably; replicate onto multiple GPUs if requested.
basemodel = model
if args.gpus > 1:
    model = multi_gpu_model(model, gpus=args.gpus)

# Optimizer
optimizer = Adam(lr=args.lr, amsgrad=True)

# Compile the model
print('\n\n\n', 'Compiling model..', runID, '\n\n\tGPU ' + (str(args.gpus) + ' gpus' if args.gpus > 1 else args.gpuids) + '\t\tBatch size [ ' + str(args.bs) + ' ] ' + ' \n\n')
model.compile(loss=depth_loss_function, optimizer=optimizer)

print('Ready for training!\n')

# Callbacks
# BUG FIX: default to an empty list so fit_generator does not raise a
# NameError when args.data is anything other than 'nyu'. (The sibling
# variants of this script all initialize callbacks = [] before the guard.)
callbacks = []
if args.data == 'nyu':
    callbacks = get_nyu_callbacks(model, basemodel, train_generator, test_generator, runPath)

# Start training
model.fit_generator(train_generator, callbacks=callbacks, validation_data=test_generator,
                    epochs=args.epochs, shuffle=True)

# Save the final trained model. Saving basemodel (not the multi-GPU replica)
# keeps the checkpoint loadable on machines with a different GPU count.
basemodel.save(runPath + '/model.h5')
# Compile the model print( '\n\n\n', 'Compiling model..', runID, '\n\n\tGPU ' + (str(args.gpus) + ' gpus' if args.gpus > 1 else args.gpuids) + '\t\tBatch size [ ' + str(args.bs) + ' ] ' + ' \n\n') model.compile(loss=depth_loss_function, optimizer=optimizer) print('Ready for training!\n') # Callbacks # 訓練中にモデル内部の状態と統計量を可視化する際に,コールバックを使う # ここではTensorBoardを用いて訓練とテストの評価値を動的にグラフ化し,可視化する。 callbacks = [] if args.data == 'nyu': callbacks = get_nyu_callbacks(model, basemodel, train_generator, test_generator, load_test_data() if args.full else None, runPath) if args.data == 'unreal': callbacks = get_nyu_callbacks(model, basemodel, train_generator, test_generator, load_test_data() if args.full else None, runPath) if args.data == 'eyemodel': callbacks = get_eyemodel_callbacks(model, basemodel, train_generator, test_generator, load_test_data() if args.full else None, runPath, minDepth=args.mindepth, maxDepth=args.maxdepth,
# Build the RFDN depth-estimation network on 120x160 RGB inputs and wrap it
# in a Keras Model for training.
rfanet_x = RFDN()
x = Input(shape=(120, 160, 3))
out = rfanet_x(x)
parallel_model = Model(inputs=x, outputs=out)
parallel_model.compile(loss=depth_loss_function, optimizer=optimizer, metrics=metrics)
parallel_model.summary()
print('Ready for training!\n')

# Callbacks
# NOTE(review): 'totaL_epochs' (capital L) is the keyword name expected by
# get_nyu_callbacks as defined elsewhere — do not "fix" it here without
# changing the callee as well.
callbacks = get_nyu_callbacks(parallel_model, train_generator, val_generator, runPath,
                              totaL_epochs=args.epochs, warmup_epoch=5, batch_size=args.bs,
                              lr=args.lr, val_loss="val_loss_sirmse_baseline")

# Start training
parallel_model.fit(train_generator, validation_data=val_generator, callbacks=callbacks,
                   epochs=args.epochs, shuffle=True, batch_size=args.bs, verbose=1)
optimizer = Adam(lr=args.lr, amsgrad=True) # Compile the model print( '\n\n\n', 'Compiling model..', runID, '\n\n\tGPU ' + (str(args.gpus) + ' gpus' if args.gpus > 1 else args.gpuids) + '\t\tBatch size [ ' + str(args.bs) + ' ] ' + ' \n\n') model.compile(loss=loss, optimizer=optimizer) print('Ready for training!\n') # Callbacks callbacks = [] if args.data == 'nyu': callbacks = get_nyu_callbacks(model, basemodel, train_generator, test_generator, load_test_data() if args.full else None, runPath) if args.data == 'unreal': callbacks = get_nyu_callbacks(model, basemodel, train_generator, test_generator, load_test_data() if args.full else None, runPath) if args.data == 'megadepth': callbacks = get_nyu_callbacks( model, basemodel, train_generator, test_generator, load_test_data() if args.full else None, runPath, depth_norm=lambda x: (x - np.min(x)) / (1e-10 + np.max(x) - np.min(x)),
# Compile the model print( '\n\n\n', 'Compiling model..', runID, '\n\n\tGPU ' + (str(args.gpus) + ' gpus' if args.gpus > 1 else args.gpuids) + '\t\tBatch size [ ' + str(args.bs) + ' ] ' + ' \n\n') model.compile(loss=depth_loss_function, optimizer=optimizer) print('Ready for training!\n') # Callbacks callbacks = [] if args.data == 'nyu': callbacks = get_nyu_callbacks(model, basemodel, train_generator, test_generator, load_test_data() if args.full else None, runPath, save_checkpoint_period=args.save_period) if args.data == 'unreal': callbacks = get_nyu_callbacks(model, basemodel, train_generator, test_generator, load_test_data() if args.full else None, runPath) if args.data == 'own': callbacks = get_nyu_callbacks(model, basemodel, train_generator, test_generator, load_test_data() if args.full else None, runPath,