help='Start training from an existing model.')
args = parser.parse_args()

# Create the model
model = create_model(existing=args.checkpoint)

# Inform about multi-GPU training
if args.gpus == 1:
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuids
    print('Will use GPU ' + args.gpuids)
else:
    print('Will use ' + str(args.gpus) + ' GPUs.')

# Data loaders
if args.data == 'nyu':
    train_generator, test_generator = get_nyu_train_test_data(args.bs)

# Training session details
runID = str(int(time.time())) + '-n' + str(len(train_generator)) + '-e' + str(args.epochs) + \
        '-bs' + str(args.bs) + '-lr' + str(args.lr) + '-' + args.name
outputPath = './models/'
runPath = outputPath + runID
pathlib.Path(runPath).mkdir(parents=True, exist_ok=True)
pathlib.Path(runPath + '/samples').mkdir(parents=True, exist_ok=True)
print('Output: ' + runPath)

# (optional steps)
if True:
    # Keep a copy of this training script and calling arguments
    with open(__file__, 'r') as training_script:
        training_script_content = training_script.read()
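    # (Sketch only, not part of the original excerpt: one plausible continuation writes
    #  the script copy read above, plus the parsed arguments, into the run folder.
    #  The file names 'train_script_copy.py' and 'args.txt' are assumptions.)
    with open(runPath + '/train_script_copy.py', 'w') as script_copy:
        script_copy.write(training_script_content)
    with open(runPath + '/args.txt', 'w') as args_file:
        args_file.write(str(vars(args)))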
default=None, help='Use non-uniform sampling')
args = parser.parse_args()

# Inform about multi-GPU training
if args.gpus == 1:
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuids
    print('Will use GPU ' + args.gpuids)
else:
    print('Will use ' + str(args.gpus) + ' GPUs.')

loss = depth_loss_function

# Data loaders
if args.data == 'nyu':
    train_generator, test_generator = get_nyu_train_test_data(args.bs, nus=args.nus_smooth)
if args.data == 'unreal':
    train_generator, test_generator = get_unreal_train_test_data(args.bs)
if args.data == 'megadepth':
    train_generator, test_generator = get_megadepth_train_test_data(args.bs)
    loss = noisy_depth_loss_function

# Create the model
model = create_model(existing=args.checkpoint)

# Training session details
runID = str(int(time.time())) + '-n' + str(len(train_generator)) + '-e' + str(args.epochs) + \
        '-bs' + str(args.bs) + '-lr' + str(args.lr) + '-' + args.name
outputPath = './models/'
runPath = outputPath + runID
# Create the model
if args.resnet50:
    # ResNet-based backbone
    model = create_model_resnet(existing=args.checkpoint)
elif args.efficientnet:
    # EfficientNet-based backbone
    model = create_model_efficientnet(existing=args.checkpoint)
else:
    # Otherwise choose a DenseNet model in one of three sizes
    if args.dnetVersion == 'small':
        model = create_model(existing=args.checkpoint, is121=True)
    if args.dnetVersion == 'medium':
        model = create_model(existing=args.checkpoint)
    if args.dnetVersion == 'large':
        model = create_model(existing=args.checkpoint, is_twohundred=True)

# Data loaders
if args.data == 'nyu':
    train_generator, test_generator = get_nyu_train_test_data(args.bs)
if args.data == 'redweb':
    train_generator, test_generator = get_redweb_train_test_data(args.bs)

# Training session details
runID = str(int(time.time())) + '-n' + str(len(train_generator)) + '-e' + str(args.epochs) + \
        '-bs' + str(args.bs) + '-lr' + str(args.lr) + '-' + args.name
outputPath = './models/'
runPath = outputPath + runID
pathlib.Path(runPath).mkdir(parents=True, exist_ok=True)
print('Output: ' + runPath)

# Multi-GPU setup
basemodel = model
if args.gpus > 1:
    model = multi_gpu_model(model, gpus=args.gpus)

# Optimizer
optimizer = Adam(lr=args.lr, amsgrad=True)
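# (Sketch only, not part of the original excerpt: a typical continuation compiles the
#  possibly multi-GPU model with the depth loss and starts training. The loss name
#  depth_loss_function and the bare fit_generator call below are assumptions.)
model.compile(loss=depth_loss_function, optimizer=optimizer)
model.fit_generator(train_generator, validation_data=test_generator,
                    epochs=args.epochs, shuffle=True)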
# Custom objects needed for inference and training
custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': None}

print('Loading model...')

# Load model into GPU / CPU
model = load_model(args.model, custom_objects=custom_objects, compile=False)
print('\nModel loaded ({0}).'.format(args.model))

# Input images
# inputs = load_images(glob.glob(args.input))
# print('\nLoaded ({0}) images of size {1}.'.format(inputs.shape[0], inputs.shape[1:]))
traingen, _ = get_nyu_train_test_data(4)
inputs = traingen[0][0] / 255

# Compute results
outputs = predict(model, inputs)

# Fix for a matplotlib problem when running in an Ubuntu terminal
# matplotlib.use('TkAgg')

# Display results
viz = display_images(outputs.copy(), inputs.copy())
plt.figure(figsize=(10, 5))
plt.imshow(viz)
plt.savefig('test.png')
plt.show()
# ------------ Test images snippet ------------
import glob

image_list = glob.glob('*.jpg')
test_images = load_images(image_list)
print(test_images.shape)
show_images(test_images)
# ---------------------------------------------

'''
Create Model with Decoder
'''
model = create_model()

'''
Create Train and Test Generators
Returns Data_generator objects for Keras
'''
train_generator, test_generator = get_nyu_train_test_data(BATCH_SIZE)
print('\n\nGenerators Ready:', train_generator, test_generator)

optimizer = keras.optimizers.Adam(lr=lr)
model.compile(loss=depth_loss, optimizer=optimizer)
print('\n\nModel Compiled. Ready to train.')

model.fit_generator(train_generator, validation_data=test_generator, epochs=EPOCHS, shuffle=True)
print('Finished Training. Running Inference')
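# (Sketch only, not part of the original snippet: one way to run the announced inference
#  is to predict on the first validation batch and reuse show_images on the inputs.
#  Indexing test_generator like a Keras Sequence is an assumption.)
sample_rgb, sample_depth = test_generator[0]
predicted_depth = model.predict(sample_rgb)
print('Predicted depth maps with shape:', predicted_depth.shape)
show_images(sample_rgb)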