import os

import cv2
import numpy as np
import torch
from torchvision import transforms

# Repo-local imports (module paths assume the VNL repo layout; adjust if needed).
from lib.core.config import cfg
from lib.models.metric_depth_model import MetricDepthModel
from lib.models.image_transfer import bins_to_depth
from lib.utils.net_tools import load_ckpt


def scale_torch(img, scale):
    """
    Scale the image and convert it to a normalized torch.Tensor.
    :param img: input image, uint8 BGR array of shape [H, W, C]
    :param scale: the scale factor, float
    :return: normalized image tensor of shape [C, H, W]
    """
    img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
    img = img[::-1, :, :]               # BGR -> RGB
    img = img.astype(np.float32)
    img /= scale                        # map pixel values into [0, 1]
    img = torch.from_numpy(img.copy())
    img = transforms.Normalize(cfg.DATASET.RGB_PIXEL_MEANS,
                               cfg.DATASET.RGB_PIXEL_VARS)(img)
    return img


model = MetricDepthModel()
model.eval()
# model.cuda()
load_ckpt("/content/VNL_Monocular_Depth_Prediction/ResNext101_32x4d_NYU.pth", model)

with torch.no_grad():
    img = cv2.imread("/content/VNL_Monocular_Depth_Prediction/test_any_imgs_examples/107_r.png")
    img_resize = cv2.resize(img, (int(img.shape[1]), int(img.shape[0])),
                            interpolation=cv2.INTER_LINEAR)
    img_torch = scale_torch(img_resize, 255)
    img_torch = img_torch[None, :, :, :].cpu()  # add batch dimension
    _, pred_depth_softmax = model.depth_model(img_torch)
    pred_depth = bins_to_depth(pred_depth_softmax)
    pred_depth = pred_depth.cpu().numpy().squeeze()
    # Scale to the uint16 range (factor 60000) for visualization only.
    pred_depth_scale = (pred_depth / pred_depth.max() * 60000).astype(np.uint16)
    cv2.imwrite("depth.png", pred_depth_scale)
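# A minimal sketch of what `bins_to_depth` computes (hypothetical: the repo's
# actual implementation may differ in detail). The depth head predicts a
# softmax over discretized depth bins in log10 space; continuous depth is
# recovered as the soft-weighted sum of the bin centers, exponentiated back
# to metric scale.
def bins_to_depth_sketch(pred_softmax, bin_centers_log10):
    # pred_softmax: [N, num_bins, H, W], bin_centers_log10: [num_bins]
    log_depth = torch.sum(pred_softmax * bin_centers_log10.view(1, -1, 1, 1), dim=1)
    return 10 ** log_depth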
# TestOptions, CustomerDataLoader, merge_cfg_from_file, evaluate_err and
# resize_image are further repo-local helpers (not imported in this snippet).
def test(model_path):
    test_args = TestOptions().parse()
    test_args.thread = 0
    test_args.batchsize = 1
    merge_cfg_from_file(test_args)

    data_loader = CustomerDataLoader(test_args)
    test_datasize = len(data_loader)
    logger.info('{:>15}: {:<30}'.format('test_data_size', test_datasize))

    # load model
    model = MetricDepthModel()
    model.eval()
    test_args.load_ckpt = model_path

    # load checkpoint
    if test_args.load_ckpt:
        load_ckpt(test_args, model)
    model.cuda()
    # model = torch.nn.DataParallel(model)

    # accumulators for the evaluation metrics
    smoothed_absRel = SmoothedValue(test_datasize)
    smoothed_rms = SmoothedValue(test_datasize)
    smoothed_logRms = SmoothedValue(test_datasize)
    smoothed_squaRel = SmoothedValue(test_datasize)
    smoothed_silog = SmoothedValue(test_datasize)
    smoothed_silog2 = SmoothedValue(test_datasize)
    smoothed_log10 = SmoothedValue(test_datasize)
    smoothed_delta1 = SmoothedValue(test_datasize)
    smoothed_delta2 = SmoothedValue(test_datasize)
    smoothed_delta3 = SmoothedValue(test_datasize)
    smoothed_whdr = SmoothedValue(test_datasize)
    smoothed_criteria = {'err_absRel': smoothed_absRel, 'err_squaRel': smoothed_squaRel,
                         'err_rms': smoothed_rms, 'err_silog': smoothed_silog,
                         'err_logRms': smoothed_logRms, 'err_silog2': smoothed_silog2,
                         'err_delta1': smoothed_delta1, 'err_delta2': smoothed_delta2,
                         'err_delta3': smoothed_delta3, 'err_log10': smoothed_log10,
                         'err_whdr': smoothed_whdr}

    for i, data in enumerate(data_loader):
        out = model.inference(data)
        pred_depth = torch.squeeze(out['b_fake'])
        img_path = data['A_paths']
        invalid_side = data['invalid_side'][0]
        # crop the invalid borders, undo the resize ratio, restore raw resolution
        pred_depth = pred_depth[invalid_side[0]:pred_depth.size(0) - invalid_side[1], :]
        pred_depth = pred_depth / data['ratio'].cuda()  # scale the depth
        pred_depth = resize_image(pred_depth, torch.squeeze(data['B_raw']).shape)
        smoothed_criteria = evaluate_err(pred_depth, data['B_raw'], smoothed_criteria,
                                         mask=(45, 471, 41, 601), scale=10.)

        # save images
        model_name = test_args.load_ckpt.split('/')[-1].split('.')[0]
        image_dir = os.path.join(cfg.ROOT_DIR, './evaluation', cfg.MODEL.ENCODER, model_name)
        if not os.path.exists(image_dir):
            os.makedirs(image_dir)
        img_name = img_path[0].split('/')[-1]
        # plt.imsave(os.path.join(image_dir, 'd_' + img_name), pred_depth, cmap='rainbow')
        # cv2.imwrite(os.path.join(image_dir, 'rgb_' + img_name), data['A_raw'].numpy().squeeze())
        # print('processing (%04d)-th image...\n%s' % (i, img_path))

    # `f` is an open, writable results file; the caller must open it before
    # calling test() (see the usage sketch below).
    f.write("tested model:" + model_path)
    f.write('\n')
    f.write("###############absREL ERROR:" + str(smoothed_criteria['err_absRel'].GetGlobalAverageValue()))
    f.write('\n')
    # silog is the scale-invariant log RMSE: with d = log(pred) - log(gt),
    # err_silog2 accumulates the mean of d**2 and err_silog the mean of d,
    # so the error is sqrt(E[d^2] - E[d]^2).
    f.write("###############silog ERROR:" + str(
        np.sqrt(smoothed_criteria['err_silog2'].GetGlobalAverageValue() -
                (smoothed_criteria['err_silog'].GetGlobalAverageValue()) ** 2)))
    f.write('\n')
    f.write("###############log10 ERROR:" + str(smoothed_criteria['err_log10'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############RMS ERROR:" + str(np.sqrt(smoothed_criteria['err_rms'].GetGlobalAverageValue())))
    f.write('\n')
    f.write("###############delta_1 ERROR:" + str(smoothed_criteria['err_delta1'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############delta_2 ERROR:" + str(smoothed_criteria['err_delta2'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############delta_3 ERROR:" + str(smoothed_criteria['err_delta3'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############squaRel ERROR:" + str(smoothed_criteria['err_squaRel'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############logRms ERROR:" + str(np.sqrt(smoothed_criteria['err_logRms'].GetGlobalAverageValue())))
    f.write('\n')
    f.write('-----------------------------------------------------------------------------')
    f.write('\n')
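# Usage sketch (hypothetical file name and checkpoint path): open the results
# file that test() writes to before calling it, e.g.
#
#   f = open('test_results.txt', 'a')
#   test('/content/VNL_Monocular_Depth_Prediction/ResNext101_32x4d_NYU.pth')
#   f.close()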
from lib.utils.logging import setup_logging, SmoothedValue
import matplotlib.pyplot as plt

logger = setup_logging(__name__)

if __name__ == '__main__':
    test_args = TestOptions().parse()
    test_args.thread = 1
    test_args.batchsize = 1
    merge_cfg_from_file(test_args)

    data_loader = CustomerDataLoader(test_args)
    test_datasize = len(data_loader)
    logger.info('{:>15}: {:<30}'.format('test_data_size', test_datasize))

    # load model
    model = MetricDepthModel()
    model.eval()

    # load checkpoint
    if test_args.load_ckpt:
        load_ckpt(test_args, model)
    model.cuda()
    model = torch.nn.DataParallel(model)

    # test
    smoothed_absRel = SmoothedValue(test_datasize)
    smoothed_rms = SmoothedValue(test_datasize)
    smoothed_logRms = SmoothedValue(test_datasize)
    smoothed_squaRel = SmoothedValue(test_datasize)
    smoothed_silog = SmoothedValue(test_datasize)