def test(model_path):
    test_args = TestOptions().parse()
    test_args.thread = 0
    test_args.batchsize = 1
    merge_cfg_from_file(test_args)

    data_loader = CustomerDataLoader(test_args)
    test_datasize = len(data_loader)
    logger.info('{:>15}: {:<30}'.format('test_data_size', test_datasize))

    # load model
    model = MetricDepthModel()
    model.eval()

    test_args.load_ckpt = model_path
    # load checkpoint
    if test_args.load_ckpt:
        load_ckpt(test_args, model)
    model.cuda()
    # model = torch.nn.DataParallel(model)

    # test
    smoothed_absRel = SmoothedValue(test_datasize)
    smoothed_rms = SmoothedValue(test_datasize)
    smoothed_logRms = SmoothedValue(test_datasize)
    smoothed_squaRel = SmoothedValue(test_datasize)
    smoothed_silog = SmoothedValue(test_datasize)
    smoothed_silog2 = SmoothedValue(test_datasize)
    smoothed_log10 = SmoothedValue(test_datasize)
    smoothed_delta1 = SmoothedValue(test_datasize)
    smoothed_delta2 = SmoothedValue(test_datasize)
    smoothed_delta3 = SmoothedValue(test_datasize)
    smoothed_whdr = SmoothedValue(test_datasize)
    smoothed_criteria = {'err_absRel': smoothed_absRel, 'err_squaRel': smoothed_squaRel,
                         'err_rms': smoothed_rms, 'err_silog': smoothed_silog,
                         'err_logRms': smoothed_logRms, 'err_silog2': smoothed_silog2,
                         'err_delta1': smoothed_delta1, 'err_delta2': smoothed_delta2,
                         'err_delta3': smoothed_delta3, 'err_log10': smoothed_log10,
                         'err_whdr': smoothed_whdr}

    for i, data in enumerate(data_loader):
        out = model.inference(data)
        pred_depth = torch.squeeze(out['b_fake'])
        img_path = data['A_paths']
        invalid_side = data['invalid_side'][0]
        # crop off the invalid (padded) top and bottom borders
        pred_depth = pred_depth[invalid_side[0]:pred_depth.size(0) - invalid_side[1], :]
        # scale the depth back to the raw metric range
        pred_depth = pred_depth / data['ratio'].cuda()
        pred_depth = resize_image(pred_depth, torch.squeeze(data['B_raw']).shape)
        # (45, 471, 41, 601) matches the standard NYUDv2 evaluation crop
        smoothed_criteria = evaluate_err(pred_depth, data['B_raw'], smoothed_criteria,
                                         mask=(45, 471, 41, 601), scale=10.)

        # save images
        model_name = test_args.load_ckpt.split('/')[-1].split('.')[0]
        image_dir = os.path.join(cfg.ROOT_DIR, './evaluation', cfg.MODEL.ENCODER, model_name)
        if not os.path.exists(image_dir):
            os.makedirs(image_dir)
        img_name = img_path[0].split('/')[-1]
        #plt.imsave(os.path.join(image_dir, 'd_' + img_name), pred_depth, cmap='rainbow')
        #cv2.imwrite(os.path.join(image_dir, 'rgb_' + img_name), data['A_raw'].numpy().squeeze())
        # print('processing (%04d)-th image... %s' % (i, img_path))

    # print("###############absREL ERROR: %f", smoothed_criteria['err_absRel'].GetGlobalAverageValue())
    # print("###############silog ERROR: %f", np.sqrt(smoothed_criteria['err_silog2'].GetGlobalAverageValue() - (
    #     smoothed_criteria['err_silog'].GetGlobalAverageValue()) ** 2))
    # print("###############log10 ERROR: %f", smoothed_criteria['err_log10'].GetGlobalAverageValue())
    # print("###############RMS ERROR: %f", np.sqrt(smoothed_criteria['err_rms'].GetGlobalAverageValue()))
    # print("###############delta_1 ERROR: %f", smoothed_criteria['err_delta1'].GetGlobalAverageValue())
    # print("###############delta_2 ERROR: %f", smoothed_criteria['err_delta2'].GetGlobalAverageValue())
    # print("###############delta_3 ERROR: %f", smoothed_criteria['err_delta3'].GetGlobalAverageValue())
    # print("###############squaRel ERROR: %f", smoothed_criteria['err_squaRel'].GetGlobalAverageValue())
    # print("###############logRms ERROR: %f", np.sqrt(smoothed_criteria['err_logRms'].GetGlobalAverageValue()))

    # NOTE: `f` is an open results file; it is not defined in this excerpt and is
    # assumed to be created by the caller, e.g. f = open('evaluation_results.txt', 'a').
    f.write("tested model:" + model_path)
    f.write('\n')
    f.write("###############absREL ERROR:" + str(smoothed_criteria['err_absRel'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############silog ERROR:" + str(
        np.sqrt(smoothed_criteria['err_silog2'].GetGlobalAverageValue() -
                (smoothed_criteria['err_silog'].GetGlobalAverageValue()) ** 2)))
    f.write('\n')
    f.write("###############log10 ERROR:" + str(smoothed_criteria['err_log10'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############RMS ERROR:" + str(np.sqrt(smoothed_criteria['err_rms'].GetGlobalAverageValue())))
    f.write('\n')
    f.write("###############delta_1 ERROR:" + str(smoothed_criteria['err_delta1'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############delta_2 ERROR:" + str(smoothed_criteria['err_delta2'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############delta_3 ERROR:" + str(smoothed_criteria['err_delta3'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############squaRel ERROR:" + str(smoothed_criteria['err_squaRel'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############logRms ERROR:" + str(np.sqrt(smoothed_criteria['err_logRms'].GetGlobalAverageValue())))
    f.write('\n')
    f.write('-----------------------------------------------------------------------------')
    f.write('\n')
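
# The silog values written above reconstruct the scale-invariant logarithmic
# error from two running averages: silog = sqrt(E[d^2] - E[d]^2), where
# d = log(pred) - log(gt). A minimal sketch of that reduction (the helper name
# silog_from_moments is hypothetical, not part of the original script):
def silog_from_moments(mean_sq_log_diff, mean_log_diff):
    """Combine the running means of d^2 and d into the final silog score."""
    return np.sqrt(mean_sq_log_diff - mean_log_diff ** 2)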
    }
    bg_val_metrics = {
        'abs_rel': bg_smoothed_criteria['err_absRel'].GetGlobalAverageValue(),
        'silog': np.sqrt(bg_smoothed_criteria['err_silog2'].GetGlobalAverageValue() -
                         (bg_smoothed_criteria['err_silog'].GetGlobalAverageValue()) ** 2)
    }
    print("global: ", val_metrics)
    print("roi: ", rois_val_metrics)
    print("bg: ", bg_val_metrics)
    return val_metrics


if __name__ == '__main__':
    # train_args / val_args are assumed to be parsed above this excerpt
    # by the train/val option parsers.
    train_dataloader = CustomerDataLoader(train_args)
    train_datasize = len(train_dataloader)
    gpu_num = torch.cuda.device_count()
    merge_cfg_from_file(train_args)

    val_dataloader = CustomerDataLoader(val_args)
    val_datasize = len(val_dataloader)

    # tensorboard logger
    if train_args.use_tfboard:
        from tensorboardX import SummaryWriter
        tblogger = SummaryWriter(cfg.TRAIN.LOG_DIR)

    # training status for logging
    training_stats = TrainingStats(
        train_args, cfg.TRAIN.LOG_INTERVAL,
import os

import numpy as np
import torch
import matplotlib.pyplot as plt

from data.load_dataset import CustomerDataLoader
from lib.models.image_transfer import resize_image
from lib.utils.evaluate_depth_error import evaluate_err
from lib.models.metric_depth_model import MetricDepthModel
from lib.utils.logging import setup_logging, SmoothedValue
# TestOptions, merge_cfg_from_file, cfg, and load_ckpt are used below but their
# modules are not shown in this excerpt; import them from this repo's
# options/config/checkpoint utilities.

logger = setup_logging(__name__)


if __name__ == '__main__':
    test_args = TestOptions().parse()
    test_args.thread = 1
    test_args.batchsize = 1
    merge_cfg_from_file(test_args)

    data_loader = CustomerDataLoader(test_args)
    test_datasize = len(data_loader)
    logger.info('{:>15}: {:<30}'.format('test_data_size', test_datasize))

    # load model
    model = MetricDepthModel()
    model.eval()

    # load checkpoint
    if test_args.load_ckpt:
        load_ckpt(test_args, model)
    model.cuda()
    model = torch.nn.DataParallel(model)

    # test
    smoothed_absRel = SmoothedValue(test_datasize)
def main():
    test_args = TestOptions().parse()
    test_args.thread = 1     # test code only supports thread = 1
    test_args.batchsize = 1  # test code only supports batchSize = 1

    data_loader = CustomerDataLoader(test_args)
    test_datasize = len(data_loader)
    logger.info('{:>15}: {:<30}'.format('test_data_size', test_datasize))

    # load model
    model = DepthNormal()
    # evaluate mode
    model.eval()

    # load checkpoint
    if test_args.load_ckpt:
        load_ckpt(test_args, model)
    model.cuda()
    model = torch.nn.DataParallel(model)

    for i, data in enumerate(data_loader):
        out = model.module.inference(data)
        # np.squeeze dispatches to Tensor.squeeze here, so both stay torch tensors
        pred_depth = np.squeeze(out['b_fake']) * 80.  # [h, w]
        pred_conf = np.squeeze(out['b_fake_conf'])    # [c, h, w]

        # the image has been padded to the size (385, 1243); crop the padding off
        pred_depth_crop = pred_depth[data['pad_raw'][0][0]:, data['pad_raw'][0][2]:]
        pred_conf_crop = pred_conf[:, data['pad_raw'][0][0]:, data['pad_raw'][0][2]:]

        # keep only points whose predicted confidence clears the threshold
        sample_th = 0.15
        sample_mask = get_sample_mask(pred_conf_crop.cpu().numpy(), threshold=sample_th)  # [h, w]

        #######################################################################################
        # added by users
        img_name = data['A_paths'][0].split('/')[-1][:-4]
        calib_name = img_name + '.txt'
        calib_dir = os.path.join(calib_fold, calib_name)
        camera_para = np.genfromtxt(calib_dir, delimiter=' ', skip_footer=3, dtype=None)
        P3_0 = camera_para[3]
        P2_0 = camera_para[2]
        # derive P3_2 from P3_0 by removing cam2's baseline term
        # (note: this aliases and mutates the camera_para row in place)
        P3_2 = P3_0
        P3_2[4] -= P2_0[4]
        R0_rect = np.genfromtxt(calib_dir, delimiter=' ', skip_header=4, skip_footer=2)
        Tr_velo_to_cam0 = np.genfromtxt(calib_dir, delimiter=' ', skip_header=5, skip_footer=1)

        # back-project the cropped depth map into a point cloud
        # (cx, cy, fx, fy taken from the P matrix)
        pcd_cam2 = reconstruct_3D(pred_depth_crop.cpu().numpy(), P3_2[3], P3_2[7], P3_2[1], P3_2[6])
        # transfer points in cam2 coordinates to cam0 coordinates
        pcd_cam0 = pcd_cam2 - np.array([[[P2_0[4] / P2_0[1]]],
                                        [[P2_0[8] / P2_0[1]]],
                                        [[P2_0[12] / P2_0[1]]]])
        # transfer points in cam0 coordinates to velodyne coordinates
        pcd_velo = transfer_points_in_cam0_to_velo(pcd_cam0, R0_rect, Tr_velo_to_cam0)

        rgb = data['A_raw'][0].cpu().numpy()
        save_ply(pcd_velo, rgb, os.path.join(pcd_folder, img_name) + '_sample.ply',
                 sample_mask=sample_mask)
        #save_ply(pcd_cam2, rgb, os.path.join(pcd_folder, img_name) + '.ply')
        print('saved', img_name)
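
# The excerpt does not show an entry point for this script; a minimal sketch,
# assuming the module-level globals referenced in main() above (calib_fold,
# pcd_folder, logger) are configured before it runs:
if __name__ == '__main__':
    main()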