from io import BytesIO

import numpy as np

from data import extract_zip


def load_test_data(test_data_zip_file='nyu_test.zip'):
    # Unpack the Eigen test split (RGB, ground-truth depth, evaluation crop)
    # from the NYU Depth v2 test archive.
    print('Loading test data...', end='')
    data = extract_zip(test_data_zip_file)
    rgb = np.load(BytesIO(data['eigen_test_rgb.npy']))
    depth = np.load(BytesIO(data['eigen_test_depth.npy']))
    crop = np.load(BytesIO(data['eigen_test_crop.npy']))
    print('Test data loaded.\n')
    return {'rgb': rgb, 'depth': depth, 'crop': crop}
def load_test_data(test_data_zip_file='nyu_test.zip'):
    # Variant of the loader with local imports, so it can be dropped into a
    # script without touching the module-level imports.
    from io import BytesIO

    import numpy as np

    from data import extract_zip

    print('Loading test data...')
    data = extract_zip(test_data_zip_file)
    rgb = np.load(BytesIO(data['eigen_test_rgb.npy']))
    depth = np.load(BytesIO(data['eigen_test_depth.npy']))
    crop = np.load(BytesIO(data['eigen_test_crop.npy']))
    print('Test data loaded.\n')
    return {'rgb': rgb, 'depth': depth, 'crop': crop}
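# The loaders above rely on a helper `extract_zip` from data.py whose
# implementation is not shown here. A minimal sketch consistent with how it is
# used (a dict mapping archive member names to raw bytes, suitable for
# wrapping in BytesIO and passing to np.load / Image.open):
from zipfile import ZipFile


def extract_zip(input_zip):
    # Read every member of the archive into memory, keyed by its file name.
    with ZipFile(input_zip) as archive:
        return {name: archive.read(name) for name in archive.namelist()}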
# Custom object needed for inference and training
custom_objects = {
    'BilinearUpSampling2D': BilinearUpSampling2D,
    'depth_loss_function': depth_loss_function
}

# Load model into GPU / CPU
print('Loading model...')
model = load_model(args.model, custom_objects=custom_objects, compile=False)

# Load test data
print('Loading test data...', end='')
import numpy as np
from io import BytesIO
from data import extract_zip

data = extract_zip('nyu_test.zip')
rgb = np.load(BytesIO(data['eigen_test_rgb.npy']))
depth = np.load(BytesIO(data['eigen_test_depth.npy']))
crop = np.load(BytesIO(data['eigen_test_crop.npy']))
print('Test data loaded.\n')

start = time.time()

print('Testing...')
e = evaluate(model, rgb, depth, crop, batch_size=4)

print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('a1', 'a2', 'a3', 'rel', 'rms', 'log_10'))
print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(e[0], e[1], e[2], e[3], e[4], e[5]))
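# `evaluate` is expected to return the six Eigen-split depth metrics in the
# order printed above. Its exact implementation is not shown in this snippet;
# a minimal per-image sketch, assuming `gt` and `pred` are depth maps in
# metres, is:
import numpy as np


def compute_errors(gt, pred):
    # Threshold accuracies: fraction of pixels within 1.25, 1.25^2, 1.25^3 of GT
    thresh = np.maximum(gt / pred, pred / gt)
    a1 = (thresh < 1.25).mean()
    a2 = (thresh < 1.25 ** 2).mean()
    a3 = (thresh < 1.25 ** 3).mean()
    # Mean absolute relative error
    abs_rel = np.mean(np.abs(gt - pred) / gt)
    # Root mean squared error
    rms = np.sqrt(np.mean((gt - pred) ** 2))
    # Mean absolute error in log10 space
    log_10 = np.mean(np.abs(np.log10(gt) - np.log10(pred)))
    return a1, a2, a3, abs_rel, rms, log_10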
    # (condition not shown: this branch rebuilds the model and restores
    #  weights from a multi-GPU checkpoint)
    model = create_model()
    load_multigpu_checkpoint_weights(model, args.model)
    model.save(file + '.h5')
else:
    model = load_model(args.model, custom_objects=custom_objects, compile=False)
    model.save(file + '.h5')

# Load test data
print('Loading test data...', end='')
if not args.eval_csv:
    import numpy as np
    from io import BytesIO
    from data import extract_zip

    data = extract_zip('../data/nyu_test.zip')
    rgb = np.load(BytesIO(data['eigen_test_rgb.npy']))
    depth = np.load(BytesIO(data['eigen_test_depth.npy']))
    crop = np.load(BytesIO(data['eigen_test_crop.npy']))
else:
    from data import get_evaluation_data

    eval_data = get_evaluation_data(args.eval_csv, '../data/')
    rgb = eval_data['rgb']
    depth = eval_data['depth']
    crop = None
print('Test data loaded.\n')

start = time.time()
print('Testing...')
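# `get_evaluation_data` is imported from data.py but not shown here. A
# hypothetical sketch, assuming the CSV lists comma-separated RGB/depth image
# paths relative to the given data directory (field layout and return keys
# are assumptions, not the project's confirmed format):
import os

import numpy as np
from PIL import Image


def get_evaluation_data(csv_file, data_dir):
    rgb_list, depth_list = [], []
    with open(csv_file) as f:
        for row in f:
            row = row.strip()
            if not row:
                continue
            rgb_path, depth_path = row.split(',')[:2]
            rgb_list.append(np.asarray(Image.open(os.path.join(data_dir, rgb_path))))
            depth_list.append(np.asarray(Image.open(os.path.join(data_dir, depth_path))))
    return {'rgb': np.stack(rgb_list), 'depth': np.stack(depth_list)}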
# Custom object needed for inference and training
custom_objects = {'depth_loss_function': depth_loss_function}

# Load model into GPU / CPU
print('Loading model...')
# model = load_model('/home/user01/storage/NYU Depth Analysis/src/models/1595602642-n25344-e25-bs2-lr0.0001-densedepth_nyu/model',
#                    custom_objects=custom_objects, compile=False)
model = depth_estimate_model.DepthEstimate()
model_weights = 'F:/Work/Work/Outdu Internship/nyu_depth_v2_dataset/src/models/1596012197-n25344-e25-bs2-lr0.0001-densedepth_nyu/weights.23-0.12.ckpt'
model.load_weights(model_weights)  # optionally: by_name=True, skip_mismatch=True
print('Model weights loaded from path - ', model_weights)

# Load test data
print('Loading test data...', end='')
import numpy as np
from io import BytesIO
from data import extract_zip

data = extract_zip('F:/Work/Work/Outdu Internship/nyu_depth_v2_dataset/nyu_test.zip')
rgb = np.load(BytesIO(data['eigen_test_rgb.npy']))
depth = np.load(BytesIO(data['eigen_test_depth.npy']))
crop = np.load(BytesIO(data['eigen_test_crop.npy']))
print('Test data loaded.\n')

start = time.time()

print('Testing...')
e = evaluate(model, rgb, depth, crop, batch_size=6)

print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('a1', 'a2', 'a3', 'rel', 'rms', 'log_10'))
print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(e[0], e[1], e[2], e[3], e[4], e[5]))
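# Optional sanity check before the full run: push one test image through the
# restored model and inspect the output shape, to confirm the checkpoint was
# loaded into the right architecture. The [0, 1] scaling below is an
# assumption; use whatever preprocessing `evaluate` applies in this project.
sample = np.clip(rgb[:1].astype('float32') / 255, 0, 1)
sample_pred = model.predict(sample)
print('Prediction shape for one sample:', sample_pred.shape)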
def evaluate(model, batch_size=6, verbose=True, data_zip_file='MPI_2.zip'):
    # Evaluation on the MPI-Sintel dataset (albedo / shading decomposition)
    from io import BytesIO

    from data import extract_zip

    data = extract_zip(data_zip_file)
    mpi_test = list(row.split(',')
                    for row in data['MPI_2/data_test.csv'].decode("utf-8").split('\n')
                    if len(row) > 0)
    N = len(mpi_test)

    def compute_errors(gt, pred):
        thresh = np.maximum((gt / pred), (pred / gt))
        a1 = (thresh < 1.25).mean()
        a2 = (thresh < 1.25 ** 2).mean()
        a3 = (thresh < 1.25 ** 3).mean()
        abs_rel = np.mean(np.abs(gt - pred) / gt)
        mse = ssq_error(gt, pred) / (448 * 1024)
        lmse = local_error(gt, pred, 20, 10)
        return a1, a2, a3, abs_rel, mse, lmse

    alb_scores = np.zeros((6, N))  # six metrics per image
    shad_scores = np.zeros((6, N))

    bs = batch_size
    shape_rgb = (bs, 448, 1024, 3)
    shape_alb = (bs, 448, 1024, 3)
    shape_shad = (bs, 448, 1024, 3)
    rgb, alb, shad = np.zeros(shape_rgb), np.zeros(shape_alb), np.zeros(shape_shad)

    for i in range(N // bs):
        index = i * bs

        # Load a batch of test samples
        for j in range(bs):
            sample = mpi_test[index + j]
            x = np.float32(np.clip(np.asarray(Image.open(BytesIO(data[sample[0]])).resize((1024, 448))) / 255, 0, 1))
            y = np.float32(np.clip(np.asarray(Image.open(BytesIO(data[sample[1]])).resize((1024, 448))) / 255, 0, 1))
            z = np.float32(np.clip(np.asarray(Image.open(BytesIO(data[sample[2]])).resize((1024, 448))) / 255, 0, 1))
            rgb[j] = x    # input
            alb[j] = y    # albedo
            shad[j] = z   # shading

        # Compute predictions for the batch
        true_alb, true_shad = alb, shad
        (pred_alb, pred_shad) = predict(model, rgb, batch_size=bs)

        # Compute errors per image in the batch
        for j in range(len(true_alb)):
            alb_errors = compute_errors(true_alb[j], pred_alb[j])
            shad_errors = compute_errors(true_shad[j], pred_shad[j])
            for k in range(len(alb_errors)):
                alb_scores[k][(i * bs) + j] = alb_errors[k]
                shad_scores[k][(i * bs) + j] = shad_errors[k]

    e_alb = alb_scores.mean(axis=1)
    e_shad = shad_scores.mean(axis=1)

    if verbose:
        # Albedo metrics
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('a1', 'a2', 'a3', 'rel', 'mse', 'lmse'))
        print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(
            e_alb[0], e_alb[1], e_alb[2], e_alb[3], e_alb[4], e_alb[5]))
        # Shading metrics
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('a1', 'a2', 'a3', 'rel', 'mse', 'lmse'))
        print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(
            e_shad[0], e_shad[1], e_shad[2], e_shad[3], e_shad[4], e_shad[5]))

    return e_alb, e_shad
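# `ssq_error` and `local_error` used by compute_errors above are not defined
# in this snippet. They appear to follow the scale-invariant MSE / LMSE
# metrics commonly used for intrinsic image decomposition (Grosse et al.); a
# minimal sketch under that assumption:
import numpy as np


def ssq_error(correct, estimate):
    # Sum-of-squares error after the best single scale factor is applied to
    # the estimate (scale-invariant, per image).
    if np.sum(estimate ** 2) > 1e-5:
        alpha = np.sum(correct * estimate) / np.sum(estimate ** 2)
    else:
        alpha = 0.0
    return np.sum((correct - alpha * estimate) ** 2)


def local_error(correct, estimate, window_size, window_shift):
    # LMSE: scale-invariant error accumulated over overlapping local windows,
    # normalised by the total energy of the ground truth in those windows.
    M, N = correct.shape[0], correct.shape[1]
    ssq, total = 0.0, 0.0
    for i in range(0, M - window_size + 1, window_shift):
        for j in range(0, N - window_size + 1, window_shift):
            correct_curr = correct[i:i + window_size, j:j + window_size]
            estimate_curr = estimate[i:i + window_size, j:j + window_size]
            ssq += ssq_error(correct_curr, estimate_curr)
            total += np.sum(correct_curr ** 2)
    return ssq / total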
parser = argparse.ArgumentParser(description='High Quality Monocular Depth Estimation via Transfer Learning')
parser.add_argument('--model', default='nyu.h5', type=str, help='Trained Keras model file.')
args = parser.parse_args()

# Custom object needed for inference and training
custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': depth_loss_function}

# Load model into GPU / CPU
print('Loading model...')
model = load_model(args.model, custom_objects=custom_objects, compile=False)

# Load test data
print('Loading test data...', end='')
import numpy as np
from io import BytesIO
from data import extract_zip

data = extract_zip('input.zip')
rgb = np.load(BytesIO(data['eigen_test_rgb.npy']))
depth = np.load(BytesIO(data['eigen_test_depth.npy']))
crop = np.load(BytesIO(data['eigen_test_crop.npy']))
print('Test data loaded.\n')

start = time.time()

print('Testing...')
e = evaluate(model, rgb, depth, crop, batch_size=6)

print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('a1', 'a2', 'a3', 'rel', 'rms', 'log_10'))
print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(e[0], e[1], e[2], e[3], e[4], e[5]))

end = time.time()
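# The elapsed-time report and the command-line invocation are not shown
# above; a small completion of the same pattern (the script name
# `evaluate.py` is an assumption):
print('Testing took {:.1f} seconds'.format(end - start))

# Typical invocation:
#   python evaluate.py --model nyu.h5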