def get_depth(img_path, parameter=None):
    """Predict a depth map for the image at *img_path*.

    Args:
        img_path: Path to the input image handed to ``test_simple``.
        parameter: Path to a checkpoint/parameter file; its basename minus
            the last 10 characters selects the model weights passed to
            ``create_model``. Defaults to the module-level ``parameter``
            binding for backward compatibility (see note below).

    Returns:
        The depth prediction produced by ``test_simple``.
    """
    if parameter is None:
        # BUG FIX: the original body read a free variable ``parameter`` that
        # this function never received (NameError unless some caller set it
        # as a module global). Accept it as an argument instead, keeping the
        # old global lookup as the fallback so existing callers still work.
        parameter = globals()["parameter"]
    parameters = os.path.basename(parameter)
    # Strip a fixed 10-character suffix from the checkpoint file name to get
    # the weight-set name — presumably a file extension/suffix like
    # "_net_G.pth"; TODO confirm against the checkpoint naming convention.
    parameters = parameters[0:-10:]
    opt = TrainOptions().parse(
    )  # set CUDA_VISIBLE_DEVICES before import torch
    model = create_model(opt, parameters)
    pred_depth = test_simple(model, img_path)
    return pred_depth
Exemple #2
0
def get_depthmap_img(img_path, save_path, size, checkpoints_dir):
    """Run depth estimation on one image and return the output file path.

    Builds a training-style option object pointing at *checkpoints_dir*,
    loads the model, and lets ``test_simple`` write the depth map of the
    image at *img_path* (resized to *size*) under *save_path*.
    """
    options = TrainOptions()
    options.gpu_ids = '0,1'
    options.isTrain = True
    options.checkpoints_dir = checkpoints_dir
    options.name = 'test_local/'

    depth_model = create_model(options)
    result_path = test_simple(depth_model, img_path, size, save_path)

    print("We are done")
    return result_path
Exemple #3
0
def run(image_in, parameter):
    """Read an image, run MegaDepth inference, and save the depth map.

    Args:
        image_in: Path/URI readable by ``io.imread``.
        parameter: Checkpoint file path; its basename minus the last 10
            characters names the weight set given to ``create_model``.

    Returns:
        Tuple of (depth prediction, path of the saved depth image).
    """
    global img_path, parameters
    # Stage the input image in the demo folder the model pipeline expects.
    source_pixels = io.imread(image_in)
    image_in_path = 'MegaDepth/demo_img/demo.jpg'
    io.imsave(image_in_path, source_pixels)
    # Derive the weight-set name from the parameter file name (drop the
    # trailing 10 characters of the basename).
    parameters = os.path.basename(parameter)
    parameters = parameters[0:-10:]
    opt = TrainOptions().parse(
    )  # set CUDA_VISIBLE_DEVICES before import torch
    model = create_model(opt, parameters)
    # Run inference, then persist the predicted depth map next to the input.
    pred_depth = test_simple(model)
    position = 'MegaDepth/demo_img/demo1.jpg'
    io.imsave(position, pred_depth)
    print("We are done")
    return pred_depth, position
Exemple #4
0
def get_depthmap_from_list(img_path_list, save_path, checkpoints_dir):
    """Compute a depth map for every image path in *img_path_list*.

    The model is built once and reused for each image; each image's own
    pixel size is read from disk and forwarded to ``test_simple``.

    Returns:
        List of output paths, one per input image.
    """
    options = TrainOptions()
    options.gpu_ids = '0,1'
    options.isTrain = True
    options.checkpoints_dir = checkpoints_dir
    options.name = 'test_local/'

    depth_model = create_model(options)
    out_paths = []
    for image_file in img_path_list:
        dims = Image.open(image_file).size
        print(dims)  # keep the original per-image size trace
        out_paths.append(test_simple(depth_model, image_file, dims, save_path))

    print("We are done")
    return out_paths
Exemple #5
0
    # visualize prediction using inverse depth, so that we don't need sky segmentation (if you want to use RGB map for visualization, \
    # you have to run semantic segmentation to mask the sky first since the depth of sky is random from CNN)
    pred_inv_depth = 1/pred_depth
    pred_inv_depth = pred_inv_depth.data.cpu().numpy()
    # you might also use percentile for better visualization
    pred_inv_depth = pred_inv_depth/np.amax(pred_inv_depth)

    pred_inv_depth[indices_flip] = np.flip(pred_inv_depth[1:3], 2)

    for i, im in enumerate(pred_inv_depth):
        pred_inv_depth[i] -= np.min(im)
        pred_inv_depth[i] /= np.max(pred_inv_depth[i])

    mean_pred = np.mean(pred_inv_depth, axis = 0)

    save_filename = os.path.splitext(os.path.basename(img_file))[0] + 'meanpred_depth'
    io.imsave(os.path.join(save_path, save_filename + '.jpg'), mean_pred)
    np.save(os.path.join(save_path, save_filename + '.npy'), mean_pred )
    print("Image saved to "+ os.path.join(save_path, save_filename))
    
if __name__ == "__main__":
    img_path = '../DataSV/milacropresize.jpg'
    save_path = '.'
    #size = [512,512]
    opt = TrainOptions().parse()
    model = create_model(opt)
    test_simple(model, img_path, save_path)#size,save_path)
    print("We are done")


Exemple #6
0
def run():
    """Build the model from parsed default options and run ``test_simple``."""
    # Parse CLI options first — this sets CUDA_VISIBLE_DEVICES before torch
    # is imported by the model code.
    options = TrainOptions().parse()
    test_simple(create_model(options))