Code example #1
File: viz3D.py Project: vbelissen/packnet-sfm
def main(args):

    # Initialize horovod
    hvd_init()

    # Parse arguments
    config, state_dict = parse_test_file(args.checkpoint)

    # If no image shape is provided, use the checkpoint one
    image_shape = args.image_shape
    if image_shape is None:
        image_shape = config.datasets.augmentation.image_shape

    # Set debug if requested
    set_debug(config.debug)

    # Initialize model wrapper from checkpoint arguments
    model_wrapper = ModelWrapper(config, load_datasets=False)
    # Restore monodepth_model state
    model_wrapper.load_state_dict(state_dict)

    # change to half precision for evaluation if requested
    dtype = torch.float16 if args.half else None

    # Send model to GPU if available
    if torch.cuda.is_available():
        model_wrapper = model_wrapper.to('cuda:{}'.format(rank()), dtype=dtype)

    # Set to eval mode
    model_wrapper.eval()

    if os.path.isdir(args.input):
        # If the input is a folder, search for image files
        files = []
        for ext in ['png', 'jpg']:
            files.extend(glob(os.path.join(args.input, '*.{}'.format(ext))))
        files.sort()
        print0('Found {} files'.format(len(files)))
    else:
        # Otherwise, use it as is
        files = [args.input]

    # Process each file
    for fn in files[rank()::world_size()]:
        infer_plot_and_save_3D_pcl(fn, args.output, model_wrapper, image_shape,
                                   args.half, args.save)
Code example #2
def main(args):

    # Initialize horovod
    hvd_init()

    # Parse arguments
    config1, state_dict1 = parse_test_file(args.checkpoint1)
    config2, state_dict2 = parse_test_file(args.checkpoint2)
    config3, state_dict3 = parse_test_file(args.checkpoint3)
    config4, state_dict4 = parse_test_file(args.checkpoint4)

    # If no image shape is provided, use the checkpoint one
    image_shape = args.image_shape
    if image_shape is None:
        image_shape = config1.datasets.augmentation.image_shape

    # Set debug if requested
    set_debug(config1.debug)

    # Initialize model wrapper from checkpoint arguments
    model_wrapper1 = ModelWrapper(config1, load_datasets=False)
    model_wrapper2 = ModelWrapper(config2, load_datasets=False)
    model_wrapper3 = ModelWrapper(config3, load_datasets=False)
    model_wrapper4 = ModelWrapper(config4, load_datasets=False)
    # Restore monodepth_model state
    model_wrapper1.load_state_dict(state_dict1)
    model_wrapper2.load_state_dict(state_dict2)
    model_wrapper3.load_state_dict(state_dict3)
    model_wrapper4.load_state_dict(state_dict4)

    # change to half precision for evaluation if requested
    dtype = torch.float16 if args.half else None

    # Send model to GPU if available
    if torch.cuda.is_available():
        model_wrapper1 = model_wrapper1.to('cuda:{}'.format(rank()),
                                           dtype=dtype)
        model_wrapper2 = model_wrapper2.to('cuda:{}'.format(rank()),
                                           dtype=dtype)
        model_wrapper3 = model_wrapper3.to('cuda:{}'.format(rank()),
                                           dtype=dtype)
        model_wrapper4 = model_wrapper4.to('cuda:{}'.format(rank()),
                                           dtype=dtype)

    # Set to eval mode
    model_wrapper1.eval()
    model_wrapper2.eval()
    model_wrapper3.eval()
    model_wrapper4.eval()

    if os.path.isdir(args.input1):
        # If the input is a folder, search for image files
        files1 = []
        for ext in ['png', 'jpg']:
            files1.extend(glob(os.path.join(args.input1, '*.{}'.format(ext))))
        files1.sort()
        print0('Found {} files'.format(len(files1)))
    else:
        # Otherwise, use it as is
        files1 = [args.input1]
    if os.path.isdir(args.input2):
        # If the input is a folder, search for image files
        files2 = []
        for ext in ['png', 'jpg']:
            files2.extend(glob(os.path.join(args.input2, '*.{}'.format(ext))))
        files2.sort()
        print0('Found {} files'.format(len(files2)))
    else:
        # Otherwise, use it as is
        files2 = [args.input2]
    if os.path.isdir(args.input3):
        # If the input is a folder, search for image files
        files3 = []
        for ext in ['png', 'jpg']:
            files3.extend(glob(os.path.join(args.input3, '*.{}'.format(ext))))
        files3.sort()
        print0('Found {} files'.format(len(files3)))
    else:
        # Otherwise, use it as is
        files3 = [args.input3]
    if os.path.isdir(args.input4):
        # If the input is a folder, search for image files
        files4 = []
        for ext in ['png', 'jpg']:
            files4.extend(glob(os.path.join(args.input4, '*.{}'.format(ext))))
        files4.sort()
        print0('Found {} files'.format(len(files4)))
    else:
        # Otherwise, use it as is
        files4 = [args.input4]

    n_files = len(files1)
    # Process each file
    for fn1, fn2, fn3, fn4 in zip(files1[rank()::world_size()],
                                  files2[rank()::world_size()],
                                  files3[rank()::world_size()],
                                  files4[rank()::world_size()]):
        infer_plot_and_save_3D_pcl(fn1, fn2, fn3, fn4, args.output1,
                                   args.output2, args.output3, args.output4,
                                   model_wrapper1, model_wrapper2,
                                   model_wrapper3, model_wrapper4,
                                   bool(int(args.hasGTdepth1)),
                                   bool(int(args.hasGTdepth2)),
                                   bool(int(args.hasGTdepth3)),
                                   bool(int(args.hasGTdepth4)), image_shape,
                                   args.half, args.save)
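Example #2 repeats the single-checkpoint pipeline of example #1 four times with numbered variables. A hedged refactor sketch that collapses the duplication, reusing the imports sketched after example #1 (load_model_wrapper is a hypothetical helper, not part of packnet-sfm):

def load_model_wrapper(checkpoint, half=False):
    # Hypothetical helper: load one checkpoint into an eval-mode ModelWrapper,
    # optionally in half precision, on this rank's GPU if one is available.
    config, state_dict = parse_test_file(checkpoint)
    wrapper = ModelWrapper(config, load_datasets=False)
    wrapper.load_state_dict(state_dict)
    dtype = torch.float16 if half else None
    if torch.cuda.is_available():
        wrapper = wrapper.to('cuda:{}'.format(rank()), dtype=dtype)
    wrapper.eval()
    return config, wrapper

# Usage: configs_and_wrappers = [load_model_wrapper(c, args.half) for c in checkpoints]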
Code example #3
File: infer_pose.py Project: rainleong/packnet-sfm
def main(args):

    # Initialize horovod
    hvd_init()

    # Parse arguments
    config, state_dict = parse_test_file(args.checkpoint)

    # If no image shape is provided, use the checkpoint one
    image_shape = args.image_shape
    if image_shape is None:
        image_shape = config.datasets.augmentation.image_shape

    # Set debug if requested
    set_debug(config.debug)

    # Initialize model wrapper from checkpoint arguments
    model_wrapper = ModelWrapper(config, load_datasets=False)
    # Restore monodepth_model state
    model_wrapper.load_state_dict(state_dict)

    # change to half precision for evaluation if requested
    dtype = torch.float16 if args.half else None

    # Send model to GPU if available
    if torch.cuda.is_available():
        model_wrapper = model_wrapper.to('cuda:{}'.format(rank()), dtype=dtype)

    # Set to eval mode
    model_wrapper.eval()

    if os.path.isdir(args.input):
        # If the input is a folder, search for image files
        files = []
        for ext in ['png', 'jpg']:
            files.extend(glob(os.path.join(args.input, '*.{}'.format(ext))))
        files.sort()
        print0('Found {} files'.format(len(files)))
    else:
        raise RuntimeError("Input needs to be a directory, not a file")

    if not os.path.isdir(args.output):
        # Create the parent folder of the output file if needed
        root, file_name = os.path.split(args.output)
        if root:
            os.makedirs(root, exist_ok=True)
    else:
        raise RuntimeError("Output needs to be a file, not a directory")
        

    # Build (previous, current, next) frame triplets, split across Horovod ranks
    list_of_files = list(zip(files[rank():-2:world_size()],
                             files[rank() + 1:-1:world_size()],
                             files[rank() + 2::world_size()]))
    if args.offset:
        list_of_files = list_of_files[args.offset:]
    if args.limit:
        list_of_files = list_of_files[:args.limit]
    for fn1, fn2, fn3 in list_of_files:
        # Estimate pose for the middle frame fn2 using fn1 and fn3 as context
        infer_and_save_pose([fn1, fn3], fn2, model_wrapper, image_shape,
                            args.half, args.save)

    # Accumulate the per-frame relative poses into a global trajectory and
    # write one "tx ty tz q3 q2 q1 q0" line per frame
    position = np.zeros(3)
    orientation = np.eye(3)
    f = open(args.output + ".txt", 'w')

    for key in sorted(poses.keys()):
        rot_matrix, translation = poses[key]

        # Compose the relative motion onto the running global pose
        orientation = orientation.dot(rot_matrix.tolist())
        position += orientation.dot(translation.tolist())

        # Convert the accumulated rotation matrix to a quaternion
        q = transforms.matrix_to_quaternion(torch.tensor(orientation))
        q = q.numpy()

        # Write the position followed by the quaternion components in reverse index order
        f.write("%.10f %.10f %.10f %.10f %.10f %.10f %.10f\n" %
                (position[0], position[1], position[2],
                 q[0][3], q[0][2], q[0][1], q[0][0]))

    f.close()

    print(f"Written poses for {len(list_of_files)} images to {args.output}.txt")