Example #1
def main():
    parser = argparse.ArgumentParser(description="Atlas Testing")
    parser.add_argument("--model",
                        required=True,
                        metavar="FILE",
                        help="path to checkpoint")
    parser.add_argument("--scenes",
                        default="data/scannet_test.txt",
                        help="which scene(s) to run on")
    args = parser.parse_args()

    # get all the info_file.json's from the command line
    # .txt files contain a list of info_file.json's
    info_files = parse_splits_list(args.scenes)
    # info_files=[info_files[0]]

    metrics = {}
    for i, info_file in enumerate(info_files):
        # run model on each scene
        scene, temp = process(info_file, args.model, i, len(info_files))
        metrics[scene] = temp

    rslt_file = os.path.join(args.model, 'metrics.json')
    with open(rslt_file, 'w') as f:
        json.dump(metrics, f)

    # display results
    visualize(rslt_file)
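
All of these examples expand the --scenes argument with parse_splits_list. Its source is not shown on this page, but going by the comments above (a .txt split file lists one info_file.json per line, and a .json path can also be passed directly), a minimal sketch might look like the following; the function name and details here are assumptions, not the project's actual implementation.

import os

def parse_splits_list_sketch(splits):
    # Hypothetical stand-in for parse_splits_list (not the Atlas source):
    # expand each .txt split file into the info_file.json paths it lists,
    # and pass .json paths through unchanged.
    if isinstance(splits, str):
        splits = [splits]
    info_files = []
    for split in splits:
        ext = os.path.splitext(split)[1]
        if ext == '.json':
            info_files.append(split)
        elif ext == '.txt':
            with open(split) as f:
                info_files += [line.strip() for line in f if line.strip()]
        else:
            raise NotImplementedError('unknown split type: %s' % split)
    return info_files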
Example #2
def main():
    parser = argparse.ArgumentParser(description='Inference with COLMAP')
    parser.add_argument("--scenes", default="data/scannet_test.txt",
        help="path to raw dataset")
    parser.add_argument("--pathout", required=True, metavar="DIR",
        help="path to store processed (derived) dataset")
    parser.add_argument('--stride', default=2, type=int,
        help='number of frames to skip (improves runtime)')
    parser.add_argument('--scale', default=4, type=int,
        help='factor to downsample images by (improves runtime and quality)')
    parser.add_argument('--i', default=0, type=int,
        help='index of part for parallel processing')
    parser.add_argument('--n', default=1, type=int,
        help='number of parts to divide data into for parallel processing')
    args = parser.parse_args()

    i = args.i
    n = args.n
    assert 0 <= i < n

    scenes = parse_splits_list(args.scenes)

    scenes = scenes[i::n]
    for scene in scenes:
        process(scene, args.pathout, args.stride, args.scale)
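
The --i/--n flags shard the scene list across parallel workers with round-robin slicing (scenes[i::n]). A standalone check of that slicing, using made-up scene names rather than project data:

scenes = ['scene%04d' % k for k in range(10)]    # hypothetical scene names
n = 3                                            # pretend we launched 3 workers
parts = [scenes[i::n] for i in range(n)]
assert sorted(sum(parts, [])) == sorted(scenes)  # every scene lands in exactly one part
print(parts[0])   # ['scene0000', 'scene0003', 'scene0006', 'scene0009']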
Example #3
def main():
    parser = argparse.ArgumentParser(description="Atlas Testing")
    parser.add_argument("--model",
                        required=True,
                        metavar="FILE",
                        help="path to checkpoint")
    parser.add_argument("--scenes",
                        default="data/scannet_test.txt",
                        help="which scene(s) to run on")
    parser.add_argument("--num_frames",
                        default=-1,
                        type=int,
                        help="number of frames to use (-1 for all)")
    parser.add_argument("--voxel_dim",
                        nargs=3,
                        default=[-1, -1, -1],
                        type=int,
                        help="override voxel dim")
    parser.add_argument("--result_folder_name", default="final", type=str)
    parser.add_argument("--log_step_3d_visualize", default=-1, type=int)
    parser.add_argument("--log_step_mesh_save", default=-1, type=int)
    parser.add_argument("--scale", default=1, type=float)
    args = parser.parse_args()

    # get all the info_file.json's from the command line
    # .txt files contain a list of info_file.json's
    info_files = parse_splits_list(args.scenes)

    model = VoxelNet.load_from_checkpoint(args.model)
    model = model.cuda().eval()
    torch.set_grad_enabled(False)

    # overwrite default values of voxel_dim_test
    if args.voxel_dim[0] != -1:
        model.voxel_dim_test = args.voxel_dim
    # TODO: implement voxel_dim_test
    model.voxel_dim_val = model.voxel_dim_test

    model_name = args.result_folder_name
    # alternatively, derive the folder name from the checkpoint file:
    # model_name = os.path.splitext(os.path.split(args.model)[1])[0]
    save_path = os.path.join(model.cfg.LOG_DIR, model.cfg.TRAINER.NAME,
                             model.cfg.TRAINER.VERSION, 'test_' + model_name)
    if args.num_frames > -1:
        save_path = '%s_%d' % (save_path, args.num_frames)
    os.makedirs(save_path, exist_ok=True)

    model.scale = args.scale

    print(info_files)
    for i, info_file in enumerate(info_files):
        # run model on each scene
        process(info_file, model, args.num_frames, save_path, i,
                len(info_files), args)
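
Because --voxel_dim is declared with nargs=3, argparse collects three integers into a list, and the [-1, -1, -1] default acts as a sentinel meaning "keep the checkpoint's own value". A quick standalone check of that parsing (the values here are hypothetical):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--voxel_dim", nargs=3, default=[-1, -1, -1], type=int)

args = parser.parse_args(["--voxel_dim", "208", "208", "80"])
assert args.voxel_dim == [208, 208, 80]

args = parser.parse_args([])
assert args.voxel_dim[0] == -1   # sentinel: do not override the checkpoint value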
Example #4
def val_dataloader(self):
    transform = self.get_transform(False)
    info_files = parse_splits_list(self.cfg.DATASETS_VAL)
    dataset = ScenesDataset(info_files, self.num_frames_val, transform,
                            self.frame_types, self.frame_selection,
                            self.voxel_types, self.voxel_sizes)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             num_workers=1,
                                             collate_fn=collate_fn,
                                             shuffle=False,
                                             drop_last=False)
    return dataloader
Example #5
def train_dataloader(self):
    transform = self.get_transform(True)
    info_files = parse_splits_list(self.cfg.DATASETS_TRAIN)
    dataset = ScenesDataset(info_files, self.num_frames_train, transform,
                            self.frame_types, self.frame_selection,
                            self.voxel_types, self.voxel_sizes)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=self.batch_size_train,
        num_workers=2,
        collate_fn=collate_fn,
        shuffle=True,
        drop_last=True)
    return dataloader
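
Both dataloaders rely on a project-specific collate_fn that is not shown on this page. Since each ScenesDataset item is a whole scene rather than a fixed-size tensor, a plausible stand-in, if you only want these snippets to run, is a collate function that returns the batch as a list instead of stacking it; this is an assumption about the real implementation:

def collate_fn(batch):
    # Hypothetical stand-in: keep the samples as a plain list of dicts rather
    # than stacking them, since whole scenes vary in size between samples.
    return list(batch)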
Example #6
File: inference.py  Project: zmurez/Atlas
def main():
    parser = argparse.ArgumentParser(description="Atlas Testing")
    parser.add_argument("--model",
                        required=True,
                        metavar="FILE",
                        help="path to checkpoint")
    parser.add_argument("--scenes",
                        default="data/scannet_test.txt",
                        help="which scene(s) to run on")
    parser.add_argument("--num_frames",
                        default=-1,
                        type=int,
                        help="number of frames to use (-1 for all)")
    parser.add_argument("--voxel_dim",
                        nargs=3,
                        default=[-1, -1, -1],
                        type=int,
                        help="override voxel dim")
    args = parser.parse_args()

    # get all the info_file.json's from the command line
    # .txt files contain a list of info_file.json's
    info_files = parse_splits_list(args.scenes)

    model = VoxelNet.load_from_checkpoint(args.model)
    model = model.cuda().eval()
    torch.set_grad_enabled(False)

    # overwrite default values of voxel_dim_test
    if args.voxel_dim[0] != -1:
        model.voxel_dim_test = args.voxel_dim
    # TODO: implement voxel_dim_test
    model.voxel_dim_val = model.voxel_dim_test

    model_name = os.path.splitext(os.path.split(args.model)[1])[0]
    save_path = os.path.join(model.cfg.LOG_DIR, model.cfg.TRAINER.NAME,
                             model.cfg.TRAINER.VERSION, 'test_' + model_name)
    if args.num_frames > -1:
        save_path = '%s_%d' % (save_path, args.num_frames)
    os.makedirs(save_path, exist_ok=True)

    for i, info_file in enumerate(info_files):
        # run model on each scene
        process(info_file, model, args.num_frames, save_path, i,
                len(info_files))
Example #7
def main():
    parser = argparse.ArgumentParser(description='Evaluate COLMAP')
    parser.add_argument("--scenes",
                        default="data/scannet_test.txt",
                        help="path to raw dataset")
    parser.add_argument("--pathout",
                        required=True,
                        metavar="DIR",
                        help="path to store processed (derived) dataset")
    args = parser.parse_args()

    scenes = parse_splits_list(args.scenes)

    metrics = {}
    for scene in scenes:
        metrics[scene] = eval_scene(scene, args.pathout)

    rslt_file = os.path.join(args.pathout, 'metrics.json')
    with open(rslt_file, 'w') as f:
        json.dump(metrics, f)
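
Example #1 and this example both dump a per-scene metrics dict to metrics.json. Assuming each scene maps to a flat dict of metric name to numeric value (an assumption; the real structure depends on eval_scene), the saved file could be averaged across scenes like this:

import json

with open('metrics.json') as f:            # file written by the scripts above
    metrics = json.load(f)                 # assumed shape: {scene: {name: value}}

# average each metric over the scenes that report it
names = sorted({name for per_scene in metrics.values() for name in per_scene})
for name in names:
    values = [per_scene[name] for per_scene in metrics.values() if name in per_scene]
    print('%s: %.4f' % (name, sum(values) / len(values)))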
Example #8
def main():

    parser = argparse.ArgumentParser(
        description='Fuse ground truth tsdf on Scannet')
    parser.add_argument("--path",
                        required=True,
                        metavar="DIR",
                        help="path to raw dataset")
    parser.add_argument("--path_meta",
                        required=True,
                        metavar="DIR",
                        help="path to store processed (derived) dataset")
    parser.add_argument("--dataset",
                        required=True,
                        type=str,
                        help="which dataset to prepare")
    parser.add_argument('--i',
                        default=0,
                        type=int,
                        help='index of part for parallel processing')
    parser.add_argument(
        '--n',
        default=1,
        type=int,
        help='number of parts to divide data into for parallel processing')
    parser.add_argument(
        '--test',
        action='store_true',
        help="only prepare the test set (for rapid testing if you don't plan to train)")
    parser.add_argument(
        '--max_depth',
        default=3.,
        type=float,
        help='mask out large depth values since they are noisy')

    parser.add_argument("--model",
                        required=True,
                        metavar="FILE",
                        help="path to checkpoint")
    parser.add_argument("--scenes",
                        default="data/scannet_test.txt",
                        help="which scene(s) to run on")
    parser.add_argument("--num_frames",
                        default=-1,
                        type=int,
                        help="number of frames to use (-1 for all)")
    parser.add_argument("--voxel_dim",
                        nargs=3,
                        default=[-1, -1, -1],
                        type=int,
                        help="override voxel dim")
    args = parser.parse_args()

    i = args.i
    n = args.n
    assert 0 <= i and i < n

    if args.dataset == 'sample':
        scenes = ['sample1']
        scenes = scenes[i::n]  # distribute among workers
        for scene in scenes:
            prepare_sample_scene(
                scene,
                os.path.join(args.path, 'sample'),
                os.path.join(args.path_meta, 'sample'),
            )

    elif args.dataset == 'scannet':
        prepare_scannet(os.path.join(args.path, 'scannet'),
                        os.path.join(args.path_meta, 'scannet'), i, n,
                        args.test, args.max_depth)

    else:
        raise NotImplementedError('unknown dataset %s' % args.dataset)

    name = "Thread #%s" % (2)
    thread1 = MyThread1(name)
    thread1.start()

    # get all the info_file.json's from the command line
    # .txt files contain a list of info_file.json's
    info_files = parse_splits_list(args.scenes)

    model = VoxelNet.load_from_checkpoint(args.model)
    model = model.cuda().eval()
    torch.set_grad_enabled(False)

    # overwrite default values of voxel_dim_test
    if args.voxel_dim[0] != -1:
        model.voxel_dim_test = args.voxel_dim
    # TODO: implement voxel_dim_test
    model.voxel_dim_val = model.voxel_dim_test

    model_name = os.path.splitext(os.path.split(args.model)[1])[0]
    save_path = os.path.join(model.cfg.LOG_DIR, model.cfg.TRAINER.NAME,
                             model.cfg.TRAINER.VERSION, 'test_' + model_name)
    if args.num_frames > -1:
        save_path = '%s_%d' % (save_path, args.num_frames)
    os.makedirs(save_path, exist_ok=True)

    # run model on each scene (enumerate so the progress index actually advances)
    for i, info_file in enumerate(info_files):
        window_conf = gl.Config(double_buffer=True, depth_size=24)
        process(info_file, model, args.num_frames, save_path, i,
                len(info_files), window_conf)