Example #1
def train_parallel(rank, world_size, seed, config, args):
    # This function runs in parallel in multiple processes, one per available GPU
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '8880'
    dist.init_process_group(backend='nccl', world_size=world_size, rank=rank)
    set_seed(seed)
    torch.cuda.set_device(rank)
    device = 'cuda:%d' % torch.cuda.current_device()
    print("process %d, GPU: %s" % (rank, device))

    # load from models/PointDSC.py
    from models.PointDSC import PointDSC
    model = PointDSC(
        in_dim=config.in_dim,
        num_layers=config.num_layers,
        num_channels=config.num_channels,
        num_iterations=config.num_iterations,
        ratio=config.ratio,
        inlier_threshold=config.inlier_threshold,
        sigma_d=config.sigma_d,
        k=config.k,
        nms_radius=config.inlier_threshold,
    )
    checkpoint = torch.load(
        f'snapshot/{args.chosen_snapshot}/models/model_best.pkl',
        map_location=device)
    miss = model.load_state_dict(checkpoint)
    if rank == 0:
        print(miss)
    model.eval()

    # evaluate on the test set
    stats = eval_KITTI(model.cuda(), config, args.use_icp, world_size, seed,
                       rank, args)
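
How train_parallel is launched is not shown in this excerpt. Below is a minimal launcher sketch, assuming torch.multiprocessing.spawn as the entry point and that seed, config, and args are already defined by the surrounding script; the real launch code may differ.

import torch
import torch.multiprocessing as mp

# Launcher sketch (assumption, not part of the original excerpt):
# spawn one worker process per visible GPU; mp.spawn passes the rank
# as the first argument, followed by everything in args.
if __name__ == '__main__':
    world_size = torch.cuda.device_count()
    mp.spawn(train_parallel,
             args=(world_size, seed, config, args),
             nprocs=world_size,
             join=True)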
Example #2
def test_subset(rank, world_size, seed, config, args):
    # This function runs in parallel in multiple processes, one per available GPU

    set_seed(seed)
    device = 'cuda:%d' % torch.cuda.current_device()
    print("process %d, GPU: %s" % (rank, device))

    if args.algo == "PointDSC":
        # load from models/PointDSC.py
        from models.PointDSC import PointDSC
        model = PointDSC(
            in_dim=config.in_dim,
            num_layers=config.num_layers,
            num_channels=config.num_channels,
            num_iterations=config.num_iterations,
            ratio=config.ratio,
            inlier_threshold=config.inlier_threshold,
            sigma_d=config.sigma_d,
            k=config.k,
            nms_radius=config.inlier_threshold,
        )
        checkpoint = torch.load(
            f'snapshot/{args.chosen_snapshot}/models/model_best.pkl',
            map_location=device)
        miss = model.load_state_dict(checkpoint)
        if rank == 0:
            print(miss)
        model.eval()
        model = model.cuda()
    else:
        model = None

    # evaluate on the test set
    eval_KITTI(model, config, world_size, seed, rank, args)
Example #3
    parser.add_argument('--use_icp', default=True, type=str2bool)
    parser.add_argument('--save_npy', default=False, type=str2bool)
    parser.add_argument('--visualize', default=False, type=str2bool)
    args = parser.parse_args()

    config_path = f'snapshot/{args.chosen_snapshot}/config.json'
    config = json.load(open(config_path, 'r'))
    config = edict(config)
    config.root = '/data/Augmented_ICL-NUIM'
    config.descriptor = 'fpfh'

    from models.PointDSC import PointDSC

    model = PointDSC(
        in_dim=config.in_dim,
        num_layers=config.num_layers,
        num_channels=config.num_channels,
        num_iterations=config.num_iterations,
        ratio=config.ratio,
        sigma_d=config.sigma_d,
        k=config.k,
        nms_radius=config.inlier_threshold,
    )
    miss = model.load_state_dict(
        torch.load(f'snapshot/{args.chosen_snapshot}/models/model_best.pkl'),
        strict=False)
    print(miss)
    model.eval()

    eval_redwood(model.cuda(), config, args)
Example #4
def train_parallel(rank, world_size, seed, config):
    # This function runs in parallel in multiple processes, one per available GPU
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '8882'
    dist.init_process_group(backend='nccl', world_size=world_size, rank=rank)
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.set_device(rank)
    device = 'cuda:%d' % torch.cuda.current_device()
    print("process %d, GPU: %s" % (rank, device))

    # create model
    config.model = PointDSC(
        in_dim=config.in_dim,
        num_layers=config.num_layers,
        num_channels=config.num_channels,
        num_iterations=config.num_iterations,
        inlier_threshold=config.inlier_threshold,
        sigma_d=config.sigma_d,
        ratio=config.ratio,
        k=config.k,
    )

    # create optimizer
    if config.optimizer == 'SGD':
        config.optimizer = optim.SGD(
            config.model.parameters(),
            lr=config.lr,
            momentum=config.momentum,
            weight_decay=config.weight_decay,
        )
    elif config.optimizer == 'ADAM':
        config.optimizer = optim.Adam(
            config.model.parameters(),
            lr=config.lr,
            betas=(0.9, 0.999),
            # momentum=config.momentum,
            weight_decay=config.weight_decay,
        )
    config.scheduler = optim.lr_scheduler.ExponentialLR(
        config.optimizer,
        gamma=config.scheduler_gamma,
    )

    # create dataset and dataloader
    DL_config = edict({
        'voxel_size': 0.3,
        'positive_pair_search_voxel_size_multiplier': 4,
        'use_random_rotation': False,
        'use_random_scale': False
    })
    config.train_loader = make_data_loader(config.dataset,
                                           DL_config,
                                           'train',
                                           config.batch_size,
                                           rank,
                                           world_size,
                                           seed,
                                           config.num_workers,
                                           shuffle=True)
    config.val_loader = make_data_loader(config.dataset,
                                         DL_config,
                                         'val',
                                         config.batch_size,
                                         rank,
                                         world_size,
                                         seed,
                                         config.num_workers,
                                         shuffle=False)

    config.train_feature_extractor = LidarFeatureExtractor(
        split='train',
        in_dim=config.in_dim,
        inlier_threshold=config.inlier_threshold,
        num_node=config.num_node,
        use_mutual=config.use_mutual,
        augment_axis=config.augment_axis,
        augment_rotation=config.augment_rotation,
        augment_translation=config.augment_translation,
        fcgf_weights_file=config.fcgf_weights_file)

    config.val_feature_extractor = LidarFeatureExtractor(
        split='val',
        in_dim=config.in_dim,
        inlier_threshold=config.inlier_threshold,
        num_node=config.num_node,
        use_mutual=config.use_mutual,
        augment_axis=0,
        augment_rotation=0.0,
        augment_translation=0.0,
        fcgf_weights_file=config.fcgf_weights_file)

    # create evaluation
    config.evaluate_metric = {
        "ClassificationLoss":
        ClassificationLoss(balanced=config.balanced),
        "SpectralMatchingLoss":
        SpectralMatchingLoss(balanced=config.balanced),
        "TransformationLoss":
        TransformationLoss(re_thre=config.re_thre, te_thre=config.te_thre),
    }
    config.metric_weight = {
        "ClassificationLoss": config.weight_classification,
        "SpectralMatchingLoss": config.weight_spectralmatching,
        "TransformationLoss": config.weight_transformation,
    }

    trainer = Trainer(config, rank)
    trainer.train()
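
The Trainer class driving the loop is not part of this excerpt. A minimal sketch, under the assumption that the individual losses are combined as a plain weighted sum using config.metric_weight (the helper name and call pattern below are hypothetical):

def combine_losses(per_metric_losses, metric_weight):
    # Weighted sum over the loss terms declared in config.evaluate_metric,
    # using the scalar weights from config.metric_weight.
    return sum(metric_weight[name] * value
               for name, value in per_metric_losses.items())

# Example call with dummy scalar loss values:
# combine_losses(
#     {"ClassificationLoss": 0.7, "SpectralMatchingLoss": 0.2, "TransformationLoss": 0.1},
#     {"ClassificationLoss": 1.0, "SpectralMatchingLoss": 1.0, "TransformationLoss": 0.0})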
Example #5
    shutil.copy2(os.path.join('.', 'libs/trainer.py'), os.path.join(config.snapshot_dir, 'trainer.py'))
    shutil.copy2(os.path.join('.', 'models/PointDSC.py'), os.path.join(config.snapshot_dir, 'model.py'))  # for the model setting.
    shutil.copy2(os.path.join('.', 'libs/loss.py'), os.path.join(config.snapshot_dir, 'loss.py'))
    shutil.copy2(os.path.join('.', 'datasets/ThreeDMatch.py'), os.path.join(config.snapshot_dir, 'dataset.py'))
    json.dump(
        config,
        open(os.path.join(config.snapshot_dir, 'config.json'), 'w'),
        indent=4,
    )

    # create model 
    config.model = PointDSC(
        in_dim=config.in_dim,
        num_layers=config.num_layers, 
        num_channels=config.num_channels,
        num_iterations=config.num_iterations,
        inlier_threshold=config.inlier_threshold,
        sigma_d=config.sigma_d,
        ratio=config.ratio,
        k=config.k,
    )

    # create optimizer 
    if config.optimizer == 'SGD':
        config.optimizer = optim.SGD(
            config.model.parameters(), 
            lr=config.lr,
            momentum=config.momentum,
            weight_decay=config.weight_decay,
            )
    elif config.optimizer == 'ADAM':
        config.optimizer = optim.Adam(
Example #6
    args = parser.parse_args()

    config_path = f'snapshot/{args.chosen_snapshot}/config.json'
    config = json.load(open(config_path, 'r'))
    config = edict(config)

    if args.use_gpu:
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    model = PointDSC(
        in_dim=config.in_dim,
        num_layers=config.num_layers,
        num_channels=config.num_channels,
        num_iterations=config.num_iterations,
        ratio=config.ratio,
        sigma_d=config.sigma_d,
        k=config.k,
        nms_radius=config.inlier_threshold,
    ).to(device)
    checkpoint = torch.load(
        f'snapshot/{args.chosen_snapshot}/models/model_best.pkl',
        map_location=device)
    miss = model.load_state_dict(checkpoint, strict=False)
    print(miss)
    model.eval()

    # extract features
    if args.descriptor == 'fpfh':
        raw_src_pcd, src_pts, src_features = extract_fpfh_features(
            args.pcd1, config.downsample, device)
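
The extract_fpfh_features helper is not included in this excerpt. A minimal sketch of FPFH extraction with Open3D; the function name, radius multipliers, and return layout here are assumptions, not the repository's implementation:

import numpy as np
import open3d as o3d
import torch

def fpfh_sketch(pcd_path, voxel_size, device):
    # Read and voxel-downsample the point cloud.
    raw_pcd = o3d.io.read_point_cloud(pcd_path)
    pcd = raw_pcd.voxel_down_sample(voxel_size)
    # Normals are required before computing FPFH descriptors.
    pcd.estimate_normals(
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2, max_nn=30))
    fpfh = o3d.pipelines.registration.compute_fpfh_feature(
        pcd,
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5, max_nn=100))
    pts = torch.from_numpy(np.asarray(pcd.points)).float().to(device)     # (N, 3)
    feats = torch.from_numpy(np.asarray(fpfh.data).T).float().to(device)  # (N, 33)
    return raw_pcd, pts, feats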