Example no. 1 (score: 0)
    ###########################
    # Load the model parameters
    ###########################

    # Default training configuration for the 3DMatch dataset.
    config = ThreeDMatchConfig()

    ##############
    # Prepare Data
    ##############

    print()
    print('Dataset Preparation')
    print('*******************')

    # Initiate dataset configuration
    # NOTE(review): the first positional argument looks like a thread count and
    # voxel_size the first subsampling grid size — confirm against the
    # ThreeDMatchDataset signature.
    dataset = ThreeDMatchDataset(config.input_threads,
                                 voxel_size=config.first_subsampling_dl)

    # Create subsampled input clouds
    # NOTE(review): dl0 is currently unused because the load call below is
    # commented out — kept so it can be re-enabled easily.
    dl0 = config.first_subsampling_dl
    # dataset.load_subsampled_clouds(dl0)

    # Initialize input pipelines
    dataset.init_input_pipeline(config)

    # Test the input pipeline alone with this debug function
    # dataset.check_input_pipeline_timing(config)

    ##############
    # Define Model
    ##############
Example no. 2 (score: 0)
def test_caller(path, step_ind, on_val):
    """Restore a trained model from a snapshot and generate descriptors.

    Parameters
    ----------
    path : str
        Training log folder; must contain a 'snapshots' subdirectory with
        TensorFlow checkpoints named 'snap-<step>.meta'.
    step_ind : int
        Index into the sorted list of snapshot steps (e.g. -1 for the latest).
    on_val : unused
        Kept for interface compatibility; the test pipeline runs regardless.
    """

    # TensorFlow C++ log level.
    # NOTE(review): '0' shows *all* messages; the original comment said
    # "Disable warnings", which would require '2' or '3'. Value kept as-is to
    # preserve behavior — confirm the intended verbosity.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'

    ###########################
    # Load the model parameters
    ###########################

    # Load the model parameters saved at training time
    config = Config()
    config.load(path)

    ##################################
    # Change model parameters for test
    ##################################

    # Change parameters for the test here. For example, you can stop augmenting the input data.

    #config.augment_noise = 0.0001
    #config.augment_color = 1.0
    #config.validation_size = 500
    #config.batch_num = 10

    ##############
    # Prepare Data
    ##############

    print()
    print('Dataset Preparation')
    print('*******************')

    # Initiate dataset configuration (single input thread, test split)
    dataset = ThreeDMatchDataset(1, load_test=True)

    # Initialize input pipelines
    dataset.init_test_input_pipeline(config)

    ##############
    # Define Model
    ##############

    print('Creating Model')
    print('**************\n')
    t1 = time.time()

    model = KernelPointFCNN(dataset.flat_inputs, config)

    # Find all snapshots in the chosen training folder: each checkpoint is
    # identified by its '.meta' file, named 'snap-<step>.meta'.
    snap_path = os.path.join(path, 'snapshots')
    snap_steps = [
        int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path)
        if f.endswith('.meta')
    ]

    # Find which snapshot to restore (reuse snap_path instead of rebuilding it)
    chosen_step = np.sort(snap_steps)[step_ind]
    chosen_snap = os.path.join(snap_path, 'snap-{:d}'.format(chosen_step))

    # Create a tester class that restores the chosen checkpoint
    tester = ModelTester(model, restore_snap=chosen_snap)
    t2 = time.time()

    print('\n----------------')
    print('Done in {:.1f} s'.format(t2 - t1))
    print('----------------\n')

    ############
    # Start test
    ############

    print('Start Test')
    print('**********\n')

    tester.generate_descriptor(model, dataset)
Example no. 3 (score: 0)
    # Load the training-time configuration for the chosen snapshot.
    config_path = f'snapshot/{chosen_snap}/config.json'
    # Use a context manager so the config file handle is closed promptly
    # (the original leaked an open file object).
    with open(config_path, 'r') as config_file:
        config = json.load(config_file)
    config = edict(config)

    # Encoder: one plain block, then (num_layers - 1) strided residual stages.
    config.architecture = [
        'simple',
        'resnetb',
    ]
    for _ in range(config.num_layers - 1):
        config.architecture.append('resnetb_strided')
        config.architecture.append('resnetb')
        config.architecture.append('resnetb')
    # Decoder: mirror the encoder with upsample + unary blocks.
    for _ in range(config.num_layers - 1):
        config.architecture.append('nearest_upsample')
        config.architecture.append('unary')

    dset = ThreeDMatchDataset(root='/ssd2/xuyang/3DMatch',
                              downsample=0.05,
                              config=config)

    dataloader, neighborhood_limits = get_dataloader(dset,
                                                     batch_size=1,
                                                     num_workers=10)
    # NOTE(review): removed a leftover `import pdb; pdb.set_trace()` debugger
    # breakpoint that halted execution here.

    # Time iteration over the first 1000 batches of the dataloader.
    import time
    s_time = time.time()
    # `batch_idx` instead of `iter`, which shadowed the builtin.
    for batch_idx, inputs in enumerate(dataloader):
        print(batch_idx)
        if batch_idx == 1000:
            break
    e_time = time.time()
    print(f"{e_time - s_time:.2f}s")
Example no. 4 (score: 0)
         # momentum=config.momentum,
         weight_decay=config.weight_decay,
     )
 
 # Exponential learning-rate decay: the LR is multiplied by scheduler_gamma on
 # every scheduler.step() call (stepping cadence is decided by unseen code).
 config.scheduler = optim.lr_scheduler.ExponentialLR(
     config.optimizer,
     gamma=config.scheduler_gamma,
 )

 # create dataset and dataloader
 # NOTE(review): augmentation parameters are forwarded from the config —
 # confirm ThreeDMatchDataset applies them only for the 'train' split.
 train_set = ThreeDMatchDataset(root=config.root,
                                     split='train',
                                     downsample=config.downsample,
                                     self_augment=config.self_augment,
                                     num_node=config.num_node,
                                     augment_noise=config.augment_noise,
                                     augment_axis=config.augment_axis, 
                                     augment_rotation=config.augment_rotation,
                                     augment_translation=config.augment_translation,
                                     config=config,
                                     )
 val_set = ThreeDMatchDataset(root=config.root,
                                 split='val',
                                 num_node=64,
                                 downsample=config.downsample,
                                 self_augment=config.self_augment,
                                 augment_noise=config.augment_noise,
                                 augment_axis=config.augment_axis, 
                                 augment_rotation=config.augment_rotation,
                                 augment_translation=config.augment_translation,
                                 config=config,