Example #1
import numpy as np
import torch

# `minkunet`, `spvcnn`, `spvnas_specialized`, and `process_point_cloud` are
# assumed to come from the SPVNAS codebase (its model zoo and preprocessing
# utilities).


def get_inference(model, point_cloud):
    # `model` is a pretrained model name, e.g. 'SemanticKITTI_val_SPVNAS@65GMACs'.
    if torch.cuda.is_available():
        device = 'cuda:0'
    else:
        device = 'cpu'

    # Build the requested network with its pretrained weights.
    if 'MinkUNet' in model:
        model = minkunet(model, pretrained=True)
    elif 'SPVCNN' in model:
        model = spvcnn(model, pretrained=True)
    elif 'SPVNAS' in model:
        model = spvnas_specialized(model, pretrained=True)
    else:
        raise NotImplementedError(f'Unknown model: {model}')

    model = model.to(device)
    # Switch to eval mode so BatchNorm/Dropout behave deterministically.
    model.eval()

    with torch.no_grad():
        # Voxelize the raw points into the sparse-tensor input format.
        feed_dict = process_point_cloud(point_cloud)
        inputs = feed_dict['lidar'].to(device)

        outputs = model(inputs)
        predictions = outputs.argmax(1).cpu().numpy()
        # Map voxel-level predictions back to the original points.
        predictions = predictions[feed_dict['inverse_map']]

    return predictions.astype(np.uint8)
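A minimal, hypothetical way to call this helper on a single SemanticKITTI-style scan (four float32 values per point; the scan path is illustrative and the model name is the one used in Example #3):

scan = np.fromfile('000000.bin', dtype=np.float32).reshape(-1, 4)
labels = get_inference('SemanticKITTI_val_SPVNAS@65GMACs', scan)
print(labels.shape, labels.dtype)  # one uint8 class id per input point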
Example #2
    def __init__(self):
        # Model loading (constructor of a ROS node class).
        rospy.loginfo("Model Loading!")
        model_name = rospy.get_param("~model_name")
        rospy.logdebug(f"Loading model {model_name}")

        # Select the inference device (GPU if available).
        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

        # Keep the loaded model in a module-level global so that ROS callbacks
        # outside this class can run inference with it.
        global model
        if 'MinkUNet' in model_name:
            model = minkunet(model_name, pretrained=True)
        elif 'SPVCNN' in model_name:
            model = spvcnn(model_name, pretrained=True)
        elif 'SPVNAS' in model_name:
            model = spvnas_specialized(model_name, pretrained=True)
        else:
            raise NotImplementedError(f'Unknown model: {model_name}')
        model = model.to(device)
        model.eval()
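A hedged sketch of how the globally stored model might then be consumed from a ROS subscriber; the topic name, message handling, and reuse of the Example #1 preprocessing are assumptions, not part of the original snippet:

import numpy as np
import rospy
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2

def on_scan(msg):
    # Convert the incoming PointCloud2 into an (N, 4) float32 array.
    points = np.array(
        list(pc2.read_points(msg, field_names=('x', 'y', 'z', 'intensity'),
                             skip_nans=True)),
        dtype=np.float32)
    # ...then voxelize with process_point_cloud(points) and run model(inputs),
    # exactly as in Example #1.

rospy.Subscriber('/velodyne_points', PointCloud2, on_scan, queue_size=1)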
Example #3
    parser = argparse.ArgumentParser()
    # The input directory of raw LiDAR scans, referenced below as
    # args.velodyne_dir.
    parser.add_argument('--velodyne-dir', type=str, required=True)
    parser.add_argument('--model',
                        type=str,
                        default='SemanticKITTI_val_SPVNAS@65GMACs')
    args = parser.parse_args()
    # Predictions and visualizations are written next to the input scans.
    output_dir = os.path.join(args.velodyne_dir, 'outputs')
    os.makedirs(output_dir, exist_ok=True)

    if torch.cuda.is_available():
        device = 'cuda:0'
    else:
        device = 'cpu'

    # Build the requested network with its pretrained weights.
    if 'MinkUNet' in args.model:
        model = minkunet(args.model, pretrained=True)
    elif 'SPVCNN' in args.model:
        model = spvcnn(args.model, pretrained=True)
    elif 'SPVNAS' in args.model:
        model = spvnas_specialized(args.model, pretrained=True)
    else:
        raise NotImplementedError(f'Unknown model: {args.model}')

    model = model.to(device)
    model.eval()

    # Iterate over all raw scans (SemanticKITTI-style .bin files) and derive
    # the output file names for predictions and visualizations.
    input_point_clouds = sorted(os.listdir(args.velodyne_dir))
    for point_cloud_name in input_point_clouds:
        if not point_cloud_name.endswith('.bin'):
            continue
        label_file_name = point_cloud_name.replace('.bin', '.label')
        vis_file_name = point_cloud_name.replace('.bin', '.png')
        gt_file_name = point_cloud_name.replace('.bin', '_GT.png')
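The loop body is cut off in this example; a hedged sketch of how it could continue, reusing the preprocessing helper from Example #1 and writing one uint32 label per point (the SemanticKITTI .label layout):

        # Hypothetical continuation: load the scan, run inference, save labels.
        pts = np.fromfile(os.path.join(args.velodyne_dir, point_cloud_name),
                          dtype=np.float32).reshape(-1, 4)
        feed_dict = process_point_cloud(pts)
        inputs = feed_dict['lidar'].to(device)
        with torch.no_grad():
            predictions = model(inputs).argmax(1).cpu().numpy()
        predictions = predictions[feed_dict['inverse_map']]
        predictions.astype(np.uint32).tofile(
            os.path.join(output_dir, label_file_name))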
Example #4
def main() -> None:
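    # Initialize the distributed backend and bind this process to its local GPU.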
    dist.init()

    torch.backends.cudnn.benchmark = True
    torch.cuda.set_device(dist.local_rank())

    parser = argparse.ArgumentParser()
    parser.add_argument('config', metavar='FILE', help='config file')
    parser.add_argument('--run-dir', metavar='DIR', help='run directory')
    parser.add_argument('--name', type=str, help='model name')
    args, opts = parser.parse_known_args()

    configs.load(args.config, recursive=True)
    configs.update(opts)

    if args.run_dir is None:
        args.run_dir = auto_set_run_dir()
    else:
        set_run_dir(args.run_dir)

    logger.info(' '.join([sys.executable] + sys.argv))
    logger.info(f'Experiment started: "{args.run_dir}".' + '\n' + f'{configs}')

    dataset = builder.make_dataset()
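    # One DataLoader per split: the distributed sampler shards each split
    # across processes, and non-train splits are evaluated with batch size 1.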
    dataflow = dict()
    for split in dataset:
        sampler = torch.utils.data.distributed.DistributedSampler(
            dataset[split],
            num_replicas=dist.size(),
            rank=dist.rank(),
            shuffle=(split == 'train'))
        dataflow[split] = torch.utils.data.DataLoader(
            dataset[split],
            batch_size=configs.batch_size if split == 'train' else 1,
            sampler=sampler,
            num_workers=configs.workers_per_gpu,
            pin_memory=True,
            collate_fn=dataset[split].collate_fn)

    
    # Build the requested pretrained network by name (same pattern as the
    # other examples).
    if 'spvnas' in args.name.lower():
        model = spvnas_specialized(args.name)
    elif 'spvcnn' in args.name.lower():
        model = spvcnn(args.name)
    elif 'mink' in args.name.lower():
        model = minkunet(args.name)
    else:
        raise NotImplementedError(f'Unknown model: {args.name}')

    # model = builder.make_model()
    # Wrap the model for distributed evaluation, one process per GPU.
    model = torch.nn.parallel.DistributedDataParallel(
        model.cuda(),
        device_ids=[dist.local_rank()],
        find_unused_parameters=True)
    model.eval()

    criterion = builder.make_criterion()
    optimizer = builder.make_optimizer(model)
    scheduler = builder.make_scheduler(optimizer)
    meter = MeanIoU(configs.data.num_classes,
                    configs.data.ignore_label)

    # The loss, optimizer, and scheduler are only needed to satisfy the
    # trainer's constructor; this script restores a checkpoint and evaluates.
    trainer = SemanticKITTITrainer(model=model,
                                   criterion=criterion,
                                   optimizer=optimizer,
                                   scheduler=scheduler,
                                   num_workers=configs.workers_per_gpu,
                                   seed=configs.train.seed)
    # Restore the latest checkpoint and attach a MeanIoU meter as callbacks.
    callbacks = Callbacks([
        SaverRestore(),
        MeanIoU(configs.data.num_classes,
                configs.data.ignore_label)
    ])
    callbacks._set_trainer(trainer)
    trainer.callbacks = callbacks
    trainer.dataflow = dataflow['test']

    trainer.before_train()
    trainer.before_epoch()

    # important: make sure the model is in eval mode before running inference,
    # even after the trainer hooks above have run.
    model.eval()

    for feed_dict in tqdm(dataflow['test'], desc='eval'):
        # Move every tensor in the batch to the GPU (file-name entries stay on
        # the CPU).
        _inputs = dict()
        for key, value in feed_dict.items():
            if 'name' not in key:
                _inputs[key] = value.cuda()

        inputs = _inputs['lidar']
        targets = feed_dict['targets'].F.long().cuda(non_blocking=True)
        outputs = model(inputs)

        # Map voxel-wise predictions back to the original points, scene by
        # scene: the last coordinate column of a sparse tensor is the batch
        # index.
        invs = feed_dict['inverse_map']
        all_labels = feed_dict['targets_mapped']
        _outputs = []
        _targets = []
        for idx in range(invs.C[:, -1].max() + 1):
            cur_scene_pts = (inputs.C[:, -1] == idx).cpu().numpy()
            cur_inv = invs.F[invs.C[:, -1] == idx].cpu().numpy()
            cur_label = (all_labels.C[:, -1] == idx).cpu().numpy()
            outputs_mapped = outputs[cur_scene_pts][cur_inv].argmax(1)
            targets_mapped = all_labels.F[cur_label]
            _outputs.append(outputs_mapped)
            _targets.append(targets_mapped)
        outputs = torch.cat(_outputs, 0)
        targets = torch.cat(_targets, 0)
        output_dict = {'outputs': outputs, 'targets': targets}
        # Hand the per-point predictions to the MeanIoU callback.
        trainer.after_step(output_dict)

    trainer.after_epoch()
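For reference, a hedged sketch of the per-class IoU computation that a MeanIoU meter of this kind typically performs on the gathered outputs and targets (argument names are illustrative; inputs are assumed to be 1-D label tensors):

def mean_iou(outputs, targets, num_classes, ignore_label):
    # Drop points carrying the ignore label, then average IoU over the classes
    # that actually appear in either prediction or ground truth.
    keep = targets != ignore_label
    outputs, targets = outputs[keep], targets[keep]
    ious = []
    for c in range(num_classes):
        pred_c = outputs == c
        gt_c = targets == c
        union = (pred_c | gt_c).sum().item()
        if union == 0:
            continue  # class absent from both prediction and ground truth
        ious.append((pred_c & gt_c).sum().item() / union)
    return sum(ious) / len(ious)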