Example #1
def main():
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    print("\n**************************")
    for k, v in config['common'].items():
        setattr(args, k, v)
        print('\n[%s]:'%(k), v)
    print("\n**************************\n")
    
    try:
        os.makedirs(args.save_path)
    except OSError:
        pass
    
    train_transforms = transforms.Compose([
        d_utils.PointcloudToTensor()
    ])
    test_transforms = transforms.Compose([
        d_utils.PointcloudToTensor()
    ])
    
    train_dataset = ShapeNetPart(root=args.data_root, num_points=args.num_points, split='trainval', normalize=True, transforms=train_transforms)
    train_dataloader = DataLoader(
        train_dataset, 
        batch_size=args.batch_size,
        shuffle=True, 
        num_workers=int(args.workers), 
        pin_memory=True
    )
    
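    # expose the test set at module scope, presumably so evaluation code in train() can reach it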
    global test_dataset
    test_dataset = ShapeNetPart(root=args.data_root, num_points=args.num_points, split='test', normalize=True, transforms=test_transforms)
    test_dataloader = DataLoader(
        test_dataset, 
        batch_size=args.batch_size,
        shuffle=False, 
        num_workers=int(args.workers), 
        pin_memory=True
    )
    
    model = RSCNN_MSN(num_classes=args.num_classes, input_channels=args.input_channels, relation_prior=args.relation_prior, use_xyz=True)
    model.cuda()
    optimizer = optim.Adam(
        model.parameters(), lr=args.base_lr, weight_decay=args.weight_decay)

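    # stepwise schedules: LR and BN momentum decay every decay_step epochs,
    # clamped from below at args.lr_clip and args.bnm_clip respectively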
    lr_lbmd = lambda e: max(args.lr_decay**(e // args.decay_step), args.lr_clip / args.base_lr)
    bnm_lmbd = lambda e: max(args.bn_momentum * args.bn_decay**(e // args.decay_step), args.bnm_clip)
    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
    bnm_scheduler = pt_utils.BNMomentumScheduler(model, bnm_lmbd)
    
    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        print('Load model successfully: %s' % (args.checkpoint))

    criterion = nn.CrossEntropyLoss()
    num_batch = len(train_dataset) / args.batch_size
    
    # training
    train(train_dataloader, test_dataloader, model, criterion, optimizer, lr_scheduler, bnm_scheduler, args, num_batch)
Example #2
def main():
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    for k, v in config['common'].items():
        setattr(args, k, v)

    test_transforms = transforms.Compose([d_utils.PointcloudToTensor()])

    test_dataset = ModelNet40Cls(num_points=args.num_points,
                                 root=args.data_root,
                                 transforms=test_transforms,
                                 train=False)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=int(args.workers),
                                 pin_memory=True)

    model = DensePoint(num_classes=args.num_classes,
                       input_channels=args.input_channels,
                       use_xyz=True)
    model.cuda()

    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        print('Load model successfully: %s' % (args.checkpoint))

    # evaluate
    PointcloudScale = d_utils.PointcloudScale()  # initialize random scaling
    model.eval()
    global_acc = 0
    for i in range(NUM_REPEAT):
        preds = []
        labels = []

        s = time.time()
        for j, data in enumerate(test_dataloader, 0):
            points, target = data
            points, target = points.cuda(), target.cuda()
            # volatile=True marks inference-only graphs (pre-0.4 PyTorch idiom)
            points, target = Variable(points, volatile=True), \
                             Variable(target, volatile=True)
            # points [batch_size, num_points, dimensions], e.g., [256, 2048, 3]

            # furthest point sampling
            # fps_idx = pointnet2_utils.furthest_point_sample(points, 1200)  # (B, npoint)

            # random sampling
            fps_idx = np.random.randint(0, points.shape[1],  # high is exclusive
                                        size=[points.shape[0], 1200])
            fps_idx = torch.from_numpy(fps_idx).type(torch.IntTensor).cuda()

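            # voting: average softmax over NUM_VOTE random num_points-subsets
            # of the 1200 indices; votes after the first see a rescaled cloud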
            pred = 0
            for v in range(NUM_VOTE):
                new_fps_idx = fps_idx[:, np.random.choice(1200, args.num_points, False)]
                new_points = pointnet2_utils.gather_operation(
                    points.transpose(1, 2).contiguous(),
                    new_fps_idx).transpose(1, 2).contiguous()
                if v > 0:
                    new_points.data = PointcloudScale(new_points.data)
                pred += F.softmax(model(new_points), dim=1)
            pred /= NUM_VOTE
            target = target.view(-1)
            _, pred_choice = torch.max(pred.data, -1)

            preds.append(pred_choice)
            labels.append(target.data)
        e = time.time()

        preds = torch.cat(preds, 0)
        labels = torch.cat(labels, 0)
        acc = (preds == labels).sum().item() / labels.numel()
        if acc > global_acc:
            global_acc = acc
        print('Repeat %3d \t Acc: %0.6f' % (i + 1, acc))
        print('time (secs) for 1 epoch: ', (e - s))
    print('\nBest voting acc: %0.6f' % (global_acc))
Example #3
def main():
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    for k, v in config['common'].items():
        setattr(args, k, v)
    
    test_transforms = transforms.Compose([
        d_utils.PointcloudToTensor()
    ])
    
    test_dataset = ShapeNetPart(root=args.data_root, num_points=args.num_points, split='test', normalize=True, transforms=test_transforms)
    test_dataloader = DataLoader(
        test_dataset, 
        batch_size=args.batch_size,
        shuffle=False, 
        num_workers=int(args.workers), 
        pin_memory=True
    )
    
    model = RSCNN_MSN(num_classes=args.num_classes, input_channels=args.input_channels, relation_prior=args.relation_prior, use_xyz=True)
    model.cuda()

    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        print('Load model successfully: %s' % (args.checkpoint))

    # evaluate
    PointcloudScale = d_utils.PointcloudScale(scale_low=0.87, scale_high=1.15)   # initialize random scaling
    model.eval()
    global_Class_mIoU, global_Inst_mIoU = 0, 0
    seg_classes = test_dataset.seg_classes
    seg_label_to_cat = {}           # {0:Airplane, 1:Airplane, ...49:Table}
    for cat in seg_classes.keys():
        for label in seg_classes[cat]:
            seg_label_to_cat[label] = cat
    
    for i in range(NUM_REPEAT):
        shape_ious = {cat:[] for cat in seg_classes.keys()}
        for _, data in enumerate(test_dataloader, 0):
            points, target, cls = data
            points, target = Variable(points, volatile=True), Variable(target, volatile=True)
            points, target = points.cuda(), target.cuda()

            batch_one_hot_cls = np.zeros((len(cls), 16))   # 16 object classes
            for b in range(len(cls)):
                batch_one_hot_cls[b, int(cls[b])] = 1
            batch_one_hot_cls = torch.from_numpy(batch_one_hot_cls)
            batch_one_hot_cls = Variable(batch_one_hot_cls.float().cuda())

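            # voting: average the point-wise softmax over NUM_VOTE passes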
            pred = 0
            # start from the original cloud so the first vote is not run on zeros
            new_points = Variable(points.data.clone(), volatile=True)
            for v in range(NUM_VOTE):
                if v > 0:
                    new_points.data = PointcloudScale(points.data)
                pred += F.softmax(model(new_points, batch_one_hot_cls), dim=2)
            pred /= NUM_VOTE
            
            pred = pred.data.cpu()
            target = target.data.cpu()
            pred_val = torch.zeros(len(cls), args.num_points).type(torch.LongTensor)
            # restrict each prediction to the part labels of the shape's ground-truth category
            for b in range(len(cls)):
                cat = seg_label_to_cat[target[b, 0]]
                logits = pred[b, :, :]   # (num_points, num_classes)
                pred_val[b, :] = logits[:, seg_classes[cat]].max(1)[1] + seg_classes[cat][0]
            
            for b in range(len(cls)):
                segp = pred_val[b, :]
                segl = target[b, :]
                cat = seg_label_to_cat[segl[0]]
                part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
                for l in seg_classes[cat]:
                    if torch.sum((segl == l) | (segp == l)) == 0:
                        # part is not present in this shape
                        part_ious[l - seg_classes[cat][0]] = 1.0
                    else:
                        part_ious[l - seg_classes[cat][0]] = torch.sum((segl == l) & (segp == l)) / float(torch.sum((segl == l) | (segp == l)))
                shape_ious[cat].append(np.mean(part_ious))
        
        instance_ious = []
        for cat in shape_ious.keys():
            for iou in shape_ious[cat]:
                instance_ious.append(iou)
            shape_ious[cat] = np.mean(shape_ious[cat])
        mean_class_ious = np.mean(list(shape_ious.values()))
        
        print('\n------ Repeat %3d ------' % (i + 1))
        for cat in sorted(shape_ious.keys()):
            print('%s: %0.6f'%(cat, shape_ious[cat]))
        print('Class_mIoU: %0.6f' % (mean_class_ious))
        print('Instance_mIoU: %0.6f' % (np.mean(instance_ious)))

        if mean_class_ious > global_Class_mIoU:
            global_Class_mIoU = mean_class_ious
            global_Inst_mIoU = np.mean(instance_ious)
                
    print('\nBest voting Class_mIoU = %0.6f, Instance_mIoU = %0.6f' % (global_Class_mIoU, global_Inst_mIoU))
Example #4
def main():
    #    os.system('cp models/rscnn_ssn_cls.py cls/') # bkp of train procedure
    #    os.system('cp utils/pointnet2_utils.py cls/')
    #    os.system('cp utils/pointnet2_modules.py cls/')
    #    os.system('cp utils/csrc/ellipsoid_query_gpu.cu cls/')
    #    os.system('cp utils/csrc/ellipsoid_query.c cls/')
    #    os.system('cp cfgs/config_ssn_cls.yaml cls/')
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    print("\n**************************")
    for k, v in config['common'].items():
        setattr(args, k, v)
        print('\n[%s]:' % (k), v)
    print("\n**************************\n")

    try:
        os.makedirs(args.save_path)
    except OSError:
        pass

    train_transforms = transforms.Compose([d_utils.PointcloudToTensor()])
    test_transforms = transforms.Compose([d_utils.PointcloudToTensor()])

    train_dataset = ModelNet40Cls(num_points=args.num_points,
                                  root=args.data_root,
                                  transforms=train_transforms)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=int(args.workers),
                                  pin_memory=True)

    test_dataset = ModelNet40Cls(num_points=args.num_points,
                                 root=args.data_root,
                                 transforms=test_transforms,
                                 train=False)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=int(args.workers),
                                 pin_memory=True)

    model = RSCNN_SSN(num_classes=args.num_classes,
                      input_channels=args.input_channels,
                      relation_prior=args.relation_prior,
                      use_xyz=True)
    model.cuda()
    optimizer = optim.Adam(model.parameters(),
                           lr=args.base_lr,
                           weight_decay=args.weight_decay)

    lr_lbmd = lambda e: max(args.lr_decay**(e // args.decay_step),
                            args.lr_clip / args.base_lr)
    bnm_lmbd = lambda e: max(args.bn_momentum * args.bn_decay**(e // args.decay_step),
                             args.bnm_clip)
    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
    bnm_scheduler = pt_utils.BNMomentumScheduler(model, bnm_lmbd)

    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        print('Load model successfully: %s' % (args.checkpoint))

    criterion = nn.CrossEntropyLoss()
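    # batches per epoch (a float; presumably only used for progress logging)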
    num_batch = len(train_dataset) / args.batch_size

    # training
    train(train_dataloader, test_dataloader, model, criterion, optimizer,
          lr_scheduler, bnm_scheduler, args, num_batch)
Example #5
                        help="Name for run in tensorboard_logger")
    parser.add_argument('--visdom-port', type=int, default=8097)

    return parser.parse_args()


lr_clip = 1e-5
bnm_clip = 1e-2

if __name__ == "__main__":
    args = parse_args()

    BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')

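    # full train-time augmentation: scale, rotate (+ perturbation), translate,
    # jitter, random input dropout (note: this rebinds the name `transforms`,
    # shadowing the imported transforms module)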
    transforms = transforms.Compose([
        d_utils.PointcloudToTensor(),
        d_utils.PointcloudScale(),
        d_utils.PointcloudRotate(),
        d_utils.PointcloudRotatePerturbation(),
        d_utils.PointcloudTranslate(),
        d_utils.PointcloudJitter(),
        d_utils.PointcloudRandomInputDropout()
    ])

    test_set = ModelNet40Cls(args.num_points,
                             BASE_DIR,
                             transforms=transforms,
                             train=False)
    test_loader = DataLoader(test_set,
                             batch_size=args.batch_size,
                             shuffle=True)
Example #6
def main():
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    for k, v in config['common'].items():
        setattr(args, k, v)

    test_transforms = transforms.Compose([d_utils.PointcloudToTensor()])

    test_dataset = ModelNet40Cls(num_points=args.num_points,
                                 root=args.data_root,
                                 transforms=test_transforms,
                                 train=False)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=int(args.workers),
                                 pin_memory=False)

    model = RSCNN_SSN(num_classes=args.num_classes,
                      input_channels=args.input_channels,
                      relation_prior=args.relation_prior,
                      use_xyz=True)
    # for multi GPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
        model = nn.DataParallel(model, device_ids=[0, 1])
        model.to(device)
    elif torch.cuda.is_available() and torch.cuda.device_count() == 1:
        model.cuda()

    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        print('Load model successfully: %s' % (args.checkpoint))

    # evaluate
    PointcloudScale = d_utils.PointcloudScale()  # initialize random scaling
    model.eval()
    global_acc = 0
    for i in range(NUM_REPEAT):
        preds = []
        labels = []
        for j, data in enumerate(test_dataloader, 0):
            points, target = data
            points, target = points.cuda(), target.cuda()
            points, target = Variable(points, volatile=True), \
                             Variable(target, volatile=True)

            # furthest point sampling
            fps_idx = pointnet2_utils.furthest_point_sample(
                points, 1200)  # (B, npoint)
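            # 1200 points are sampled once by FPS; each vote gathers a random num_points subset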
            pred = 0
            for v in range(NUM_VOTE):
                new_fps_idx = fps_idx[:, np.random.choice(1200, args.num_points, False)]
                new_points = pointnet2_utils.gather_operation(
                    points.transpose(1, 2).contiguous(),
                    new_fps_idx).transpose(1, 2).contiguous()
                if v > 0:
                    new_points.data = PointcloudScale(new_points.data)
                pred += F.softmax(model(new_points), dim=1)
            pred /= NUM_VOTE
            target = target.view(-1)
            _, pred_choice = torch.max(pred.data, -1)

            preds.append(pred_choice)
            labels.append(target.data)

        preds = torch.cat(preds, 0)
        labels = torch.cat(labels, 0)
        acc = (preds == labels).sum().item() / labels.numel()
        if acc > global_acc:
            global_acc = acc
        print('Repeat %3d \t Acc: %0.6f' % (i + 1, acc))
    print('\nBest voting acc: %0.6f' % (global_acc))
Example #7
def main():
    global svm_best_acc40
    svm_best_acc40 = 0
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    print("\n**************************")
    for k, v in config['common'].items():
        setattr(args, k, v)
        print('\n[%s]:' % (k), v)
    print("\n**************************\n")

    os.makedirs('./ckpts/', exist_ok=True)

    # dataset
    train_transforms = transforms.Compose([d_utils.PointcloudToTensor()])
    test_transforms = transforms.Compose([d_utils.PointcloudToTensor()])

    ss_dataset = ModelNetCls(transforms=train_transforms,
                             self_supervision=True,
                             use_normal=True,
                             dataset_rate=1)
    ss_dataloader = DataLoader(ss_dataset,
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=int(args.workers),
                               pin_memory=True,
                               worker_init_fn=worker_init_fn)

    if args.dataset == 'modelnet':
        train_dataset = ModelNetCls(transforms=train_transforms,
                                    self_supervision=False,
                                    train=True)
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      num_workers=int(args.workers),
                                      pin_memory=True,
                                      worker_init_fn=worker_init_fn)

        test_dataset = ModelNetCls(transforms=test_transforms,
                                   self_supervision=False,
                                   train=False)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     num_workers=int(args.workers),
                                     pin_memory=True)
    elif args.dataset == 'scannet':
        train_dataset = ScanNetCls(transforms=train_transforms,
                                   self_supervision=False,
                                   train=True)
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      num_workers=int(args.workers),
                                      pin_memory=True,
                                      worker_init_fn=worker_init_fn)

        test_dataset = ScanNetCls(transforms=test_transforms,
                                  self_supervision=False,
                                  train=False)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     num_workers=int(args.workers),
                                     pin_memory=True)
    elif args.dataset == 'scanobjectnn':
        train_dataset = ScanObjectNNCls(transforms=train_transforms,
                                        self_supervision=False,
                                        train=True)
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      num_workers=int(args.workers),
                                      pin_memory=True,
                                      worker_init_fn=worker_init_fn)
        test_dataset = ScanObjectNNCls(transforms=test_transforms,
                                       self_supervision=False,
                                       train=False)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     num_workers=int(args.workers),
                                     pin_memory=True)
    else:
        raise NotImplementedError

    # models
    n_rkhs = 512

    if args.arch == 'pointnet2':
        encoder = PointNet2(n_rkhs=n_rkhs,
                            input_channels=args.input_channels,
                            use_xyz=True,
                            point_wise_out=True,
                            multi=args.multiplier)
        print('Using PointNet++ backbone')
    elif args.arch == 'rscnn':
        encoder = RSCNN_SSN(n_rkhs=n_rkhs,
                            input_channels=args.input_channels,
                            relation_prior=args.relation_prior,
                            use_xyz=True,
                            point_wise_out=True,
                            multi=args.multiplier)
        print('Using RSCNN backbone')
    else:
        raise NotImplementedError

    encoder = nn.DataParallel(encoder).cuda()
    decoer = FoldingNet(in_channel=n_rkhs * 3)
    decoer = nn.DataParallel(decoer).cuda()
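    # the FoldingNet decoder consumes the n_rkhs * 3 dimensional code, presumably
    # reconstructing point clouds for the self-supervised objective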

    # optimizer
    optimizer = optim.Adam(list(encoder.parameters()) +
                           list(decoer.parameters()),
                           lr=args.base_lr,
                           weight_decay=args.weight_decay)

    # resume
    begin_epoch = -1
    checkpoint_name = './ckpts/' + args.name + '.pth'
    if os.path.isfile(checkpoint_name):
        checkpoint = torch.load(checkpoint_name)
        encoder.load_state_dict(checkpoint['encoder_state_dict'])
        decoer.load_state_dict(checkpoint['decoder_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        svm_best_acc40 = checkpoint['svm_best_acc40']
        begin_epoch = checkpoint['epoch'] - 1
        print("-> loaded checkpoint %s (epoch: %d)" %
              (checkpoint_name, begin_epoch))

    lr_lbmd = lambda e: max(args.lr_decay**(e // args.decay_step),
                            args.lr_clip / args.base_lr)
    bnm_lmbd = lambda e: max(args.bn_momentum * args.bn_decay**(e // args.decay_step),
                             args.bnm_clip)
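    # last_epoch realigns both schedules with the epoch restored from the checkpoint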
    lr_scheduler = lr_sched.LambdaLR(optimizer,
                                     lr_lbmd,
                                     last_epoch=begin_epoch)
    bnm_scheduler = pt_utils.BNMomentumScheduler(encoder,
                                                 bnm_lmbd,
                                                 last_epoch=begin_epoch)

    num_batch = len(ss_dataset) / args.batch_size

    args.val_freq_epoch = 1.0

    # training & evaluation
    train(ss_dataloader, train_dataloader, test_dataloader, encoder, decoer,
          optimizer, lr_scheduler, bnm_scheduler, args, num_batch, begin_epoch)
Example #8
def main():
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    print("\n**************************")
    for k, v in config['common'].items():
        setattr(args, k, v)
        print('\n[%s]:'%(k), v)
    print("\n**************************\n")
    
    try:
        os.makedirs(args.save_path)
    except OSError:
        pass
    
    train_transforms = transforms.Compose([
        d_utils.PointcloudToTensor(),
        d_utils.PointcloudScaleAndTranslate(),
        d_utils.PointcloudRandomInputDropout()
    ])
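    # the test transform below only converts to tensors; augmentation stays train-only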
    test_transforms = transforms.Compose([
        d_utils.PointcloudToTensor(),
        #d_utils.PointcloudScaleAndTranslate()
    ])
    
    train_dataset = ModelNet40Cls(num_points=args.num_points, root=args.data_root, transforms=train_transforms)
    train_dataloader = DataLoader(
        train_dataset, 
        batch_size=args.batch_size,
        shuffle=True, 
        num_workers=int(args.workers)
    )

    test_dataset = ModelNet40Cls(num_points=args.num_points, root=args.data_root, transforms=test_transforms, train=False)
    test_dataloader = DataLoader(
        test_dataset, 
        batch_size=args.batch_size,
        shuffle=False, 
        num_workers=int(args.workers)
    )
    
    model = RSCNN_SSN(num_classes=args.num_classes, input_channels=args.input_channels, relation_prior=args.relation_prior, use_xyz=True)
    # for multi GPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
        model = nn.DataParallel(model, device_ids=[0, 1])
        model.to(device)
    elif torch.cuda.is_available() and torch.cuda.device_count() == 1:
        model.cuda()

    optimizer = optim.Adam(
        model.parameters(), lr=args.base_lr, weight_decay=args.weight_decay)

    lr_lbmd = lambda e: max(args.lr_decay**(e // args.decay_step), args.lr_clip / args.base_lr)
    bnm_lmbd = lambda e: max(args.bn_momentum * args.bn_decay**(e // args.decay_step), args.bnm_clip)
    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
    bnm_scheduler = pt_utils.BNMomentumScheduler(model, bnm_lmbd)
    
    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        print('Load model successfully: %s' % (args.checkpoint))

    criterion = nn.CrossEntropyLoss()
    num_batch = len(train_dataset) / args.batch_size
    
    # training
    train(train_dataloader, test_dataloader, model, criterion, optimizer, lr_scheduler, bnm_scheduler, args, num_batch)
Example #9
def main():
    global logger

    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    for k, v in config['common'].items():
        setattr(args, k, v)

    output_dir = args.save_path
    if output_dir:
        import time
        msg = 'init_train'
        output_dir = os.path.join(
            output_dir, "train_{}_{}".format(time.strftime("%m_%d_%H_%M_%S"),
                                             msg))
        os.makedirs(output_dir)
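        # each run writes its logs into a fresh timestamped directory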

    logger = get_logger("RS-CNN", output_dir, prefix="train")
    logger.info("Running with config:\n{}".format(args))

    train_transforms = transforms.Compose([d_utils.PointcloudToTensor()])
    test_transforms = transforms.Compose([d_utils.PointcloudToTensor()])

    train_dataset = ModelNet40Cls(num_points=args.num_points,
                                  root=args.data_root,
                                  transforms=train_transforms)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=int(args.workers),
                                  pin_memory=True)

    test_dataset = ModelNet40Cls(num_points=args.num_points,
                                 root=args.data_root,
                                 transforms=test_transforms,
                                 train=False)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=int(args.workers),
                                 pin_memory=True)

    model = RSCNN_SSN(num_classes=args.num_classes,
                      input_channels=args.input_channels,
                      relation_prior=args.relation_prior,
                      use_xyz=True)
    model.cuda()
    optimizer = optim.Adam(model.parameters(),
                           lr=args.base_lr,
                           weight_decay=args.weight_decay)

    lr_lbmd = lambda e: max(args.lr_decay**(e // args.decay_step),
                            args.lr_clip / args.base_lr)
    bnm_lmbd = lambda e: max(args.bn_momentum * args.bn_decay**(e // args.decay_step),
                             args.bnm_clip)
    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
    bnm_scheduler = pt_utils.BNMomentumScheduler(model, bnm_lmbd)

    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        logger.info('Load model successfully: %s' % (args.checkpoint))

    criterion = nn.CrossEntropyLoss()
    num_batch = len(train_dataset) / args.batch_size

    # training
    train(train_dataloader, test_dataloader, model, criterion, optimizer,
          lr_scheduler, bnm_scheduler, args, num_batch)
Example #10
def main():
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    for k, v in config['common'].items():
        setattr(args, k, v)

    test_transforms = transforms.Compose([d_utils.PointcloudToTensor()])

    test_dataset = Bosphorus_eval(num_points=args.num_points,
                                  root=args.data_root,
                                  transforms=test_transforms)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=int(args.workers))

    model = RSCNN_SSN(num_classes=args.num_classes,
                      input_channels=args.input_channels,
                      relation_prior=args.relation_prior,
                      use_xyz=True)
    model.cuda()

    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        print('Load model successfully: %s' % (args.checkpoint))

    # the model is only used for feature extraction, so swap the classifier's
    # FC layer for a frozen identity mapping
    model.FC_layer = nn.Linear(1024, 1024, bias=False).cuda()
    for para in model.parameters():
        para.requires_grad = False
    nn.init.eye_(model.FC_layer.weight)

    # evaluate
    #PointcloudScale = d_utils.PointcloudScale()   # initialize random scaling
    model.eval()
    global_acc = 0
    with torch.no_grad():
        Total_samples = 0
        Correct = 0
        gallery_points, gallery_labels = test_dataset.get_gallery()
        gallery_points, gallery_labels = gallery_points.cuda(), gallery_labels.cuda()
        gallery_points = Variable(gallery_points)
        gallery_pred = model(gallery_points)
        print(gallery_pred.size())
        gallery_pred = F.normalize(gallery_pred)
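        # features are L2-normalized, so the dot products below are cosine similarities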

        for j, data in enumerate(test_dataloader, 0):
            probe_points, probe_labels = data
            probe_points, probe_labels = probe_points.cuda(), probe_labels.cuda()
            probe_points = Variable(probe_points)

            # get feature vectors for the probe set from the model
            probe_pred = model(probe_points)
            probe_pred = F.normalize(probe_pred)

            # make tensor to size (probe_num, gallery_num, C)
            probe_tmp = probe_pred.unsqueeze(1).expand(probe_pred.shape[0],
                                                       gallery_pred.shape[0],
                                                       probe_pred.shape[1])
            gallery_tmp = gallery_pred.unsqueeze(0).expand(
                probe_pred.shape[0], gallery_pred.shape[0],
                gallery_pred.shape[1])
            results = torch.sum(torch.mul(probe_tmp, gallery_tmp),
                                dim=2)  # cosine similarity
            results = torch.argmax(results, dim=1)
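            # rank-1 match: index of the most similar gallery sample for each probe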

            Total_samples += probe_points.shape[0]
            for i in np.arange(0, results.shape[0]):
                if gallery_labels[results[i]] == probe_labels[i]:
                    Correct += 1
        print('Total_samples:{}'.format(Total_samples))
        acc = float(Correct / Total_samples)
        if acc > global_acc:
            global_acc = acc
        print('Acc: %0.6f' % acc)
    print('\nBest voting acc: %0.6f' % (global_acc))