def main():
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    print("\n**************************")
    for k, v in config['common'].items():
        setattr(args, k, v)
        print('\n[%s]:' % k, v)
    print("\n**************************\n")
    
    os.makedirs(args.save_path, exist_ok=True)
    
    train_transforms = transforms.Compose([
        d_utils.PointcloudToTensor()
    ])
    test_transforms = transforms.Compose([
        d_utils.PointcloudToTensor()
    ])
    
    train_dataset = ShapeNetPart(root=args.data_root, num_points=args.num_points, split='trainval', normalize=True, transforms=train_transforms)
    train_dataloader = DataLoader(
        train_dataset, 
        batch_size=args.batch_size,
        shuffle=True, 
        num_workers=int(args.workers), 
        pin_memory=True
    )
    
    global test_dataset
    test_dataset = ShapeNetPart(root=args.data_root, num_points=args.num_points, split='test', normalize=True, transforms=test_transforms)
    test_dataloader = DataLoader(
        test_dataset, 
        batch_size=args.batch_size,
        shuffle=False, 
        num_workers=int(args.workers), 
        pin_memory=True
    )
    
    model = RSCNN_MSN(num_classes=args.num_classes, input_channels=args.input_channels, relation_prior=args.relation_prior, use_xyz=True)
    model.cuda()
    optimizer = optim.Adam(
        model.parameters(), lr=args.base_lr, weight_decay=args.weight_decay)

    lr_lbmd = lambda e: max(args.lr_decay**(e // args.decay_step), args.lr_clip / args.base_lr)
    bnm_lmbd = lambda e: max(args.bn_momentum * args.bn_decay**(e // args.decay_step), args.bnm_clip)
    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
    bnm_scheduler = pt_utils.BNMomentumScheduler(model, bnm_lmbd)
    
    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        print('Loaded model successfully: %s' % args.checkpoint)

    criterion = nn.CrossEntropyLoss()
    num_batch = len(train_dataset) / args.batch_size
    
    # training
    train(train_dataloader, test_dataloader, model, criterion, optimizer, lr_scheduler, bnm_scheduler, args, num_batch)
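
All of the snippets on this page are shown without their import headers. A plausible shared header is sketched below; the module paths are assumptions based on typical RS-CNN / PointNet++ repository layouts, so adjust them to your tree:

# Assumed import header (paths are guesses, not verbatim from the original repos).
import os
import argparse
import yaml
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
from torch.utils.data import DataLoader
from torchvision import transforms

import utils.pytorch_utils as pt_utils  # assumed: BNMomentumScheduler, Trainer, ...
import data.data_utils as d_utils       # assumed: point cloud transform helpers
from data import ModelNet40Cls, ShapeNetPart  # assumed dataset classes
from models import RSCNN_SSN, RSCNN_MSN       # assumed model classes

parser = argparse.ArgumentParser()  # assumed module-level parser
parser.add_argument('--config', type=str, required=True)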
Example #2
def main():
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    print("\n**************************")
    for k, v in config['common'].items():
        setattr(args, k, v)
        print('\n[%s]:' % (k), v)
    print("\n**************************\n")

    os.makedirs(args.save_path, exist_ok=True)

    train_dataset = ModelNet40Cls(num_points=args.num_points,
                                  root=args.data_root,
                                  transforms=None)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=int(args.workers),
                                  pin_memory=True)

    test_dataset_z = ModelNet40Cls(num_points=args.num_points,
                                   root=args.data_root,
                                   transforms=None,
                                   train=False)
    test_dataloader_z = DataLoader(test_dataset_z,
                                   batch_size=args.batch_size,
                                   shuffle=False,
                                   num_workers=int(args.workers),
                                   pin_memory=True)

    test_dataset_so3 = ModelNet40Cls(num_points=args.num_points,
                                     root=args.data_root,
                                     transforms=None,
                                     train=False)
    test_dataloader_so3 = DataLoader(test_dataset_so3,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     num_workers=int(args.workers),
                                     pin_memory=True)
    if args.model == "pointnet2_ssn":
        model = PointNet2_SSN(num_classes=args.num_classes)
        model.cuda()
    elif args.model == "rscnn_ssn":
        model = RSCNN_SSN(num_classes=args.num_classes)
        model.cuda()
        model = torch.nn.DataParallel(model)
    elif args.model == "rscnn_msn":
        model = RSCNN_MSN(num_classes=args.num_classes)
        model.cuda()
        model = torch.nn.DataParallel(model)
    else:
        print("Doesn't support this model")
        return

    optimizer = optim.Adam(model.parameters(),
                           lr=args.base_lr,
                           weight_decay=args.weight_decay)
    lr_lbmd = lambda e: max(args.lr_decay**(e // args.decay_step), args.lr_clip
                            / args.base_lr)
    bnm_lmbd = lambda e: max(
        args.bn_momentum * args.bn_decay**
        (e // args.decay_step), args.bnm_clip)
    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
    bnm_scheduler = pt_utils.BNMomentumScheduler(model, bnm_lmbd)

    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        print('Loaded model successfully: %s' % args.checkpoint)

    criterion = nn.CrossEntropyLoss()
    num_batch = len(train_dataset) / args.batch_size

    # training
    train(train_dataloader, test_dataloader_z, test_dataloader_so3, model,
          criterion, optimizer, lr_scheduler, bnm_scheduler, args, num_batch)
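
The lr_lbmd and bnm_lmbd lambdas in these scripts implement stepwise exponential decay with a floor. A minimal standalone check of the schedule shape, using hypothetical hyperparameter values in place of the YAML config:

# Hypothetical values; the real ones come from cfgs/*.yaml.
lr_decay, decay_step = 0.7, 21
base_lr, lr_clip = 0.001, 1e-5

lr_lbmd = lambda e: max(lr_decay ** (e // decay_step), lr_clip / base_lr)

for epoch in (0, 20, 21, 42, 210):
    # Effective LR is base_lr * multiplier; the max() clamps it at lr_clip.
    print(epoch, base_lr * lr_lbmd(epoch))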
Example #3
def main():
    #    os.system('cp models/rscnn_ssn_cls.py cls/') # bkp of train procedure
    #    os.system('cp utils/pointnet2_utils.py cls/')
    #    os.system('cp utils/pointnet2_modules.py cls/')
    #    os.system('cp utils/csrc/ellipsoid_query_gpu.cu cls/')
    #    os.system('cp utils/csrc/ellipsoid_query.c cls/')
    #    os.system('cp cfgs/config_ssn_cls.yaml cls/')
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    print("\n**************************")
    for k, v in config['common'].items():
        setattr(args, k, v)
        print('\n[%s]:' % (k), v)
    print("\n**************************\n")

    os.makedirs(args.save_path, exist_ok=True)

    train_transforms = transforms.Compose([d_utils.PointcloudToTensor()])
    test_transforms = transforms.Compose([d_utils.PointcloudToTensor()])

    train_dataset = ModelNet40Cls(num_points=args.num_points,
                                  root=args.data_root,
                                  transforms=train_transforms)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=int(args.workers),
                                  pin_memory=True)

    test_dataset = ModelNet40Cls(num_points=args.num_points,
                                 root=args.data_root,
                                 transforms=test_transforms,
                                 train=False)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=int(args.workers),
                                 pin_memory=True)

    model = RSCNN_SSN(num_classes=args.num_classes,
                      input_channels=args.input_channels,
                      relation_prior=args.relation_prior,
                      use_xyz=True)
    model.cuda()
    optimizer = optim.Adam(model.parameters(),
                           lr=args.base_lr,
                           weight_decay=args.weight_decay)

    lr_lbmd = lambda e: max(args.lr_decay**(e // args.decay_step), args.lr_clip
                            / args.base_lr)
    bnm_lmbd = lambda e: max(
        args.bn_momentum * args.bn_decay**
        (e // args.decay_step), args.bnm_clip)
    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
    bnm_scheduler = pt_utils.BNMomentumScheduler(model, bnm_lmbd)

    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        print('Loaded model successfully: %s' % args.checkpoint)

    criterion = nn.CrossEntropyLoss()
    num_batch = len(train_dataset) / args.batch_size

    # training
    train(train_dataloader, test_dataloader, model, criterion, optimizer,
          lr_scheduler, bnm_scheduler, args, num_batch)
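
Note that Example #2 wraps the model in torch.nn.DataParallel before calling load_state_dict, while this example loads into the bare module, so checkpoints saved by one do not load into the other: DataParallel prefixes every state dict key with "module.". A hedged sketch of a loader that tolerates either layout:

# Sketch: accept checkpoints saved with or without the DataParallel wrapper.
state = torch.load(args.checkpoint, map_location='cpu')
if not set(state) & set(model.state_dict()):
    if next(iter(state)).startswith('module.'):
        state = {k[len('module.'):]: v for k, v in state.items()}  # strip prefix
    else:
        state = {'module.' + k: v for k, v in state.items()}       # add prefix
model.load_state_dict(state)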
Example #4
    lr_lbmd = lambda it: max(
        args.lr_decay**
        (int(it * args.batch_size / args.decay_step)), lr_clip / args.lr)
    bn_lbmd = lambda it: max(
        args.bn_momentum * args.bnm_decay**
        (int(it * args.batch_size / args.decay_step)), bnm_clip)

    if args.checkpoint is not None:
        start_epoch, best_loss = pt_utils.load_checkpoint(
            model, optimizer, filename=args.checkpoint.split(".")[0])

        lr_scheduler = lr_sched.LambdaLR(optimizer,
                                         lr_lambda=lr_lbmd,
                                         last_epoch=start_epoch)
        bnm_scheduler = pt_utils.BNMomentumScheduler(model,
                                                     bn_lambda=bn_lbmd,
                                                     last_epoch=start_epoch)
    else:
        lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lambda=lr_lbmd)
        bnm_scheduler = pt_utils.BNMomentumScheduler(model, bn_lambda=bn_lbmd)

        best_loss = 1e10
        start_epoch = 1

    model_fn = model_fn_decorator(nn.CrossEntropyLoss())

    viz = pt_utils.VisdomViz(port=args.visdom_port)
    viz.text(str(vars(args)))

    trainer = pt_utils.Trainer(model,
                               model_fn,
                               shuffle=True)

Example #5
    model = Pointnet(num_classes=13, use_xyz=False)
    model.cuda()
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)

    lr_lbmd = lambda e: max(args.lr_decay**
                            (e // args.decay_step), lr_clip / args.lr)
    bnm_lmbd = lambda e: max(
        args.bn_momentum * args.bn_decay**(e // args.decay_step), bnm_clip)

    if args.checkpoint is None:
        lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
        bnm_scheduler = pt_utils.BNMomentumScheduler(model, bnm_lmbd)
        start_epoch = 1
        best_prec = 0
        best_loss = 1e10
    else:
        start_epoch, best_loss = pt_utils.load_checkpoint(
            model, optimizer, filename=args.checkpoint.split(".")[0])

        lr_scheduler = lr_sched.LambdaLR(optimizer,
                                         lr_lbmd,
                                         last_epoch=start_epoch)
        bnm_scheduler = pt_utils.BNMomentumScheduler(model,
                                                     bnm_lmbd,
                                                     last_epoch=start_epoch)

    model_fn = model_fn_decorator(nn.CrossEntropyLoss())
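
One pitfall in the resume branch above: constructing LambdaLR with last_epoch != -1 requires every optimizer param group to carry an 'initial_lr' entry, and a freshly built optimizer has none. A small guard, sketched under the assumption that the restored 'lr' still equals the base LR (otherwise substitute the original value):

# Ensure 'initial_lr' exists before resuming a LambdaLR mid-schedule.
for group in optimizer.param_groups:
    group.setdefault('initial_lr', group['lr'])
lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=start_epoch)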
Example #6
def main():
    global svm_best_acc40
    svm_best_acc40 = 0
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    print("\n**************************")
    for k, v in config['common'].items():
        setattr(args, k, v)
        print('\n[%s]:' % (k), v)
    print("\n**************************\n")

    os.makedirs('./ckpts/', exist_ok=True)

    # dataset
    train_transforms = transforms.Compose([d_utils.PointcloudToTensor()])
    test_transforms = transforms.Compose([d_utils.PointcloudToTensor()])

    ss_dataset = ModelNetCls(transforms=train_transforms,
                             self_supervision=True,
                             use_normal=True,
                             dataset_rate=1)
    ss_dataloader = DataLoader(ss_dataset,
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=int(args.workers),
                               pin_memory=True,
                               worker_init_fn=worker_init_fn)

    if args.dataset == 'modelnet':
        train_dataset = ModelNetCls(transforms=train_transforms,
                                    self_supervision=False,
                                    train=True)
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      num_workers=int(args.workers),
                                      pin_memory=True,
                                      worker_init_fn=worker_init_fn)

        test_dataset = ModelNetCls(transforms=test_transforms,
                                   self_supervision=False,
                                   train=False)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     num_workers=int(args.workers),
                                     pin_memory=True)
    elif args.dataset == 'scannet':
        train_dataset = ScanNetCls(transforms=train_transforms,
                                   self_supervision=False,
                                   train=True)
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      num_workers=int(args.workers),
                                      pin_memory=True,
                                      worker_init_fn=worker_init_fn)

        test_dataset = ScanNetCls(transforms=test_transforms,
                                  self_supervision=False,
                                  train=False)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     num_workers=int(args.workers),
                                     pin_memory=True)
    elif args.dataset == 'scanobjectnn':
        train_dataset = ScanObjectNNCls(transforms=train_transforms,
                                        self_supervision=False,
                                        train=True)
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      num_workers=int(args.workers),
                                      pin_memory=True,
                                      worker_init_fn=worker_init_fn)
        test_dataset = ScanObjectNNCls(transforms=test_transforms,
                                       self_supervision=False,
                                       train=False)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     num_workers=int(args.workers),
                                     pin_memory=True)
    else:
        raise NotImplementedError

    # models
    n_rkhs = 512

    if args.arch == 'pointnet2':
        encoder = PointNet2(n_rkhs=n_rkhs,
                            input_channels=args.input_channels,
                            use_xyz=True,
                            point_wise_out=True,
                            multi=args.multiplier)
        print('Using PointNet++ backbone')
    elif args.arch == 'rscnn':
        encoder = RSCNN_SSN(n_rkhs=n_rkhs,
                            input_channels=args.input_channels,
                            relation_prior=args.relation_prior,
                            use_xyz=True,
                            point_wise_out=True,
                            multi=args.multiplier)
        print('Using RSCNN backbone')
    else:
        raise NotImplementedError

    encoder = nn.DataParallel(encoder).cuda()
    decoder = FoldingNet(in_channel=n_rkhs * 3)
    decoder = nn.DataParallel(decoder).cuda()

    # optimizer
    optimizer = optim.Adam(list(encoder.parameters()) +
                           list(decoder.parameters()),
                           lr=args.base_lr,
                           weight_decay=args.weight_decay)

    # resume
    begin_epoch = -1
    checkpoint_name = './ckpts/' + args.name + '.pth'
    if os.path.isfile(checkpoint_name):
        checkpoint = torch.load(checkpoint_name)
        encoder.load_state_dict(checkpoint['encoder_state_dict'])
        decoder.load_state_dict(checkpoint['decoder_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        svm_best_acc40 = checkpoint['svm_best_acc40']
        begin_epoch = checkpoint['epoch'] - 1
        print("-> loaded checkpoint %s (epoch: %d)" %
              (checkpoint_name, begin_epoch))

    lr_lbmd = lambda e: max(args.lr_decay**(e // args.decay_step), args.lr_clip
                            / args.base_lr)
    bnm_lmbd = lambda e: max(
        args.bn_momentum * args.bn_decay**
        (e // args.decay_step), args.bnm_clip)
    lr_scheduler = lr_sched.LambdaLR(optimizer,
                                     lr_lbmd,
                                     last_epoch=begin_epoch)
    bnm_scheduler = pt_utils.BNMomentumScheduler(encoder,
                                                 bnm_lmbd,
                                                 last_epoch=begin_epoch)

    num_batch = len(ss_dataset) / args.batch_size

    args.val_freq_epoch = 1.0

    # training & evaluation
    train(ss_dataloader, train_dataloader, test_dataloader, encoder, decoder,
          optimizer, lr_scheduler, bnm_scheduler, args, num_batch, begin_epoch)
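
The self-supervision loaders above pass a worker_init_fn that the snippet never defines. A common definition, given here as an assumption rather than the repository's exact helper, reseeds NumPy in each DataLoader worker so random augmentations differ across workers:

import numpy as np

def worker_init_fn(worker_id):
    # Derive a distinct NumPy seed for each DataLoader worker process.
    np.random.seed(np.random.get_state()[1][0] + worker_id)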
Example #7
def main():
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    print("\n**************************")
    for k, v in config['common'].items():
        setattr(args, k, v)
        print('\n[%s]:' % k, v)
    print("\n**************************\n")
    
    os.makedirs(args.save_path, exist_ok=True)
    
    train_transforms = transforms.Compose([
        d_utils.PointcloudToTensor(),
        d_utils.PointcloudScaleAndTranslate(),
        d_utils.PointcloudRandomInputDropout()
    ])
    test_transforms = transforms.Compose([
        d_utils.PointcloudToTensor(),
        #d_utils.PointcloudScaleAndTranslate()
    ])
    
    train_dataset = ModelNet40Cls(num_points=args.num_points, root=args.data_root, transforms=train_transforms)
    train_dataloader = DataLoader(
        train_dataset, 
        batch_size=args.batch_size,
        shuffle=True, 
        num_workers=int(args.workers)
    )

    test_dataset = ModelNet40Cls(num_points=args.num_points, root=args.data_root, transforms=test_transforms, train=False)
    test_dataloader = DataLoader(
        test_dataset, 
        batch_size=args.batch_size,
        shuffle=False, 
        num_workers=int(args.workers)
    )
    
    model = RSCNN_SSN(num_classes=args.num_classes, input_channels=args.input_channels, relation_prior=args.relation_prior, use_xyz=True)
    # for multi GPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
        model = nn.DataParallel(model, device_ids=[0, 1])
        model.to(device)
    elif torch.cuda.is_available() and torch.cuda.device_count() == 1:
        model.cuda()

    optimizer = optim.Adam(
        model.parameters(), lr=args.base_lr, weight_decay=args.weight_decay)

    lr_lbmd = lambda e: max(args.lr_decay**(e // args.decay_step), args.lr_clip / args.base_lr)
    bnm_lmbd = lambda e: max(args.bn_momentum * args.bn_decay**(e // args.decay_step), args.bnm_clip)
    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
    bnm_scheduler = pt_utils.BNMomentumScheduler(model, bnm_lmbd)
    
    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        print('Loaded model successfully: %s' % args.checkpoint)

    criterion = nn.CrossEntropyLoss()
    num_batch = len(train_dataset) / args.batch_size
    
    # training
    train(train_dataloader, test_dataloader, model, criterion, optimizer, lr_scheduler, bnm_scheduler, args, num_batch)
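
A follow-up to the multi-GPU branch above: saving model.state_dict() while the model is wrapped in nn.DataParallel bakes the 'module.' prefix into the checkpoint. A sketch that unwraps first; the file name is hypothetical:

# Unwrap DataParallel before saving so the weights load either way.
net = model.module if isinstance(model, nn.DataParallel) else model
torch.save(net.state_dict(), os.path.join(args.save_path, 'rscnn_ssn_best.pth'))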
Example #8
def main():
    global logger

    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    for k, v in config['common'].items():
        setattr(args, k, v)

    output_dir = args.save_path
    if output_dir:
        import time
        msg = 'init_train'
        output_dir = os.path.join(
            output_dir, "train_{}_{}".format(time.strftime("%m_%d_%H_%M_%S"),
                                             msg))
        os.makedirs(output_dir)

    logger = get_logger("RS-CNN", output_dir, prefix="train")
    logger.info("Running with config:\n{}".format(args))

    train_transforms = transforms.Compose([d_utils.PointcloudToTensor()])
    test_transforms = transforms.Compose([d_utils.PointcloudToTensor()])

    train_dataset = ModelNet40Cls(num_points=args.num_points,
                                  root=args.data_root,
                                  transforms=train_transforms)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=int(args.workers),
                                  pin_memory=True)

    test_dataset = ModelNet40Cls(num_points=args.num_points,
                                 root=args.data_root,
                                 transforms=test_transforms,
                                 train=False)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=int(args.workers),
                                 pin_memory=True)

    model = RSCNN_SSN(num_classes=args.num_classes,
                      input_channels=args.input_channels,
                      relation_prior=args.relation_prior,
                      use_xyz=True)
    model.cuda()
    optimizer = optim.Adam(model.parameters(),
                           lr=args.base_lr,
                           weight_decay=args.weight_decay)

    lr_lbmd = lambda e: max(args.lr_decay**(e // args.decay_step), args.lr_clip
                            / args.base_lr)
    bnm_lmbd = lambda e: max(
        args.bn_momentum * args.bn_decay**
        (e // args.decay_step), args.bnm_clip)
    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
    bnm_scheduler = pt_utils.BNMomentumScheduler(model, bnm_lmbd)

    if args.checkpoint != '':
        model.load_state_dict(torch.load(args.checkpoint))
        logger.info('Loaded model successfully: %s' % args.checkpoint)

    criterion = nn.CrossEntropyLoss()
    num_batch = len(train_dataset) / args.batch_size

    # training
    train(train_dataloader, test_dataloader, model, criterion, optimizer,
          lr_scheduler, bnm_scheduler, args, num_batch)
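
get_logger is not defined in any of these snippets. A minimal stand-in with the same call signature, built on the standard logging module (an assumption, not the original helper):

import logging

def get_logger(name, output_dir=None, prefix=''):
    # Console logger, plus a file handler when an output directory is given.
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('[%(asctime)s] %(levelname)s %(message)s')
    stream = logging.StreamHandler()
    stream.setFormatter(fmt)
    logger.addHandler(stream)
    if output_dir:
        fh = logging.FileHandler(os.path.join(output_dir, prefix + '.log'))
        fh.setFormatter(fmt)
        logger.addHandler(fh)
    return logger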