Example #1
def run(config):
    model = get_model(config).cuda()
    criterion = get_loss(config)
    optimizer = get_optimizer(config, model.parameters())

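    # resume model weights and training progress from the most recent checkpoint, if one exists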
    checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is not None:
        last_epoch, score = utils.checkpoint.load_checkpoint(config, model, checkpoint)
    else:
        print('[*] no checkpoint found')
        last_epoch, score = -1, -1

    print('last epoch:{} score:{:.4f}'.format(last_epoch, score))

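    # PyTorch schedulers built with last_epoch >= 0 read 'initial_lr' from each param group, so set it first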
    optimizer.param_groups[0]['initial_lr'] = config.OPTIMIZER.LR
    scheduler = get_scheduler(config, optimizer, last_epoch)
    if last_epoch != -1:
        scheduler.step()

    writer = SummaryWriter(os.path.join(config.TRAIN_DIR, 'logs'))

    train_loader = get_dataloader(config, 'train', transform=transforms.Compose([Albu(),
                                                                                 Normalize(),
                                                                                 ToTensor()]))
    test_loader = get_dataloader(config, 'val', transform=transforms.Compose([Normalize(),
                                                                               ToTensor()]))

    train(config, model, train_loader, test_loader, criterion, optimizer, scheduler, writer, last_epoch+1, score)
Example #2
def run(config):

    model = get_model(config).to(device)
    criterion = get_loss(config.LOSS.NAME)
    optimizer = get_optimizer(config, model.parameters())

    checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is not None:
        last_epoch, score, loss = utils.checkpoint.load_checkpoint(
            config, model, checkpoint)
    else:
        print('[*] no checkpoint found')
        last_epoch, score, loss = -1, -1, float('inf')

    print('last epoch:{} score:{:.4f} loss:{:.4f}'.format(
        last_epoch, score, loss))

    optimizer.param_groups[0]['initial_lr'] = config.OPTIMIZER.LR
    scheduler = get_scheduler(config, optimizer, last_epoch)

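    # when resuming with MultiStepLR, fast-forward the LR past milestones already
    # crossed, e.g. milestones=[10, 20], gamma=0.1, last_epoch=15 gives
    # step_count=1, so the LR is multiplied by 0.1 once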
    if config.SCHEDULER.NAME == 'multi_step':
        milestones = scheduler.state_dict()['milestones']
        step_count = len([i for i in milestones if i < last_epoch])
        optimizer.param_groups[0]['lr'] *= scheduler.state_dict()['gamma'] ** step_count

    if last_epoch != -1:
        scheduler.step()

    writer = SummaryWriter(os.path.join(config.TRAIN_DIR, 'logs'))

    train_loader = get_dataloader(config,
                                  'train',
                                  transform=transforms.Compose([
                                      Albu(),
                                      CV2_Resize(config.DATA.IMG_W,
                                                 config.DATA.IMG_H),
                                      Normalize(),
                                      ToTensor()
                                  ]))
    val_loader = get_dataloader(config,
                                'val',
                                transform=transforms.Compose([
                                    CV2_Resize(config.DATA.IMG_W,
                                               config.DATA.IMG_H),
                                    Normalize(),
                                    ToTensor()
                                ]))

    train(config, model, train_loader, val_loader, criterion, optimizer,
          scheduler, writer, last_epoch + 1, score, loss)
Example #3
def process_split(split='test', descriptor='fcgf'):
    if split == 'test':
        dset = ThreeDMatchTest(root='/data/3DMatch', 
                                descriptor=descriptor,
                                num_node='all', 
                                augment_axis=0,
                                augment_rotation=0.0,
                                augment_translation=0.0,
                                )
    else:
        dset = ThreeDMatchTrainVal(root='/data/3DMatch', 
                                descriptor=descriptor,
                                split=split,  
                                num_node='all', 
                                augment_axis=0,
                                augment_rotation=0.0,
                                augment_translation=0.0,
                                )
    dloader = get_dataloader(dset, batch_size=1, num_workers=16, shuffle=False)
    dloader_iter = iter(dloader)
    inlier_ratio_list = []
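    # record the ground-truth inlier ratio (as a percentage) for every pair in the split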
    for i in tqdm(range(len(dset))):
        corr, src_keypts, tgt_keypts, gt_trans, gt_labels = next(dloader_iter)
        inlier_ratio = gt_labels.mean()
        inlier_ratio_list.append(float(inlier_ratio)*100)
    return np.array(inlier_ratio_list)
Example #4
def eval_KITTI(method, args):
    dset = KITTIDataset(
        root=kitti_dir,
        split='test',
        descriptor=args.descriptor,
        in_dim=6,
        inlier_threshold=args.inlier_threshold,
        num_node=15000,
        use_mutual=args.use_mutual,
        augment_axis=0,
        augment_rotation=0.00,
        augment_translation=0.0,
    )
    dloader = get_dataloader(dset, batch_size=1, num_workers=8, shuffle=False)
    stats = eval_KITTI_scene(method, dloader, args)

    # pair level average
    allpair_stats = stats
    allpair_average = allpair_stats.mean(0)
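    # Re/Te are averaged only over successfully registered pairs (stats[:, 0] == 1)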
    correct_pair_average = allpair_stats[allpair_stats[:, 0] == 1].mean(0)
    logging.info(f"*" * 40)
    logging.info(
        f"All {allpair_stats.shape[0]} pairs, Mean Reg Recall={allpair_average[0] * 100:.2f}%, Mean Re={correct_pair_average[1]:.2f}, Mean Te={correct_pair_average[2]:.2f}"
    )
    logging.info(
        f"\tInput:  Mean Inlier Num={allpair_average[3]:.2f}(ratio={allpair_average[4] * 100:.2f}%)"
    )
    logging.info(
        f"\tOutput: Mean Inlier Num={allpair_average[5]:.2f}(precision={allpair_average[6] * 100:.2f}%, recall={allpair_average[7] * 100:.2f}%, f1={allpair_average[8] * 100:.2f}%)"
    )
    logging.info(
        f"\tMean model time: {allpair_average[9]:.2f}s, Mean data time: {allpair_average[10]:.2f}s"
    )

    return allpair_stats
Example #5
def eval_KITTI(model, config, use_icp):
    dset = KITTIDataset(root='/data/KITTI',
                    split='test',
                    descriptor=config.descriptor,
                    in_dim=config.in_dim,
                    inlier_threshold=config.inlier_threshold,
                    num_node=12000,
                    use_mutual=config.use_mutual,
                    augment_axis=0, 
                    augment_rotation=0.00, 
                    augment_translation=0.0,
                    )
    dloader = get_dataloader(dset, batch_size=1, num_workers=16, shuffle=False)
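    # evaluate pairs one at a time (batch_size=1); use_icp presumably enables ICP refinement inside eval_KITTI_per_pair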
    stats = eval_KITTI_per_pair(model, dloader, config, use_icp)
    logging.info(f"Max memory allicated: {torch.cuda.max_memory_allocated() / 1024 ** 3:.2f}GB")

    # pair level average 
    allpair_stats = stats
    allpair_average = allpair_stats.mean(0)
    correct_pair_average = allpair_stats[allpair_stats[:, 0] == 1].mean(0)
    logging.info(f"*"*40)
    logging.info(f"All {allpair_stats.shape[0]} pairs, Mean Success Rate={allpair_average[0]*100:.2f}%, Mean Re={correct_pair_average[1]:.2f}, Mean Te={correct_pair_average[2]:.2f}")
    logging.info(f"\tInput:  Mean Inlier Num={allpair_average[3]:.2f}(ratio={allpair_average[4]*100:.2f}%)")
    logging.info(f"\tOutput: Mean Inlier Num={allpair_average[5]:.2f}(precision={allpair_average[6]*100:.2f}%, recall={allpair_average[7]*100:.2f}%, f1={allpair_average[8]*100:.2f}%)")
    logging.info(f"\tMean model time: {allpair_average[9]:.2f}s, Mean data time: {allpair_average[10]:.2f}s")

    return allpair_stats
Example #6
def run(config):
    model = get_model(config).to(device)
    # model_params = [{'params': model.encoder.parameters(), 'lr': config.OPTIMIZER.ENCODER_LR},
    #                 {'params': model.decoder.parameters(), 'lr': config.OPTIMIZER.DECODER_LR}]

    optimizer = get_optimizer(config, model.parameters())
    # optimizer = get_optimizer(config, model_params)

    checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is not None:
        last_epoch, score, loss = utils.checkpoint.load_checkpoint(config, model, checkpoint)
    else:
        print('[*] no checkpoint found')
        last_epoch, score, loss = -1, -1, float('inf')
    print('last epoch:{} score:{:.4f} loss:{:.4f}'.format(last_epoch, score, loss))

    optimizer.param_groups[0]['initial_lr'] = config.OPTIMIZER.LR
    # optimizer.param_groups[0]['initial_lr'] = config.OPTIMIZER.ENCODER_LR
    # optimizer.param_groups[1]['initial_lr'] = config.OPTIMIZER.DECODER_LR

    scheduler = get_scheduler(config, optimizer, last_epoch)

    if config.SCHEDULER.NAME == 'multi_step':
        milestones = scheduler.state_dict()['milestones']
        step_count = len([i for i in milestones if i < last_epoch])
        optimizer.param_groups[0]['lr'] *= scheduler.state_dict()['gamma'] ** step_count
        # optimizer.param_groups[0]['lr'] *= scheduler.state_dict()['gamma'] ** step_count
        # optimizer.param_groups[1]['lr'] *= scheduler.state_dict()['gamma'] ** step_count

    if last_epoch != -1:
        scheduler.step()

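    # plain-text loggers opened in append mode replace the TensorBoard writer used in the earlier examples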
    log_train = Logger()
    log_val = Logger()
    log_train.open(os.path.join(config.TRAIN_DIR, 'log_train.txt'), mode='a')
    log_val.open(os.path.join(config.TRAIN_DIR, 'log_val.txt'), mode='a')

    train_loader = get_dataloader(config, 'train', transform=Albu(config.ALBU))
    val_loader = get_dataloader(config, 'val')

    train(config, model, train_loader, val_loader, optimizer, scheduler, log_train, log_val, last_epoch+1, score, loss)
Example #7
def run(config):
    model = get_model(config).to(device)
    optimizer = get_optimizer(config, model.parameters())

    checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is not None:
        last_epoch, score, loss = utils.checkpoint.load_checkpoint(
            config, model, checkpoint)
    else:
        print('[*] no checkpoint found')
        last_epoch, score, loss = -1, -1, float('inf')
    print('last epoch:{} score:{:.4f} loss:{:.4f}'.format(
        last_epoch, score, loss))

    optimizer.param_groups[0]['initial_lr'] = config.OPTIMIZER.LR

    scheduler = get_scheduler(config, optimizer, last_epoch)

    if config.SCHEDULER.NAME == 'multi_step':
        milestones = scheduler.state_dict()['milestones']
        step_count = len([i for i in milestones if i < last_epoch])
        optimizer.param_groups[0]['lr'] *= scheduler.state_dict()['gamma'] ** step_count

    if last_epoch != -1:
        scheduler.step()

    # writer = SummaryWriter(os.path.join(config.TRAIN_DIR, 'logs'))
    log_train = Logger()
    log_val = Logger()
    log_train.open(os.path.join(config.TRAIN_DIR, 'log_train.txt'), mode='a')
    log_val.open(os.path.join(config.TRAIN_DIR, 'log_val.txt'), mode='a')

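    # pick the augmentation pipeline that matches the task: segmentation or classification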
    augmentation = Albu_Seg() if config.TASK == 'seg' else Albu_Cls()
    train_loader = get_dataloader(config, 'train', transform=augmentation)
    val_loader = get_dataloader(config, 'val')

    train(config, model, train_loader, val_loader, optimizer, scheduler,
          log_train, log_val, last_epoch + 1, score, loss)
Example #8
def run(config, num_checkpoint, epoch_end, output_filename):
    dataloader = get_dataloader(config, split='val', transform=None)

    model = get_model(config).cuda()
    checkpoints = get_checkpoints(config, num_checkpoint, epoch_end)

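    # load the first checkpoint, then fold each later checkpoint into a running weight
    # average (SWA); the 1 / (i + 2) factor keeps all checkpoints equally weighted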
    utils.checkpoint.load_checkpoint(config, model, checkpoints[0])
    for i, checkpoint in enumerate(checkpoints[1:]):
        model2 = get_model(config).cuda()
        last_epoch, _, _ = utils.checkpoint.load_checkpoint(config, model2, checkpoint)
        swa.moving_average(model, model2, 1. / (i + 2))

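    # recompute BatchNorm running statistics under the averaged weights before saving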
    with torch.no_grad():
        swa.bn_update(dataloader, model)

    # output_name = '{}.{}.{:03d}'.format(output_filename, num_checkpoint, last_epoch)
    # print('save {}'.format(output_name))
    utils.checkpoint.save_checkpoint(config, model, None, None, epoch_end,
                                     weights_dict={'state_dict': model.state_dict()},
                                     name=output_filename)
Example #9
def eval_3DMatch(model, config, args):
    dset = ThreeDLOMatchTest(
        root='/data/3DMatch',
        descriptor='fcgf',
        in_dim=config.in_dim,
        inlier_threshold=config.inlier_threshold,
        num_node=args.num_points,
        use_mutual=config.use_mutual,
        augment_axis=0,
        augment_rotation=0.00,
        augment_translation=0.0,
    )
    dloader = get_dataloader(dset, batch_size=1, num_workers=16, shuffle=False)
    allpair_stats, allpair_poses = eval_3DMatch_scene(model, 0, dloader,
                                                      config, args)

    # benchmarking using the registration recall defined in 3DMatch to compare with Predator
    # np.save('predator.npy', allpair_poses)
    benchmark_predator(
        allpair_poses,
        gt_folder='/data/OverlapPredator/configs/benchmarks/3DLoMatch')

    # benchmarking using the registration recall defined in DGR
    allpair_average = allpair_stats.mean(0)
    correct_pair_average = allpair_stats[allpair_stats[:, 0] == 1].mean(0)
    logging.info(f"*" * 40)
    logging.info(
        f"All {allpair_stats.shape[0]} pairs, Mean Success Rate={allpair_average[0] * 100:.2f}%, Mean Re={correct_pair_average[1]:.2f}, Mean Te={correct_pair_average[2]:.2f}"
    )
    logging.info(
        f"\tInput:  Mean Inlier Num={allpair_average[3]:.2f}(ratio={allpair_average[4] * 100:.2f}%)"
    )
    logging.info(
        f"\tOutput: Mean Inlier Num={allpair_average[5]:.2f}(precision={allpair_average[6] * 100:.2f}%, recall={allpair_average[7] * 100:.2f}%, f1={allpair_average[8] * 100:.2f}%)"
    )
    logging.info(
        f"\tMean model time: {allpair_average[9]:.2f}s, Mean data time: {allpair_average[10]:.2f}s"
    )

    return allpair_stats
Example #10
    if args.generate_features:
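        # import the module-style config lazily, only when feature generation is requested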
        import importlib
        cfg = importlib.import_module("configs.config")

        dset = KITTIMapDataset("test",
                               cfg,
                               config_d3feat=config,
                               root=config.root)
        # dset = ThreeDMatchTestset(root=config.root,
        #                    downsample=config.downsample,
        #                    config=config,
        #                    last_scene=False,
        #                )
        dloader, _ = get_dataloader(
            dataset=dset,
            batch_size=1,
            shuffle=False,
            num_workers=config.num_workers,
        )
        generate_features(model.cuda(), dloader, config, args.chosen_snapshot)

    def test_kitti(model, dataset, config):
        # self.sess.run(dataset.test_init_op)
        import sys
        use_random_points = False
        icp_save_path = "d3feat_output"
        if use_random_points:
            num_keypts = 5000
            # icp_save_path = f'geometric_registration_kitti/D3Feat_{self.experiment_str}-rand{num_keypts}'
        else:
            num_keypts = 250
            # icp_save_path = f'geometric_registration_kitti/D3Feat_{self.experiment_str}-pred{num_keypts}'
Example #11
def main(args):
    args.best_acc = 0
    best_acc5 = 0

    # Data
    print('==> Preparing data..')
    train_loader, test_loader, ndata = get_dataloader(args, add_erasing=args.erasing, aug_plus=args.aug_plus)

    logger.info(f"length of training dataset: {ndata}")

    # Model
    model, model_ema = build_model(args)
    contrast = MemoryMoCo(128, args.nce_k, args.nce_t, thresh=0).cuda()
    criterion = NCESoftmaxLoss().cuda()
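    # linear scaling rule: the base LR is scaled by the global batch size (batch_size * world_size / 128)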
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.batch_size * dist.get_world_size() / 128 * args.base_learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = get_scheduler(optimizer, len(train_loader), args)

    if args.amp_opt_level != "O0":
        if amp is None:
            logger.warning(f"apex is not installed but amp_opt_level is set to {args.amp_opt_level}, ignoring.\n"
                           "you should install apex from https://github.com/NVIDIA/apex#quick-start first")
            args.amp_opt_level = "O0"
        else:
            model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp_opt_level)
            model_ema = amp.initialize(model_ema, opt_level=args.amp_opt_level)

    model = DistributedDataParallel(model, device_ids=[args.local_rank], broadcast_buffers=False)

    # optionally resume from a checkpoint
    if args.resume:
        assert os.path.isfile(args.resume)
        load_checkpoint(args, model, model_ema, contrast, optimizer, scheduler)

    # tensorboard
    if dist.get_rank() == 0:
        summary_writer = SummaryWriter(log_dir=args.save_dir)
    else:
        summary_writer = None

    # routine
    for epoch in range(args.start_epoch, args.epochs + 1):
        if args.lr_scheduler == 'cosine':
            train_loader.sampler.set_epoch(epoch)

        tic = time.time()
        loss, prob = train_moco(epoch, train_loader, model, model_ema, contrast, criterion, optimizer, scheduler, args)

        logger.info('epoch {}, total time {:.2f}'.format(epoch, time.time() - tic))

        if summary_writer is not None:
            # tensorboard logger
            summary_writer.add_scalar('ins_loss', loss, epoch)
            summary_writer.add_scalar('ins_prob', prob, epoch)
            summary_writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
        if args.dataset == 'stl10-full':
            acc, acc5 = kNN(epoch, model, contrast, labeledTrainloader, test_loader, 200, args.nce_t, True)
        else:
            acc, acc5 = kNN(epoch, model, contrast, train_loader, test_loader, 200, args.nce_t, True)
        if acc >= args.best_acc: 
            args.best_acc = acc
            best_acc5 = acc5
        logger.info('KNN top-1 precision: {:.4f} {:.4f}, best is: {:.4f} {:.4f}'.format(acc*100., \
            acc5*100., args.best_acc*100., best_acc5*100))
        logger.info(str(args))

        if dist.get_rank() == 0:
            # save model
            save_checkpoint(args, epoch, model, model_ema, contrast, optimizer, scheduler, args.best_acc)
    if args.dataset == 'stl10-full':
        acc1, acc5 = kNN(epoch, model, contrast, labeledTrainloader, test_loader, 200, args.nce_t, True)
    else:
        acc1, acc5 = kNN(epoch, model, contrast, train_loader, test_loader, 200, args.nce_t, True)

    logger.info('KNN top-1 and top-5 precision with recomputed memory bank: {:.4f} {:.4f}'.format(acc1*100., acc5*100))
    logger.info('Best KNN top-1 and top-5 precision: {:.4f} {:.4f}'.format(args.best_acc*100., best_acc5*100))
    logger.info(str(args))
Example #12
        config.optimizer,
        gamma=config.scheduler_gamma,
    )
    
    # create dataset and dataloader
    info_train = load_obj(config.train_info)
    info_val = load_obj(config.val_info)
    info_benchmark = load_obj(f'configs/{config.test_info}.pkl')

    train_set = ThreeDMatchDownsampled(info_train,config,data_augmentation=True)
    val_set = ThreeDMatchDownsampled(info_val,config,data_augmentation=False)
    benchmark_set = ThreeDMatchDownsampled(info_benchmark,config, data_augmentation=False)

    config.train_loader, neighborhood_limits = get_dataloader(dataset=train_set,
                                        batch_size=config.batch_size,
                                        shuffle=True,
                                        num_workers=config.num_workers,
                                        )
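    # neighborhood limits calibrated on the training set are reused for the val and test loaders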
    config.val_loader, _ = get_dataloader(dataset=val_set,
                                        batch_size=config.batch_size,
                                        shuffle=False,
                                        num_workers=1,
                                        neighborhood_limits=neighborhood_limits
                                        )
    config.test_loader, _ = get_dataloader(dataset=benchmark_set,
                                        batch_size=config.batch_size,
                                        shuffle=False,
                                        num_workers=1,
                                        neighborhood_limits=neighborhood_limits)
    
    # create evaluation metrics
Example #13
        config.architecture.append('resnetb')
    for i in range(config.num_layers - 2):
        config.architecture.append('nearest_upsample')
        config.architecture.append('unary')
    config.architecture.append('nearest_upsample')
    config.architecture.append('last_unary')
    config.model = KPFCNN(config).to(config.device)

    # create dataset and dataloader
    info_train = load_obj(config.train_info)
    train_set = IndoorDataset(info_train, config, data_augmentation=True)
    demo_set = ThreeDMatchDemo(config, config.src_pcd, config.tgt_pcd)

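    # the training loader is built only to calibrate neighborhood limits for the demo loader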
    _, neighborhood_limits = get_dataloader(
        dataset=train_set,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_workers,
    )
    demo_loader, _ = get_dataloader(dataset=demo_set,
                                    batch_size=config.batch_size,
                                    shuffle=False,
                                    num_workers=1,
                                    neighborhood_limits=neighborhood_limits)

    # load pretrained weights
    assert config.pretrain is not None
    state = torch.load(config.pretrain)
    config.model.load_state_dict(state['state_dict'])

    # do pose estimation
    main(config, demo_loader)
Example #14
def eval_3DMatch(method, args):
    """
    Collect the evaluation results on each scene of 3DMatch testset, write the result to a .log file.
    """
    scene_list = [
        '7-scenes-redkitchen', 'sun3d-home_at-home_at_scan1_2013_jan_1',
        'sun3d-home_md-home_md_scan9_2012_sep_30', 'sun3d-hotel_uc-scan3',
        'sun3d-hotel_umd-maryland_hotel1', 'sun3d-hotel_umd-maryland_hotel3',
        'sun3d-mit_76_studyroom-76-1studyroom2',
        'sun3d-mit_lab_hj-lab_hj_tea_nov_2_2012_scan1_erika'
    ]
    all_stats = {}
    for scene_ind, scene in enumerate(scene_list):
        dset = ThreeDMatchTest(
            root='/data/3DMatch/',
            descriptor=args.descriptor,
            in_dim=6,
            inlier_threshold=args.inlier_threshold,
            num_node='all',
            use_mutual=args.use_mutual,
            augment_axis=0,
            augment_rotation=0.00,
            augment_translation=0.0,
            select_scene=scene,
        )
        dloader = get_dataloader(dset,
                                 batch_size=1,
                                 num_workers=8,
                                 shuffle=False)
        scene_stats = eval_3DMatch_scene(method, scene, scene_ind, dloader,
                                         args)
        all_stats[scene] = scene_stats

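    # one row of mean statistics per scene; per the logs below, cols 0-2 are recall/Re/Te and 6-8 precision/recall/F1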
    scene_vals = np.zeros([len(scene_list), 12])
    scene_ind = 0
    for scene, stats in all_stats.items():
        correct_pair = np.where(stats[:, 0] == 1)
        scene_vals[scene_ind] = stats.mean(0)
        # for Re and Te, we only count the matched pairs.
        scene_vals[scene_ind, 1] = stats[correct_pair].mean(0)[1]
        scene_vals[scene_ind, 2] = stats[correct_pair].mean(0)[2]
        logging.info(f"Scene {scene_ind}th:"
                     f" Reg Recall={scene_vals[scene_ind, 0]*100:.2f}% "
                     f" Mean RE={scene_vals[scene_ind, 1]:.2f} "
                     f" Mean TE={scene_vals[scene_ind, 2]:.2f} "
                     f" Mean Precision={scene_vals[scene_ind, 6]*100:.2f}% "
                     f" Mean Recall={scene_vals[scene_ind, 7]*100:.2f}% "
                     f" Mean F1={scene_vals[scene_ind, 8]*100:.2f}%")
        scene_ind += 1

    # scene level average
    average = scene_vals.mean(0)
    logging.info(
        f"All {len(scene_list)} scenes, Mean Reg Recall={average[0]*100:.2f}%, Mean Re={average[1]:.2f}, Mean Te={average[2]:.2f}"
    )
    logging.info(
        f"\tInput:  Mean Inlier Num={average[3]:.2f}(ratio={average[4]*100:.2f}%)"
    )
    logging.info(
        f"\tOutput: Mean Inlier Num={average[5]:.2f}(precision={average[6]*100:.2f}%, recall={average[7]*100:.2f}%, f1={average[8]*100:.2f}%)"
    )
    logging.info(
        f"\tMean model time: {average[9]:.4f}s, Mean data time: {average[10]:.4f}s"
    )

    # pair level average
    stats_list = [stats for _, stats in all_stats.items()]
    allpair_stats = np.concatenate(stats_list, axis=0)
    allpair_average = allpair_stats.mean(0)
    correct_pair_average = allpair_stats[allpair_stats[:, 0] == 1].mean(0)
    logging.info(f"*" * 40)
    logging.info(
        f"All {allpair_stats.shape[0]} pairs, Mean Reg Recall={allpair_average[0]*100:.2f}%, Mean Re={correct_pair_average[1]:.2f}, Mean Te={correct_pair_average[2]:.2f}"
    )
    logging.info(
        f"\tInput:  Mean Inlier Num={allpair_average[3]:.2f}(ratio={allpair_average[4]*100:.2f}%)"
    )
    logging.info(
        f"\tOutput: Mean Inlier Num={allpair_average[5]:.2f}(precision={allpair_average[6]*100:.2f}%, recall={allpair_average[7]*100:.2f}%, f1={allpair_average[8]*100:.2f}%)"
    )
    logging.info(
        f"\tMean model time: {allpair_average[9]:.4f}s, Mean data time: {allpair_average[10]:.4f}s"
    )

    all_stats_npy = np.concatenate([v for k, v in all_stats.items()], axis=0)
    return all_stats_npy

Example #15
device = 'cuda' if torch.cuda.is_available() else 'cpu'

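# distributed setup: one process per GPU, device chosen by args.local_rank, NCCL process group joined via env:// rendezvous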
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')

best_acc1 = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

if not os.path.exists(args.save_dir):
    os.makedirs(args.save_dir)

# Data
print('==> Preparing data..')
trainloader, testloader, ndata = get_dataloader(args)

print('==> Building model..')
net = models.__dict__['ResNet18'](low_dim=args.low_dim, pool_len=args.pool_len)

# define lemniscate
if args.nce_k > 0:
    lemniscate = NCEAverage(args.low_dim, ndata, args.nce_k, args.nce_t,
                            args.nce_m)
else:
    lemniscate = LinearAverage(args.low_dim, ndata, args.nce_t, args.nce_m)

net.to(device)
optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=0.9,
Example #16
    def __init__(self, config):
        is_test = False
        if is_test:
            self.experiment_id = "KPConvNet" + time.strftime('%m%d%H%M') + 'Test'
        else:
            self.experiment_id = "KPConvNet" + time.strftime('%m%d%H%M')

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.verbose = True

        # snapshot
        self.snapshot_interval = 5
        snapshot_root = f'snapshot/{config.dataset}_{self.experiment_id}'
        tensorboard_root = f'tensorboard/{config.dataset}_{self.experiment_id}'
        os.makedirs(snapshot_root, exist_ok=True)
        os.makedirs(tensorboard_root, exist_ok=True)
        shutil.copy2(os.path.join('.', 'training_ShapeNetCls.py'), os.path.join(snapshot_root, 'train.py'))
        shutil.copy2(os.path.join('datasets', 'ShapeNet.py'), os.path.join(snapshot_root, 'dataset.py'))
        shutil.copy2(os.path.join('datasets', 'dataloader.py'), os.path.join(snapshot_root, 'dataloader.py'))
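        # snapshot the training script and dataset code alongside checkpoints for reproducibility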
        self.save_dir = os.path.join(snapshot_root, 'models/')
        self.result_dir = os.path.join(snapshot_root, 'results/')
        self.tboard_dir = tensorboard_root

        # dataset & dataloader
        self.train_set = ShapeNetDataset(root=config.data_train_dir,
                                         split='train',
                                         first_subsampling_dl=config.first_subsampling_dl,
                                         classification=True,
                                         config=config,
                                         )
        self.test_set = ShapeNetDataset(root=config.data_test_dir,
                                        split='test',
                                        first_subsampling_dl=config.first_subsampling_dl,
                                        classification=True,
                                        config=config,
                                        )
        self.train_loader = get_dataloader(dataset=self.train_set,
                                           batch_size=config.train_batch_size,
                                           shuffle=True,
                                           num_workers=4,
                                           )
        self.test_loader = get_dataloader(dataset=self.test_set,
                                          batch_size=config.test_batch_size,
                                          shuffle=False,
                                          num_workers=4,
                                          )
        print("Training set size:", self.train_loader.dataset.__len__())
        print("Test set size:", self.test_loader.dataset.__len__())

        # model
        self.model = KPCNN(config)
        self.resume = config.resume
        # optimizer 
        self.start_epoch = 0
        self.epoch = config.max_epoch
        self.optimizer = optim.SGD(self.model.parameters(), lr=config.learning_rate, momentum=config.momentum, weight_decay=1e-6)
        self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=config.exp_gamma)
        self.scheduler_interval = config.exp_interval

        # evaluate
        self.evaluate_interval = 1
        self.evaluate_metric = nn.CrossEntropyLoss(reduction='mean')

        self.check_args()
Example #17
                                    augment_translation=config.augment_translation,
                                    )
    val_set = ThreeDMatchTrainVal(root=config.root,
                                  split='val',
                                  descriptor=config.descriptor,
                                  in_dim=config.in_dim,
                                  inlier_threshold=config.inlier_threshold,
                                  num_node=config.num_node,
                                  use_mutual=config.use_mutual,
                                  downsample=config.downsample,
                                  augment_axis=config.augment_axis,
                                  augment_rotation=config.augment_rotation,
                                  augment_translation=config.augment_translation,
                                  )
    config.train_loader = get_dataloader(dataset=train_set,
                                         batch_size=config.batch_size,
                                         num_workers=config.num_workers,
                                         )
    config.val_loader = get_dataloader(dataset=val_set,
                                       batch_size=config.batch_size,
                                       num_workers=config.num_workers,
                                       )

    # create evaluation
    config.evaluate_metric = {
        "ClassificationLoss": ClassificationLoss(balanced=config.balanced),
        "SpectralMatchingLoss": SpectralMatchingLoss(balanced=config.balanced),
        "TransformationLoss": TransformationLoss(re_thre=config.re_thre, te_thre=config.te_thre),
    }
    config.metric_weight = {
        "ClassificationLoss": config.weight_classification,
        "SpectralMatchingLoss": config.weight_spectralmatching,
Example #18
def main():
    seed_everything()

    # yml = 'configs/base.yml'
    # config = utils.config.load(yml)
    # pprint.pprint(config, indent=2)

    model = get_model(config).cuda()
    bind_model(model)

    args = get_args()
    if args.pause:  ## when in test mode
        print('Inferring Start...')
        nsml.paused(scope=locals())

    if args.mode == 'train':  ### when in training mode
        print('Training Start...')

        # no bias decay
        if config.OPTIMIZER.NO_BIAS_DECAY:
            group_decay, group_no_decay = group_weight(model)
            params = [{'params': group_decay},
                      {'params': group_no_decay, 'weight_decay': 0.0}]
        else:
            params = model.parameters()

        optimizer = get_optimizer(config, params)
        optimizer.param_groups[0]['initial_lr'] = config.OPTIMIZER.LR
        if config.OPTIMIZER.NO_BIAS_DECAY:
            optimizer.param_groups[1]['initial_lr'] = config.OPTIMIZER.LR

        ###############################################################################################

        if IS_LOCAL:
            prepare_train_directories(config)
            utils.config.save_config(yml, config.LOCAL_TRAIN_DIR)

            checkpoint = utils.checkpoint.get_initial_checkpoint(config)
            if checkpoint is not None:
                last_epoch, score, loss = utils.checkpoint.load_checkpoint(config, model, checkpoint)
            else:
                print('[*] no checkpoint found')
                last_epoch, score, loss = -1, -1, float('inf')
            print('last epoch:{} score:{:.4f} loss:{:.4f}'.format(last_epoch, score, loss))

        else:
            last_epoch = -1
        ###############################################################################################

        scheduler = get_scheduler(config, optimizer, last_epoch=last_epoch)

        ###############################################################################################
        if IS_LOCAL:
            if config.SCHEDULER.NAME == 'multi_step':
                if config.SCHEDULER.WARMUP:
                    scheduler_dict = scheduler.state_dict()['after_scheduler'].state_dict()
                else:
                    scheduler_dict = scheduler.state_dict()

                milestones = scheduler_dict['milestones']
                step_count = len([i for i in milestones if i < last_epoch])
                optimizer.param_groups[0]['lr'] *= scheduler_dict['gamma'] ** step_count
                if config.OPTIMIZER.NO_BIAS_DECAY:
                    optimizer.param_groups[1]['lr'] *= scheduler_dict['gamma'] ** step_count

            if last_epoch != -1:
                scheduler.step()
        ###############################################################################################
        # for dirname, _, filenames in os.walk(DATASET_PATH):
        #     for filename in filenames:
        #         print(os.path.join(dirname, filename))

        # run preprocessing up front when possible
        preprocess_type = config.DATA.PREPROCESS
        cv2_size = (config.DATA.IMG_W, config.DATA.IMG_H)
        if not IS_LOCAL:
            preprocess(os.path.join(DATASET_PATH, 'train', 'train_data', 'NOR'), os.path.join(preprocess_type, 'NOR'), preprocess_type, cv2_size)
            preprocess(os.path.join(DATASET_PATH, 'train', 'train_data', 'AMD'), os.path.join(preprocess_type, 'AMD'), preprocess_type, cv2_size)
            preprocess(os.path.join(DATASET_PATH, 'train', 'train_data', 'RVO'), os.path.join(preprocess_type, 'RVO'), preprocess_type, cv2_size)
            preprocess(os.path.join(DATASET_PATH, 'train', 'train_data', 'DMR'), os.path.join(preprocess_type, 'DMR'), preprocess_type, cv2_size)
            data_dir = preprocess_type
            # data_dir = os.path.join(DATASET_PATH, 'train/train_data')
        else:  # IS_LOCAL
            data_dir = os.path.join(DATASET_PATH, preprocess_type)

        # eda
        # train_std(data_dir, preprocess_type, cv2_size)

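        # split into config.NUM_FOLDS cross-validation folds; fold config.IDX_FOLD presumably serves as the validation set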
        fold_df = split_cv(data_dir, n_splits=config.NUM_FOLDS)
        val_fold_idx = config.IDX_FOLD

        ###############################################################################################

        train_loader = get_dataloader(config, data_dir, fold_df, val_fold_idx, 'train', transform=Albu())
        val_loader = get_dataloader(config, data_dir, fold_df, val_fold_idx, 'val')

        postfix = dict()
        num_epochs = config.TRAIN.NUM_EPOCHS

        val_acc_list = []
        for epoch in range(last_epoch+1, num_epochs):

            if epoch >= config.LOSS.FINETUNE_EPOCH:
                criterion = get_loss(config.LOSS.FINETUNE_LOSS)
            else:
                criterion = get_loss(config.LOSS.NAME)

            train_values = train_single_epoch(config, model, train_loader, criterion, optimizer, scheduler, epoch)
            val_values = evaluate_single_epoch(config, model, val_loader, criterion, epoch)
            val_acc_list.append((epoch, val_values[2]))

            if config.SCHEDULER.NAME != 'one_cyle_lr':
                scheduler.step()

            if IS_LOCAL:
                utils.checkpoint.save_checkpoint(config, model, epoch, val_values[1], val_values[0])

            else:
                postfix['train_loss'] = train_values[0]
                postfix['train_res'] = train_values[1]
                postfix['train_acc'] = train_values[2]
                postfix['train_sens'] = train_values[3]
                postfix['train_spec'] = train_values[4]

                postfix['val_loss'] = val_values[0]
                postfix['val_res'] = val_values[1]
                postfix['val_acc'] = val_values[2]
                postfix['val_sens'] = val_values[3]
                postfix['val_spec'] = val_values[4]

                nsml.report(**postfix, summary=True, step=epoch)

                val_res = '%.10f' % val_values[1]
                val_res = val_res.replace(".", "")
                val_res = val_res[:4] + '.' + val_res[4:8] + '.' + val_res[8:]
                save_name = 'epoch_%02d_score%s_loss%.4f.pth' % (epoch, val_res, val_values[0])
                # nsml.save(save_name)
                nsml.save(epoch)

        for e, val_acc in val_acc_list:
            print('%02d %s' % (e, val_acc))
Example #19
            # return dict_inputs
            return point_set, features, cls
        else:
            # manually convert numpy array to Tensor.
            # seg = torch.from_numpy(seg) - 1  # change to 0-based labels
            # dict_inputs = segmentation_inputs(point_set, features, seg, self.config)
            # return dict_inputs
            seg = seg - 1
            return point_set, features, seg

    def __len__(self):
        return len(self.datapath)


if __name__ == '__main__':
    datapath = "./data/shapenetcore_partanno_segmentation_benchmark_v0"
    from training_ShapeNetCls import ShapeNetPartConfig

    config = ShapeNetPartConfig()

    print("Segmentation task:")
    dset = ShapeNetDataset(root=datapath, config=config, first_subsampling_dl=0.01, classification=False)
    sample = dset[0]

    from datasets.dataloader import get_dataloader

    dataloader = get_dataloader(dset, batch_size=2)
    for batch_idx, batch in enumerate(dataloader):
        print(batch)
        break