Example #1
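# Excerpt from the OcCo training scripts; it assumes the usual file-level
# imports (os, time, datetime, shutil, importlib, numpy as np, torch,
# SummaryWriter) plus the project utilities TrainLogger, lmdb_dataflow,
# copy_parameters and plot_pcd_three_views.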
def main(args):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    ''' === Set up Loggers and Load Data === '''
    MyLogger = TrainLogger(args, name=args.model.upper(), subfold='completion')
    os.makedirs(os.path.join(MyLogger.experiment_dir, 'plots'), exist_ok=True)
    writer = SummaryWriter(os.path.join(MyLogger.experiment_dir, 'runs'))

    MyLogger.logger.info('Load dataset %s' % args.dataset)
    if args.dataset == 'modelnet':
        lmdb_train = './data/modelnet/train.lmdb'
        lmdb_valid = './data/modelnet/test.lmdb'
    elif args.dataset == 'shapenet':
        lmdb_train = 'data/shapenet/train.lmdb'
        lmdb_valid = 'data/shapenet/valid.lmdb'
    else:
        raise ValueError("Dataset is not available, it should be either ModelNet or ShapeNet")

    assert (args.gt_pts == args.grid_size ** 2 * args.num_coarse)
    df_train, num_train = lmdb_dataflow(
        lmdb_train, args.batch_size, args.input_pts, args.gt_pts, is_training=True)
    df_valid, num_valid = lmdb_dataflow(
        lmdb_valid, args.batch_size, args.input_pts, args.gt_pts, is_training=False)
    train_gen, valid_gen = df_train.get_data(), df_valid.get_data()
    total_steps = num_train // args.batch_size * args.epoch

    ''' === Load Model and Backup Scripts === '''
    MODEL = importlib.import_module(args.model)
    shutil.copy(os.path.abspath(__file__), MyLogger.log_dir)
    shutil.copy('./models/%s.py' % args.model, MyLogger.log_dir)

    # multi-GPU usage
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    completer = MODEL.get_model(args=args, grid_size=args.grid_size,
                                grid_scale=args.grid_scale, num_coarse=args.num_coarse).to(device)
    criterion = MODEL.get_loss().to(device)
    completer = torch.nn.DataParallel(completer)
    # nn.DataParallel has its own issues (slow, memory-hungry), but it is bearable here
    # some optional advanced solutions: https://zhuanlan.zhihu.com/p/145427849
    print('=' * 33)
    print('Using %d GPU(s),' % torch.cuda.device_count(), 'Indices: %s' % args.gpu)
    print('=' * 33)

    ''' === Restore Model from Checkpoints, If there is any === '''
    if args.restore:
        checkpoint = torch.load(args.restore_path)
        completer = copy_parameters(completer, checkpoint, verbose=True)
        MyLogger.logger.info('Use pre-trained model from %s' % args.restore_path)
        MyLogger.step, MyLogger.epoch = checkpoint['step'], checkpoint['epoch']

    else:
        MyLogger.logger.info('No pre-trained model, start training from scratch...')

    ''' IMPORTANT: for completion, no weight decay in Adam, no batch norm in decoder!'''
    optimizer = torch.optim.Adam(
        completer.parameters(),
        lr=args.lr,
        betas=(0.9, 0.999),
        eps=1e-08,
        weight_decay=0)
    # weight_decay=1e-4)

    # for simplicity we skip the batch-norm momentum decay here
    # scheduler = StepLR(optimizer, step_size=20, gamma=0.7) -> instead we decay the lr manually below
    LEARNING_RATE_CLIP = 0.01 * args.lr

    def vary2fix(inputs, npts, batch_size=args.batch_size, num_point=args.input_pts):
        """upsample/downsample varied input points into fixed length
        :param inputs: input points cloud
        :param npts: describe how many points of each input object
        :param batch_size: training batch size
        :param num_point: number of points of per occluded object
        :return: fixed length of points of each object
        """

        inputs_ls = np.split(inputs[0], npts.cumsum())
        ret_inputs = np.zeros((1, batch_size * num_point, 3))
        ret_npts = npts.copy()

        for idx, obj in enumerate(inputs_ls[:-1]):

            if len(obj) <= num_point:
                select_idx = np.concatenate([
                    np.arange(len(obj)), np.random.choice(len(obj), num_point - len(obj))])
            else:
                select_idx = np.arange(len(obj))
                np.random.shuffle(select_idx)
                select_idx = select_idx[:num_point]  # randomly downsample to num_point points

            ret_inputs[0][idx * num_point:(idx + 1) * num_point] = obj[select_idx].copy()
            ret_npts[idx] = num_point

        return ret_inputs, ret_npts
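    # usage sketch (shapes assumed, not from the source): for inputs of shape
    # (1, npts.sum(), 3), vary2fix pads objects shorter than num_point with
    # re-sampled points and randomly downsamples longer ones, always returning
    # a (1, batch_size * num_point, 3) array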

    def piecewise_constant(global_step, boundaries, values):
        """substitute for tf.train.piecewise_constant:
        https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/piecewise_constant
        global_step can be either training epoch or training step
        """
        if len(boundaries) != len(values) - 1:
            raise ValueError(
                "The length of boundaries should be 1 less than the length of values")

        if global_step <= boundaries[0]:
            return values[0]
        elif global_step > boundaries[-1]:
            return values[-1]
        else:
            for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
                if low < global_step <= high:
                    return v
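
    # worked examples of the alpha schedule used in the loop below:
    #   piecewise_constant(5000,  [10000, 20000, 50000], [0.01, 0.1, 0.5, 1.0]) -> 0.01
    #   piecewise_constant(30000, [10000, 20000, 50000], [0.01, 0.1, 0.5, 1.0]) -> 0.5
    #   piecewise_constant(60000, [10000, 20000, 50000], [0.01, 0.1, 0.5, 1.0]) -> 1.0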

    total_time, train_start = 0, time.time()
    for step in range(MyLogger.step + 1, total_steps + 1):

        ''' === Training === '''
        start = time.time()
        epoch = step * args.batch_size // num_train + 1
        lr = max(args.lr * (args.lr_decay ** (epoch // args.step_size)), LEARNING_RATE_CLIP)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        # follow the original alpha schedule for the ShapeNet dataset from the PCN paper:
        alpha = piecewise_constant(step, [10000, 20000, 50000], [0.01, 0.1, 0.5, 1.0])
        writer.add_scalar('Learning Rate', lr, step)
        writer.add_scalar('Alpha', alpha, step)

        ids, inputs, npts, gt = next(train_gen)
        if args.dataset == 'shapenet':
            inputs, _ = vary2fix(inputs, npts)

        completer.train()
        optimizer.zero_grad()
        inputs = inputs.reshape(args.batch_size, args.input_pts, 3)
        inputs, gt = torch.Tensor(inputs).transpose(2, 1).cuda(), torch.Tensor(gt).cuda()
        pred_coarse, pred_fine = completer(inputs)
        loss = criterion(pred_coarse, pred_fine, gt, alpha)
        loss.backward()
        optimizer.step()
        total_time += time.time() - start
        writer.add_scalar('Loss', loss, step)

        if step % args.steps_print == 0:
            MyLogger.logger.info('epoch %d  step %d  alpha %.2f  loss %.8f  time per step %.2f s' %
                                 (epoch, step, alpha, loss, total_time / args.steps_print))
            total_time = 0

        ''' === Validating === '''
        if step % args.steps_eval == 0:

            with torch.no_grad():
                completer.eval()
                MyLogger.logger.info('Testing...')
                num_eval_steps, eval_loss, eval_time = num_valid // args.batch_size, 0, 0

                for eval_step in range(num_eval_steps):
                    start = time.time()
                    _, inputs, npts, gt = next(valid_gen)
                    if args.dataset == 'shapenet':
                        inputs, _ = vary2fix(inputs, npts)

                    inputs = inputs.reshape(args.batch_size, args.input_pts, 3)
                    inputs, gt = torch.Tensor(inputs).transpose(2, 1).cuda(), torch.Tensor(gt).cuda()

                    pred_coarse, pred_fine = completer(inputs)
                    loss = criterion(pred_coarse, pred_fine, gt, alpha)
                    eval_loss += loss
                    eval_time += time.time() - start

                MyLogger.logger.info('epoch %d  step %d  validation  loss %.8f  time per step %.2f s' %
                                     (epoch, step, eval_loss / num_eval_steps, eval_time / num_eval_steps))

        ''' === Visualisation === '''
        if step % args.steps_visu == 0:
            all_pcds = [item.detach().cpu().numpy() for item in [
                inputs.transpose(2, 1), pred_coarse, pred_fine, gt]]
            for i in range(args.batch_size):
                plot_path = os.path.join(MyLogger.experiment_dir, 'plots',
                                         'epoch_%d_step_%d_%s.png' % (epoch, step, ids[i]))
                pcds = [x[i] for x in all_pcds]
                plot_pcd_three_views(plot_path, pcds,
                                     ['input', 'coarse output', 'fine output', 'ground truth'])

        trained_epoch = epoch - 1
        if (trained_epoch % args.epochs_save == 0) and (trained_epoch != 0) and \
                not os.path.exists(os.path.join(MyLogger.checkpoints_dir,
                                                'model_epoch_%d.pth' % trained_epoch)):
            state = {
                'step': step,
                'epoch': epoch,
                'model_state_dict': completer.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, os.path.join(MyLogger.checkpoints_dir,
                                           "model_epoch_%d.pth" % trained_epoch))
            MyLogger.logger.info('Model saved at %s/model_epoch_%d.pth\n'
                                 % (MyLogger.checkpoints_dir, trained_epoch))

    MyLogger.logger.info('Training Finished, Total Time: ' +
                         str(datetime.timedelta(seconds=time.time() - train_start)))
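
The DataParallel caveat in this example points to faster alternatives. Below is a minimal DistributedDataParallel sketch, a hedged illustration assuming a single-node torchrun launch, not part of the original script:

# launch with: torchrun --nproc_per_node=<num_gpus> <this script>
import os
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

dist.init_process_group(backend='nccl')
local_rank = int(os.environ['LOCAL_RANK'])           # set by torchrun
torch.cuda.set_device(local_rank)
completer = MODEL.get_model(args=args, grid_size=args.grid_size,
                            grid_scale=args.grid_scale,
                            num_coarse=args.num_coarse).cuda(local_rank)
completer = DDP(completer, device_ids=[local_rank])  # replaces nn.DataParallel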
Example #2
File: train_cls.py  Project: zeta1999/OcCo
def main(args):

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # seed_torch(args.seed)
    ''' === Set up Loggers and Load Data === '''
    MyLogger = TrainLogger(args,
                           name=args.model.upper(),
                           subfold='cls',
                           filename=args.mode + '_log')
    writer = SummaryWriter(os.path.join(MyLogger.experiment_dir, 'runs'))

    MyLogger.logger.info('Load dataset %s' % args.dataset)
    NUM_CLASSES, TRAIN_FILES, TEST_FILES = Dataset_Loc(dataset=args.dataset,
                                                       fname=args.fname,
                                                       partial=args.partial,
                                                       bn=args.bn)
    TRAIN_DATASET = General_CLSDataLoader_HDF5(file_list=TRAIN_FILES,
                                               num_point=1024)
    TEST_DATASET = General_CLSDataLoader_HDF5(file_list=TEST_FILES,
                                              num_point=1024)
    trainDataLoader = DataLoader(TRAIN_DATASET,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=4,
                                 drop_last=True)
    testDataLoader = DataLoader(TEST_DATASET,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=4)
    ''' === Load Model and Backup Scripts === '''
    MODEL = importlib.import_module(args.model)
    shutil.copy(os.path.abspath(__file__), MyLogger.log_dir)
    shutil.copy('./models/%s.py' % args.model, MyLogger.log_dir)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    classifier = MODEL.get_model(args=args,
                                 num_channel=3,
                                 num_class=NUM_CLASSES).to(device)
    criterion = MODEL.get_loss().to(device)
    classifier = torch.nn.DataParallel(classifier)
    # nn.DataParallel has its own issues (slow, memory expensive),
    # here are some advanced solutions: https://zhuanlan.zhihu.com/p/145427849
    print('=' * 27)
    print('Using %d GPU(s),' % torch.cuda.device_count(),
          'Indices: %s' % args.gpu)
    print('=' * 27)
    ''' === Restore Model from Pre-Trained Checkpoints: OcCo/Jigsaw etc === '''
    if args.restore:
        checkpoint = torch.load(args.restore_path)
        classifier = copy_parameters(classifier, checkpoint, verbose=True)
        MyLogger.logger.info('Use pre-trained weights from %s' %
                             args.restore_path)
    else:
        MyLogger.logger.info(
            'No pre-trained weights, start training from scratch...')

    if not args.use_sgd:
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.lr,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=1e-4)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=args.lr * 100,
                                    momentum=args.momentum,
                                    weight_decay=1e-4)

    if args.scheduler == 'cos':
        scheduler = CosineAnnealingLR(optimizer,
                                      T_max=args.epoch,
                                      eta_min=1e-3)
    else:
        scheduler = StepLR(optimizer,
                           step_size=args.step_size,
                           gamma=args.lr_decay)
    LEARNING_RATE_CLIP = 0.01 * args.lr

    if args.mode == 'test':
        with torch.no_grad():
            classifier.eval()
            MyLogger.epoch_init(training=False)

            for points, target in tqdm(testDataLoader,
                                       total=len(testDataLoader),
                                       smoothing=0.9):
                points, target = points.float().transpose(2, 1).cuda(), target.long().cuda()
                if args.model == 'pointnet_cls':
                    pred, trans_feat = classifier(points)
                    loss = criterion(pred, target, trans_feat)
                else:
                    pred = classifier(points)
                    loss = criterion(pred, target)
                MyLogger.step_update(
                    pred.data.max(1)[1].cpu().numpy(),
                    target.long().cpu().numpy(),
                    loss.cpu().detach().numpy())

            MyLogger.epoch_summary(writer=writer, training=False)
        sys.exit("Test Finished")

    for epoch in range(MyLogger.epoch, args.epoch + 1):
        ''' === Training === '''
        MyLogger.epoch_init()

        for points, target in tqdm(trainDataLoader,
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            # note: newer PyTorch (>= 1.4) prefers scheduler.get_last_lr() here
            writer.add_scalar('Learning Rate', scheduler.get_lr()[-1], MyLogger.step)

            # Augmentation, might bring performance gains
            if args.data_aug:
                points = random_point_dropout(points.data.numpy())
                points[:, :, :3] = random_scale_point_cloud(points[:, :, :3])
                points[:, :, :3] = random_shift_point_cloud(points[:, :, :3])
                points = torch.Tensor(points)

            points, target = points.transpose(2, 1).float().cuda(), target.long().cuda()

            # FP and BP
            classifier.train()
            optimizer.zero_grad()
            if args.model == 'pointnet_cls':
                pred, trans_feat = classifier(points)
                loss = criterion(pred, target, trans_feat)
            else:
                pred = classifier(points)
                loss = criterion(pred, target)
            loss.backward()
            optimizer.step()
            MyLogger.step_update(
                pred.data.max(1)[1].cpu().numpy(),
                target.long().cpu().numpy(),
                loss.cpu().detach().numpy())
        MyLogger.epoch_summary(writer=writer, training=True)
        ''' === Validating === '''
        with torch.no_grad():
            classifier.eval()
            MyLogger.epoch_init(training=False)

            for points, target in tqdm(testDataLoader,
                                       total=len(testDataLoader),
                                       smoothing=0.9):
                points, target = points.float().transpose(2, 1).cuda(), target.long().cuda()
                if args.model == 'pointnet_cls':
                    pred, trans_feat = classifier(points)
                    loss = criterion(pred, target, trans_feat)
                else:
                    pred = classifier(points)
                    loss = criterion(pred, target)
                MyLogger.step_update(
                    pred.data.max(1)[1].cpu().numpy(),
                    target.long().cpu().numpy(),
                    loss.cpu().detach().numpy())

            MyLogger.epoch_summary(writer=writer, training=False)
            if MyLogger.save_model:
                state = {
                    'step': MyLogger.step,
                    'epoch': MyLogger.best_instance_epoch,
                    'instance_acc': MyLogger.best_instance_acc,
                    'best_class_acc': MyLogger.best_class_acc,
                    'best_class_epoch': MyLogger.best_class_epoch,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, MyLogger.savepath)

        scheduler.step()
        if args.scheduler == 'step':
            for param_group in optimizer.param_groups:
                param_group['lr'] = max(param_group['lr'], LEARNING_RATE_CLIP)

    MyLogger.train_summary()
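
The LEARNING_RATE_CLIP block above floors the scheduler's decayed learning rate. A self-contained sketch of that interaction (lr, step_size and gamma are assumed values, not read from args):

import torch
from torch.optim.lr_scheduler import StepLR

net = torch.nn.Linear(3, 2)
opt = torch.optim.SGD(net.parameters(), lr=0.1)
sched = StepLR(opt, step_size=20, gamma=0.7)
clip = 0.01 * 0.1                       # LEARNING_RATE_CLIP = 0.01 * args.lr
for epoch in range(100):
    opt.step()                          # training steps would go here
    sched.step()
    for g in opt.param_groups:
        g['lr'] = max(g['lr'], clip)    # same floor as the loop above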
Example #3
def main(args):

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    root = 'data/indoor3d_sem_seg_hdf5_data'
    NUM_CLASSES = len(seg_label_to_cat)

    TRAIN_DATASET = S3DISDataset_HDF5(root=root, split='train', test_area=args.test_area)
    TEST_DATASET = S3DISDataset_HDF5(root=root, split='test', test_area=args.test_area)
    trainDataLoader = DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4)
    testDataLoader = DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)

    MyLogger = TrainLogger(args, name=args.model.upper(), subfold='semseg',
                           cls2name=class2label, filename=args.mode + '_log')
    MyLogger.logger.info("The number of training data is: %d" % len(TRAIN_DATASET))
    MyLogger.logger.info("The number of testing data is: %d" % len(TEST_DATASET))

    ''' === Model Loading === '''
    MODEL = importlib.import_module(args.model)
    shutil.copy(os.path.abspath(__file__), MyLogger.log_dir)
    shutil.copy('./models/%s.py' % args.model, MyLogger.log_dir)
    writer = SummaryWriter(os.path.join(MyLogger.experiment_dir, 'runs'))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    classifier = MODEL.get_model(num_class=NUM_CLASSES, num_channel=9, args=args).to(device)
    criterion = MODEL.get_loss().to(device)
    classifier = torch.nn.DataParallel(classifier)
    print('=' * 27)
    print('Using %d GPU(s),' % torch.cuda.device_count(), 'Indices: %s' % args.gpu)
    print('=' * 27)

    if args.restore:
        checkpoint = torch.load(args.restore_path)
        classifier = copy_parameters(classifier, checkpoint, verbose=True)
        MyLogger.logger.info('Use pre-trained weights from %s' % args.restore_path)
    else:
        MyLogger.logger.info('No pre-trained weights, start training from scratch...')
        if args.xavier_init:
            classifier = classifier.apply(weights_init)
            MyLogger.logger.info("Using Xavier weight initialisation")

    if args.optimizer == 'adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.lr,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=1e-4)
        MyLogger.logger.info("Using Adam optimiser")
    else:
        optimizer = torch.optim.SGD(
            classifier.parameters(),
            lr=args.lr * 100,
            momentum=args.momentum)
        MyLogger.logger.info("Using SGD optimiser")
    # use a similar lr decay setting to
    # https://github.com/charlesq34/pointnet/blob/master/sem_seg/train.py
    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.5)

    if args.scheduler == 'cos':
        scheduler = CosineAnnealingLR(optimizer, T_max=args.epoch, eta_min=1e-3)
    else:
        scheduler = StepLR(optimizer, step_size=args.step_size, gamma=args.lr_decay)

    LEARNING_RATE_CLIP = 1e-5
    MOMENTUM_ORIGINAL = 0.1
    MOMENTUM_DECAY = 0.5
    MOMENTUM_DECAY_STEP = args.step_size

    ''' === Testing then Exit === '''
    if args.mode == 'test':
        with torch.no_grad():
            classifier.eval()
            MyLogger.epoch_init(training=False)

            for points, target in tqdm(testDataLoader, total=len(testDataLoader), smoothing=0.9):
                points, target = points.transpose(2, 1).float().cuda(), target.view(-1, 1)[:, 0].long().cuda()
                if args.model == 'pointnet_semseg':
                    seg_pred, trans_feat = classifier(points)
                    seg_pred = seg_pred.contiguous().view(-1, NUM_CLASSES)
                    loss = criterion(seg_pred, target, trans_feat)
                else:
                    seg_pred = classifier(points)
                    seg_pred = seg_pred.contiguous().view(-1, NUM_CLASSES)
                    loss = criterion(seg_pred, target)
                MyLogger.step_update(seg_pred.data.max(1)[1].cpu().numpy(),
                                     target.long().cpu().numpy(),
                                     loss.cpu().detach().numpy())

            MyLogger.epoch_summary(writer=writer, training=False, mode='semseg')
        sys.exit("Test Finished")

    for epoch in range(MyLogger.epoch, args.epoch + 1):

        ''' === Training === '''
        # scheduler.step()
        MyLogger.epoch_init()

        for points, target in tqdm(trainDataLoader, total=len(trainDataLoader), smoothing=0.9):
            # note: newer PyTorch (>= 1.4) prefers scheduler.get_last_lr() here
            writer.add_scalar('learning rate', scheduler.get_lr()[-1], MyLogger.step)
            points, target = points.float().transpose(2, 1).cuda(), target.view(-1, 1)[:, 0].long().cuda()

            classifier.train()
            optimizer.zero_grad()
            # pdb.set_trace()
            if args.model == 'pointnet_semseg':
                seg_pred, trans_feat = classifier(points)
                seg_pred = seg_pred.contiguous().view(-1, NUM_CLASSES)
                loss = criterion(seg_pred, target, trans_feat)
            else:
                seg_pred = classifier(points)
                seg_pred = seg_pred.contiguous().view(-1, NUM_CLASSES)
                loss = criterion(seg_pred, target)

            loss.backward()
            optimizer.step()

            MyLogger.step_update(seg_pred.data.max(1)[1].cpu().numpy(),
                                 target.long().cpu().numpy(),
                                 loss.cpu().detach().numpy())
        MyLogger.epoch_summary(writer=writer, training=True, mode='semseg')

        '''=== Evaluating ==='''
        with torch.no_grad():
            classifier.eval()
            MyLogger.epoch_init(training=False)

            for points, target in tqdm(testDataLoader, total=len(testDataLoader), smoothing=0.9):
                points, target = points.transpose(2, 1).float().cuda(), target.view(-1, 1)[:, 0].long().cuda()
                if args.model == 'pointnet_semseg':
                    seg_pred, trans_feat = classifier(points)
                    seg_pred = seg_pred.contiguous().view(-1, NUM_CLASSES)
                    loss = criterion(seg_pred, target, trans_feat)
                else:
                    seg_pred = classifier(points)
                    seg_pred = seg_pred.contiguous().view(-1, NUM_CLASSES)
                    loss = criterion(seg_pred, target)
                MyLogger.step_update(seg_pred.data.max(1)[1].cpu().numpy(),
                                     target.long().cpu().numpy(),
                                     loss.cpu().detach().numpy())

            MyLogger.epoch_summary(writer=writer, training=False, mode='semseg')
            if MyLogger.save_model:
                state = {
                    'step': MyLogger.step,
                    'miou': MyLogger.best_miou,
                    'epoch': MyLogger.best_miou_epoch,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, MyLogger.savepath)

        scheduler.step()
        if args.scheduler == 'step':
            for param_group in optimizer.param_groups:
                param_group['lr'] = max(param_group['lr'], LEARNING_RATE_CLIP)
        if args.bn_decay:
            momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECAY ** (epoch // MOMENTUM_DECAY_STEP))
            if momentum < 0.01:
                momentum = 0.01
            print('BN momentum updated to: %f' % momentum)
            classifier = classifier.apply(lambda x: bn_momentum_adjust(x, momentum))

    MyLogger.train_summary(mode='semseg')
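
bn_momentum_adjust is a project helper applied via classifier.apply(...). A plausible minimal version (an assumption, not the verified OcCo source) simply overwrites the momentum of every batch-norm layer:

import torch.nn as nn

def bn_momentum_adjust(m, momentum):
    # nn.Module.apply passes every submodule in turn
    if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
        m.momentum = momentum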
Example #4
def main(args):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    def to_categorical(y, num_class):
        """ 1-hot encodes a tensor """
        new_y = torch.eye(num_class)[y.cpu().data.numpy(), ]
        if y.is_cuda:
            return new_y.cuda()
        return new_y

    ''' === Set up Loggers and Load Data === '''
    MyLogger = TrainLogger(args, name=args.model.upper(), subfold='partseg',
                           filename=args.mode + '_log', cls2name=seg_label_to_cat)
    writer = SummaryWriter(os.path.join(MyLogger.experiment_dir, 'runs'))
    root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'

    TRAIN_DATASET = PartNormalDataset(root=root, num_point=args.num_point, split='trainval', use_normal=args.normal)
    TEST_DATASET = PartNormalDataset(root=root, num_point=args.num_point, split='test', use_normal=args.normal)
    trainDataLoader = DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4)
    testDataLoader = DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)

    num_classes, num_part = 16, 50

    ''' === Load Model and Backup Scripts === '''
    channel_num = 6 if args.normal else 3
    MODEL = importlib.import_module(args.model)
    shutil.copy(os.path.abspath(__file__), MyLogger.log_dir)
    shutil.copy('./models/%s.py' % args.model, MyLogger.log_dir)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    classifier = MODEL.get_model(part_num=num_part, num_channel=channel_num, args=args).to(device)
    criterion = MODEL.get_loss().to(device)
    classifier = torch.nn.DataParallel(classifier)

    if args.restore:
        checkpoint = torch.load(args.restore_path)
        classifier = copy_parameters(classifier, checkpoint, verbose=True)
        MyLogger.logger.info('Use pre-trained weights from %s' % args.restore_path)
    else:
        MyLogger.logger.info('No pre-trained weights, start training from scratch...')
        if args.xavier_init:
            classifier = classifier.apply(weights_init)
            MyLogger.logger.info("Using Xavier weight initialisation")

    if args.mode == 'test':
        MyLogger.logger.info('\n\n')
        MyLogger.logger.info('=' * 33)
        MyLogger.logger.info('load parameters from %s' % args.restore_path)
        with torch.no_grad():
            test_metrics = {}
            total_correct, total_seen = 0, 0
            total_seen_class = [0 for _ in range(num_part)]
            total_correct_class = [0 for _ in range(num_part)]
            shape_ious = {cat: [] for cat in seg_classes.keys()}  # {shape: []}

            for points, label, target in tqdm(testDataLoader, total=len(testDataLoader), smoothing=0.9):
                classifier.eval()
                cur_batch_size, num_point, _ = points.size()
                vote_pool = torch.zeros(cur_batch_size, num_point, num_part).cuda()  # (batch, num point, num part)
                points, label, target = points.transpose(2, 1).float().cuda(), label.long().cuda(), target.numpy()
                
                ''' === generate predictions from raw output (multiple via voting) === '''
                for _ in range(args.num_votes):
                    if args.model == 'pointnet_partseg':
                        seg_pred, _ = classifier(points, to_categorical(label, num_classes))
                    else:
                        seg_pred = classifier(points, to_categorical(label, num_classes))
                    vote_pool += seg_pred  # accumulate predicted probabilities across votes
                
                seg_pred = vote_pool / args.num_votes
                cur_pred_val_logits = seg_pred.cpu().data.numpy()
                cur_pred_val = np.zeros((cur_batch_size, num_point)).astype(np.int32)

                for i in range(cur_batch_size):
                    cat = seg_label_to_cat[target[i, 0]]  # str, shape name
                    logits = cur_pred_val_logits[i, :, :]  # array, (num point, num part)
                    cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]], 1) + seg_classes[cat][0] 
                    # only consider parts from that shape

                ''' === calculate accuracy === '''
                total_correct += np.sum(cur_pred_val == target)
                total_seen += (cur_batch_size * num_point)

                for l in range(num_part):
                    total_seen_class[l] += np.sum(target == l)
                    total_correct_class[l] += (np.sum((cur_pred_val == l) & (target == l)))

                ''' === calculate iou === '''
                for i in range(cur_batch_size):
                    segl = target[i, :]  # array, (num point, )
                    segp = cur_pred_val[i, :]  # array, (num point, )
                    cat = seg_label_to_cat[segl[0]]  # str, shape name
                    part_ious = [0. for _ in range(len(seg_classes[cat]))]  # parts belong to that shape
                    
                    for l in seg_classes[cat]:
                        if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):  # part absent from both gt and prediction
                            part_ious[l - seg_classes[cat][0]] = 1.0
                        else:
                            iou = np.sum((segl == l) & (segp == l)) / float(np.sum((segl == l) | (segp == l)))
                            part_ious[l - seg_classes[cat][0]] = iou
                    shape_ious[cat].append(np.mean(part_ious))

        all_shape_ious = []
        for cat in shape_ious.keys():
            for iou in shape_ious[cat]:
                all_shape_ious.append(iou)
            shape_ious[cat] = np.mean(shape_ious[cat])
            
        mean_shape_ious = np.mean(list(shape_ious.values()))
        test_metrics['class_avg_iou'] = mean_shape_ious
        test_metrics['instance_avg_iou'] = np.mean(all_shape_ious)
        test_metrics['accuracy'] = total_correct / float(total_seen)
        test_metrics['class_avg_accuracy'] = np.mean(
            np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64))
        for cat in sorted(shape_ious.keys()):
            MyLogger.logger.info('test mIoU of %-14s %f' % (cat, shape_ious[cat]))

        MyLogger.logger.info('Accuracy is: %.5f' % test_metrics['accuracy'])
        MyLogger.logger.info('Class avg accuracy is: %.5f' % test_metrics['class_avg_accuracy'])
        MyLogger.logger.info('Class avg mIoU is: %.5f' % test_metrics['class_avg_iou'])
        MyLogger.logger.info('Instance avg mIoU is: %.5f' % test_metrics['instance_avg_iou'])
        sys.exit("Test Finished")

    if not args.use_sgd:
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.lr,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=1e-4)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=args.lr * 100,
                                    momentum=args.momentum,
                                    weight_decay=1e-4)
    if args.scheduler == 'step':
        scheduler = StepLR(optimizer, step_size=args.step_size, gamma=args.lr_decay)
    else:
        scheduler = CosineAnnealingLR(optimizer, T_max=args.epoch, eta_min=1e-3)

    LEARNING_RATE_CLIP = 1e-5
    MOMENTUM_ORIGINAL = 0.1
    MOMENTUM_DECAY = 0.5
    MOMENTUM_DECAY_STEP = args.step_size

    for epoch in range(MyLogger.epoch, args.epoch + 1):

        MyLogger.epoch_init()

        for points, label, target in tqdm(trainDataLoader, total=len(trainDataLoader), smoothing=0.9):

            if args.data_aug:
                points = points.data.numpy()
                points[:, :, :3] = random_scale_point_cloud(points[:, :, 0:3])
                points[:, :, :3] = random_shift_point_cloud(points[:, :, 0:3])
                points = torch.Tensor(points)

            points, label, target = points.transpose(2, 1).float().cuda(), label.long().cuda(), \
                                    target.view(-1, 1)[:, 0].long().cuda()
            classifier.train()
            optimizer.zero_grad()
            if args.model == 'pointnet_partseg':
                seg_pred, trans_feat = classifier(points, to_categorical(label, num_classes))
                seg_pred = seg_pred.contiguous().view(-1, num_part)
                loss = criterion(seg_pred, target, trans_feat)
            else:
                seg_pred = classifier(points, to_categorical(label, num_classes))
                seg_pred = seg_pred.contiguous().view(-1, num_part)
                loss = criterion(seg_pred, target)

            loss.backward()
            optimizer.step()
            MyLogger.step_update(seg_pred.data.max(1)[1].cpu().numpy(),
                                 target.long().cpu().numpy(),
                                 loss.cpu().detach().numpy())
        MyLogger.epoch_summary(writer=writer, training=True, mode='partseg')

        '''=== Evaluating ==='''
        with torch.no_grad():

            classifier.eval()
            MyLogger.epoch_init(training=False)

            for points, label, target in tqdm(testDataLoader, total=len(testDataLoader), smoothing=0.9):
                cur_batch_size, NUM_POINT, _ = points.size()
                points, label, target = points.transpose(2, 1).float().cuda(), label.long().cuda(), \
                                        target.view(-1, 1)[:, 0].long().cuda()
                if args.model == 'pointnet_partseg':
                    seg_pred, trans_feat = classifier(points, to_categorical(label, num_classes))
                    seg_pred = seg_pred.contiguous().view(-1, num_part)
                    loss = criterion(seg_pred, target, trans_feat)
                else:
                    seg_pred = classifier(points, to_categorical(label, num_classes))
                    seg_pred = seg_pred.contiguous().view(-1, num_part)
                    loss = criterion(seg_pred, target)

                MyLogger.step_update(seg_pred.data.max(1)[1].cpu().numpy(),
                                     target.long().cpu().numpy(),
                                     loss.cpu().detach().numpy())
            
            MyLogger.epoch_summary(writer=writer, training=False, mode='partseg')

            if MyLogger.save_model:
                state = {
                    'step': MyLogger.step,
                    'miou': MyLogger.best_miou,
                    'epoch': MyLogger.best_miou_epoch,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict()}
                torch.save(state, MyLogger.savepath)

            if epoch % 5 == 0:
                state = {
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict()}
                torch.save(state, MyLogger.savepath.replace('best_model', 'model_ep%d' % epoch))

            scheduler.step()
            if args.scheduler == 'step':
                for param_group in optimizer.param_groups:
                    param_group['lr'] = max(param_group['lr'], LEARNING_RATE_CLIP)
            if args.bn_decay:
                momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECAY ** (epoch // MOMENTUM_DECAY_STEP))
                if momentum < 0.01:
                    momentum = 0.01
                print('BN momentum updated to: %f' % momentum)
                classifier = classifier.apply(lambda x: bn_momentum_adjust(x, momentum))
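
The test branch above restricts each argmax to the parts belonging to the ground-truth shape category. A small self-contained illustration of that masked argmax (the seg_classes entry is hypothetical):

import numpy as np

seg_classes = {'Airplane': [0, 1, 2, 3]}          # hypothetical mapping
logits = np.random.randn(2048, 50)                # (num_point, num_part)
parts = seg_classes['Airplane']
pred = np.argmax(logits[:, parts], 1) + parts[0]  # map back to global part ids
assert parts[0] <= pred.min() and pred.max() <= parts[-1]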
Example #5
File: TSNE_Visu.py  Project: zeta1999/OcCo
                                                       bn=args.bn)
    TRAIN_DATASET = General_CLSDataLoader_HDF5(file_list=TRAIN_FILES)
    # TEST_DATASET = General_CLSDataLoader_HDF5(file_list=TEST_FILES)
    trainDataLoader = DataLoader(TRAIN_DATASET,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=4)
    # testDataLoader = DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4)

    MODEL = importlib.import_module(args.model)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder = MODEL.encoder(args=args, num_channel=3).to(device)
    encoder = torch.nn.DataParallel(encoder)

    checkpoint = torch.load(args.restore_path)
    encoder = copy_parameters(encoder, checkpoint, verbose=True)

    X_train, y_train, X_test, y_test = [], [], [], []
    with torch.no_grad():
        encoder.eval()

        for points, target in tqdm(trainDataLoader,
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = points.float().transpose(2, 1).cuda(), target.long().cuda()
            feats = encoder(points)
            X_train.append(feats.cpu().numpy())
            y_train.append(target.cpu().numpy())

        # for points, target in tqdm(testDataLoader, total=len(testDataLoader), smoothing=0.9):
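
The excerpt ends before the embedding step; a plausible continuation (an assumption, not verified project code) pools the collected features and runs scikit-learn's t-SNE:

import numpy as np
from sklearn.manifold import TSNE

X = np.concatenate(X_train, axis=0)   # (num_objects, feat_dim)
y = np.concatenate(y_train, axis=0)
emb = TSNE(n_components=2, init='pca', random_state=0).fit_transform(X)
# emb[:, 0] and emb[:, 1] can then be scattered and coloured by y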