Example #1
Score: 0
File: eval.py — Project: fsx05021/3D
def evaluate(configs=None):
    """Evaluate a point-cloud segmentation model scene by scene.

    Loads the best checkpoint, runs voted inference over per-scene HDF5
    windows, accumulates per-class statistics (ground-truth counts,
    prediction counts, true positives) and prints accuracy/IoU.  Returns
    early (printing cached stats) when ``configs.evaluate.stats_path``
    already exists, and returns silently when no checkpoint is found.

    Args:
        configs: prepared configuration object; built via ``prepare()``
            when None.

    NOTE(review): relies on module-level names not visible in this chunk
    (``np``, ``os``, ``random``, ``prepare``, ``update_scene_predictions``,
    ``update_stats``) — confirm they are imported at the top of the file.
    """
    configs = prepare() if configs is None else configs

    # Deferred imports keep module import cheap when evaluation never runs.
    import h5py
    import math
    import torch
    import torch.backends.cudnn as cudnn
    import torch.nn.functional as F
    from tqdm import tqdm

    #####################
    # Kernel Definition #
    #####################

    def print_stats(stats):
        # stats: (3, num_classes, num_scenes) array where
        #   stats[0] = ground-truth counts, stats[1] = prediction counts,
        #   stats[2] = true positives (see the allocation below).
        stats = stats.sum(axis=-1)  # aggregate over all scenes
        # Per-class IoU: TP / (GT + Pred - TP).
        iou = stats[2] / (stats[0] + stats[1] - stats[2])
        print('classes: {}'.format('  '.join(map('{:>8d}'.format, stats[0].astype(np.int64)))))
        print('positiv: {}'.format('  '.join(map('{:>8d}'.format, stats[1].astype(np.int64)))))
        print('truepos: {}'.format('  '.join(map('{:>8d}'.format, stats[2].astype(np.int64)))))
        print('clssiou: {}'.format('  '.join(map('{:>8.2f}'.format, iou * 100))))
        print('meanAcc: {:4.2f}'.format(stats[2].sum() / stats[1].sum() * 100))
        print('meanIoU: {:4.2f}'.format(iou.mean() * 100))

    ###########
    # Prepare #
    ###########

    if configs.device == 'cuda':
        cudnn.benchmark = True
        # Deterministic mode disables cuDNN autotuning.
        if configs.get('deterministic', False):
            cudnn.deterministic = True
            cudnn.benchmark = False
    # Fall back to a torch-derived seed when none is configured, then seed
    # every RNG so the random vote shuffles below are reproducible.
    if ('seed' not in configs) or (configs.seed is None):
        configs.seed = torch.initial_seed() % (2 ** 32 - 1)
    seed = configs.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    print(configs)

    # Short-circuit: reuse previously computed statistics if present.
    if os.path.exists(configs.evaluate.stats_path):
        stats = np.load(configs.evaluate.stats_path)
        print_stats(stats)
        return

    #################################
    # Initialize DataLoaders, Model #
    #################################

    print(f'\n==> loading dataset "{configs.dataset}"')
    dataset = configs.dataset()[configs.dataset.split]

    print(f'\n==> creating model "{configs.model}"')
    model = configs.model()
    if configs.device == 'cuda':
        model = torch.nn.DataParallel(model)
    model = model.to(configs.device)

    if os.path.exists(configs.evaluate.best_checkpoint_path):
        print(f'==> loading checkpoint "{configs.evaluate.best_checkpoint_path}"')
        checkpoint = torch.load(configs.evaluate.best_checkpoint_path)
        model.load_state_dict(checkpoint.pop('model'))
        del checkpoint
    else:
        # Nothing to evaluate without a trained checkpoint.
        return

    model.eval()

    ##############
    # Evaluation #
    ##############

    total_num_scenes = len(dataset.scene_list)
    # stats[0] = ground-truth counts, stats[1] = prediction counts,
    # stats[2] = true positives; one column per scene.
    stats = np.zeros((3, configs.data.num_classes, total_num_scenes))

    for scene_index, (scene, scene_files) in enumerate(tqdm(dataset.scene_list.items(), desc='eval', ncols=0)):
        ground_truth = np.load(os.path.join(scene, 'label.npy')).reshape(-1)
        total_num_points_in_scene = ground_truth.shape[0]
        # Per-point vote confidence and predicted label; -1 marks points
        # never covered by any window.
        confidences = np.zeros(total_num_points_in_scene, dtype=np.float32)
        predictions = np.full(total_num_points_in_scene, -1, dtype=np.int64)

        for filename in scene_files:
            # NOTE(review): h5f is never closed — consider a context manager.
            h5f = h5py.File(filename, 'r')
            scene_data = h5f['data'][...].astype(np.float32)
            scene_num_points = h5f['data_num'][...].astype(np.int64)
            # Maps window-local point indices back to scene point indices.
            window_to_scene_mapping = h5f['indices_split_to_full'][...].astype(np.int64)

            num_windows, max_num_points_per_window, num_channels = scene_data.shape
            # Each window is voted num_votes times; round up so the voted
            # points fill whole model batches of dataset.num_points each.
            extra_batch_size = configs.evaluate.num_votes * math.ceil(max_num_points_per_window / dataset.num_points)
            total_num_voted_points = extra_batch_size * dataset.num_points

            # Process windows in evaluation batches.
            for min_window_index in range(0, num_windows, configs.evaluate.batch_size):
                max_window_index = min(min_window_index + configs.evaluate.batch_size, num_windows)
                batch_size = max_window_index - min_window_index
                window_data = scene_data[np.arange(min_window_index, max_window_index)]
                window_data = window_data.reshape(batch_size, -1, num_channels)

                # repeat, shuffle and tile
                # TODO: speedup here
                batched_inputs = np.zeros((batch_size, total_num_voted_points, num_channels), dtype=np.float32)
                batched_shuffled_point_indices = np.zeros((batch_size, total_num_voted_points), dtype=np.int64)
                for relative_window_index in range(batch_size):
                    num_points_in_window = scene_num_points[relative_window_index + min_window_index]
                    # Tile the valid point indices until they fill the voted
                    # budget, then shuffle so votes cover the window evenly.
                    num_repeats = math.ceil(total_num_voted_points / num_points_in_window)
                    shuffled_point_indices = np.tile(np.arange(num_points_in_window), num_repeats)
                    shuffled_point_indices = shuffled_point_indices[:total_num_voted_points]
                    np.random.shuffle(shuffled_point_indices)
                    batched_shuffled_point_indices[relative_window_index] = shuffled_point_indices
                    batched_inputs[relative_window_index] = window_data[relative_window_index][shuffled_point_indices]

                # model inference
                # Reshape to (batch * extra, num_points, C) then transpose to
                # channels-first (B, C, N) as the model expects.
                inputs = torch.from_numpy(
                    batched_inputs.reshape((batch_size * extra_batch_size, dataset.num_points, -1)).transpose(0, 2, 1)
                ).float().to(configs.device)
                with torch.no_grad():
                    # Per-point class confidence and label (max over class dim).
                    batched_confidences, batched_predictions = F.softmax(model(inputs), dim=1).max(dim=1)
                    batched_confidences = batched_confidences.view(batch_size, total_num_voted_points).cpu().numpy()
                    batched_predictions = batched_predictions.view(batch_size, total_num_voted_points).cpu().numpy()

                # Scatter window votes back onto scene points — presumably
                # keeping the highest-confidence vote per point; confirm in
                # update_scene_predictions (defined elsewhere in the file).
                update_scene_predictions(batched_confidences, batched_predictions, batched_shuffled_point_indices,
                                         confidences, predictions, window_to_scene_mapping,
                                         total_num_voted_points, batch_size, min_window_index)

        # update stats
        update_stats(stats, ground_truth, predictions, scene_index, total_num_points_in_scene)

    # Cache statistics for future runs, then report.
    np.save(configs.evaluate.stats_path, stats)
    print_stats(stats)
Example #2
Score: 0
File: train.py — Project: rosyapril/pvcnn
def main():
    """Training entry point: train the configured model epoch by epoch,
    evaluate on every non-train split, and checkpoint (tracking the best
    metric).  Delegates to the configured evaluation routine when
    ``configs.evaluate`` is set.

    NOTE(review): relies on module-level names not visible in this chunk
    (``os``, ``random``, ``shutil``, ``prepare``) — confirm they are
    imported at the top of the file.
    """
    configs = prepare()
    # Evaluation-only mode: hand off to the configured callable and stop.
    if configs.evaluate is not None:
        configs.evaluate(configs)
        return

    # Deferred imports keep startup cheap in evaluation-only mode.
    import numpy as np
    import tensorboardX
    import torch
    import torch.backends.cudnn as cudnn
    from torch.utils.data import DataLoader
    from tqdm import tqdm

    ################################
    # Train / Eval Kernel Function #
    ################################

    # train kernel
    def train(model, loader, criterion, optimizer, scheduler, current_step,
              writer):
        # One epoch: forward, log batch loss at a step counter advanced by
        # the batch size, backward, optimizer step.
        model.train()
        for inputs, targets in tqdm(loader, desc='train', ncols=0):
            inputs = inputs.to(configs.device, non_blocking=True)
            targets = targets.to(configs.device, non_blocking=True)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            writer.add_scalar('loss/train', loss.item(), current_step)
            current_step += targets.size(0)
            loss.backward()
            optimizer.step()
        # Per-epoch learning-rate schedule step.
        if scheduler is not None:
            scheduler.step()

    # evaluate kernel
    def evaluate(model, loader, split='test'):
        # Instantiate one meter per configured metric, keyed by split name.
        meters = {}
        for k, meter in configs.train.meters.items():
            meters[k.format(split)] = meter()
        model.eval()
        with torch.no_grad():
            for inputs, targets in tqdm(loader, desc=split, ncols=0):
                inputs = inputs.to(configs.device, non_blocking=True)
                targets = targets.to(configs.device, non_blocking=True)
                outputs = model(inputs)
                for meter in meters.values():
                    meter.update(outputs, targets)
        # Reduce each meter to its final scalar value.
        for k, meter in meters.items():
            meters[k] = meter.compute()
        return meters

    ###########
    # Prepare #
    ###########

    if configs.device == 'cuda':
        cudnn.benchmark = True
    # Seed all RNGs only when a seed was explicitly configured; determinism
    # additionally disables cuDNN autotuning.
    if 'seed' in configs and configs.seed is not None:
        random.seed(configs.seed)
        np.random.seed(configs.seed)
        torch.manual_seed(configs.seed)
        if configs.device == 'cuda' and configs.get('deterministic', True):
            cudnn.deterministic = True
            cudnn.benchmark = False

    print(configs)

    #####################################################################
    # Initialize DataLoaders, Model, Criterion, LRScheduler & Optimizer #
    #####################################################################

    print('\n==> loading dataset "{}"'.format(configs.dataset))
    dataset = configs.dataset()
    loaders = {}
    for split in dataset:
        loaders[split] = DataLoader(
            dataset[split],
            shuffle=(split == 'train'),
            batch_size=configs.train.batch_size,
            num_workers=configs.data.num_workers,
            pin_memory=True,
            # fixme: a quick fix for numpy random seed
            worker_init_fn=lambda x: np.random.seed())

    print('\n==> creating model "{}"'.format(configs.model))
    model = configs.model()
    if configs.device == 'cuda':
        model = torch.nn.DataParallel(model)
    model = model.to(configs.device)
    criterion = configs.train.criterion().to(configs.device)
    optimizer = configs.train.optimizer(model.parameters())

    # Resume from checkpoint if one exists; recover the epoch counter and
    # the best metric value recorded so far.
    last_epoch, best_metric = -1, None
    if os.path.exists(configs.train.checkpoint_path):
        print('==> loading checkpoint "{}"'.format(
            configs.train.checkpoint_path))
        checkpoint = torch.load(configs.train.checkpoint_path)
        print(' => loading model')
        model.load_state_dict(checkpoint.pop('model'))
        if 'optimizer' in checkpoint and checkpoint['optimizer'] is not None:
            print(' => loading optimizer')
            optimizer.load_state_dict(checkpoint.pop('optimizer'))
        last_epoch = checkpoint.get('epoch', last_epoch)
        best_metric = checkpoint.get('meters', {}).get(
            '{}_best'.format(configs.train.metric), best_metric)

    # Create the scheduler aligned with the resumed epoch, if configured.
    if 'scheduler' in configs.train and configs.train.scheduler is not None:
        configs.train.scheduler.last_epoch = last_epoch
        print('==> creating scheduler "{}"'.format(configs.train.scheduler))
        scheduler = configs.train.scheduler(optimizer)
    else:
        scheduler = None

    ############
    # Training #
    ############

    # Already finished: just evaluate the non-train splits and report.
    if last_epoch >= configs.train.num_epochs:
        meters = dict()
        for split, loader in loaders.items():
            if split != 'train':
                meters.update(evaluate(model, loader=loader, split=split))
        for k, meter in meters.items():
            print('[{}] = {:2f}'.format(k, meter))
        return

    with tensorboardX.SummaryWriter(configs.train.save_path) as writer:
        for current_epoch in range(last_epoch + 1, configs.train.num_epochs):
            # Global step = samples seen before this epoch.
            current_step = current_epoch * len(dataset['train'])

            # train
            print('\n==> training epoch {}/{}'.format(
                current_epoch, configs.train.num_epochs))
            train(model,
                  loader=loaders['train'],
                  criterion=criterion,
                  optimizer=optimizer,
                  scheduler=scheduler,
                  current_step=current_step,
                  writer=writer)
            current_step += len(dataset['train'])

            # evaluate
            meters = dict()
            for split, loader in loaders.items():
                if split != 'train':
                    meters.update(evaluate(model, loader=loader, split=split))

            # check whether it is the best
            best = False
            if 'metric' in configs.train and configs.train.metric is not None:
                if best_metric is None or best_metric < meters[
                        configs.train.metric]:
                    best_metric, best = meters[configs.train.metric], True
                meters[configs.train.metric + '_best'] = best_metric
            # log in tensorboard
            for k, meter in meters.items():
                print('[{}] = {:2f}'.format(k, meter))
                writer.add_scalar(k, meter, current_step)

            # save checkpoint
            torch.save(
                {
                    'epoch': current_epoch,
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'meters': meters,
                    'configs': configs,
                }, configs.train.checkpoint_path)
            # Copy aside the best-so-far checkpoint.
            if best:
                shutil.copyfile(configs.train.checkpoint_path,
                                configs.train.best_checkpoint_path)
            print('[save_path] = {}'.format(configs.train.save_path))
Example #3
Score: 0
def main():
    """Adversarial domain-adaptation training entry point.

    Trains a detection model on a labeled source dataset while adapting to
    an unlabeled target dataset via a discrepancy (adversarial) loss between
    two classifier heads; evaluates on both datasets each epoch and keeps
    per-metric best checkpoints.

    NOTE(review): relies on module-level names not visible in this chunk
    (``os``, ``math``, ``random``, ``shutil``, ``prepare``,
    ``loop_iterable``, ``trange``, ``discrepancy_loss``) — confirm they are
    imported at the top of the file.
    """
    configs = prepare()
    # Evaluation-only mode: hand off to the configured callable and stop.
    if configs.evaluate is not None:
        configs.evaluate.fn(configs)
        return

    import numpy as np
    import tensorboardX
    import torch
    import torch.backends.cudnn as cudnn
    from torch.utils.data import DataLoader
    from tqdm import tqdm

    ################################
    # Train / Eval Kernel Function #
    ################################

    def adjust_learning_rate(optimizer, epoch, args_lr):
        """Sets the learning rate to the initial LR decayed by half by every 5 or 10 epochs"""
        # NOTE(review): references ``writer``, which is only bound inside the
        # ``with tensorboardX.SummaryWriter(...)`` block below — calling this
        # before/outside that block raises NameError.  It is not called in
        # the visible code.
        if epoch > 0:
            if epoch <= 30:
                lr = args_lr * (0.5**(epoch // 5))
            else:
                lr = args_lr * (0.5**(epoch // 10))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
            writer.add_scalar('lr_dis', lr, epoch)

    # train kernel
    def train(model, source_loader, target_loader, criterion, optimizer_g,
              optimizer_cls, scheduler_g, scheduler_cls, current_step, writer,
              cons):
        # One epoch: supervised loss on source batches plus an adversarial
        # discrepancy loss on target batches; the generator is additionally
        # trained gen_num_train extra times per step.
        model.train()
        loss_total = 0
        loss_adv_total = 0
        data_total = 0

        # Pair source and target batches in lockstep (both cycled).
        batch_iterator = zip(loop_iterable(source_loader),
                             loop_iterable(target_loader))

        for _ in trange(len(source_loader)):
            (inputs, targets), (inputs_t, _) = next(batch_iterator)

            # Move source inputs to the device (dict-valued or tensor).
            if isinstance(inputs, dict):
                for k, v in inputs.items():
                    batch_size = v.size(0)
                    inputs[k] = v.to(configs.device, non_blocking=True)
            else:
                batch_size = inputs.size(0)
                inputs = inputs.to(configs.device, non_blocking=True)

            # Move target inputs to the device (dict-valued or tensor).
            if isinstance(inputs_t, dict):
                for k, v in inputs_t.items():
                    batch_size = v.size(0)
                    inputs_t[k] = v.to(configs.device, non_blocking=True)
            else:
                batch_size = inputs_t.size(0)
                inputs_t = inputs_t.to(configs.device, non_blocking=True)

            # Move targets to the device (dict-valued or tensor).
            if isinstance(targets, dict):
                for k, v in targets.items():
                    targets[k] = v.to(configs.device, non_blocking=True)
            else:
                targets = targets.to(configs.device, non_blocking=True)

            outputs = model(inputs)

            # Two classifier-head predictions on the target batch, in
            # adaptation mode (gradient-reversal strength = cons).
            pred_t1, pred_t2 = model.module.inst_seg_net(
                {
                    'features': inputs_t['features'],
                    'one_hot_vectors': inputs_t['one_hot_vectors']
                },
                constant=cons,
                adaptation=True)

            # Supervised loss on the source batch.
            loss_s = criterion(outputs, targets)

            # Adversarial loss
            loss_adv = -1 * discrepancy_loss(pred_t1, pred_t2)

            loss = loss_s + loss_adv
            loss.backward()
            optimizer_g.step()
            optimizer_cls.step()
            optimizer_g.zero_grad()
            optimizer_cls.zero_grad()

            loss_adv_total += loss_adv.item() * batch_size

            # Gen Training
            # Extra generator-only updates against the frozen classifiers.
            for _ in range(configs.train.gen_num_train):
                pred_t1, pred_t2 = model.module.inst_seg_net(
                    {
                        'features': inputs_t['features'],
                        'one_hot_vectors': inputs_t['one_hot_vectors']
                    },
                    constant=cons,
                    adaptation=True)
                loss_adv = -1 * discrepancy_loss(pred_t1, pred_t2)
                loss_adv.backward()
                loss_adv_total += loss_adv.item() * batch_size
                optimizer_g.step()
                optimizer_g.zero_grad()

            loss_total += loss_s.item() * batch_size
            data_total += batch_size

            # Log running-average losses at the current sample step.
            writer.add_scalar('loss_s/train', loss_total / data_total,
                              current_step)
            writer.add_scalar('loss_adv/train', loss_adv_total / data_total,
                              current_step)
            current_step += batch_size

        # Per-epoch scheduler steps.
        if scheduler_g is not None:
            scheduler_g.step()

        if scheduler_cls is not None:
            scheduler_cls.step()

    # evaluate kernel
    def evaluate(model, loader, split='test'):
        # Instantiate one meter per configured metric, keyed by split name.
        meters = {}
        for k, meter in configs.train.meters.items():
            meters[k.format(split)] = meter()
        model.eval()
        with torch.no_grad():
            for inputs, targets in tqdm(loader, desc=split, ncols=0):
                if isinstance(inputs, dict):
                    for k, v in inputs.items():
                        inputs[k] = v.to(configs.device, non_blocking=True)
                else:
                    inputs = inputs.to(configs.device, non_blocking=True)
                if isinstance(targets, dict):
                    for k, v in targets.items():
                        targets[k] = v.to(configs.device, non_blocking=True)
                else:
                    targets = targets.to(configs.device, non_blocking=True)
                outputs = model(inputs)
                for meter in meters.values():
                    meter.update(outputs, targets)
        # Reduce each meter to its final scalar value.
        for k, meter in meters.items():
            meters[k] = meter.compute()
        return meters

    ###########
    # Prepare #
    ###########

    if configs.device == 'cuda':
        cudnn.benchmark = True
        # Deterministic mode disables cuDNN autotuning.
        if configs.get('deterministic', False):
            cudnn.deterministic = True
            cudnn.benchmark = False
    # Fall back to a torch-derived seed when none is configured, then seed
    # every RNG (workers below reuse this seed plus their worker id).
    if ('seed' not in configs) or (configs.seed is None):
        configs.seed = torch.initial_seed() % (2**32 - 1)
    seed = configs.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    print(configs)

    #####################################################################
    # Initialize DataLoaders, Model, Criterion, LRScheduler & Optimizer #
    #####################################################################

    print(f'\n==> loading source dataset "{configs.source_dataset}"')
    source_dataset = configs.source_dataset()
    source_loaders = {
        "train":
        DataLoader(
            source_dataset["train"],
            shuffle=True,
            batch_size=configs.train.batch_size,
            drop_last=True,
            num_workers=configs.data.num_workers,
            pin_memory=True,
            worker_init_fn=lambda worker_id: np.random.seed(seed + worker_id))
    }

    print(f'\n==> loading target dataset "{configs.target_dataset}"')
    target_dataset = configs.target_dataset()
    target_loaders = {}
    for split in target_dataset:
        target_loaders[split] = DataLoader(
            target_dataset[split],
            shuffle=(split == 'train'),
            batch_size=configs.train.batch_size,
            drop_last=True,
            num_workers=configs.data.num_workers,
            pin_memory=True,
            worker_init_fn=lambda worker_id: np.random.seed(seed + worker_id))

    print(f'\n==> creating model "{configs.model}"')
    model = configs.model()
    if configs.device == 'cuda':
        model = torch.nn.DataParallel(model)
    model = model.to(configs.device)
    criterion = configs.train.criterion().to(configs.device)
    #params
    # Generator parameters: the segmentation backbone minus offset heads.
    gen_params = [{
        'params': v
    } for k, v in model.module.inst_seg_net.g.named_parameters()
                  if 'pred_offset' not in k]

    # Classifier parameters: both seg heads plus center/box estimation nets.
    cls_params = [{
        'params': model.module.inst_seg_net.c1.parameters()
    }, {
        'params': model.module.inst_seg_net.c2.parameters()
    }, {
        'params': model.module.center_reg_nets[0].parameters()
    }, {
        'params': model.module.center_reg_nets[1].parameters()
    }, {
        'params': model.module.box_est_nets[0].parameters()
    }, {
        'params': model.module.box_est_nets[1].parameters()
    }]

    optimizer_g = configs.train.optimizer_g(gen_params)
    optimizer_cls = configs.train.optimizer_cls(cls_params)
    # optimizer_dis = configs.train.optimizer_dis(dis_params)

    # Resume state: epoch counter and best value per tracked metric.
    last_epoch, best_metrics = -1, {m: None for m in configs.train.metrics}

    if os.path.exists(configs.train.checkpoint_path):

        print(f'==> loading checkpoint "{configs.train.checkpoint_path}"')
        checkpoint = torch.load(configs.train.checkpoint_path)

        print(' => loading model')
        model.load_state_dict(checkpoint.pop('model'))

        if 'optimizer_g' in checkpoint and checkpoint[
                'optimizer_g'] is not None:
            print(' => loading optimizer_g')
            optimizer_g.load_state_dict(checkpoint.pop('optimizer_g'))

        if 'optimizer_cls' in checkpoint and checkpoint[
                'optimizer_cls'] is not None:
            print(' => loading optimizer_cls')
            optimizer_cls.load_state_dict(checkpoint.pop('optimizer_cls'))

        last_epoch = checkpoint.get('epoch', last_epoch)
        meters = checkpoint.get('meters', {})

        # Restore the best-so-far value of each tracked metric.
        for m in configs.train.metrics:
            best_metrics[m] = meters.get(m + '_best', best_metrics[m])

        del checkpoint

    # Create schedulers aligned with the resumed epoch, if configured.
    if 'scheduler_g' in configs.train and configs.train.scheduler_g is not None:
        configs.train.scheduler_g.last_epoch = last_epoch
        print(f'==> creating scheduler "{configs.train.scheduler_g}"')
        scheduler_g = configs.train.scheduler_g(optimizer_g)
    else:
        scheduler_g = None

    if 'scheduler_c' in configs.train and configs.train.scheduler_c is not None:
        configs.train.scheduler_c.last_epoch = last_epoch
        print(f'==> creating scheduler "{configs.train.scheduler_c}"')
        scheduler_c = configs.train.scheduler_c(optimizer_cls)
    else:
        scheduler_c = None

    ############
    # Training #
    ############

    # Already finished: evaluate the target non-train splits and report.
    if last_epoch >= configs.train.num_epochs:
        meters = dict()
        for split, loader in target_loaders.items():
            if split != 'train':
                meters.update(evaluate(model, loader=loader, split=split))
        for k, meter in meters.items():
            print(f'[{k}] = {meter:2f}')
        return

    with tensorboardX.SummaryWriter(configs.train.save_path) as writer:
        # Steps per epoch = size of the smaller of the two train sets.
        step_size = min(len(source_dataset['train']),
                        len(target_dataset['train']))

        for current_epoch in range(last_epoch + 1, configs.train.num_epochs):
            current_step = current_epoch * step_size
            # Adaptation strength ramps from ~0 to 1 over training (sine).
            cons = math.sin(
                (current_epoch + 1) / configs.train.num_epochs * math.pi / 2)

            # NOTE(review): these raise AttributeError when scheduler_g /
            # scheduler_c is None (the configs may omit them) — confirm both
            # schedulers are always configured.
            writer.add_scalar('lr_g', scheduler_g.get_lr()[0], current_epoch)
            writer.add_scalar('lr_c', scheduler_c.get_lr()[0], current_epoch)

            # train
            print(
                f'\n==> training epoch {current_epoch}/{configs.train.num_epochs}'
            )
            train(model,
                  source_loader=source_loaders['train'],
                  target_loader=target_loaders['train'],
                  criterion=criterion,
                  optimizer_g=optimizer_g,
                  optimizer_cls=optimizer_cls,
                  scheduler_g=scheduler_g,
                  scheduler_cls=scheduler_c,
                  current_step=current_step,
                  writer=writer,
                  cons=cons)
            current_step += step_size

            # evaluate
            # Source-domain evaluation (printed only, not logged/tracked).
            meters = dict()
            for split, loader in source_loaders.items():
                if split != 'train':
                    meters.update(evaluate(model, loader=loader, split=split))
            for k, meter in meters.items():
                print(f'Source [{k}] = {meter:2f}')

            # Target-domain evaluation drives best-checkpoint tracking.
            meters = dict()
            for split, loader in target_loaders.items():
                if split != 'train':
                    meters.update(evaluate(model, loader=loader, split=split))

            # check whether it is the best
            best = {m: False for m in configs.train.metrics}
            for m in configs.train.metrics:
                if best_metrics[m] is None or best_metrics[m] < meters[m]:
                    best_metrics[m], best[m] = meters[m], True
                meters[m + '_best'] = best_metrics[m]
            # log in tensorboard
            for k, meter in meters.items():
                print(f'Target [{k}] = {meter:2f}')
                writer.add_scalar(k, meter, current_step)

            # save checkpoint
            torch.save(
                {
                    'epoch': current_epoch,
                    'model': model.state_dict(),
                    'optimizer_g': optimizer_g.state_dict(),
                    'optimizer_cls': optimizer_cls.state_dict(),
                    'meters': meters,
                    'configs': configs,
                }, configs.train.checkpoint_path)
            # Per-epoch snapshot plus per-metric and overall best copies.
            shutil.copyfile(
                configs.train.checkpoint_path,
                configs.train.checkpoints_path.format(current_epoch))
            for m in configs.train.metrics:
                if best[m]:
                    shutil.copyfile(configs.train.checkpoint_path,
                                    configs.train.best_checkpoint_paths[m])
            if best.get(configs.train.metric, False):
                shutil.copyfile(configs.train.checkpoint_path,
                                configs.train.best_checkpoint_path)
            print(f'[save_path] = {configs.train.save_path}')
Example #4
Score: 0
def evaluate(configs=None):
    """Evaluate a 3D box-prediction model, optionally over multiple
    randomly-seeded test runs.

    For each test run: predicts center / heading / size per sample, decodes
    heading and size from the argmax bin/template plus residuals, writes
    KITTI-style prediction files, and scores them with ``eval_from_files``.
    With ``num_tests > 1`` the per-class AP results are aggregated and a
    mean/std/max summary is printed at the end.

    Args:
        configs: prepared configuration object; built via ``prepare()``
            when None.

    NOTE(review): relies on module-level names not visible in this chunk
    (``np``, ``os``, ``random``, ``prepare``, ``write_predictions``,
    ``update_predictions``) — confirm they are imported at the top of the
    file.
    """
    configs = prepare() if configs is None else configs

    import time

    import torch
    import torch.backends.cudnn as cudnn
    from torch.utils.data import DataLoader
    from tqdm import tqdm

    from ..utils import eval_from_files

    ###########
    # Prepare #
    ###########

    if configs.device == 'cuda':
        cudnn.benchmark = True
        # Deterministic mode disables cuDNN autotuning.
        if configs.get('deterministic', False):
            cudnn.deterministic = True
            cudnn.benchmark = False
    # NOTE(review): this configured seed is never used below — every test
    # run draws a fresh time-based seed instead; confirm that is intended.
    if ('seed' not in configs) or (configs.seed is None):
        configs.seed = torch.initial_seed() % (2 ** 32 - 1)

    # Multi-test mode: derive per-test stats/prediction path templates.
    if configs.evaluate.num_tests > 1:
        results = dict()
        stats_path = os.path.join(configs.evaluate.stats_path.replace('.npy', '.t'), 'best.eval.t{}.npy')
        predictions_path = os.path.join(configs.evaluate.predictions_path + '.t', 'best.predictions.t{}')
        os.makedirs(os.path.dirname(stats_path), exist_ok=True)
        os.makedirs(os.path.dirname(predictions_path), exist_ok=True)

    #################################
    # Initialize DataLoaders, Model #
    #################################
    print(f'\n==> loading dataset "{configs.dataset}"')
    dataset = configs.dataset()[configs.dataset.split]

    print(f'\n==> creating model "{configs.model}"')
    model = configs.model()
    if configs.device == 'cuda':
        model = torch.nn.DataParallel(model)
    model = model.to(configs.device)

    if os.path.exists(configs.evaluate.best_checkpoint_path):
        print(f'==> loading checkpoint "{configs.evaluate.best_checkpoint_path}"')
        checkpoint = torch.load(configs.evaluate.best_checkpoint_path)
        model.load_state_dict(checkpoint.pop('model'))
        del checkpoint
    else:
        # Nothing to evaluate without a trained checkpoint.
        return

    model.eval()

    for test_index in range(configs.evaluate.num_tests):
        if test_index == 0:
            print(configs)

        # NOTE(review): `test_index >= 0` is always true, so a fresh
        # time-based seed is drawn every iteration; the guard looks like it
        # was meant to skip re-seeding somewhere — confirm intent.
        if test_index >= 0:
            seed = random.randint(1, int(time.time())) % (2 ** 32 - 1)
            print(f'\n==> Test [{test_index:02d}/{configs.evaluate.num_tests:02d}] initial seed\n[seed] = {seed}')
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)

        # Point this run's output paths at the per-test files.
        if configs.evaluate.num_tests > 1:
            configs.evaluate.stats_path = stats_path.format(test_index)
            configs.evaluate.predictions_path = predictions_path.format(test_index)

        # Cache hit: rescore previously saved predictions instead of
        # re-running inference.
        if os.path.exists(configs.evaluate.stats_path):
            print(f'==> hit {configs.evaluate.stats_path}')
            predictions = np.load(configs.evaluate.stats_path)
            image_ids = write_predictions(configs.evaluate.predictions_path, ids=dataset.data.ids,
                                          classes=dataset.data.class_names, boxes_2d=dataset.data.boxes_2d,
                                          predictions=predictions,
                                          image_id_file_path=configs.evaluate.image_id_file_path)
            _, current_results = eval_from_files(prediction_folder=configs.evaluate.predictions_path,
                                                 ground_truth_folder=configs.evaluate.ground_truth_path,
                                                 image_ids=image_ids, verbose=True)
            if configs.evaluate.num_tests == 1:
                return
            else:
                # Accumulate this run's per-class, per-kind AP values.
                for class_name, v in current_results.items():
                    if class_name not in results:
                        results[class_name] = dict()
                    for kind, r in v.items():
                        if kind not in results[class_name]:
                            results[class_name][kind] = []
                        results[class_name][kind].append(r)
                continue

        loader = DataLoader(
            dataset, shuffle=False, batch_size=configs.evaluate.batch_size,
            num_workers=configs.data.num_workers, pin_memory=True,
            worker_init_fn=lambda worker_id: np.random.seed(seed + worker_id)
        )

        ##############
        # Evaluation #
        ##############

        # One 8-value prediction row per sample (layout defined by
        # update_predictions, not visible here).
        predictions = np.zeros((len(dataset), 8))
        size_templates = configs.data.size_templates.to(configs.device)
        # Bin centers evenly spaced over [0, 2*pi).
        heading_angle_bin_centers = torch.arange(
            0, 2 * np.pi, 2 * np.pi / configs.data.num_heading_angle_bins).to(configs.device)
        current_step = 0

        with torch.no_grad():
            for inputs, targets in tqdm(loader, desc='eval', ncols=0):
                for k, v in inputs.items():
                    inputs[k] = v.to(configs.device, non_blocking=True)
                outputs = model(inputs)

                center = outputs['center']  # (B, 3)
                heading_scores = outputs['heading_scores']  # (B, NH)
                heading_residuals = outputs['heading_residuals']  # (B, NH)
                size_scores = outputs['size_scores']  # (B, NS)
                size_residuals = outputs['size_residuals']  # (B, NS, 3)

                batch_size = center.size(0)
                batch_id = torch.arange(batch_size, device=center.device)
                # Decode heading: best bin's center plus that bin's residual.
                heading_bin_id = torch.argmax(heading_scores, dim=1)
                heading = heading_angle_bin_centers[heading_bin_id] + heading_residuals[batch_id, heading_bin_id]  # (B, )
                # Decode size: best template plus that template's residual.
                size_template_id = torch.argmax(size_scores, dim=1)
                size = size_templates[size_template_id] + size_residuals[batch_id, size_template_id]  # (B, 3)

                center = center.cpu().numpy()
                heading = heading.cpu().numpy()
                size = size.cpu().numpy()
                rotation_angle = targets['rotation_angle'].cpu().numpy()  # (B, )
                rgb_score = targets['rgb_score'].cpu().numpy()  # (B, )

                # Write this batch's decoded boxes into the preallocated rows.
                update_predictions(predictions=predictions, center=center, heading=heading, size=size,
                                   rotation_angle=rotation_angle, rgb_score=rgb_score,
                                   current_step=current_step, batch_size=batch_size)
                current_step += batch_size

        # Persist raw predictions, emit prediction files, and score them.
        np.save(configs.evaluate.stats_path, predictions)
        image_ids = write_predictions(configs.evaluate.predictions_path, ids=dataset.data.ids,
                                      classes=dataset.data.class_names, boxes_2d=dataset.data.boxes_2d,
                                      predictions=predictions, image_id_file_path=configs.evaluate.image_id_file_path)
        _, current_results = eval_from_files(prediction_folder=configs.evaluate.predictions_path,
                                             ground_truth_folder=configs.evaluate.ground_truth_path,
                                             image_ids=image_ids, verbose=True)
        if configs.evaluate.num_tests == 1:
            return
        else:
            # Accumulate this run's per-class, per-kind AP values.
            for class_name, v in current_results.items():
                if class_name not in results:
                    results[class_name] = dict()
                for kind, r in v.items():
                    if kind not in results[class_name]:
                        results[class_name][kind] = []
                    results[class_name][kind].append(r)
    # Multi-test summary: mean +/- std (max) AP across runs.
    for class_name, v in results.items():
        print(f'{class_name}  AP(Average Precision)')
        for kind, r in v.items():
            r = np.asarray(r)
            m = r.mean(axis=0)
            s = r.std(axis=0)
            u = r.max(axis=0)
            rs = ', '.join(f'{mv:.2f} +/- {sv:.2f} ({uv:.2f})' for mv, sv, uv in zip(m, s, u))
            print(f'{kind:<4} AP: {rs}')
Example #5
Score: 0
def predict(configs=None):
    """Run inference over every scene in the configured dataset split and save
    per-point class predictions and prediction entropies as ``.npy`` files
    inside each scene directory.

    The best checkpoint is loaded from ``configs.predict.best_checkpoint_path``;
    if it does not exist the function returns silently. Each scene's point
    count is read from its ``.dataset`` marker file, points are repeated and
    shuffled so every point receives ``configs.predict.num_votes`` votes, and
    votes are merged by ``update_scene_predictions``.

    Args:
        configs: prepared configuration object; built via ``prepare()`` when
            ``None``.
    """
    configs = prepare() if configs is None else configs

    import h5py
    import math
    import torch
    import torch.backends.cudnn as cudnn
    import torch.nn.functional as F
    from tqdm import tqdm

    #####################
    # Kernel Definition #
    #####################

    def print_stats(stats):
        # NOTE: currently unused (the stats accumulation below is disabled),
        # kept for when ground-truth labels are available again.
        # stats: (3, num_classes, num_scenes) with rows = ground-truth count,
        # prediction count, true-positive count; summed over scenes here.
        stats = stats.sum(axis=-1)
        iou = stats[2] / (stats[0] + stats[1] - stats[2])
        classnames = ["roads", "water", "marsh", "opengrnd","building", "trail", "medfrst", "forest"]
        print('clsname: {}'.format('  '.join(map('{:>8}'.format, classnames))))
        print('truecnt: {}'.format('  '.join(map('{:>8d}'.format, stats[0].astype(np.int64)))))
        print('predict: {}'.format('  '.join(map('{:>8d}'.format, stats[1].astype(np.int64)))))
        print('truepos: {}'.format('  '.join(map('{:>8d}'.format, stats[2].astype(np.int64)))))
        print('clssiou: {}'.format('  '.join(map('{:>8.2f}'.format, iou * 100))))
        print('meanAcc: {:4.2f}'.format(stats[2].sum() / stats[1].sum() * 100))
        print('meanIoU: {:4.2f}'.format(iou.mean() * 100))

    ###########
    # Prepare #
    ###########

    if configs.device == 'cuda':
        cudnn.benchmark = True
        if configs.get('deterministic', False):
            cudnn.deterministic = True
            cudnn.benchmark = False
    # Seed every RNG so repeated-point shuffling is reproducible.
    if ('seed' not in configs) or (configs.seed is None):
        configs.seed = torch.initial_seed() % (2 ** 32 - 1)
    seed = configs.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    #################################
    # Initialize DataLoaders, Model #
    #################################

    print(f'\n==> loading dataset "{configs.dataset}"')
    dataset = configs.dataset()[configs.dataset.split]

    print(f'\n==> creating model "{configs.model}"')
    model = configs.model()
    if configs.device == 'cuda':
        model = torch.nn.DataParallel(model)
    model = model.to(configs.device)

    if os.path.exists(configs.predict.best_checkpoint_path):
        print(f'==> loading checkpoint "{configs.predict.best_checkpoint_path}"')
        checkpoint = torch.load(configs.predict.best_checkpoint_path)
        model.load_state_dict(checkpoint.pop('model'))
        del checkpoint
    else:
        # No trained weights to predict with — nothing to do.
        return

    model.eval()

    ##############
    # Evaluation #
    ##############

    total_num_scenes = len(dataset.scene_list)
    # Output file prefix encodes the width multiplier, e.g. 0.5 -> "c0p5".
    prefix = "c" + str(configs.model.width_multiplier).replace(".","p")

    for scene_index, (scene, scene_files) in enumerate(tqdm(dataset.scene_list.items(), desc='Predicting', ncols=0)):
        # The scene's total point count is stored in its '.dataset' marker
        # file (ground-truth labels are not loaded during prediction).
        with open(os.path.join(scene, '.dataset'), 'r') as datamarker:
            total_num_points_in_scene = int(datamarker.read())
        confidences = np.zeros(total_num_points_in_scene, dtype=np.float32)
        predictions = np.full(total_num_points_in_scene, -1, dtype=np.int64)
        entropies = np.zeros(total_num_points_in_scene, dtype=np.float32)

        for filename in scene_files:
            h5f = h5py.File(filename, 'r')
            scene_data = h5f['data'][...].astype(np.float32)
            scene_num_points = h5f['data_num'][...].astype(np.int64)
            # Maps each window-local point index back to its scene index.
            window_to_scene_mapping = h5f['indices_split_to_full'][...].astype(np.int64)

            num_windows, max_num_points_per_window, num_channels = scene_data.shape
            extra_batch_size = configs.predict.num_votes * math.ceil(max_num_points_per_window / dataset.num_points)
            total_num_voted_points = extra_batch_size * dataset.num_points

            for min_window_index in range(0, num_windows, configs.predict.batch_size):
                max_window_index = min(min_window_index + configs.predict.batch_size, num_windows)
                batch_size = max_window_index - min_window_index
                window_data = scene_data[np.arange(min_window_index, max_window_index)]
                window_data = window_data.reshape(batch_size, -1, num_channels)

                # repeat, shuffle and tile
                # TODO: speedup here
                batched_inputs = np.zeros((batch_size, total_num_voted_points, num_channels), dtype=np.float32)
                batched_shuffled_point_indices = np.zeros((batch_size, total_num_voted_points), dtype=np.int64)
                for relative_window_index in range(batch_size):
                    num_points_in_window = scene_num_points[relative_window_index + min_window_index]
                    # Repeat the window's points until there are enough for
                    # all votes, then shuffle so votes see different orders.
                    num_repeats = math.ceil(total_num_voted_points / num_points_in_window)
                    shuffled_point_indices = np.tile(np.arange(num_points_in_window), num_repeats)
                    shuffled_point_indices = shuffled_point_indices[:total_num_voted_points]
                    np.random.shuffle(shuffled_point_indices)
                    batched_shuffled_point_indices[relative_window_index] = shuffled_point_indices
                    batched_inputs[relative_window_index] = window_data[relative_window_index][shuffled_point_indices]

                # model inference
                inputs = torch.from_numpy(
                    batched_inputs.reshape((batch_size * extra_batch_size, dataset.num_points, -1)).transpose(0, 2, 1)
                ).float().to(configs.device)
                with torch.no_grad():
                    # confidence and prediction come from max(): they are the
                    # value and the index of the maximum softmax probability.
                    batched_probs = F.softmax(model(inputs), dim=1)
                    batched_confidences, batched_predictions = batched_probs.max(dim=1)
                    batched_entropies = torch.sum(-batched_probs * torch.log(batched_probs), dim=1)
                    batched_confidences = batched_confidences.view(batch_size, total_num_voted_points).cpu().numpy()
                    batched_predictions = batched_predictions.view(batch_size, total_num_voted_points).cpu().numpy()
                    batched_entropies = batched_entropies.view(batch_size, total_num_voted_points).cpu().numpy()

                # Merge this batch's votes into the per-scene accumulators.
                update_scene_predictions(batched_confidences, batched_predictions, batched_shuffled_point_indices,
                                         confidences, predictions, window_to_scene_mapping,
                                         total_num_voted_points, batch_size, min_window_index, batched_entropies, entropies)

        np.save(os.path.join(scene, prefix + '_w' + str(configs.train.weight_type) +  'preds.npy'), predictions.astype(float))
        np.save(os.path.join(scene, prefix + '_w' + str(configs.train.weight_type) + 'entropy.npy'), entropies.astype(float))

    print("Done!")
예제 #6
0
def main():
    """Entry point: dispatch to an evaluation function when one is configured,
    otherwise run the full training loop with per-epoch evaluation,
    TensorBoard logging, and checkpointing (latest / per-epoch / best).
    """
    configs = prepare()
    if configs.evaluate is not None:
        # Evaluation mode: hand off to the configured eval function and stop.
        configs.evaluate.fn(configs)
        return

    import numpy as np
    import tensorboardX
    import torch
    import torch.backends.cudnn as cudnn
    from torch.utils.data import DataLoader
    from tqdm import tqdm

    ################################
    # Train / Eval Kernel Function #
    ################################

    # train kernel
    def train(model, loader, criterion, optimizer, scheduler, current_step,
              writer):
        """Train `model` for one epoch over `loader`, logging the per-batch
        loss to `writer`; steps the scheduler after each optimizer step."""
        model.train()
        for inputs, targets in tqdm(loader, desc='train', ncols=0):
            # inputs/targets may be a tensor or a dict of tensors; move
            # every tensor to the configured device either way.
            if isinstance(inputs, dict):
                for k, v in inputs.items():
                    batch_size = v.size(0)
                    inputs[k] = v.to(configs.device, non_blocking=True)
            else:
                batch_size = inputs.size(0)
                inputs = inputs.to(configs.device, non_blocking=True)
            if isinstance(targets, dict):
                for k, v in targets.items():
                    targets[k] = v.to(configs.device, non_blocking=True)
            else:
                targets = targets.to(configs.device, non_blocking=True)
            optimizer.zero_grad()

            outputs = model(inputs)
            loss = criterion(outputs, targets)
            writer.add_scalar('loss/train', loss.item(), current_step)
            # step counter advances by samples seen, not by batches
            current_step += batch_size
            loss.backward()
            optimizer.step()

            if scheduler is not None:
                scheduler.step()

    # evaluate kernel
    def evaluate(model, loader, split='test'):
        """Evaluate `model` on `loader` and return a dict mapping each
        configured meter name (formatted with `split`) to its computed value."""
        meters = {}
        for k, meter in configs.train.meters.items():
            meters[k.format(split)] = meter()
        model.eval()
        with torch.no_grad():
            for inputs, targets in tqdm(loader, desc=split, ncols=0):
                if isinstance(inputs, dict):
                    for k, v in inputs.items():
                        inputs[k] = v.to(configs.device, non_blocking=True)
                else:
                    inputs = inputs.to(configs.device, non_blocking=True)
                if isinstance(targets, dict):
                    for k, v in targets.items():
                        targets[k] = v.to(configs.device, non_blocking=True)
                else:
                    targets = targets.to(configs.device, non_blocking=True)
                outputs = model(inputs)
                for meter in meters.values():
                    meter.update(outputs, targets)
        # Replace each meter object by its final scalar value.
        for k, meter in meters.items():
            meters[k] = meter.compute()
        return meters

    ###########
    # Prepare #
    ###########

    if configs.device == 'cuda':
        cudnn.benchmark = True
        if configs.get('deterministic', False):
            cudnn.deterministic = True
            cudnn.benchmark = False
    # Seed all RNGs for reproducibility; derive a seed if none was given.
    if ('seed' not in configs) or (configs.seed is None):
        configs.seed = torch.initial_seed() % (2**32 - 1)
    seed = configs.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    print(configs)

    #####################################################################
    # Initialize DataLoaders, Model, Criterion, LRScheduler & Optimizer #
    #####################################################################

    print(f'\n==> loading dataset "{configs.dataset}"')
    dataset = configs.dataset()
    loaders = {}
    for split in dataset:
        loaders[split] = DataLoader(
            dataset[split],
            shuffle=(split == 'train'),
            batch_size=configs.train.batch_size,
            num_workers=configs.data.num_workers,
            pin_memory=True,
            worker_init_fn=lambda worker_id: np.random.seed(seed + worker_id))

    print(f'\n==> creating model "{configs.model}"')
    model = configs.model()
    if configs.device == 'cuda':
        model = torch.nn.DataParallel(model)
    model = model.to(configs.device)
    criterion = configs.train.criterion().to(configs.device)
    optimizer = configs.train.optimizer(model.parameters())

    # Resume training state (model, optimizer, epoch, best metrics) if a
    # checkpoint exists and resuming is enabled.
    last_epoch, best_metrics = -1, {m: None for m in configs.train.metrics}
    if os.path.exists(configs.train.checkpoint_path) and configs.train.resume:
        print(f'==> loading checkpoint "{configs.train.checkpoint_path}"')
        checkpoint = torch.load(configs.train.checkpoint_path)
        print(' => loading model')
        model.load_state_dict(checkpoint.pop('model'))
        if 'optimizer' in checkpoint and checkpoint['optimizer'] is not None:
            print(' => loading optimizer')
            optimizer.load_state_dict(checkpoint.pop('optimizer'))
        last_epoch = checkpoint.get('epoch', last_epoch)
        meters = checkpoint.get('meters', {})
        for m in configs.train.metrics:
            best_metrics[m] = meters.get(m + '_best', best_metrics[m])
        del checkpoint

    if 'scheduler' in configs.train and configs.train.scheduler is not None:
        # last_epoch lets the scheduler resume its decay from where it left off
        configs.train.scheduler.last_epoch = last_epoch
        print(f'==> creating scheduler "{configs.train.scheduler}"')
        scheduler = configs.train.scheduler(optimizer)
    else:
        scheduler = None

    ############
    # Training #
    ############

    # Already trained to completion: just run evaluation and report.
    if last_epoch >= configs.train.num_epochs:
        meters = dict()
        for split, loader in loaders.items():
            if split != 'train':
                meters.update(evaluate(model, loader=loader, split=split))
        for k, meter in meters.items():
            print(f'[{k}] = {meter:2f}')
        return

    with tensorboardX.SummaryWriter(configs.train.save_path) as writer:
        for current_epoch in range(last_epoch + 1, configs.train.num_epochs):
            # Global step counts samples processed so far (epochs * dataset size).
            current_step = current_epoch * len(dataset['train'])

            # train
            print(
                f'\n==> training epoch {current_epoch}/{configs.train.num_epochs}'
            )
            train(model,
                  loader=loaders['train'],
                  criterion=criterion,
                  optimizer=optimizer,
                  scheduler=scheduler,
                  current_step=current_step,
                  writer=writer)
            current_step += len(dataset['train'])

            # evaluate
            meters = dict()
            for split, loader in loaders.items():
                if split != 'train':
                    meters.update(evaluate(model, loader=loader, split=split))

            # check whether it is the best
            best = {m: False for m in configs.train.metrics}
            for m in configs.train.metrics:
                if best_metrics[m] is None or best_metrics[m] < meters[m]:
                    best_metrics[m], best[m] = meters[m], True
                meters[m + '_best'] = best_metrics[m]
            # log in tensorboard
            for k, meter in meters.items():
                print(f'[{k}] = {meter:2f}')
                writer.add_scalar(k, meter, current_step)

            # save checkpoint
            torch.save(
                {
                    'epoch': current_epoch,
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'meters': meters,
                    'configs': configs,
                }, configs.train.checkpoint_path)
            # Keep a per-epoch copy plus a "best" copy for each tracked metric.
            shutil.copyfile(
                configs.train.checkpoint_path,
                configs.train.checkpoints_path.format(current_epoch))
            for m in configs.train.metrics:
                if best[m]:
                    shutil.copyfile(configs.train.checkpoint_path,
                                    configs.train.best_checkpoint_paths[m])
            if best.get(configs.train.metric, False):
                shutil.copyfile(configs.train.checkpoint_path,
                                configs.train.best_checkpoint_path)
            print(f'[save_path] = {configs.train.save_path}')
예제 #7
0
파일: eval.py 프로젝트: hikaru-nara/YOGO
def evaluate(configs=None):
    """Evaluate a ShapeNet part-segmentation model with multi-vote inference.

    For each shape, points are tiled and shuffled so every point receives
    ``configs.evaluate.num_votes`` votes, the per-point argmax is restricted
    to the part classes valid for the shape's category, votes are merged by
    confidence, and per-class IoU statistics are accumulated, saved to
    ``configs.evaluate.stats_path`` and printed. If the stats file already
    exists, it is loaded and reported without re-running inference.

    Args:
        configs: prepared configuration object; built via ``prepare()`` when
            ``None``.
    """
    configs = prepare() if configs is None else configs

    import math
    import torch
    import torch.backends.cudnn as cudnn
    import torch.nn.functional as F
    from tqdm import tqdm

    from meters.shapenet import MeterShapeNet

    ###########
    # Prepare #
    ###########

    if configs.device == 'cuda':
        cudnn.benchmark = True
        if configs.get('deterministic', False):
            cudnn.deterministic = True
            cudnn.benchmark = False
    # Seed every RNG so the vote shuffling is reproducible.
    if ('seed' not in configs) or (configs.seed is None):
        configs.seed = torch.initial_seed() % (2**32 - 1)
    seed = configs.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    print(configs)

    # Stats already computed: report them and skip inference entirely.
    if os.path.exists(configs.evaluate.stats_path):
        stats = np.load(configs.evaluate.stats_path)
        print('clssIoU: {}'.format('  '.join(
            map('{:>8.2f}'.format, stats[:, 0] / stats[:, 1] * 100))))
        print('meanIoU: {:4.2f}'.format(stats[:, 0].sum() / stats[:, 1].sum() *
                                        100))
        return

    #################################
    # Initialize DataLoaders, Model #
    #################################

    print(f'\n==> loading dataset "{configs.dataset}"')
    dataset = configs.dataset()[configs.dataset.split]
    meter = MeterShapeNet()

    print(f'\n==> creating model "{configs.model}"')
    model = configs.model()
    if configs.device == 'cuda':
        model = torch.nn.DataParallel(model)
    model = model.to(configs.device)

    if os.path.exists(configs.evaluate.best_checkpoint_path):
        print(
            f'==> loading checkpoint "{configs.evaluate.best_checkpoint_path}"'
        )
        checkpoint = torch.load(configs.evaluate.best_checkpoint_path)
        model.load_state_dict(checkpoint.pop('model'))
        del checkpoint
    else:
        # No trained weights to evaluate — nothing to do.
        return

    model.eval()

    ##############
    # Evaluation #
    ##############

    # stats[shape, 0] = summed intersection, stats[shape, 1] = summed union
    # (accumulated by update_stats), giving per-class IoU at the end.
    stats = np.zeros((configs.data.num_shapes, 2))

    for file_path, shape_id in tqdm(dataset.file_paths, desc='eval', ncols=0):
        data = np.loadtxt(file_path).astype(np.float32)
        total_num_points_in_shape = data.shape[0]
        confidences = np.zeros(total_num_points_in_shape, dtype=np.float32)
        predictions = np.full(total_num_points_in_shape, -1, dtype=np.int64)

        coords = data[:, :3]
        if dataset.normalize:
            coords = dataset.normalize_point_cloud(coords)
        coords = coords.transpose()
        ground_truth = data[:, -1].astype(np.int64)
        # Assemble input channels: xyz [+ normals] [+ one-hot shape id].
        if dataset.with_normal:
            normal = data[:, 3:6].transpose()
            if dataset.with_one_hot_shape_id:
                shape_one_hot = np.zeros(
                    (dataset.num_shapes, coords.shape[-1]), dtype=np.float32)
                shape_one_hot[shape_id, :] = 1.0
                point_set = np.concatenate([coords, normal, shape_one_hot])
            else:
                point_set = np.concatenate([coords, normal])
        else:
            if dataset.with_one_hot_shape_id:
                shape_one_hot = np.zeros(
                    (dataset.num_shapes, coords.shape[-1]), dtype=np.float32)
                shape_one_hot[shape_id, :] = 1.0
                point_set = np.concatenate([coords, shape_one_hot])
            else:
                point_set = coords
        # Tile + shuffle point indices so every point gets num_votes votes.
        extra_batch_size = configs.evaluate.num_votes * math.ceil(
            total_num_points_in_shape / dataset.num_points)
        total_num_voted_points = extra_batch_size * dataset.num_points
        num_repeats = math.ceil(total_num_voted_points /
                                total_num_points_in_shape)
        shuffled_point_indices = np.tile(np.arange(total_num_points_in_shape),
                                         num_repeats)
        shuffled_point_indices = shuffled_point_indices[:total_num_voted_points]
        np.random.shuffle(shuffled_point_indices)
        # Only the part classes of this shape's category are valid outputs.
        start_class, end_class = meter.part_class_to_shape_part_classes[
            ground_truth[0]]

        # model inference
        inputs = torch.from_numpy(point_set[:, shuffled_point_indices].reshape(
            -1, extra_batch_size,
            dataset.num_points).transpose(1, 0, 2)).float().to(configs.device)
        with torch.no_grad():
            vote_confidences = F.softmax(model(inputs), dim=1)
            # Restrict the argmax to this category's class range, then shift
            # the local index back to the global part-class index.
            vote_confidences, vote_predictions = \
                vote_confidences[:, start_class:end_class, :].max(dim=1)
            vote_confidences = vote_confidences.view(
                total_num_voted_points).cpu().numpy()
            vote_predictions = (vote_predictions + start_class).view(
                total_num_voted_points).cpu().numpy()

        update_shape_predictions(vote_confidences, vote_predictions,
                                 shuffled_point_indices, confidences,
                                 predictions, total_num_voted_points)
        update_stats(stats, ground_truth, predictions, shape_id, start_class,
                     end_class)

    np.save(configs.evaluate.stats_path, stats)

    print('clssIoU: {}'.format('  '.join(
        map('{:>8.2f}'.format, stats[:, 0] / stats[:, 1] * 100))))
    print('meanIoU: {:4.2f}'.format(stats[:, 0].sum() / stats[:, 1].sum() *
                                    100))