Code example #1
0
def upsnet_evaluate(upsnet_log_directory='/home/adelgior/code/upsnet/output/upsnet/'
                                         'cityscapes/upsnet_resnet50_cityscapes_4gpu/',
                    gt_folder='/home/adelgior/code/upsnet/data/cityscapes/panoptic', split='val',
                    eval_iou_threshold=0.5, overwrite=False):
    """Evaluate UPSNet panoptic predictions against Cityscapes ground truth.

    Rewrites the UPSNet prediction json so each annotation carries the image id
    and file name taken from the ground-truth json (the two annotation lists
    are assumed to be in the same order), then runs PQ evaluation.

    Args:
        upsnet_log_directory: Root of the UPSNet run output.
        gt_folder: Directory containing ground-truth panoptic annotations.
        split: Dataset split; results are read from '<log_dir>/<split>/results/'.
        eval_iou_threshold: IoU threshold used for matching during evaluation.
        overwrite: Forwarded to evaluate.main_unwrapped (re-run even if cached).

    Returns:
        Path to the npz file of PQ evaluation results.
    """
    # os.path.join tolerates a missing trailing slash on upsnet_log_directory
    # (the original string concatenation did not).
    upsnet_results_directory = os.path.join(upsnet_log_directory, split, 'results')
    upsnet_pano_path = pathlib.Path(upsnet_results_directory, 'pans_unified')
    gt_json_file = upsnet_pano_path / 'gt.json'
    old_pred_json_file = upsnet_pano_path / 'pred.json'
    pred_folder = upsnet_pano_path / 'pan'
    # BUGFIX: the original replace('.json', 'with_image_ids.json') produced
    # 'predwith_image_ids.json' (missing separator). The file is both written
    # and consumed within this function, so renaming it is safe.
    new_pred_json_file = old_pred_json_file.with_name('pred_with_image_ids.json')

    labels_table = get_labels_table_without_voids(
        labels_table_cityscapes.get_cityscapes_trainids_label_table_cocoform(void_trainids=(-1, 255)))

    problem_config = instance_utils.InstanceProblemConfig(n_instances_by_semantic_id=[None for _ in labels_table],
                                                          labels_table=labels_table)

    # Fix json format: UPSNet's pred.json lacks image ids / file names, so copy
    # them over from gt.json (annotations assumed to be in matching order).
    with open(gt_json_file, 'r') as f:
        gt_json_format = json.load(f)
    image_ids = [img['id'] for img in gt_json_format['images']]
    file_names = [img['file_name'] for img in gt_json_format['images']]
    with open(old_pred_json_file, 'r') as f:
        pred_json_format = json.load(f)
    assert len(image_ids) == len(pred_json_format['annotations']), \
        '{} image ids; {} annotations'.format(len(image_ids), len(pred_json_format['annotations']))
    # BUGFIX: original `assert os.path.join(pred_folder)` was always true;
    # check that the prediction folder actually exists instead.
    assert pred_folder.exists(), 'Prediction folder {} does not exist'.format(pred_folder)
    for annotation, image_id, file_name in zip(pred_json_format['annotations'], image_ids, file_names):
        annotation['image_id'] = image_id
        annotation['file_name'] = file_name.replace('_leftImg8bit', '')
    with new_pred_json_file.open('w') as f:
        json.dump(pred_json_format, f)

    eval_pq_npz_file = evaluate.main_unwrapped(gt_json_file, new_pred_json_file, gt_folder, pred_folder,
                                               problem_config, iou_threshold=eval_iou_threshold,
                                               overwrite=overwrite)
    return eval_pq_npz_file
Code example #2
0
def get_problem_config_from_labels_table(labels_table,
                                         n_instances_by_semantic_id,
                                         map_to_semantic=False,
                                         void_value=255):
    """Build an InstanceProblemConfig directly from a labels table.

    Thin convenience wrapper that forwards all arguments unchanged to
    instance_utils.InstanceProblemConfig.
    """
    return instance_utils.InstanceProblemConfig(
        labels_table=labels_table,
        n_instances_by_semantic_id=n_instances_by_semantic_id,
        map_to_semantic=map_to_semantic,
        void_value=void_value)
Code example #3
0
def get_problem_config(class_names,
                       n_instances_per_class: int,
                       map_to_semantic=False):
    """Create an InstanceProblemConfig from a flat list of class names.

    The background class (index 0) gets a single instance channel; every other
    semantic class gets `n_instances_per_class` channels.

    Args:
        class_names: Ordered semantic class names; index 0 is background.
        n_instances_per_class: Instance channels per non-background class.
        map_to_semantic: Forwarded to InstanceProblemConfig.

    Returns:
        A configured instance_utils.InstanceProblemConfig.
    """
    # 0. Problem setup (instance segmentation definition)
    # (Removed the no-op `class_names = class_names` from the original.)
    n_semantic_classes = len(class_names)
    n_instances_by_semantic_id = [1] + [n_instances_per_class] * (n_semantic_classes - 1)
    problem_config = instance_utils.InstanceProblemConfig(
        n_instances_by_semantic_id=n_instances_by_semantic_id,
        map_to_semantic=map_to_semantic)
    problem_config.set_class_names(class_names)
    return problem_config
Code example #4
0
def main():
    """Train an instance-segmentation FCN as a single-image overfitting smoke test.

    Parses CLI args (gpu, config index, optional checkpoint, optional image
    index), builds the model/optimizer/trainer, trains on one image from the
    VOC 'seg11valid' split, then validates and checks that mean IU >= 80.
    """
    instanceseg.utils.scripts.check_clean_work_tree()
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpu', type=int, required=True)
    parser.add_argument('-c',
                        '--config',
                        type=int,
                        default=0,
                        choices=configurations.keys())
    parser.add_argument('--resume', help='Checkpoint path')
    parser.add_argument('--image_index',
                        type=int,
                        help='Image index to use for train/validation set',
                        default=None)
    args = parser.parse_args()
    gpu = args.gpu
    config_idx = args.config

    cfg = instanceseg.utils.configs.create_config_from_default(
        configurations[config_idx], default_config)
    if args.image_index is not None:
        cfg['image_index'] = args.image_index

    # Log directory name is derived from this script's filename and the config.
    out = instanceseg.utils.logs.get_log_dir(
        osp.basename(__file__).replace('.py', ''),
        config_idx,
        instanceseg.utils.configs.create_config_copy(cfg),
        parent_directory=osp.dirname(osp.abspath(__file__)))

    print('logdir: {}'.format(out))
    resume = args.resume

    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()

    # Fixed seed so the smoke test is reproducible.
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 0. Problem setup (instance segmentation definition)
    # Background (class 0) gets one channel; the other 20 VOC classes each get
    # cfg['n_instances_per_class'] instance channels.
    n_semantic_classes = 21
    n_instances_by_semantic_id = [1] + [
        cfg['n_instances_per_class']
        for sem_cls in range(1, n_semantic_classes)
    ]
    problem_config = instance_utils.InstanceProblemConfig(
        n_instances_by_semantic_id=n_instances_by_semantic_id)

    # 1. dataset
    # NOTE(review): train and val loaders intentionally share the same
    # (single-image) dataset -- this is an overfitting test, not real training.
    root = osp.expanduser('~/data/datasets')
    dataset_kwargs = dict(transform=True,
                          semantic_only_labels=cfg['semantic_only_labels'],
                          set_extras_to_void=cfg['set_extras_to_void'],
                          semantic_subset=cfg['semantic_subset'],
                          modified_indices=[cfg['image_index']])
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    val_split = 'seg11valid'
    val_dataset = instanceseg.datasets.voc.VOC2011ClassSeg(root,
                                                           split=val_split,
                                                           **dataset_kwargs)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             **kwargs)
    train_loader = torch.utils.data.DataLoader(val_dataset,
                                               batch_size=1,
                                               shuffle=True,
                                               **kwargs)

    problem_config.set_class_names(val_dataset.class_names)

    # 2. model

    model = instanceseg.models.FCN8sInstance(
        semantic_instance_class_list=problem_config.
        semantic_instance_class_list,
        map_to_semantic=False,
        include_instance_channel0=False)
    print('Number of classes in model: {}'.format(model.n_classes))
    start_epoch = 0
    start_iteration = 0
    if resume:
        # Resume model weights and the training position from the checkpoint.
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        # Fresh start: initialize conv layers from pretrained VGG16.
        print('Copying params from vgg16')
        vgg16 = instanceseg.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16)
    if cuda:
        model = model.cuda()

    # 3. optimizer
    if cfg['optim'] == 'adam':
        optim = torch.optim.Adam(model.parameters(),
                                 lr=cfg['lr'],
                                 weight_decay=cfg['weight_decay'])
    elif cfg['optim'] == 'sgd':
        # Bias parameters get 2x learning rate and no weight decay (second
        # param group below); weights use the base lr/weight_decay.
        optim = torch.optim.SGD(
            [
                {
                    'params':
                    instanceseg.utils.configs.get_parameters(model, bias=False)
                },
                {
                    'params':
                    instanceseg.utils.configs.get_parameters(model, bias=True),
                    #            {'params': filter(lambda p: False if p is None else p.requires_grad, get_parameters(
                    #                model, bias=False))},
                    #            {'params': filter(lambda p: False if p is None else p.requires_grad, get_parameters(
                    #                model, bias=True)),
                    'lr':
                    cfg['lr'] * 2,
                    'weight_decay':
                    0
                },
            ],
            lr=cfg['lr'],
            momentum=cfg['momentum'],
            weight_decay=cfg['weight_decay'])
    else:
        raise Exception('optimizer {} not recognized.'.format(cfg['optim']))
    if resume:
        # NOTE(review): `checkpoint` is only bound when resume is set, so this
        # reuse is safe, but fragile if the branches above are reordered.
        optim.load_state_dict(checkpoint['optim_state_dict'])

    writer = SummaryWriter(log_dir=out)
    trainer = instanceseg.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out_dir=out,
        max_iter=cfg['max_iteration'],
        instance_problem=problem_config,
        interval_validate=cfg.get('interval_validate', len(train_loader)),
        matching_loss=cfg['matching'],
        tensorboard_writer=writer,
        loader_semantic_lbl_only=cfg['semantic_only_labels'])
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration

    print('Starting test by training {} images'.format(
        len(train_loader.dataset)))
    trainer.train()

    print('Evaluating final model')

    val_loss, metrics, (segmentation_visualizations,
                        score_visualizations) = trainer.validate_split(
                            should_export_visualizations=False)

    trainer.export_visualizations(segmentation_visualizations,
                                  'seg_' + val_split,
                                  tile=True,
                                  outdir='./tmp/')
    trainer.export_visualizations(score_visualizations,
                                  'score_' + val_split,
                                  tile=False,
                                  outdir='./tmp/')
    # viz = visualization_utils.get_tile_image(visualizations)
    # skimage.io.imsave(os.path.join(here, 'viz_evaluate.png'), viz)
    # Convert metric fractions to percentages for reporting.
    metrics = np.array(metrics)
    metrics *= 100
    print('''\
        Accuracy: {0}
        Accuracy Class: {1}
        Mean IU: {2}
        FWAV Accuracy: {3}'''.format(*metrics))
    # Pass/fail criterion: the single-image overfit should reach >= 80 mIOU.
    if metrics[2] < 80:
        print(instanceseg.utils.misc.TermColors.FAIL +
              'Test FAILED.  mIOU: {}'.format(metrics[2]) +
              instanceseg.utils.misc.TermColors.ENDC)
    else:
        print(instanceseg.utils.misc.TermColors.OKGREEN +
              'TEST PASSED! mIOU: {}'.format(metrics[2]) +
              instanceseg.utils.misc.TermColors.ENDC)
Code example #5
0
def main():
    """Sweep synthetic-score parameters on blob data and log results to TensorBoard.

    For one batch of synthetic blob data, generates a series of synthetic score
    maps (varying either max_confidence or smearing, chosen by --xaxis),
    computes the matching loss for each, and writes scalars and visualizations.
    """
    synthetic_generator_n_instances_per_semantic_id = 2
    args = parse_args()
    cfg = {
        'max_confidence': args.max_confidence,
        'scoring_method': args.scoring_method,
        'smearing': args.smearing,
        'assignment_mixing': args.assignment_mixing,
        'xaxis': args.xaxis,
    }

    out = instanceseg.utils.logs.get_log_dir(
        osp.basename(__file__).replace('.py', ''),
        config_id=None,
        cfg=cfg,
        parent_directory=os.path.join(here, 'logs', 'synthetic'))
    print('Log in {}'.format(out))
    cuda = True  # torch.cuda.is_available()
    gpu = 0
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)

    # Fixed seed so the synthetic sweep is reproducible.
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset
    dataset_kwargs = dict(
        transform=True,
        n_max_per_class=synthetic_generator_n_instances_per_semantic_id,
        map_to_single_instance_problem=False,
        blob_size=(80, 80))
    train_dataset = instanceseg.datasets.synthetic.BlobExampleGenerator(
        **dataset_kwargs)
    val_dataset = instanceseg.datasets.synthetic.BlobExampleGenerator(
        **dataset_kwargs)
    # Sanity-check that a single example can be generated before building
    # loaders.  NOTE(review): the bare except drops into ipdb for interactive
    # debugging; the raise is only reached when the debugger continues.
    try:
        img, (sl, il) = train_dataset[0]
    except:
        import ipdb
        ipdb.set_trace()
        raise Exception('Cannot load an image from your dataset')
    loader_kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=1,
                                               shuffle=True,
                                               **loader_kwargs)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             **loader_kwargs)
    train_loader_for_val = torch.utils.data.DataLoader(
        train_dataset.copy(modified_length=3),
        batch_size=1,
        shuffle=False,
        **loader_kwargs)

    # 0. Problem setup (instance segmentation definition)
    class_names = val_dataset.class_names
    n_semantic_classes = len(class_names)
    # NOTE(review): cfg is rebound here, discarding the CLI-derived dict built
    # above.  This dict lacks a 'single_instance' key, so the `or` fallback
    # below would raise KeyError if 'n_instances_per_class' were ever falsy
    # (it is 3 here, so the fallback is never evaluated).
    cfg = {'n_instances_per_class': 3}
    n_instances_per_class = cfg['n_instances_per_class'] or \
                            (1 if cfg['single_instance'] else synthetic_generator_n_instances_per_semantic_id)
    # Background (class 0) is single-instance; other classes get
    # n_instances_per_class channels each.
    n_instances_by_semantic_id = [1] + [
        n_instances_per_class for sem_cls in range(1, n_semantic_classes)
    ]
    problem_config = instance_utils.InstanceProblemConfig(
        n_instances_by_semantic_id=n_instances_by_semantic_id)
    problem_config.set_class_names(class_names)

    writer = SummaryWriter(log_dir=out)

    # Only the first batch is used (the loop breaks after data_idx 0).
    for data_idx, (data, (sem_lbl, inst_lbl)) in enumerate(train_loader):
        if data_idx >= 1:
            break
        if cuda:
            data, (sem_lbl, inst_lbl) = data.cuda(), (sem_lbl.cuda(),
                                                      inst_lbl.cuda())
        # NOTE(review): Variable(..., volatile=True) and loss.data[0] below are
        # pre-0.4 PyTorch idioms; modern code would use torch.no_grad() and
        # loss.item() -- confirm the project's pinned torch version.
        data, sem_lbl, inst_lbl = Variable(data, volatile=True), \
                                  Variable(sem_lbl), Variable(inst_lbl)
        # Build the sweep: vary one knob per --xaxis, hold the others fixed.
        if args.xaxis == 'max_confidence':
            max_confidences = [1, 2, 3, 4, 5, 10, 100, 1000, 10000]
            smearings = [args.smearing for _ in max_confidences]
            assignment_mixings = [
                args.assignment_mixing for _ in max_confidences
            ]
        elif args.xaxis == 'smearing':
            smearings = list(np.linspace(0, 1, 10))
            max_confidences = [args.max_confidence for _ in smearings]
            assignment_mixings = [args.assignment_mixing for _ in smearings]
        else:
            raise ValueError('xaxis == {} unrecognized.'.format(args.xaxis))
        for prediction_number, (max_confidence, smearing,
                                assignment_mixing) in enumerate(
                                    zip(max_confidences, smearings,
                                        assignment_mixings)):
            scoring_cfg = {
                'max_confidence': max_confidence,
                'scoring_method': args.scoring_method,
                'smearing': float(smearing),
                'assignment_mixing': assignment_mixing,
            }
            print('prediction {}/{}'.format(prediction_number + 1,
                                            len(max_confidences)))
            # Generate synthetic scores from the labels, then evaluate the
            # matching loss on them.
            score = compute_scores(data,
                                   sem_lbl,
                                   inst_lbl,
                                   problem_config,
                                   cuda=True,
                                   **scoring_cfg)
            pred_permutations, loss, loss_components = loss_function(
                score,
                sem_lbl,
                inst_lbl,
                problem_config,
                return_loss_components=True)
            if np.isnan(float(loss.data[0])):
                raise ValueError('losses is nan while validating')
            softmax_scores = F.softmax(score, dim=1)
            # Predicted instance label = argmax over the channel dimension.
            inst_lbl_pred = score.data.max(dim=1)[1].cpu().numpy()[:, :, :]

            # Write scalars
            writer.add_scalar('max_confidence', max_confidence,
                              prediction_number)
            writer.add_scalar('smearing', smearing, prediction_number)
            # writer.add_scalar('instance_mixing', instance_mixing, prediction_number)
            writer.add_scalar('losses', loss, prediction_number)
            channel_labels = problem_config.get_channel_labels('{} {}')
            for i, label in enumerate(channel_labels):
                tag = 'loss_components/{}'.format(label.replace(' ', '_'))
                writer.add_scalar(tag, loss_components[i], prediction_number)

            # Write images
            write_visualizations(sem_lbl,
                                 inst_lbl,
                                 softmax_scores,
                                 pred_permutations,
                                 problem_config,
                                 outdir=out,
                                 writer=writer,
                                 iteration=prediction_number,
                                 basename='scores')