Example 1
def do_crf_inference(image, annotation, t=5, use_gpu=False):
    annotation = np.expand_dims(annotation, axis=-1)
    unary = np.concatenate([annotation, 1 - annotation], axis=-1)
    num_classes = unary.shape[2]
    shape = image.shape[0:2]

    # make input pytorch compatible
    img_var = torch.tensor(
        image.transpose(2, 0, 1).reshape([1, 3, shape[0], shape[1]]))
    unary_var = torch.tensor(
        unary.transpose(2, 0, 1).reshape([1, num_classes, shape[0], shape[1]]))
    img_var = img_var.cuda() if use_gpu else img_var
    unary_var = unary_var.cuda() if use_gpu else unary_var

    # Create CRF module
    gausscrf = convcrf.GaussCRF(conf=convcrf.default_conf,
                                shape=shape,
                                nclasses=num_classes,
                                use_gpu=use_gpu)
    gausscrf = gausscrf.cuda() if use_gpu else gausscrf

    # Perform CRF inference
    prediction = gausscrf.forward(unary=unary_var, img=img_var, num_iter=t)
    prediction = prediction.data.cpu().numpy()[0]
    return np.argmax(prediction, axis=0)
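
A minimal call sketch for Example 1, assuming the convcrf module from MarvinTeichmann/ConvCRF is importable; the random image and soft foreground map are placeholders for real inputs:

import numpy as np

# Placeholder inputs: an RGB image and a soft foreground probability map in [0, 1].
image = np.random.rand(256, 256, 3).astype(np.float32)
annotation = np.random.rand(256, 256).astype(np.float32)

# Returns a hard label map of shape [256, 256] with values in {0, 1}.
labels = do_crf_inference(image, annotation, t=5, use_gpu=False)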
Example 2
def do_crf_inference(image, unary, speed_eval, pyinn=False):

    if pyinn or not hasattr(torch.nn.functional, 'unfold'):
        # pytorch 0.3 or older requires pyinn.
        pyinn = True
        # Cheap and easy trick to make sure that pyinn is loadable.
        import pyinn

    # get basic hyperparameters
    num_classes = unary.shape[2]
    shape = image.shape[0:2]
    config = convcrf.default_conf
    config['filter_size'] = 7
    config['pyinn'] = pyinn

    ##
    # make input pytorch compatible
    image = image.transpose(2, 0, 1)  # shape: [3, height, width]
    # Add batch dimension to image: [1, 3, height, width]
    image = image.reshape([1, 3, shape[0], shape[1]])
    img_var = Variable(torch.Tensor(image)).cuda()

    unary = unary.transpose(2, 0, 1)  # shape: [num_classes, height, width]
    # Add batch dimension to unary: [1, num_classes, height, width]
    unary = unary.reshape([1, num_classes, shape[0], shape[1]])
    unary_var = Variable(torch.Tensor(unary)).cuda()

    logging.info("Build ConvCRF.")
    ##
    # Create CRF module
    gausscrf = convcrf.GaussCRF(conf=config, shape=shape, nclasses=num_classes)
    # Cuda computation is required.
    # A CPU implementation of our message passing is not provided.
    gausscrf.cuda()

    logging.info("Start Computation.")
    # Perform CRF inference
    prediction = gausscrf.forward(unary=unary_var, img=img_var)

    if speed_eval:
        # Evaluate inference speed
        logging.info("Doing speed evaluation.")
        start_time = time.time()
        for i in range(10):
            # Run ConvCRF 10 times and average the total time
            pred = gausscrf.forward(unary=unary_var, img=img_var)

        pred.cpu()  # wait for all GPU computations to finish

        duration = (time.time() - start_time) * 1000 / 10

        logging.info("Finished running 10 predictions.")
        logging.info("Avg. Computation time: {} ms".format(duration))

    return prediction.data.cpu().numpy()
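
Example 2 expects unary to hold per-pixel class probabilities of shape [height, width, num_classes], such as the softmax output of a segmentation network, and it requires a CUDA device. A hedged sketch of preparing such inputs (the random arrays stand in for a real image and real network output):

import numpy as np
import scipy.special

image = np.random.rand(320, 320, 3).astype(np.float32) * 255
# Stand-in for a CNN's raw per-class scores on a 21-class problem.
logits = np.random.randn(320, 320, 21).astype(np.float32)
# Per-pixel probabilities that sum to 1 over the class axis.
unary = scipy.special.softmax(logits, axis=-1)

prediction = do_crf_inference(image, unary, speed_eval=False)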
Example 3
def do_crf_inference(image, unary, args):

    if args.pyinn or not hasattr(torch.nn.functional, 'unfold'):
        # pytorch 0.3 or older requires pyinn.
        args.pyinn = True
        # Cheap and easy trick to make sure that pyinn is loadable.
        import pyinn

    # get basic hyperparameters
    num_classes = unary.shape[2]
    shape = image.shape[0:2]
    config = convcrf.default_conf
    config['filter_size'] = 7
    config['pyinn'] = args.pyinn

    if args.normalize:
        # Warning: applying image normalization affects CRF computation.
        # The parameter 'col_feats::schan' needs to be adapted.

        # Normalize image range
        #     This changes the image features and influences CRF output
        image = image / 255
        # mean subtraction
        #    CRF is invariant to mean subtraction, output is NOT affected
        image = image - 0.5
        # std normalization
        #       Affects CRF computation
        image = image / 0.3

        # schan = 0.1 is a good starting value for normalized images.
        # The relation is f_i = image / schan
        config['col_feats']['schan'] = 0.1

    # make input pytorch compatible
    image = image.transpose(2, 0, 1)  # shape: [3, height, width]
    # Add batch dimension to image: [1, 3, height, width]
    image = image.reshape([1, 3, shape[0], shape[1]])
    img_var = Variable(torch.Tensor(image))

    unary = unary.transpose(2, 0, 1)  # shape: [num_classes, height, width]
    # Add batch dimension to unary: [1, num_classes, height, width]
    unary = unary.reshape([1, num_classes, shape[0], shape[1]])
    unary_var = Variable(torch.Tensor(unary))

    logging.info("Build ConvCRF.")
    ##
    # Create CRF module (kept on the CPU in this variant; call .cuda() on the
    # module and the input tensors to run on the GPU instead)
    gausscrf = convcrf.GaussCRF(conf=config, shape=shape, nclasses=num_classes)

    logging.info("Start Computation.")
    # Perform CRF inference
    prediction = gausscrf.forward(unary=unary_var, img=img_var)

    if args.nospeed:
        # Evaluate inference speed
        logging.info("Doing speed evaluation.")
        start_time = time.time()
        for i in range(10):
            # Run ConvCRF 10 times and average the total time
            prediction = gausscrf.forward(unary=unary_var, img=img_var)

        duration = (time.time() - start_time) * 1000 / 10

        logging.info("Finished running 10 predictions.")
        logging.info("Avg. Computation time: {} ms".format(duration))

    return prediction.data.cpu().numpy()
Example 4
def do_crf_inference(image, unary, args):

    if args.pyinn or not hasattr(torch.nn.functional, 'unfold'):
        # pytorch 0.3 or older requires pyinn.
        args.pyinn = True
        # Cheap and easy trick to make sure that pyinn is loadable.
        import pyinn

    # get basic hyperparameters
    num_classes = unary.shape[2]
    shape = image.shape[0:2]
    config = convcrf.default_conf
    config['filter_size'] = 7
    config['pyinn'] = args.pyinn

    if args.normalize:
        # Warning: applying image normalization affects CRF computation.
        # The parameter 'col_feats::schan' needs to be adapted.

        # Normalize image range
        #     This changes the image features and influences CRF output
        image = image / 255
        # mean subtraction
        #    CRF is invariant to mean subtraction, output is NOT affected
        image = image - 0.5
        # std normalization
        #       Affects CRF computation
        image = image / 0.3

        # schan = 0.1 is a good starting value for normalized images.
        # The relation is f_i = image / schan
        config['col_feats']['schan'] = 0.1

    # make input pytorch compatible
    img = image.transpose(2, 0, 1)  # shape: [3, height, width]
    # Add batch dimension to image: [1, 3, height, width]
    img = img.reshape([1, 3, shape[0], shape[1]])
    img_var = Variable(torch.Tensor(img)).cuda()

    un = unary.transpose(2, 0, 1)  # shape: [num_classes, height, width]
    # Add batch dimension to unary: [1, num_classes, height, width]
    un = un.reshape([1, num_classes, shape[0], shape[1]])
    unary_var = Variable(torch.Tensor(un)).cuda()

    logging.debug("Build ConvCRF.")
    ##
    # Create CRF module
    gausscrf = convcrf.GaussCRF(conf=config, shape=shape, nclasses=num_classes)
    # Cuda computation is required.
    # A CPU implementation of our message passing is not provided.
    gausscrf.cuda()

    # Perform ConvCRF inference
    """
    'Warm up': Our implementation compiles cuda kernels during runtime.
    The first inference call thus comes with some overhead.
    """
    logging.info("Start Computation.")
    prediction = gausscrf.forward(unary=unary_var, img=img_var)

    if args.nospeed:

        logging.info("Doing speed benchmark with filter size: {}".format(
            config['filter_size']))
        logging.info("Running multiple iteration. This may take a while.")

        # Our implementation compiles cuda kernels during runtime.
        # The first inference run is thus much slower.
        # prediction = gausscrf.forward(unary=unary_var, img=img_var)

        start_time = time.time()
        for i in range(10):
            # Run ConvCRF 10 times and report the average time
            prediction = gausscrf.forward(unary=unary_var, img=img_var)

        prediction.cpu()  # wait for all GPU computations to finish
        duration = (time.time() - start_time) * 1000 / 10

        logging.debug("Finished running 10 predictions.")
        logging.debug("Avg Computation time: {} ms".format(duration))

    # Perform FullCRF inference
    myfullcrf = fullcrf.FullCRF(config, shape, num_classes)
    fullprediction = myfullcrf.compute(unary, image, softmax=False)

    if args.nospeed:

        start_time = time.time()
        for i in range(5):
            # Run FullCRF 5 times and report the average time
            fullprediction = myfullcrf.compute(unary, image, softmax=False)

        fullduration = (time.time() - start_time) * 1000 / 5

        logging.debug("Finished running 5 predictions.")
        logging.debug("Avg Computation time: {} ms".format(fullduration))

        logging.info("Using FullCRF took {:4.0f} ms ({:2.2f} s)".format(
            fullduration, fullduration / 1000))

        logging.info("Using ConvCRF took {:4.0f} ms ({:2.2f} s)".format(
            duration, duration / 1000))

        logging.info("Congratulation. Using ConvCRF provids a speed-up"
                     " of {:.0f}.".format(fullduration / duration))

        logging.info("")

    return prediction.data.cpu().numpy(), fullprediction
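
The examples above synchronize implicitly by copying a prediction to the CPU before reading the clock. A sketch of the same benchmarking pattern with an explicit torch.cuda.synchronize() call, which makes the synchronization points visible (the warm-up run accounts for the runtime kernel compilation mentioned above):

import time
import torch

def time_crf(crf, unary_var, img_var, runs=10):
    crf.forward(unary=unary_var, img=img_var)  # warm-up: kernels compile on first use
    torch.cuda.synchronize()
    start = time.time()
    for _ in range(runs):
        crf.forward(unary=unary_var, img=img_var)
    torch.cuda.synchronize()  # wait for queued GPU work before stopping the clock
    return (time.time() - start) * 1000 / runs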
Example 5
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
    parser.add_argument('-g', '--gpu', type=int, required=True, help='gpu id')
    parser.add_argument('--resume', help='checkpoint path')
    # configurations (same configuration as original work)
    # https://github.com/shelhamer/fcn.berkeleyvision.org
    parser.add_argument('--max-iteration',
                        type=int,
                        default=25,
                        help='max iteration')
    parser.add_argument(
        '--lr',
        type=float,
        default=1.0e-7,
        help='learning rate',
    )
    parser.add_argument(
        '--weight-decay',
        type=float,
        default=0.0005,
        help='weight decay',
    )
    parser.add_argument(
        '--momentum',
        type=float,
        default=0.99,
        help='momentum',
    )
    args = parser.parse_args()

    args.model = 'FCN8sAtOnce'
    args.git_hash = git_hash()

    now = datetime.datetime.now()
    args.out = osp.join(here, 'logs', now.strftime('%Y%m%d_%H%M%S.%f'))

    os.makedirs(args.out)
    with open(osp.join(args.out, 'config.yaml'), 'w') as f:
        yaml.safe_dump(args.__dict__, f, default_flow_style=False)

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset

    root = osp.expanduser('~/data/datasets')
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(torchfcn.datasets.DAVISClassSeg(
        root, split='train', transform=True),
                                               batch_size=100,
                                               shuffle=True,
                                               **kwargs)
    val_loader = torch.utils.data.DataLoader(torchfcn.datasets.DAVISClassSeg(
        root, split='val', transform=True),
                                             batch_size=100,
                                             shuffle=False,
                                             **kwargs)

    # 2. model
    config = convcrf.default_conf
    config['filter_size'] = 5
    config['pyinn'] = False
    config['trainable'] = True
    logging.info("Build ConvCRF.")
    ##
    # Create CRF module
    model = convcrf.GaussCRF(conf=config, shape=[224, 224], nclasses=2)
    start_epoch = 0
    start_iteration = 0
    if args.resume:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']

    if cuda:
        model = model.cuda()

    # for m in model.modules():
    #     print(dir(m))
    # for p in model.parameters():
    #     print(p.name)

    optim = torch.optim.SGD(model.parameters(),
                            lr=args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)
    if args.resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = crftrainer.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=args.out,
        max_iter=args.max_iteration,
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
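
Setting config['trainable'] = True is what makes the GaussCRF weights learnable, so model.parameters() above is non-empty and the SGD step has something to update. A quick sketch for inspecting the registered parameters (assumes the same config as in the example):

model = convcrf.GaussCRF(conf=config, shape=[224, 224], nclasses=2)
for name, param in model.named_parameters():
    print(name, tuple(param.shape), param.requires_grad)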
Example 6
    def __init__(self, args):
        self.args = args

        # Define Saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()
        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(
            args, **kwargs)

        # Define network
        model = DeepLab(num_classes=self.nclass,
                        backbone=args.backbone,
                        output_stride=args.out_stride,
                        sync_bn=args.sync_bn,
                        freeze_bn=args.freeze_bn,
                        crop_size=args.crop_size,
                        crf_loss=args.crf_loss)

        train_params = [{
            'params': model.get_1x_lr_params(),
            'lr': args.lr
        }, {
            'params': model.get_10x_lr_params(),
            'lr': args.lr * 10
        }]
        print(sum(p.numel() for p in model.parameters() if p.requires_grad))

        # Define Optimizer
        optimizer = torch.optim.SGD(train_params,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=args.nesterov)

        # Define Criterion
        # whether to use class balanced weights
        if args.use_balanced_weights:
            classes_weights_path = os.path.join(
                Path.db_root_dir(args.dataset),
                args.dataset + '_classes_weights.npy')
            if os.path.isfile(classes_weights_path):
                weight = np.load(classes_weights_path)
            else:
                weight = calculate_weigths_labels(args.dataset,
                                                  self.train_loader,
                                                  self.nclass)
                np.save(
                    os.path.join(Path.db_root_dir(args.dataset),
                                 args.dataset + '_classes_weights.npy'),
                    weight)
            weight = torch.from_numpy(weight.astype(np.float32))
        else:
            weight = None

        self.criterion = SegmentationLosses(
            weight=weight, cuda=args.cuda,
            ignore_index=args.ignore_index).build_loss(mode=args.loss_type)
        self.model, self.optimizer = model, optimizer

        # Define Evaluator
        self.evaluator = Evaluator(self.nclass)
        # Define lr scheduler
        self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr, args.epochs,
                                      len(self.train_loader))

        # Using cuda
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.args.gpu_ids)
            patch_replication_callback(self.model)
            self.model = self.model.cuda()

        if args.crf_loss:
            # Round crop_size / 4 up to the next integer so the CRF shape
            # covers the full prediction map.
            _shape = int(args.crop_size / 4)
            if args.crop_size / 4 != int(args.crop_size / 4):
                _shape += 1

            config = convcrf.default_conf
            config['filter_size'] = 7
            config['col_feats']['schan'] = 0.1
            config['trainable'] = True

            self.convcrf = convcrf.GaussCRF(conf=config,
                                            shape=(_shape, _shape),
                                            nclasses=self.nclass)
            self.convcrf.cuda()

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            if not args.ft:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))

        # Clear start epoch if fine-tuning
        if args.ft:
            args.start_epoch = 0
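
The _shape computation in the crf_loss branch is a ceiling division, presumably because the network's prediction map has a quarter of the crop resolution. math.ceil expresses the same arithmetic more directly:

import math

crop_size = 513  # illustrative value
_shape = math.ceil(crop_size / 4)  # 129, matching int(513 / 4) + 1 above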
Example 7
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('-g', '--gpu', type=int, required=True, help='gpu id')
    parser.add_argument('--resume', help='checkpoint path')
    # configurations (same configuration as original work)
    # https://github.com/shelhamer/fcn.berkeleyvision.org
    parser.add_argument(
        '--max-iteration', type=int, default=3000, help='max iteration'
    )
    parser.add_argument(
        '--lr', type=float, default=1.0e-4, help='learning rate',
    )
    parser.add_argument(
        '--weight-decay', type=float, default=0.0005, help='weight decay',
    )
    parser.add_argument(
        '--momentum', type=float, default=0.99, help='momentum',
    )
    args = parser.parse_args()

    args.model = 'FCN8sAtOnce'
    args.git_hash = git_hash()

    now = datetime.datetime.now()
    args.out = osp.join(here, 'logs', now.strftime('%Y%m%d_%H%M%S.%f'))

    os.makedirs(args.out)
    with open(osp.join(args.out, 'config.yaml'), 'w') as f:
        yaml.safe_dump(args.__dict__, f, default_flow_style=False)

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset

    root = osp.expanduser('~/data/datasets')
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.DAVISClassSeg(root, split='train', transform=True),
        batch_size=1, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.DAVISClassSeg(
            root, split='val', transform=True),
        batch_size=1, shuffle=False, **kwargs)
    loader = val_loader
    # 2. model

    model = torchfcn.models.FCN8sAtOnce(n_class=2)
    epoch = 0
    iteration = 0

    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['model_state_dict'])

    if cuda:
        model = model.cuda()

    training = False
    model.eval()
    n_class = len(loader.dataset.class_names)

    val_loss = 0
    visualizations = []
    label_trues, label_preds = [], []
    timestamp_start = datetime.datetime.now(pytz.timezone('Asia/Tokyo'))

    config = convcrf.default_conf
    config['filter_size'] = 7
    config['pyinn'] = False
    logging.info("Build ConvCRF.")
    ##
    # Create CRF module
    gausscrf = convcrf.GaussCRF(conf=config, shape=[224, 224], nclasses=n_class)
    # Cuda computation is required.
    # A CPU implementation of our message passing is not provided.
    gausscrf.cuda()

    for batch_idx, (data, target, unary) in tqdm.tqdm(
            enumerate(loader), total=len(loader),
            desc='Valid iteration=%d' % iteration, ncols=80,
            leave=False):

        data, target, unary = data.cuda(), target.cuda(), unary.cuda()
        shape = unary.shape[-2:]
        unary = unary.reshape([loader.batch_size, n_class, shape[0], shape[1]])
        data, target, unary_var = Variable(data), Variable(target), Variable(unary)

        with torch.no_grad():
            score = gausscrf.forward(unary=unary_var, img=data)

        loss = cross_entropy2d(score, target,
                               size_average=False)
        loss_data = loss.data.item()
        if np.isnan(loss_data):
            raise ValueError('loss is nan while validating')
        val_loss += loss_data / len(data)

        imgs = data.data.cpu()
        lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :]
        lbl_true = target.data.cpu()
        for img, lt, lp in zip(imgs, lbl_true, lbl_pred):
            img, lt = loader.dataset.untransform(img, lt)
            label_trues.append(lt)
            label_preds.append(lp)
            if len(visualizations) < 9:
                viz = fcn.utils.visualize_segmentation(
                    lbl_pred=lp, lbl_true=lt, img=img, n_class=n_class)
                visualizations.append(viz)
    metrics = torchfcn.utils.label_accuracy_score(
        label_trues, label_preds, n_class)

    out = osp.join(args.out, 'visualization_viz')
    if not osp.exists(out):
        os.makedirs(out)
    out_file = osp.join(out, 'iter%012d.jpg' % iteration)
    # scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite is the
    # suggested replacement on newer versions.
    scipy.misc.imsave(out_file, fcn.utils.get_tile_image(visualizations))

    val_loss /= len(loader)

    with open(osp.join(args.out, 'log.csv'), 'a') as f:
        elapsed_time = (
            datetime.datetime.now(pytz.timezone('Asia/Tokyo')) -
            timestamp_start).total_seconds()
        log = [epoch, iteration] + [''] * 5 + \
              [val_loss] + list(metrics) + [elapsed_time]
        log = map(str, log)
        f.write(','.join(log) + '\n')

    mean_iu = metrics[2]  # label_accuracy_score returns (acc, acc_cls, mean_iu, fwavacc)
    print(mean_iu)