Example #1
def create_transforms(relax_crop, zero_crop):
    # Build the shared transform pieces: a crop/resize head and a tensor-conversion tail
    first = [
        tr.CropFromMask(crop_elems=('image', 'gt'),
                        relax=relax_crop,
                        zero_pad=zero_crop),
        tr.FixedResize(resolutions={
            'crop_image': (512, 512),
            'crop_gt': (512, 512)
        })
    ]
    second = [
        tr.ToImage(norm_elem='extreme_points'),
        tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
        tr.ToTensor()
    ]
    train_tf = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.ScaleNRotate(rots=(-20, 20), scales=(.75, 1.25)), *first,
        tr.ExtremePoints(sigma=10, pert=5, elem='crop_gt'), *second
    ])
    test_tf = transforms.Compose(
        [*first,
         tr.ExtremePoints(sigma=10, pert=0, elem='crop_gt'), *second])
    return train_tf, test_tf
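A possible way to wire these pipelines into loaders, mirroring the other examples on this page and assuming the same pascal/tr imports; the batch size and worker count are illustrative assumptions:

from torch.utils.data import DataLoader

train_tf, test_tf = create_transforms(relax_crop=50, zero_crop=True)
voc_train = pascal.VOCSegmentation(split='train', transform=train_tf)
voc_val = pascal.VOCSegmentation(split='val', transform=test_tf)
trainloader = DataLoader(voc_train, batch_size=4, shuffle=True, num_workers=2)
testloader = DataLoader(voc_val, batch_size=1, shuffle=False, num_workers=2)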
Example #2
def eval_model(net, save_dir, batch_size=10):
    # Setting parameters
    relax_crop = 50  # Enlarge the bounding box by relax_crop pixels
    zero_pad_crop = True  # Insert zero padding when cropping the image

    net.eval()
    composed_transforms_ts = transforms.Compose([
        tr.CropFromMask(crop_elems=('image', 'gt'),
                        relax=relax_crop,
                        zero_pad=zero_pad_crop),
        tr.FixedResize(resolutions={
            'gt': None,
            'crop_image': (512, 512),
            'crop_gt': (512, 512)
        }),
        tr.ExtremePoints(sigma=10, pert=0, elem='crop_gt'),
        tr.ToImage(norm_elem='extreme_points'),
        tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
        tr.ToTensor()
    ])
    db_test = pascal.VOCSegmentation(split='val',
                                     transform=composed_transforms_ts,
                                     retname=True)
    testloader = DataLoader(db_test,
                            batch_size=1,  # per-image evaluation; the batch_size argument above is not used here
                            shuffle=False,
                            num_workers=2)

    save_dir.mkdir(exist_ok=True)

    with torch.no_grad():
        test(net, testloader, save_dir)
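A hypothetical call site for eval_model, with the network built as in Example #6 below; the checkpoint path is a placeholder and the CPU map_location is an assumption:

from pathlib import Path

net = resnet.resnet101(1, pretrained=True, nInputChannels=4, classifier='psp')
net.load_state_dict(torch.load('path/to/dextr_pascal.pth',          # placeholder path
                               map_location=lambda storage, loc: storage))
net = net.cuda()
eval_model(net, save_dir=Path('results'), batch_size=1)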
Example #3
if resume_epoch != nEpochs:
    # Logging into TensorBoard (the writer is left commented out here)
    log_dir = os.path.join(save_dir, 'models', datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
    # writer = SummaryWriter(log_dir=log_dir)

    # Use the following optimizer
    optimizer = optim.SGD(train_params, lr=p['lr'], momentum=p['momentum'], weight_decay=p['wd'])
    p['optimizer'] = str(optimizer)

    # Preparation of the data loaders
    composed_transforms_tr = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.ScaleNRotate(rots=(-20, 20), scales=(.75, 1.25)),
        tr.CropFromMask(crop_elems=('image', 'gt'), relax=relax_crop, zero_pad=zero_pad_crop),
        tr.FixedResize(resolutions={'crop_image': (512, 512), 'crop_gt': (512, 512)}),
        tr.ExtremePoints(sigma=10, pert=5, elem='crop_gt'),
        tr.ToImage(norm_elem='extreme_points'),
        tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
        tr.ToTensor()])
    composed_transforms_ts = transforms.Compose([
        tr.CropFromMask(crop_elems=('image', 'gt'), relax=relax_crop, zero_pad=zero_pad_crop),
        tr.FixedResize(resolutions={'crop_image': (512, 512), 'crop_gt': (512, 512)}),
        tr.ExtremePoints(sigma=10, pert=0, elem='crop_gt'),
        tr.ToImage(norm_elem='extreme_points'),
        tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
        tr.ToTensor()])

    voc_train = pascal.VOCSegmentation(split='train', transform=composed_transforms_tr)
    voc_val = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts)

    if use_sbd:
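The excerpt breaks off at the optional SBD branch, which presumably extends the training set with SBD data. Setting that branch aside, the loaders would typically be built from the two datasets along these lines; the batch sizes and worker counts are assumptions:

trainloader = DataLoader(voc_train, batch_size=5, shuffle=True, num_workers=2)   # batch size is an assumption
testloader = DataLoader(voc_val, batch_size=1, shuffle=False, num_workers=2)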
Example #4
    p['optimizer'] = str(optimizer)

    # Preparation of the data loaders
    composed_transforms_tr = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.ScaleNRotate(rots=(-20, 20), scales=(.75, 1.25)),
        tr.CropFromMask(crop_elems=('image', 'gt'),
                        relax=relax_crop,
                        zero_pad=zero_pad_crop),
        tr.FixedResize(resolutions={
            'crop_image': (512, 512),
            'crop_gt': (512, 512)
        }),
        tr.ExtremePoints(sigma=10,
                         pert=30,
                         elem='crop_gt',
                         num_pts=args.num_pts,
                         type=args.point_type,
                         vis=bool(args.debug)),
        tr.ToImage(norm_elem='extreme_points'),
        tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
        tr.ToTensor()
    ])
    composed_transforms_ts = transforms.Compose([
        tr.CropFromMask(crop_elems=('image', 'gt'),
                        relax=relax_crop,
                        zero_pad=zero_pad_crop),
        tr.FixedResize(resolutions={
            'crop_image': (512, 512),
            'crop_gt': (512, 512)
        }),
        tr.ExtremePoints(sigma=10,
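This snippet reads several attributes from an args namespace; below is a minimal argparse sketch covering just those flags (the flag names match the snippet, the defaults are assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--num_pts', type=int, default=4,
                    help='number of points fed to tr.ExtremePoints')
parser.add_argument('--point_type', type=str, default='extreme',
                    help="point sampling strategy, e.g. 'polygon' as in Example #6")
parser.add_argument('--debug', action='store_true',
                    help='visualise the sampled points')
args = parser.parse_args()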
Example #5
    net.to(device)
    net.eval()

    # Setup data transformations
    composed_transforms = [
        tr.IdentityTransform(tr_elems=['gt'], prefix='ori_'),
        tr.CropFromMask(crop_elems=['image', 'gt'],
                        relax=cfg['relax_crop'],
                        zero_pad=cfg['zero_pad_crop'],
                        adaptive_relax=cfg['adaptive_relax'],
                        prefix=''),
        tr.Resize(resize_elems=['image', 'gt', 'void_pixels'],
                  min_size=cfg['min_size'],
                  max_size=cfg['max_size']),
        tr.ComputeImageGradient(elem='image'),
        tr.ExtremePoints(sigma=10, pert=0, elem='gt'),
        tr.GaussianTransform(tr_elems=['extreme_points'],
                             mask_elem='gt',
                             sigma=10,
                             tr_name='points'),
        tr.FixedResizePoints(
            resolutions={'extreme_points': (cfg['lr_size'], cfg['lr_size'])},
            mask_elem='gt',
            prefix='lr_'),
        tr.FixedResize(resolutions={
            'image': (cfg['lr_size'], cfg['lr_size']),
            'gt': (cfg['lr_size'], cfg['lr_size']),
            'void_pixels': (cfg['lr_size'], cfg['lr_size'])
        },
                       prefix='lr_'),
        tr.GaussianTransform(tr_elems=['lr_extreme_points'],
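The pipeline in this excerpt is driven by a cfg dictionary; below is a sample covering just the keys it references. The values are illustrative assumptions, except relax_crop and zero_pad_crop, which match the settings used elsewhere on this page:

cfg = {
    'relax_crop': 50,         # enlarge the bounding box by this many pixels
    'zero_pad_crop': True,    # zero-pad when the crop extends past the image
    'adaptive_relax': True,   # assumed: scale the relax margin with object size
    'min_size': 512,          # assumed resize bounds
    'max_size': 1000,
    'lr_size': 64,            # assumed low-resolution branch size
}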
Example #6
p = OrderedDict()  # Parameters to include in report
classifier = 'psp'  # Head classifier to use
useTest = 1  # Evaluate on the test set while training (1 = yes)
nTestInterval = 10  # Run on test set every nTestInterval epochs
snapshot = 20  # Store a model every snapshot epochs
relax_crop = 50  # Enlarge the bounding box by relax_crop pixels
nInputChannels = 4  # Number of input channels (RGB + heatmap of extreme points)
zero_pad_crop = True  # Insert zero padding when cropping the image

composed_transforms_ts = transforms.Compose([
    tr.CropFromMask(crop_elems=('image', 'gt'), relax=relax_crop, zero_pad=zero_pad_crop),
    tr.FixedResize(resolutions={'gt': None, 'crop_image': (512, 512), 'crop_gt': (512, 512)}),
    tr.ExtremePoints(sigma=10, pert=30, elem='crop_gt', num_pts=50, type='polygon', vis=False),
    tr.ToImage(norm_elem='extreme_points'),
    tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
    tr.ToTensor()])
db_test = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts, retname=True)
testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=8)

modelName = 'dextr_pascal'
net = resnet.resnet101(1, pretrained=True, nInputChannels=nInputChannels, classifier=classifier)

net = torch.nn.DataParallel(net.cuda())

# net = net.cuda()
print("Initializing weights from: {}".format(
        args.model_path))
net.load_state_dict(
    torch.load(args.model_path,
               map_location=lambda storage, loc: storage))  # CPU-safe map_location; this completion is an assumption
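Once the weights are loaded, the evaluation pass typically looks something like the sketch below; the 'concat' key produced by tr.ConcatInputs and the CUDA device handling are assumptions about this project's conventions:

net.eval()
with torch.no_grad():
    for sample_batched in testloader:
        inputs = sample_batched['concat'].float().cuda()  # 4 channels: RGB + extreme-point heatmap
        outputs = net(inputs)
        # ...post-process `outputs` and write the predicted masks to disk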