def predict(image, mask, root_path, AI_directory_path, model_type="life"):
    # Inpaint a single image/mask pair with a pretrained PConvUNet and save the result.
    # NOTE: model_type is not used anywhere in this snippet.
    device = torch.device('cuda')

    size = (256, 256)
    img_transform = transforms.Compose([
        transforms.Resize(size=size),
        transforms.ToTensor(),
        transforms.Normalize(mean=opt.MEAN, std=opt.STD)
    ])
    mask_transform = transforms.Compose([
        transforms.Resize(size=size),
        transforms.ToTensor()
    ])

    dataset_val = Places2(root_path, image, mask, img_transform,
                          mask_transform)
    model = PConvUNet().to(device)
    load_ckpt(AI_directory_path, [('model', model)])

    model.eval()

    # Build the output file name once instead of recomputing it for the return value.
    result_path = image.split('.')[0] + 'result.jpg'
    evaluate(model, dataset_val, device, result_path)

    return result_path
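
A minimal usage sketch for predict. The file names and the checkpoint path below are placeholders, and the call assumes the surrounding module already imports torch, torchvision.transforms, and the project's Places2, PConvUNet, load_ckpt, evaluate, and opt, as the snippets on this page do.

# Hypothetical call: every path below is a placeholder, not taken from the snippet above.
result = predict(image='damaged.jpg',
                 mask='damaged_mask.jpg',
                 root_path='./data',
                 AI_directory_path='./snapshots/model.pth')
print('inpainted image written to', result)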
parser = argparse.ArgumentParser()
# evaluation options
parser.add_argument('--root', type=str, default='./data')
parser.add_argument('--snapshot', type=str, default='')
parser.add_argument('--image_size', type=int, default=256)
parser.add_argument('--mask_root', type=str, default='./mask')

args = parser.parse_args()

device = torch.device('cuda')

size = (args.image_size, args.image_size)
img_transform = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor(),
    transforms.Normalize(mean=opt.MEAN, std=opt.STD)
])
mask_transform = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor()
])

dataset_val = Places2(args.root, args.mask_root, img_transform, mask_transform,
                      'val')

model = PConvUNet().to(device)
load_ckpt(args.snapshot, [('model', model)])

model.eval()
evaluate(model, dataset_val, device, 'result.jpg')
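
Because the input images are normalized with opt.MEAN and opt.STD before being fed to the network, the model output has to be un-normalized before it can be saved as an ordinary image; evaluate presumably handles this internally. A minimal sketch of the inverse transform, with the helper name unnormalize being an assumption rather than something shown on this page:

import torch

def unnormalize(x, mean=opt.MEAN, std=opt.STD):
    # Invert transforms.Normalize: x_norm = (x - mean) / std, so x = x_norm * std + mean.
    x = x.transpose(1, 3)                      # NCHW -> NHWC so the stats broadcast over channels
    x = x * torch.Tensor(std) + torch.Tensor(mean)
    return x.transpose(1, 3)                   # back to NCHW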
Example No. 3
# (snippet truncated above: dataset_train and dataset_val are built here in one
#  list comprehension over train_flag; compare the DDDataset call in Example No. 7)
              auto_resize=not random_masks) for train_flag in [True, False]
]

print('train size:', len(dataset_train))
print('val size:', len(dataset_val))
print('dataset shapes:')
for tensor in dataset_train[0]:
    print(tensor.shape)

iterator_train = iter(
    data.DataLoader(dataset_train,
                    batch_size=args.batch_size,
                    sampler=InfiniteSampler(len(dataset_train)),
                    num_workers=args.n_threads))
print(len(dataset_train))
model = PConvUNet(input_guides=1 if use_depth else 0).to(device)

if args.finetune:
    lr = args.lr_finetune
    model.freeze_enc_bn = True
else:
    lr = args.lr

start_iter = 0
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=lr)
criterion = InpaintingLoss(VGG16FeatureExtractor()).to(device)

if args.resume:
    start_iter = load_ckpt(args.resume, [('model', model)],
                           [('optimizer', optimizer)])
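
All of the training snippets on this page feed their DataLoader with an InfiniteSampler, which is not defined anywhere on the page. The sketch below is one way such a sampler can be written, assuming it only has to yield shuffled indices forever; it is an illustration, not necessarily the project's implementation.

import numpy as np
from torch.utils import data

class InfiniteSampler(data.sampler.Sampler):
    """Yield dataset indices endlessly, reshuffling after every full pass."""

    def __init__(self, num_samples):
        self.num_samples = num_samples

    def __iter__(self):
        order = np.random.permutation(self.num_samples)
        i = 0
        while True:
            yield order[i]
            i += 1
            if i >= self.num_samples:
                order = np.random.permutation(self.num_samples)
                i = 0

    def __len__(self):
        # The DataLoader never exhausts this sampler; report a large nominal length.
        return 2 ** 31

With such a sampler, next(iterator_train) keeps producing batches without ever raising StopIteration, which is what the open-ended training loops rely on.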
Example No. 4
#total_white = total_white /(len(dataset_train) * 64 *64)

iterator_train = iter(
    data.DataLoader(dataset_train,
                    batch_size=args.batch_size,
                    sampler=InfiniteSampler(len(dataset_train)),
                    num_workers=args.n_threads))

iterator_val = iter(
    data.DataLoader(dataset_val,
                    batch_size=args.batch_size,
                    sampler=InfiniteSampler(len(dataset_val)),
                    num_workers=args.n_threads))

print(len(dataset_train))
model = PConvUNet(layer_size=3).to(device)

if args.finetune:
    lr = args.lr_finetune
    model.freeze_enc_bn = True
else:
    lr = args.lr

start_iter = 0
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=lr)
criterion = InpaintingLoss(VGG16FeatureExtractor()).to(device)

if args.resume:
    start_iter = load_ckpt(args.resume, [('model', model)],
                           [('optimizer', optimizer)])
Example No. 5
size = (args.image_size, args.image_size)
#size = (288, 288)
img_tf = transforms.Compose(
    [transforms.Normalize(mean=opt.MEAN, std=opt.STD)])
mask_tf = transforms.Compose(
    [transforms.ToTensor()])

dataset_train = Places2(args.root, args.mask_root, img_tf, mask_tf, 'train')
dataset_val = Places2(args.root, args.mask_root, img_tf, mask_tf, 'val')

iterator_train = iter(data.DataLoader(
    dataset_train, batch_size=args.batch_size,
    sampler=InfiniteSampler(len(dataset_train)),
    num_workers=args.n_threads))
print(len(dataset_train))
model = PConvUNet().to(device)

if args.finetune:
    lr = args.lr_finetune
    model.freeze_enc_bn = True
else:
    lr = args.lr

start_iter = 0
optimizer = torch.optim.Adam(
    filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
criterion = InpaintingLoss(VGG16FeatureExtractor()).to(device)

if args.resume:
    start_iter = load_ckpt(
        args.resume, [('model', model)], [('optimizer', optimizer)])
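
Every training snippet on this page stops at the checkpoint-resume call, just before the optimization loop. The sketch below shows the kind of loop that typically follows, assuming the dataset yields (image, mask, gt) triples, that the model's forward pass takes the image and mask and returns the prediction plus an updated mask, and that criterion returns a dictionary of named loss terms; max_iter and the loss_weights values are illustrative assumptions, not values from this page.

# Sketch only: max_iter and loss_weights are assumed, and so is the forward signature.
max_iter = 1000000
loss_weights = {'valid': 1.0, 'hole': 6.0, 'prc': 0.05, 'style': 120.0, 'tv': 0.1}

for i in range(start_iter, max_iter):
    model.train()
    image, mask, gt = [x.to(device) for x in next(iterator_train)]

    output, _ = model(image, mask)
    loss_dict = criterion(image, mask, output, gt)

    # Weighted sum of the individual inpainting loss terms.
    loss = sum(loss_weights[key] * value for key, value in loss_dict.items())

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

A periodic checkpoint save and a validation pass over dataset_val would normally sit inside this loop as well.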
Example No. 6
# (snippet truncated above: these two lines are the tail of a transforms.Compose,
#  apparently the mask transform; the image transform defined before it is not shown)
    [transforms.Resize(size=size),
     transforms.ToTensor()])

#dataset_train = Places2(args.root, args.mask_root, img_tf, mask_tf, 'train')
#dataset_val = Places2(args.root, args.mask_root, img_tf, mask_tf, 'val')

dataset_train = Dataset(args.root, args.mask_root, img_tf, mask_tf, True)
dataset_val = Dataset(args.root_val, args.mask_root_val, img_tf, mask_tf, True)

iterator_train = iter(
    data.DataLoader(dataset_train,
                    batch_size=args.batch_size,
                    sampler=InfiniteSampler(len(dataset_train)),
                    num_workers=args.n_threads))
print(len(dataset_train))
model = PConvUNet().to(device)

if args.finetune:
    lr = args.lr_finetune
    model.freeze_enc_bn = True
else:
    lr = args.lr

start_iter = 0
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=lr)
criterion = InpaintingLoss(VGG16FeatureExtractor()).to(device)

if args.resume:
    start_iter = load_ckpt(args.resume, [('model', model)],
                           [('optimizer', optimizer)])
Example No. 7
device = torch.device(args.device)

size = (args.image_height, args.image_width)

masks = args.mask_root if random_masks else [(args.mask_root,
                                              '_objectmask.png')]

dataset_val = DDDataset(args.root, (args.image_height, args.image_width),
                        insuffixes=[args.suffix],
                        masks=masks,
                        train=False,
                        auto_resize=not random_masks,
                        random_masks=random_masks,
                        depth_map=(args.depth_root,
                                   '_WO.exr') if use_depth else None)

model = PConvUNet(input_guides=1 if use_depth else 0).to(device)
load_ckpt(args.snapshot, [('model', model)])

model.eval()
evaluate(model,
         dataset_val,
         device,
         args.out_file,
         gamma=args.gamma,
         exposure=args.exposure,
         black=args.black_level,
         white=args.white_level,
         random=random_images)
Example No. 8
import torch
from torchsummary import summary

from net import PConvUNet

device = torch.device('cpu')

model = PConvUNet().to(device)
print(model)
# summary(model, input_size=(3, 256, 256), device='cpu')
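
The summary call is left commented out, most likely because PConvUNet.forward expects two inputs (an image and a mask) while torchsummary.summary feeds the model a single dummy tensor. A hedged alternative for checking output shapes is a plain dummy forward pass; the two-argument forward signature and the returned (output, output_mask) pair are assumptions based on how the model is used for inpainting, not something shown on this page.

# Dummy forward pass; assumes forward(image, mask) -> (output, output_mask).
dummy_image = torch.randn(1, 3, 256, 256)
dummy_mask = torch.ones(1, 3, 256, 256)   # all-ones mask means nothing is masked out

with torch.no_grad():
    output, output_mask = model(dummy_image, dummy_mask)

print('output shape:', output.shape)
print('output mask shape:', output_mask.shape)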