def predict(image, mask, root_path, AI_directory_path, model_type="life"):
    # model_type is accepted but not used in this snippet.
    # Fall back to CPU when no GPU is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    size = (256, 256)
    img_transform = transforms.Compose([
        transforms.Resize(size=size),
        transforms.ToTensor(),
        transforms.Normalize(mean=opt.MEAN, std=opt.STD)
    ])
    mask_transform = transforms.Compose([
        transforms.Resize(size=size),
        transforms.ToTensor()
    ])

    dataset_val = Places2(root_path, image, mask, img_transform,
                          mask_transform)
    model = PConvUNet().to(device)
    load_ckpt(AI_directory_path, [('model', model)])

    # Inference mode: freezes BatchNorm statistics and disables dropout.
    model.eval()

    # Build the output filename once and reuse it.
    result_path = image.split('.')[0] + 'result.jpg'
    evaluate(model, dataset_val, device, result_path)

    return result_path
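
For reference, a call to the helper above might look like this; the file names and checkpoint path are hypothetical:

# Hypothetical file names and checkpoint path.
result = predict('photo.jpg', 'photo_mask.jpg',
                 root_path='./data',
                 AI_directory_path='./snapshots/1000000.pth')
print('inpainted result written to', result)

The normalization constants opt.MEAN and opt.STD come from an external opt module that is not shown on this page. A minimal stand-in, assuming the usual ImageNet statistics:

# opt.py -- a plausible stand-in, assuming ImageNet statistics.
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]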
parser = argparse.ArgumentParser()
# test options
parser.add_argument('--root', type=str, default='./data')
parser.add_argument('--snapshot', type=str, default='')
parser.add_argument('--image_size', type=int, default=256)
parser.add_argument('--mask_root', type=str, default='./mask')

args = parser.parse_args()

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

size = (args.image_size, args.image_size)
img_transform = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor(),
    transforms.Normalize(mean=opt.MEAN, std=opt.STD)
])
mask_transform = transforms.Compose(
    [transforms.Resize(size=size),
     transforms.ToTensor()])

dataset_val = Places2(args.root, args.mask_root, img_transform, mask_transform,
                      'val')

model = PConvUNet().to(device)
load_ckpt(args.snapshot, [('model', model)])

model.eval()
evaluate(model, dataset_val, device, 'result.jpg')
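
evaluate itself is not shown on this page. A minimal sketch of a compatible implementation, assuming the dataset yields (image, mask, ground-truth) triples, the model returns (output, updated_mask), and opt provides the normalization constants used above:

import torch
from torchvision.utils import make_grid, save_image

def unnormalize(x):
    # Invert transforms.Normalize: per-channel x * std + mean (NCHW).
    mean = torch.tensor(opt.MEAN).view(1, 3, 1, 1)
    std = torch.tensor(opt.STD).view(1, 3, 1, 1)
    return x * std + mean

def evaluate(model, dataset, device, filename):
    # Batch the first few samples; each item is (image, mask, gt).
    image, mask, gt = [
        torch.stack(t) for t in zip(*[dataset[i] for i in range(8)])]
    with torch.no_grad():
        output, _ = model(image.to(device), mask.to(device))
    output = output.cpu()
    # Keep known pixels from the input; take the prediction in the holes.
    output_comp = mask * image + (1 - mask) * output
    grid = make_grid(torch.cat(
        (unnormalize(image), mask, unnormalize(output_comp),
         unnormalize(gt)), dim=0))
    save_image(grid, filename)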
Example #3
# (snippet truncated above: this is the tail of a list comprehension that
# builds dataset_train and dataset_val for train_flag in [True, False])
              auto_resize=not random_masks) for train_flag in [True, False]
]

print('train size:', len(dataset_train))
print('val size:', len(dataset_val))
print('dataset shapes:')
for tensor in dataset_train[0]:
    print(tensor.shape)

iterator_train = iter(
    data.DataLoader(dataset_train,
                    batch_size=args.batch_size,
                    sampler=InfiniteSampler(len(dataset_train)),
                    num_workers=args.n_threads))
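
InfiniteSampler is not a torch built-in. A minimal sketch of a sampler compatible with this usage: it yields shuffled indices forever, so next() on the iterator never raises StopIteration:

import numpy as np
from torch.utils import data

class InfiniteSampler(data.sampler.Sampler):
    def __init__(self, num_samples):
        self.num_samples = num_samples

    def __iter__(self):
        # Loop over a random permutation, reshuffling after each pass.
        order = np.random.permutation(self.num_samples)
        i = 0
        while True:
            yield order[i]
            i += 1
            if i >= self.num_samples:
                np.random.shuffle(order)
                i = 0

    def __len__(self):
        # Effectively infinite; the DataLoader never exhausts it.
        return 2 ** 31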
print(len(dataset_train))
model = PConvUNet(input_guides=1 if use_depth else 0).to(device)

if args.finetune:
    # Fine-tuning: lower learning rate and freeze encoder BatchNorm stats.
    lr = args.lr_finetune
    model.freeze_enc_bn = True
else:
    lr = args.lr

start_iter = 0
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=lr)
criterion = InpaintingLoss(VGG16FeatureExtractor()).to(device)

if args.resume:
    start_iter = load_ckpt(args.resume, [('model', model)],
                           [('optimizer', optimizer)])
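
For context, one iteration of the training loop these snippets lead up to might look like the following. The loss keys and weights are assumptions modeled on the partial-convolution paper (Liu et al.), and criterion is assumed to return a dict of individual terms:

# Assumed per-term weights for the combined inpainting loss.
LAMBDA_DICT = {'valid': 1.0, 'hole': 6.0, 'tv': 0.1,
               'prc': 0.05, 'style': 120.0}

model.train()
image, mask, gt = [x.to(device) for x in next(iterator_train)]
output, _ = model(image, mask)

loss_dict = criterion(image, mask, output, gt)
loss = sum(LAMBDA_DICT[key] * term for key, term in loss_dict.items())

optimizer.zero_grad()
loss.backward()
optimizer.step()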
Example #4
# total_white = total_white / (len(dataset_train) * 64 * 64)

iterator_train = iter(
    data.DataLoader(dataset_train,
                    batch_size=args.batch_size,
                    sampler=InfiniteSampler(len(dataset_train)),
                    num_workers=args.n_threads))

iterator_val = iter(
    data.DataLoader(dataset_val,
                    batch_size=args.batch_size,
                    sampler=InfiniteSampler(len(dataset_val)),
                    num_workers=args.n_threads))

print(len(dataset_train))
model = PConvUNet(layer_size=3).to(device)

if args.finetune:
    lr = args.lr_finetune
    model.freeze_enc_bn = True
else:
    lr = args.lr

start_iter = 0
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=lr)
criterion = InpaintingLoss(VGG16FeatureExtractor()).to(device)

if args.resume:
    start_iter = load_ckpt(args.resume, [('model', model)],
                           [('optimizer', optimizer)])
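
Both resume blocks pass (name, module) pairs to load_ckpt and expect the starting iteration back. A minimal sketch compatible with that usage, assuming the checkpoint is a dict keyed by those names plus an 'n_iter' counter:

import torch

def load_ckpt(ckpt_name, models, optimizers=None):
    ckpt = torch.load(ckpt_name, map_location='cpu')
    for prefix, model in models:
        model.load_state_dict(ckpt[prefix])
    if optimizers is not None:
        for prefix, optimizer in optimizers:
            optimizer.load_state_dict(ckpt[prefix])
    # Resume from the recorded iteration, or from scratch if absent.
    return ckpt.get('n_iter', 0)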
Example #5
size = (args.image_size, args.image_size)
img_tf = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor(),
    transforms.Normalize(mean=opt.MEAN, std=opt.STD)
])
mask_tf = transforms.Compose(
    [transforms.Resize(size=size),
     transforms.ToTensor()])

dataset_train = Places2(args.root, args.mask_root, img_tf, mask_tf, 'train')
dataset_val = Places2(args.root, args.mask_root, img_tf, mask_tf, 'val')
# dataset_train = DIV2K(args.root, args.mask_root, img_tf, mask_tf, 'train')
# dataset_val = DIV2K(args.root, args.mask_root, img_tf, mask_tf, 'val')

iterator_train = iter(
    data.DataLoader(dataset_train,
                    batch_size=args.batch_size,
                    sampler=InfiniteSampler(len(dataset_train)),
                    num_workers=args.n_threads))
print(len(dataset_train))

model = PConvUNet()
if torch.cuda.device_count() > 1:
    print("Use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)

model = model.to(device)

if args.finetune:
    lr = args.lr_finetune
    # Reach through the DataParallel wrapper (when present) so the flag
    # lands on the PConvUNet module itself rather than the wrapper.
    net = model.module if isinstance(model, nn.DataParallel) else model
    net.freeze_enc_bn = True
else:
    lr = args.lr

start_iter = 0
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=lr)
Example #6

print("set dataset")
dataset_train = Places2(args.root, args.mask_root, img_tf, mask_tf, 'train')
dataset_val = Places2(args.root, args.mask_root, img_tf, mask_tf, 'val')

print("prepare data iterator")
iterator_train = iter(
    data.DataLoader(dataset_train,
                    batch_size=args.batch_size,
                    sampler=InfiniteSampler(len(dataset_train)),
                    num_workers=args.n_threads))
print(len(dataset_train))

print("set model")
# model = PConvUNet().to(device)
model = PConvUNet().to("cuda")
# device_ids=None means all visible GPUs are used
model = torch.nn.DataParallel(model, device_ids=None)

if args.finetune:
    lr = args.lr_finetune
    # model is wrapped in DataParallel above, so set the flag on .module.
    model.module.freeze_enc_bn = True
else:
    lr = args.lr

start_iter = 0
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=lr)
criterion = InpaintingLoss(VGG16FeatureExtractor()).to("cuda")
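
Since this last snippet wraps the model in DataParallel, note that saving the wrapper directly prefixes every state-dict key with 'module.'. A hedged way to checkpoint (the 'snapshot.pth' name is hypothetical):

# Unwrap DataParallel so state-dict keys stay unprefixed.
net = model.module if isinstance(model, torch.nn.DataParallel) else model
torch.save({'model': net.state_dict(),
            'optimizer': optimizer.state_dict(),
            'n_iter': start_iter}, 'snapshot.pth')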