def predict(image, mask, root_path, AI_directory_path, model_type="life"):
    """Run partial-convolution inpainting on a single image/mask pair.

    Args:
        image: Image filename passed to ``Places2``; the result filename is
            derived from it (text before the first '.') + ``'result.jpg'``.
        mask: Mask filename selecting the region to inpaint.
        root_path: Dataset root directory forwarded to ``Places2``.
        AI_directory_path: Checkpoint path understood by ``load_ckpt``.
        model_type: Unused here; kept for interface compatibility.

    Returns:
        The filename of the generated result image.
    """
    # Fall back to CPU so the function also runs on machines without CUDA
    # (the original hard-coded 'cuda' and crashed on CPU-only hosts).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    size = (256, 256)
    # Images get dataset-statistic normalization; masks only resize + tensor.
    img_transform = transforms.Compose([
        transforms.Resize(size=size),
        transforms.ToTensor(),
        transforms.Normalize(mean=opt.MEAN, std=opt.STD)
    ])
    mask_transform = transforms.Compose(
        [transforms.Resize(size=size),
         transforms.ToTensor()])

    dataset_val = Places2(root_path, image, mask, img_transform,
                          mask_transform)
    model = PConvUNet().to(device)
    load_ckpt(AI_directory_path, [('model', model)])

    # Inference only: freeze dropout / batch-norm behavior.
    model.eval()

    # Compute the output name once instead of repeating the split expression.
    result_name = image.split('.')[0] + 'result.jpg'
    evaluate(model, dataset_val, device, result_name)

    return result_name
# ===== Example #2 (score: 0) =====
 def initialize_model(self, path=None, train=True):
     """Build the RFRNet generator and optimizer, optionally resuming state.

     Args:
         path: Optional checkpoint path understood by ``load_ckpt``.
         train: When True, also build the ``VGG16FeatureExtractor`` loss
             network and re-create the optimizer after a successful resume.
     """
     self.G = RFRNet()
     self.optm_G = optim.Adam(self.G.parameters(), lr=2e-4)
     self.print_networks(False)
     if train:
         self.lossNet = VGG16FeatureExtractor()
     try:
         start_iter = load_ckpt(path, [('generator', self.G)],
                                [('optimizer_G', self.optm_G)])
         if train:
             # Fresh optimizer over the restored weights; discards the
             # loaded optimizer state on purpose.
             self.optm_G = optim.Adam(self.G.parameters(), lr=2e-4)
             print('Model Initialized, iter: ', start_iter)
             self.iter = start_iter
     except Exception:
         # Narrowed from a bare ``except:``, which also swallowed
         # KeyboardInterrupt/SystemExit. Any load failure means "no
         # checkpoint": start from iteration 0.
         print('No trained model, from start')
         self.iter = 0
parser = argparse.ArgumentParser()
# training options
parser.add_argument('--root', type=str, default='./data')
parser.add_argument('--snapshot', type=str, default='')
parser.add_argument('--image_size', type=int, default=256)
parser.add_argument('--mask_root', type=str, default='./mask')

args = parser.parse_args()

# Fall back to CPU so the script also runs on machines without CUDA
# (the original hard-coded 'cuda' and crashed on CPU-only hosts).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

size = (args.image_size, args.image_size)
# Images get dataset-statistic normalization (``opt.MEAN``/``opt.STD``);
# masks only need resizing and tensor conversion.
img_transform = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor(),
    transforms.Normalize(mean=opt.MEAN, std=opt.STD)
])
mask_transform = transforms.Compose(
    [transforms.Resize(size=size),
     transforms.ToTensor()])

dataset_val = Places2(args.root, args.mask_root, img_transform, mask_transform,
                      'val')

model = PConvUNet().to(device)
load_ckpt(args.snapshot, [('model', model)])

# Inference only: freeze dropout / batch-norm behavior before evaluating.
model.eval()
evaluate(model, dataset_val, device, 'result.jpg')
# ===== Example #4 (score: 0) =====
# Build the partial-convolution U-Net; with depth guidance enabled the model
# takes one extra guide channel.  NOTE(review): ``use_depth``, ``device`` and
# ``args`` are defined earlier in the file (not visible in this chunk).
model = PConvUNet(input_guides=1 if use_depth else 0).to(device)

# Fine-tuning freezes the encoder batch-norm and uses a dedicated LR.
if args.finetune:
    lr = args.lr_finetune
    model.freeze_enc_bn = True
else:
    lr = args.lr

start_iter = 0
# Only optimize parameters that still require gradients (some may be frozen
# by the fine-tune branch above).
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=lr)
criterion = InpaintingLoss(VGG16FeatureExtractor()).to(device)

if args.resume:
    # Restore model + optimizer state, then force the current ``lr`` onto
    # every param group so resuming can override the checkpointed rate.
    start_iter = load_ckpt(args.resume, [('model', model)],
                           [('optimizer', optimizer)])
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    print('Starting from iter ', start_iter)

# Training loop (continues past this chunk; the loss is consumed below).
for i in tqdm(range(start_iter, args.max_iter)):
    model.train()

    image, mask, gt = [x.to(device) for x in next(iterator_train)]
    if args.mask_root is not None:
        # Assumes the loader packs a guide as a 4th image channel — TODO confirm.
        guide = image[:, 3:4, :, :]
        image = image[:, 0:3, :, :]
        output, _ = model(image, mask, guide)
    else:
        output, _ = model(image, mask)
    loss_dict = criterion(image, mask, output, gt)
# ===== Example #5 (score: 0) =====
                    type=str,
                    default='../posewarp/data/hunter_256.jpeg')
parser.add_argument('--maskPath',
                    type=str,
                    default='../posewarp/data/hunter_256_IUV.npy')
parser.add_argument('--modelPath', type=str, default='./pretrainedModel.pth')
parser.add_argument('--image_size', type=int, default=256)
args = parser.parse_args()

# Fall back to CPU so the demo also runs on machines without CUDA
# (the original hard-coded 'cuda' and crashed on CPU-only hosts).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

size = (args.image_size, args.image_size)
# Images get dataset-statistic normalization (``opt.MEAN``/``opt.STD``);
# masks only need resizing and tensor conversion.
img_transform = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor(),
    transforms.Normalize(mean=opt.MEAN, std=opt.STD)
])
mask_transform = transforms.Compose(
    [transforms.Resize(size=size),
     transforms.ToTensor()])

dataset_val = customData(args.imPath, args.maskPath, img_transform,
                         mask_transform, 'val')

# Load weights into the CPU-resident model first, then move it to the device.
model = PConvUNet()
load_ckpt(args.modelPath, [('model', model)])
model.to(device)

# Inference only: freeze dropout / batch-norm behavior before evaluating.
model.eval()
evaluateCustom(model, dataset_val, device, 'result.jpg')