def predict(image, mask, root_path, AI_directory_path, model_type="life"):
    """Run PConvUNet inpainting for a single image/mask pair.

    Parameters
    ----------
    image : str
        Image file name; the result is written as ``<stem>result.jpg``.
    mask : str
        Mask file name handed to the dataset wrapper.
    root_path : str
        Dataset root directory passed to ``Places2``.
    AI_directory_path : str
        Checkpoint path loaded via ``load_ckpt``.
    model_type : str
        Unused in this body; kept for interface compatibility with callers.

    Returns
    -------
    str
        The result file name that ``evaluate`` wrote.
    """
    device = torch.device('cuda')  # assumes a CUDA device is available

    size = (256, 256)
    img_transform = transforms.Compose([
        transforms.Resize(size=size),
        transforms.ToTensor(),
        transforms.Normalize(mean=opt.MEAN, std=opt.STD)
    ])
    mask_transform = transforms.Compose(
        [transforms.Resize(size=size),
         transforms.ToTensor()])

    dataset_val = Places2(root_path, image, mask, img_transform,
                          mask_transform)
    model = PConvUNet().to(device)
    load_ckpt(AI_directory_path, [('model', model)])

    model.eval()

    # Fix: compute the output name once instead of duplicating the expression.
    # NOTE: split('.')[0] truncates at the FIRST dot ("a.b.jpg" -> "aresult.jpg");
    # kept as-is to preserve existing behavior.
    result_path = image.split('.')[0] + 'result.jpg'
    evaluate(model, dataset_val, device, result_path)

    return result_path
# Command-line options for validation-set evaluation.
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, default='./data')
parser.add_argument('--snapshot', type=str, default='')
parser.add_argument('--image_size', type=int, default=256)
parser.add_argument('--mask_root', type=str, default='./mask')
args = parser.parse_args()

device = torch.device('cuda')

# Images: resize -> tensor -> normalize; masks: resize -> tensor only.
size = (args.image_size, args.image_size)
img_transform = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor(),
    transforms.Normalize(mean=opt.MEAN, std=opt.STD),
])
mask_transform = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor(),
])

# Validation split of the dataset wrapper.
dataset_val = Places2(args.root, args.mask_root, img_transform, mask_transform,
                      'val')

# Restore snapshot weights, switch to eval mode, write result.jpg.
model = PConvUNet().to(device)
load_ckpt(args.snapshot, [('model', model)])
model.eval()
evaluate(model, dataset_val, device, 'result.jpg')
# NOTE(review): this fragment reuses the `parser` created by a previous
# snippet — it was scraped without its own ArgumentParser construction.
# training options
parser.add_argument('--root', type=str, default='/home/washbee1/celeba-kaggle')
parser.add_argument('--maskroot', type=str, default='mask')
parser.add_argument(
    '--snapshot',
    type=str,
    default='../inpainting-inuse/saves-data-aug-lm/ckpt/5690000.pth')
parser.add_argument('--image_size', type=int, default=256)
args = parser.parse_args()

# Hard-coded CUDA device; assumes a GPU is available.
device = torch.device('cuda')

# Image transform: resize -> tensor -> normalize with dataset statistics.
size = (args.image_size, args.image_size)
img_transform = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor(),
    transforms.Normalize(mean=opt.MEAN, std=opt.STD)
])
# Masks are resized and converted to tensors; no normalization is applied.
mask_transform = transforms.Compose(
    [transforms.Resize(size=size),
     transforms.ToTensor()])

# 'test' split this time (the previous snippet used 'val').
dataset_val = Places2(args.root, args.maskroot, img_transform, mask_transform,
                      'test')

# Load checkpoint weights into the model and run evaluation.
model = PConvUNet().to(device)
load_ckpt(args.snapshot, [('model', model)])

model.eval()
evaluate(model, dataset_val, device, 'result.jpg')
# Exemple #4
# 0  (scrape artifact: example separator and vote count, commented out)
#mask_transform = transforms.Compose(
#    [transforms.Resize(size=size),
#     transforms.ToTensor()])

# NOTE(review): relies on `args` and `device` defined by an earlier snippet
# (expects --image_size, --root, --maskroot and --snapshot options).
size = (args.image_size, args.image_size)

# Image transform for the demo: resize -> tensor -> normalize.
img_tf = transforms.Compose(
    [
        transforms.Resize(size=size),
        transforms.ToTensor(),
        transforms.Normalize(mean=opt.MEAN, std=opt.STD)
    ]
)

# Masks are only resized and converted to tensors (no normalization).
mask_tf = transforms.Compose(
    [transforms.Resize(size=size),
     transforms.ToTensor()])


# 'demo' split of the dataset wrapper.
dataset_val = Places2(args.root, args.maskroot, img_tf, mask_tf, 'demo')

# Load snapshot weights and produce a demo image.
model = PConvUNet().to(device)
load_ckpt(args.snapshot, [('model', model)])

model.eval()
demo(model, dataset_val, device, 'demo.jpg')






parser = argparse.ArgumentParser()
# training options
parser.add_argument('--root', type=str, default='/home/washbee1/data1024x1024-512-temp/data_large/train')
parser.add_argument('--image_size', type=int, default=512)
args = parser.parse_args()

device = torch.device('cuda')

# Fix: the original assigned `size` twice in a row; one assignment suffices.
# NOTE(review): `size` is not used below (the transform has no Resize) —
# kept in case later module-level code reads it.
size = (args.image_size, args.image_size)

# Images are only converted to tensors here (no resize, no normalization).
img_tf = transforms.Compose(
    [
        transforms.ToTensor(),
    ]
)

# 'demo' split with no mask root and no mask transform.
dataset_val = Places2(args.root, None, img_tf, None, 'demo')

# NOTE(review): this `demo` call passes only (dataset, filename), unlike the
# 4-argument demo(model, dataset, device, name) used elsewhere in this file —
# confirm which signature is intended.
demo( dataset_val,  'demo.jpg')




# Create output directories for sample images and checkpoints on first run.
if not os.path.exists(args.save_dir):
    os.makedirs('{:s}/images'.format(args.save_dir))
    os.makedirs('{:s}/ckpt'.format(args.save_dir))

# TensorBoard logging setup.
if not os.path.exists(args.log_dir):
    os.makedirs(args.log_dir)
writer = SummaryWriter(log_dir=args.log_dir)

size = (args.image_size, args.image_size)
#size = (288, 288)
# NOTE(review): this image transform applies Normalize WITHOUT a preceding
# Resize/ToTensor — it assumes the dataset already yields tensors of the
# right size; confirm against the Places2 implementation used here.
img_tf = transforms.Compose(
    [transforms.Normalize(mean=opt.MEAN, std=opt.STD)])
mask_tf = transforms.Compose(
    [transforms.ToTensor()])

dataset_train = Places2(args.root, args.mask_root, img_tf, mask_tf, 'train')
dataset_val = Places2(args.root, args.mask_root, img_tf, mask_tf, 'val')

# InfiniteSampler keeps the training iterator from ever exhausting.
iterator_train = iter(data.DataLoader(
    dataset_train, batch_size=args.batch_size,
    sampler=InfiniteSampler(len(dataset_train)),
    num_workers=args.n_threads))
print(len(dataset_train))
model = PConvUNet().to(device)

# Fine-tuning uses a separate learning rate and sets the model's
# freeze_enc_bn flag (semantics defined in PConvUNet).
if args.finetune:
    lr = args.lr_finetune
    model.freeze_enc_bn = True
else:
    lr = args.lr
# Exemple #7
# 0  (scrape artifact: example separator and vote count, commented out)

if __name__ == '__main__':
    import os

    # Options for cropping faces out of a CelebA-HQ style image tree.
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_size', type=int, default=512)
    parser.add_argument('--save_dir',
                        type=str,
                        default='mask-rect-large-hq-512')
    parser.add_argument(
        '--root',
        type=str,
        default='/home/washbee1/celeba-hq-crop/data1024x1024/data_large/train')
    parser.add_argument("-p",
                        "--shape-predictor",
                        help="path to facial landmark predictor",
                        default="../shape_predictor_5_face_landmarks.dat")

    args = parser.parse_args()

    # Ensure the output directory exists.
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # The 'demo' dataset is used only to enumerate image paths.
    dataset_train = Places2(args.root, None, None, None, 'demo')
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args.shape_predictor)

    # NOTE(review): `predictor` is loaded but never passed to crop_face here,
    # while a sibling snippet calls crop_face(args, path, detector, predictor)
    # — confirm which crop_face signature is intended.
    for path in dataset_train.paths:
        crop_face(args, path, detector)
size = (args.image_size, args.image_size)

# Image pipeline: resize -> tensor -> normalize with dataset statistics.
img_tf = transforms.Compose(
    [
        transforms.Resize(size=size),
        transforms.ToTensor(),
        transforms.Normalize(mean=opt.MEAN, std=opt.STD)
    ]
)

# Masks are resized and converted to tensors only.
mask_tf = transforms.Compose(
    [transforms.Resize(size=size),
     transforms.ToTensor()])

# Separate mask roots for train/val; `targeted=True` presumably switches the
# dataset into a targeted-mask mode — confirm against Places2.
dataset_train = Places2(args.root, args.mask_root_train, img_tf, mask_tf, 'train', targeted = True)
dataset_val = Places2(args.root, args.mask_root_val, img_tf, mask_tf, 'val', targeted = True)

# Endless training iterator over the training set.
iterator_train = iter(data.DataLoader(
    dataset_train, batch_size=args.batch_size,
    sampler=InfiniteSampler(len(dataset_train)),
    num_workers=args.n_threads))
print(len(dataset_train))
model = PConvUNet().to(device)

# Fine-tuning uses a separate learning rate and sets the freeze_enc_bn flag.
if args.finetune:
    lr = args.lr_finetune
    model.freeze_enc_bn = True
else:
    lr = args.lr
# NOTE(review): reuses `parser` from an earlier snippet; the commented-out
# lines below are alternative checkpoint paths kept for reference.
parser.add_argument('--maskroot', type=str, default='demo-prod/demo-masks')
#parser.add_argument('--snapshot', type=str, default='/home/washbee1/PycharmProjects/image_inpainting/targeted-training/saves-targeted-2/ckpt/7210000.pth')
#parser.add_argument('--snapshot', type=str, default='/home/washbee1/PycharmProjects/image_inpainting/targeted-training/saves-targeted-2/ckpt/7250000.pth')
#parser.add_argument('--snapshot', type=str, default='/home/washbee1/PycharmProjects/image_inpainting/targeted-training/saves-targeted-2/ckpt/7645000.pth')
#parser.add_argument('--snapshot', type=str, default='/home/washbee1/PycharmProjects/image_inpainting/targeted-training/saves-targeted-2/ckpt/8795000.pth')#9590000.pth
parser.add_argument('--snapshot', type=str, default='/home/washbee1/PycharmProjects/image_inpainting/targeted-training/saves-targeted-2/ckpt/9590000.pth')#9590000.pth
parser.add_argument('--image_size', type=int, default=256)
parser.add_argument("-p", "--shape-predictor",
                    help="path to facial landmark predictor", default="shape_predictor_5_face_landmarks.dat")

args = parser.parse_args()

# Ensure the mask output directory exists.
if not os.path.exists(args.maskroot):
    os.makedirs(args.maskroot)

# Enumerate demo image paths and build the dlib face detector + landmark
# predictor.
dataset_train = Places2(args.root, None, None, None, 'demo')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args.shape_predictor)

# Crop a face from every image path (this variant passes the predictor,
# unlike the sibling snippet above).
for path in dataset_train.paths:
    crop_face(args, path, detector, predictor)
#############p2

device = torch.device('cuda')

size = (args.image_size, args.image_size)

img_tf = transforms.Compose(
    [
        transforms.Resize(size=size),
        transforms.ToTensor(),
# Exemple #10
# 0  (scrape artifact: example separator and vote count, commented out)
from net import PConvUNet
from util.io import load_ckpt

# Validation options.
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, default='../Data')
parser.add_argument('--snapshot', type=str, default='')
parser.add_argument('--image_size', type=int, default=256)
args = parser.parse_args()

device = torch.device('cuda')

# Build the image/mask preprocessing pipelines.
size = (args.image_size, args.image_size)
img_transform = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor(),
    transforms.Normalize(mean=opt.MEAN, std=opt.STD),
])
mask_transform = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor(),
])

# Validation dataset with masks taken from ../masks.
dataset_val = Places2(args.root, "../masks", img_transform, mask_transform,
                      'val')

# Restore weights, switch to eval mode, and write result.jpg.
model = PConvUNet().to(device)
load_ckpt(args.snapshot, [('model', model)])
model.eval()
evaluate(model, dataset_val, device, 'result.jpg')
# Exemple #11
# 0  (scrape artifact: example separator and vote count, commented out)
import opt
from places2 import Places2
from evaluation import evaluate
from net import PConvUNet
from util.io import load_ckpt

# Options for evaluating on the test image/mask pairs.
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, default='./data')
parser.add_argument('--snapshot', type=str, default='')
parser.add_argument('--image_size', type=int, default=256)
args = parser.parse_args()

device = torch.device('cuda')

# Images: resize -> tensor -> normalize; masks: resize -> tensor only.
size = (args.image_size, args.image_size)
img_transform = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor(),
    transforms.Normalize(mean=opt.MEAN, std=opt.STD),
])
mask_transform = transforms.Compose([
    transforms.Resize(size=size),
    transforms.ToTensor(),
])

dataset_val = Places2(args.root, "./data/test_mask", img_transform,
                      mask_transform, 'test_image_mask')

# Restore checkpoint weights and evaluate.
model = PConvUNet().to(device)
load_ckpt(args.snapshot, [('model', model)])
model.eval()
evaluate(model, dataset_val, device, 'result.jpg')
# Exemple #12
# 0  (scrape artifact: example separator and vote count, commented out)
import opt
from places2 import Places2
from evaluation import evaluate
from net import PConvUNet
from util.io import load_ckpt

# Command-line options for validation-set evaluation.
parser = argparse.ArgumentParser()
# training options
parser.add_argument('--root', type=str, default='./data')
parser.add_argument('--snapshot', type=str, default='')
parser.add_argument('--image_size', type=int, default=256)
args = parser.parse_args()

# Hard-coded CUDA device; assumes a GPU is available.
device = torch.device('cuda')

# Images: resize -> tensor -> normalize; masks: resize -> tensor only.
size = (args.image_size, args.image_size)
img_transform = transforms.Compose(
    [transforms.Resize(size=size), transforms.ToTensor(),
     transforms.Normalize(mean=opt.MEAN, std=opt.STD)])
mask_transform = transforms.Compose(
    [transforms.Resize(size=size), transforms.ToTensor()])

# Validation split with masks read from ./mask.
dataset_val = Places2(args.root,'./mask', img_transform, mask_transform, 'val')

# Load checkpoint weights and run evaluation, writing result.jpg.
model = PConvUNet().to(device)
load_ckpt(args.snapshot, [('model', model)])

model.eval()
evaluate(model, dataset_val, device, 'result.jpg')