Example #1
class LinearStyleTransfer(nn.Module):
    def __init__(self, root):

        super(LinearStyleTransfer, self).__init__()

        self.vgg = encoder4()
        self.dec = decoder4()
        self.matrix = MulLayer("r41")

        self.vgg.load_state_dict(
            torch.load(root + "python_package/models/vgg_r41.pth",
                       map_location="cpu"))
        self.dec.load_state_dict(
            torch.load(root + "python_package/models/dec_r41.pth",
                       map_location="cpu"))
        self.matrix.load_state_dict(
            torch.load(root + "python_package/models/r41.pth",
                       map_location="cpu"))

    def forward(self, contentV, styleV):

        sF = self.vgg(styleV)
        cF = self.vgg(contentV)
        feature, transmatrix = self.matrix(cF["r41"], sF["r41"])
        transfer = self.dec(feature)
        transfer = transfer.clamp(0, 1)

        return transfer
Example #2
    def load_model(self):
        # MODEL
        if (self.opt.layer == 'r31'):
            self.vgg = encoder3()
            self.dec = decoder3()
        elif (self.opt.layer == 'r41'):
            self.vgg = encoder4()
            self.dec = decoder4()
        self.matrix = MulLayer(layer=self.opt.layer)

        self.vgg.load_state_dict(torch.load(self.opt.vgg_dir))
        self.dec.load_state_dict(torch.load(self.opt.decoder_dir))
        self.matrix.load_state_dict(
            torch.load(self.opt.matrix_dir, map_location=self.device))
        self.vgg.to(self.device)
        self.dec.to(self.device)
        self.matrix.to(self.device)
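In load_model above, only the matrix checkpoint is loaded with map_location, so the call can fail on a CPU-only machine if vgg_dir or decoder_dir point to GPU-saved weights. A minimal CPU-safe sketch of the same step, reusing the self.opt paths and self.device already defined (illustrative, not the original method):

    def load_model_cpu_safe(self):
        # Sketch only: same as load_model, but every torch.load call gets
        # map_location so GPU-saved checkpoints also load on CPU.
        if self.opt.layer == 'r31':
            self.vgg, self.dec = encoder3(), decoder3()
        else:
            self.vgg, self.dec = encoder4(), decoder4()
        self.matrix = MulLayer(layer=self.opt.layer)

        self.vgg.load_state_dict(
            torch.load(self.opt.vgg_dir, map_location=self.device))
        self.dec.load_state_dict(
            torch.load(self.opt.decoder_dir, map_location=self.device))
        self.matrix.load_state_dict(
            torch.load(self.opt.matrix_dir, map_location=self.device))

        for m in (self.vgg, self.dec, self.matrix):
            m.to(self.device)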
Example #3
    def __init__(self, root):

        super(LinearStyleTransfer, self).__init__()

        self.vgg = encoder4()
        self.dec = decoder4()
        self.matrix = MulLayer("r41")

        self.vgg.load_state_dict(
            torch.load(root + "python_package/models/vgg_r41.pth",
                       map_location="cpu"))
        self.dec.load_state_dict(
            torch.load(root + "python_package/models/dec_r41.pth",
                       map_location="cpu"))
        self.matrix.load_state_dict(
            torch.load(root + "python_package/models/r41.pth",
                       map_location="cpu"))
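Examples #1 and #3 wrap the pretrained encoder, transformation module, and decoder into one nn.Module. A short usage sketch for it, assuming the checkpoints sit under root + "python_package/models/" as above and that PIL and torchvision are available; "content.jpg" and "style.jpg" are placeholder paths:

import torch
from PIL import Image
from torchvision import transforms

to_tensor = transforms.Compose([transforms.Resize(512), transforms.ToTensor()])

model = LinearStyleTransfer(root="./")   # expects python_package/models/*.pth under root
model.eval()

contentV = to_tensor(Image.open("content.jpg").convert("RGB")).unsqueeze(0)
styleV = to_tensor(Image.open("style.jpg").convert("RGB")).unsqueeze(0)

with torch.no_grad():
    out = model(contentV, styleV)        # (1, 3, H, W), clamped to [0, 1]

transforms.ToPILImage()(out.squeeze(0)).save("stylized.jpg")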
Example #4
                                                  shuffle=True,
                                                  num_workers=1,
                                                  drop_last=True)
    content_loader = iter(content_loader_)
    style_dataset = Dataset(opt.stylePath, opt.loadSize, opt.fineSize)
    style_loader_ = torch.utils.data.DataLoader(dataset=style_dataset,
                                                batch_size=opt.batchSize,
                                                shuffle=True,
                                                num_workers=1,
                                                drop_last=True)
    style_loader = iter(style_loader_)

    ################# MODEL #################
    vgg5 = loss_network()
    if (opt.layer == 'r31'):
        matrix = MulLayer('r31')
        vgg = encoder3()
        dec = decoder3()
    elif (opt.layer == 'r41'):
        matrix = MulLayer('r41')
        vgg = encoder4()
        dec = decoder4()
    vgg.load_state_dict(torch.load(opt.vgg_dir))
    # dec.load_state_dict(torch.load(opt.decoder_dir))
    vgg5.load_state_dict(torch.load(opt.loss_network_dir))
    matrix.load_state_dict(torch.load(opt.matrixPath))
    for param in vgg.parameters():
        param.requires_grad = False
    for param in vgg5.parameters():
        param.requires_grad = False
    for param in matrix.parameters():
        param.requires_grad = False

Example #5
styleV = loadImg(opt.style).unsqueeze(0)

content_dataset = Dataset(opt.content_dir,
                          loadSize=opt.loadSize,
                          fineSize=opt.fineSize,
                          test=True,
                          video=True)
content_loader = torch.utils.data.DataLoader(dataset=content_dataset,
                                             batch_size=1,
                                             shuffle=False)

################# MODEL #################
if (opt.layer == 'r31'):
    matrix = MulLayer(layer='r31')
    vgg = encoder3()
    dec = decoder3()
elif (opt.layer == 'r41'):
    matrix = MulLayer(layer='r41')
    vgg = encoder4()
    dec = decoder4()
vgg.load_state_dict(torch.load(opt.vgg_dir))
dec.load_state_dict(torch.load(opt.decoder_dir))
matrix.load_state_dict(torch.load(opt.matrix_dir))

for param in matrix.parameters():
    param.requires_grad = False
for param in vgg.parameters():
    param.requires_grad = False
for param in matrix.parameters():
    param.requires_grad = False
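The excerpt stops after the parameters are frozen. A minimal sketch of the per-frame inference loop that would follow, assuming opt.layer == 'r41' (so the encoder output is indexed by layer name) and that the video loader yields (frame, name) pairs; the output directory is a placeholder:

import torchvision.utils as vutils

with torch.no_grad():
    sF = vgg(styleV)
    for i, (frame, name) in enumerate(content_loader):
        cF = vgg(frame)
        feature, transmatrix = matrix(cF['r41'], sF['r41'])
        transfer = dec(feature).clamp(0, 1)
        vutils.save_image(transfer, 'out/%04d.png' % i)   # 'out/' is a placeholder directory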
Example #6
    args = parser.parse_args()
    args.cuda = torch.cuda.is_available()
    print_options(args)
    os.makedirs(args.outf, exist_ok=True)
    content_name = args.content.split("/")[-1].split(".")[0]
    style_name = args.style.split("/")[-1].split(".")[0]
    device = torch.device(args.device)

    ################# MODEL #################
    if(args.layer == 'r31'):
        vgg = encoder3().to(device)
        dec = decoder3().to(device)
    elif(args.layer == 'r41'):
        vgg = encoder4().to(device)
        dec = decoder4().to(device)
    matrix = MulLayer(args.layer).to(device)
    vgg.load_state_dict(torch.load(args.vgg_dir))
    dec.load_state_dict(torch.load(args.decoder_dir))
    matrix.load_state_dict(torch.load(args.matrixPath))
    
    PATCH_SIZE = args.patch_size
    PADDING = args.padding
    
    content_tf = test_transform(0, False)
    style_tf = test_transform(args.style_size, True)

    repeat = 15 if args.test_speed else 1
    time_list = []

    for i in range(repeat):
        image = Image.open(args.content)
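        # (Sketch) The excerpt is cut off here. An illustrative way the timing loop
        # might continue -- not the original script: it ignores the PATCH_SIZE /
        # PADDING tiling and times one full-image pass, synchronizing CUDA so GPU
        # work is included in the measurement. Assumes `import time` at the top.
        content = content_tf(image.convert('RGB')).unsqueeze(0).to(device)
        style = style_tf(Image.open(args.style).convert('RGB')).unsqueeze(0).to(device)

        if args.cuda:
            torch.cuda.synchronize()
        start = time.time()
        with torch.no_grad():
            sF = vgg(style)
            cF = vgg(content)
            if args.layer == 'r41':
                feature, _ = matrix(cF['r41'], sF['r41'])
            else:
                feature, _ = matrix(cF, sF)
            out = dec(feature).clamp(0, 1)
        if args.cuda:
            torch.cuda.synchronize()
        time_list.append(time.time() - start)

    if args.test_speed:
        print('average time over %d runs: %.4f s' % (repeat, sum(time_list) / len(time_list)))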
Example #7
                              loadSize = opt.loadSize,
                              fineSize = opt.fineSize,
                              test     = True,
                              video    = True)
    content_loader = torch.utils.data.DataLoader(dataset    = content_dataset,
                                                 batch_size = 1,
                                                 shuffle    = False)

    ################# MODEL #################
    if(opt.layer == 'r31'):
        vgg = encoder3()
        dec = decoder3()
    elif(opt.layer == 'r41'):
        vgg = encoder4()
        dec = decoder4()
    matrix = MulLayer(layer=opt.layer)
    vgg.load_state_dict(torch.load(opt.vgg_dir))
    dec.load_state_dict(torch.load(opt.decoder_dir))
    matrix.load_state_dict(torch.load(opt.matrix_dir))

    ################# GLOBAL VARIABLE #################
    contentV = torch.Tensor(1,3,opt.fineSize,opt.fineSize)

    ################# GPU  #################
    if(opt.cuda):
        vgg.cuda()
        dec.cuda()
        matrix.cuda()

        styleV = styleV.cuda()
        contentV = contentV.cuda()
Example #8
                                              shuffle=True,
                                              num_workers=1,
                                              drop_last=True)
content_loader = iter(content_loader_)
style_dataset = Dataset(opt.stylePath, opt.loadSize, opt.fineSize)
style_loader_ = torch.utils.data.DataLoader(dataset=style_dataset,
                                            batch_size=opt.batchSize,
                                            shuffle=True,
                                            num_workers=1,
                                            drop_last=True)
style_loader = iter(style_loader_)

################# MODEL #################
vgg5 = loss_network()
if (opt.layer == 'r31'):
    matrix = MulLayer('r31')
    vgg = encoder3()
    dec = decoder3()
elif (opt.layer == 'r41'):
    matrix = MulLayer('r41')
    vgg = encoder4()
    dec = decoder4()
vgg.load_state_dict(torch.load(opt.vgg_dir))
dec.load_state_dict(torch.load(opt.decoder_dir))
vgg5.load_state_dict(torch.load(opt.loss_network_dir))

for param in vgg.parameters():
    param.requires_grad = False
for param in vgg5.parameters():
    param.requires_grad = False
for param in dec.parameters():
    param.requires_grad = False
Example #9
                                              batch_size=opt.batchSize,
                                              shuffle=True,
                                              num_workers=1,
                                              drop_last=True)
content_loader = iter(content_loader_)
style_dataset = Dataset(opt.stylePath, opt.loadSize, opt.fineSize)
style_loader_ = torch.utils.data.DataLoader(dataset=style_dataset,
                                            batch_size=opt.batchSize,
                                            shuffle=True,
                                            num_workers=1,
                                            drop_last=True)
style_loader = iter(style_loader_)

################# MODEL #################
if (opt.layer == 'r31'):
    matrix = MulLayer('r31')
    vgg = encoder3()
    dec = decoder3()
elif (opt.layer == 'r41'):
    matrix = MulLayer('r41')
    vgg = encoder4()
    dec = decoder4()
vgg.load_state_dict(torch.load(opt.vgg_dir))
dec.load_state_dict(torch.load(opt.decoder_dir))

for param in vgg.parameters():
    param.requires_grad = False
for param in dec.parameters():
    param.requires_grad = False

################# LOSS & OPTIMIZER #################
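The excerpt ends at the LOSS & OPTIMIZER banner. Since vgg and dec are frozen above, the matrix module is the part being trained; a minimal sketch of what this section might contain, assuming an Adam optimizer and a plain MSE placeholder for the project's weighted content/style criterion (opt.lr is assumed to exist):

import torch.nn as nn
import torch.optim as optim

criterion = nn.MSELoss()                                 # placeholder for the real content/style loss
optimizer = optim.Adam(matrix.parameters(), lr=opt.lr)   # only the matrix module is optimized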
Example #10
from libs.models import encoder3, encoder4
from libs.models import decoder3, decoder4

os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # USED ONLY IF OTHER GPUS ARE BEING USED
if True:
    style_dataset = Dataset('Database/WikiArt/train/', 256, 256, test=True)
    style_loader_ = torch.utils.data.DataLoader(dataset=style_dataset,
                                                batch_size=128,
                                                shuffle=False,
                                                num_workers=4,
                                                drop_last=True)
    style_loader = iter(style_loader_)
    # styleV = torch.Tensor(64,3,224,224).cuda()

    matrix = MulLayer('r31')
    vgg = encoder3()
    vgg.load_state_dict(torch.load('models/vgg_r31.pth'))
    matrix.load_state_dict(torch.load('models/r31.pth'))
    vgg.cuda()
    matrix.cuda()
    features = []
    means = []
    with torch.no_grad():
        for iteration, (styleV, t) in enumerate(style_loader_):
            sF = vgg(styleV.cuda())
            sb, sc, sh, sw = sF.size()
            sFF = sF.view(sb, sc, -1)
            sMean = torch.mean(sFF, dim=2, keepdim=True)
            sMean = sMean.unsqueeze(3)
            sMeanS = sMean.expand_as(sF)
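            # (Sketch) The excerpt is cut off here. An illustrative continuation that
            # collects the per-image channel means and writes them to disk -- not the
            # original script; the output file name is a placeholder.
            means.append(sMean.squeeze(3).squeeze(2).cpu())   # (batch, channels)
    torch.save(torch.cat(means, dim=0), 'wikiart_r31_channel_means.pth')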
Example #11
                                             shuffle=False,
                                             num_workers=1)
style_dataset = Dataset(opt.stylePath, opt.loadSize, opt.fineSize, test=True)
style_loader = torch.utils.data.DataLoader(dataset=style_dataset,
                                           batch_size=opt.batchSize,
                                           shuffle=False,
                                           num_workers=1)

################# MODEL #################
if (opt.layer == 'r31'):
    vgg = encoder3()
    dec = decoder3()
elif (opt.layer == 'r41'):
    vgg = encoder4()
    dec = decoder4()
matrix = MulLayer(opt.layer)
vgg.load_state_dict(torch.load(opt.vgg_dir))
dec.load_state_dict(torch.load(opt.decoder_dir))
matrix.load_state_dict(torch.load(opt.matrixPath))

################# GLOBAL VARIABLE #################
contentV = torch.Tensor(opt.batchSize, 3, opt.fineSize, opt.fineSize)
styleV = torch.Tensor(opt.batchSize, 3, opt.fineSize, opt.fineSize)

################# GPU  #################
if (opt.cuda):
    vgg.cuda()
    dec.cuda()
    matrix.cuda()
    contentV = contentV.cuda()
    styleV = styleV.cuda()
Example #12
class Transfer(object):
    def __init__(self, opt=None, load_default=False):
        # PREPARATIONS
        if opt:
            self.opt = opt
        else:
            self.opt = Opt()
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        print_options(self.opt)

        os.makedirs(self.opt.outf, exist_ok=True)
        cudnn.benchmark = True

        self.load_model()
        if load_default:
            self.load_data(self.opt.style, self.opt.content)

    def load_model(self):
        # MODEL
        if (self.opt.layer == 'r31'):
            self.vgg = encoder3()
            self.dec = decoder3()
        elif (self.opt.layer == 'r41'):
            self.vgg = encoder4()
            self.dec = decoder4()
        self.matrix = MulLayer(layer=self.opt.layer)

        self.vgg.load_state_dict(torch.load(self.opt.vgg_dir))
        self.dec.load_state_dict(torch.load(self.opt.decoder_dir))
        self.matrix.load_state_dict(
            torch.load(self.opt.matrix_dir, map_location=self.device))
        self.vgg.to(self.device)
        self.dec.to(self.device)
        self.matrix.to(self.device)

    def load_data(self, style_img_path, content_img_path):
        transform = transforms.Compose(
            [transforms.Resize(self.opt.loadSize),
             transforms.ToTensor()])

        self.styleV = transform(
            Image.open(style_img_path).convert('RGB')).unsqueeze(0).to(
                self.device)
        self.contentV = transform(
            Image.open(content_img_path).convert('RGB')).unsqueeze(0).to(
                self.device)

    def transfer(self):
        with torch.no_grad():
            sF = self.vgg(self.styleV)
            cF = self.vgg(self.contentV)

            if (self.opt.layer == 'r41'):
                feature, transmatrix = self.matrix(cF[self.opt.layer],
                                                   sF[self.opt.layer])
            else:
                feature, transmatrix = self.matrix(cF, sF)
            transfer = self.dec(feature)  # decode outside the branch so both layers produce an output

        transfer = transfer.clamp(0, 1).squeeze(0).data.cpu().numpy()
        transfer = 255 * transfer.transpose((1, 2, 0))
        # transfer = transfer.transpose((1,2,0))

        return transfer
Example #13
    def __init__(self):
        super(Transfer3, self).__init__()
        self.vgg_c = encoder3()
        self.vgg_s = encoder3()
        self.matrix = MulLayer(layer='r31')
        self.dec = decoder3()
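Finally, a short usage sketch for the Transfer wrapper shown earlier (not for the truncated Transfer3 module), assuming the default Opt() paths are valid; the image paths are placeholders, and the result is converted with PIL since transfer() returns an HxWx3 float array scaled to [0, 255]:

import numpy as np
from PIL import Image

t = Transfer()                              # builds encoder, matrix and decoder from Opt() defaults
t.load_data('style.jpg', 'content.jpg')     # placeholder image paths (style first, then content)
result = t.transfer()                       # HxWx3 float array in [0, 255]
Image.fromarray(result.astype(np.uint8)).save('stylized.jpg')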