示例#1
0
class LinearStyleTransfer(nn.Module):
    """Self-contained linear style transfer model (r41 layer).

    Bundles the VGG encoder, the decoder, and the learned feature
    transformation matrix, loading all three pretrained weight files
    from ``root + "python_package/models/"`` on construction.
    """

    def __init__(self, root):
        """Build the sub-networks and load their CPU-mapped checkpoints.

        Args:
            root: path prefix under which ``python_package/models/`` lives.
        """
        super(LinearStyleTransfer, self).__init__()

        self.vgg = encoder4()
        self.dec = decoder4()
        self.matrix = MulLayer("r41")

        # (module, checkpoint-path) pairs; all weights are mapped to CPU
        # so loading works regardless of where they were saved.
        checkpoints = (
            (self.vgg, "python_package/models/vgg_r41.pth"),
            (self.dec, "python_package/models/dec_r41.pth"),
            (self.matrix, "python_package/models/r41.pth"),
        )
        for module, rel_path in checkpoints:
            state = torch.load(root + rel_path, map_location="cpu")
            module.load_state_dict(state)

    def forward(self, contentV, styleV):
        """Stylize ``contentV`` with the style of ``styleV``.

        Returns the decoded image tensor clamped to [0, 1].
        """
        style_feats = self.vgg(styleV)
        content_feats = self.vgg(contentV)
        # encoder4 returns a dict of feature maps; blend at the r41 layer.
        blended, _ = self.matrix(content_feats["r41"], style_feats["r41"])
        out = self.dec(blended)
        return out.clamp(0, 1)
示例#2
0
    style_loader = iter(style_loader_)

    ################# MODEL #################
    # vgg5 is a separate loss network used only to compute the perceptual
    # losses; it is frozen below.
    vgg5 = loss_network()
    # Pick the encoder/decoder pair matching the requested feature layer.
    if (opt.layer == 'r31'):
        matrix = MulLayer('r31')
        vgg = encoder3()
        dec = decoder3()
    elif (opt.layer == 'r41'):
        matrix = MulLayer('r41')
        vgg = encoder4()
        dec = decoder4()
    vgg.load_state_dict(torch.load(opt.vgg_dir))
    # dec.load_state_dict(torch.load(opt.decoder_dir))
    vgg5.load_state_dict(torch.load(opt.loss_network_dir))
    matrix.load_state_dict(torch.load(opt.matrixPath))
    # Freeze everything except the decoder: `dec` is the module being
    # trained here (see the Adam optimizer over dec.parameters() below),
    # which is why its checkpoint load and freeze loop stay commented out.
    for param in vgg.parameters():
        param.requires_grad = False
    for param in vgg5.parameters():
        param.requires_grad = False
    for param in matrix.parameters():
        param.requires_grad = False
    # for param in dec.parameters():
    #     param.requires_grad = False

    ################# LOSS & OPTIMIZER #################
    criterion = LossCriterion(opt.style_layers, opt.content_layers,
                              opt.style_weight, opt.content_weight,
                              opt.sp_weight)
    optimizer = optim.Adam(dec.parameters(), opt.lr)
# Serve content images one at a time, in order (inference, not training).
content_loader = torch.utils.data.DataLoader(dataset=content_dataset,
                                             batch_size=1,
                                             shuffle=False)

################# MODEL #################
# Pick the encoder/decoder pair matching the requested feature layer.
if (opt.layer == 'r31'):
    matrix = MulLayer(layer='r31')
    vgg = encoder3()
    dec = decoder3()
elif (opt.layer == 'r41'):
    matrix = MulLayer(layer='r41')
    vgg = encoder4()
    dec = decoder4()
vgg.load_state_dict(torch.load(opt.vgg_dir))
dec.load_state_dict(torch.load(opt.decoder_dir))
matrix.load_state_dict(torch.load(opt.matrix_dir))

# Inference only: freeze all three networks.
for param in matrix.parameters():
    param.requires_grad = False
for param in vgg.parameters():
    param.requires_grad = False
# BUG FIX: this loop used to re-freeze `matrix` a second time (copy-paste
# slip) while `dec` was never frozen.
for param in dec.parameters():
    param.requires_grad = False

################# GLOBAL VARIABLE #################
# Preallocated 1x3xHxW buffer for the content image.
contentV = torch.Tensor(1, 3, opt.fineSize, opt.fineSize)

################# GPU  #################
if (opt.cuda):
    vgg.cuda()
    dec.cuda()
    # BUG FIX: matrix was left on the CPU while vgg/dec moved to the GPU,
    # which would make matrix(cF, sF) fail on CUDA tensors. (.cuda() is
    # idempotent, so this is safe even if later code repeats it.)
    matrix.cuda()
示例#4
0
    os.makedirs(args.outf, exist_ok=True)
    # Output file stems derived from the input image basenames
    # (path split on "/" — assumes POSIX-style paths).
    content_name = args.content.split("/")[-1].split(".")[0]
    style_name = args.style.split("/")[-1].split(".")[0]
    device = torch.device(args.device)

    ################# MODEL #################
    # Select the encoder/decoder pair for the requested feature layer.
    if(args.layer == 'r31'):
        vgg = encoder3().to(device)
        dec = decoder3().to(device)
    elif(args.layer == 'r41'):
        vgg = encoder4().to(device)
        dec = decoder4().to(device)
    matrix = MulLayer(args.layer).to(device)
    vgg.load_state_dict(torch.load(args.vgg_dir))
    dec.load_state_dict(torch.load(args.decoder_dir))
    matrix.load_state_dict(torch.load(args.matrixPath))

    PATCH_SIZE = args.patch_size
    PADDING = args.padding

    # NOTE(review): presumably size 0 means "keep native size" for the
    # content transform — confirm against test_transform's definition.
    content_tf = test_transform(0, False)
    style_tf = test_transform(args.style_size, True)

    # When benchmarking, repeat the run so per-iteration timings can be
    # collected in time_list and averaged.
    repeat = 15 if args.test_speed else 1
    time_list = []

    for i in range(repeat):
        image = Image.open(args.content)
        if args.resize != 0:
            image = image.resize((args.resize, args.resize))
        IMAGE_WIDTH, IMAGE_HEIGHT = image.size
示例#5
0
os.environ[
    "CUDA_VISIBLE_DEVICES"] = "0"  # USED ONLY IF OTHER GPUS ARE BEING USED
# `if True:` is a no-op guard kept only for its indentation block.
if True:
    # Batch over the WikiArt training split at 256x256.
    style_dataset = Dataset('Database/WikiArt/train/', 256, 256, test=True)
    style_loader_ = torch.utils.data.DataLoader(dataset=style_dataset,
                                                batch_size=128,
                                                shuffle=False,
                                                num_workers=4,
                                                drop_last=True)
    style_loader = iter(style_loader_)
    # styleV = torch.Tensor(64,3,224,224).cuda()

    # r31 encoder + its transformation-matrix module, pretrained and on GPU.
    matrix = MulLayer('r31')
    vgg = encoder3()
    vgg.load_state_dict(torch.load('models/vgg_r31.pth'))
    matrix.load_state_dict(torch.load('models/r31.pth'))
    vgg.cuda()
    matrix.cuda()
    features = []
    means = []
    with torch.no_grad():
        for iteration, (styleV, t) in enumerate(style_loader_):
            # encoder3 returns the r31 feature map tensor directly (b,c,h,w).
            sF = vgg(styleV.cuda())
            sb, sc, sh, sw = sF.size()
            # Mean-center each channel over the spatial dimensions ...
            sFF = sF.view(sb, sc, -1)
            sMean = torch.mean(sFF, dim=2, keepdim=True)
            sMean = sMean.unsqueeze(3)
            sMeanS = sMean.expand_as(sF)
            sF = sF - sMeanS
            # ... then push the centered features through the style branch
            # of the matrix module (snet — compresses features; verify).
            sF = matrix.snet(sF)
示例#6
0
class Transfer(object):
    """One-shot linear style transfer runner.

    Loads the encoder/decoder/transform-matrix networks for the layer
    selected in ``opt.layer`` and turns a (style, content) image pair
    into a stylized HxWx3 float numpy array scaled to [0, 255].
    """

    def __init__(self, opt=None, load_deafult=False):
        """Set up options, device, output dir, and load the model.

        Args:
            opt: options object; falls back to a default ``Opt()``.
            load_deafult: when True, immediately load ``opt.style`` and
                ``opt.content``. (Parameter-name typo kept for backward
                compatibility with existing callers.)
        """
        # PREPARATIONS
        self.opt = opt if opt else Opt()
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        print_options(self.opt)

        os.makedirs(self.opt.outf, exist_ok=True)
        cudnn.benchmark = True

        self.load_model()
        if load_deafult:
            self.load_data(self.opt.style, self.opt.content)

    def load_model(self):
        """Instantiate the networks for ``opt.layer`` and load weights."""
        if (self.opt.layer == 'r31'):
            self.vgg = encoder3()
            self.dec = decoder3()
        elif (self.opt.layer == 'r41'):
            self.vgg = encoder4()
            self.dec = decoder4()
        self.matrix = MulLayer(layer=self.opt.layer)

        # map_location lets GPU-saved checkpoints load on CPU-only hosts
        # (previously only the matrix checkpoint was mapped).
        self.vgg.load_state_dict(
            torch.load(self.opt.vgg_dir, map_location=self.device))
        self.dec.load_state_dict(
            torch.load(self.opt.decoder_dir, map_location=self.device))
        self.matrix.load_state_dict(
            torch.load(self.opt.matrix_dir, map_location=self.device))
        self.vgg.to(self.device)
        self.dec.to(self.device)
        self.matrix.to(self.device)

    def load_data(self, style_img_path, content_img_path):
        """Load both images as 1x3xHxW tensors on the active device."""
        transform = transforms.Compose(
            [transforms.Resize(self.opt.loadSize),
             transforms.ToTensor()])

        self.styleV = transform(
            Image.open(style_img_path).convert('RGB')).unsqueeze(0).to(
                self.device)
        self.contentV = transform(
            Image.open(content_img_path).convert('RGB')).unsqueeze(0).to(
                self.device)

    def transfer(self):
        """Run style transfer on the loaded pair.

        Returns:
            HxWx3 float numpy array with values in [0, 255].
        """
        with torch.no_grad():
            sF = self.vgg(self.styleV)
            cF = self.vgg(self.contentV)

            # The r41 encoder returns a dict of feature maps keyed by
            # layer name; r31 returns the feature tensor directly.
            if (self.opt.layer == 'r41'):
                feature, transmatrix = self.matrix(cF[self.opt.layer],
                                                   sF[self.opt.layer])
            else:
                feature, transmatrix = self.matrix(cF, sF)
            # BUG FIX: decoding used to live inside the else-branch only,
            # so `transfer` was unbound (NameError) when layer == 'r41'.
            transfer = self.dec(feature)

        transfer = transfer.clamp(0, 1).squeeze(0).data.cpu().numpy()
        transfer = 255 * transfer.transpose((1, 2, 0))

        return transfer