Пример #1
0
def recon(opt):
    """Reconstruct test images with a pretrained VGGRC model and save PNGs.

    Loads the checkpoint selected by ``opt.outf``/``opt.start``, runs every
    (style, face) pair of the 'pairedVGG' test set through the network, and
    writes each reconstruction to ``./checkpoints/img/<outf>/<k>.png``.
    """
    DN = de_norm()
    model = VGGRC(opt).cuda()
    model.load_state_dict(torch.load('checkpoints/rec/%s/model_%d.pth' % (opt.outf, opt.start)))

    os.makedirs("./checkpoints/img/%s/" % opt.outf, exist_ok=True)
    testloader = DataLoader(
            ST_dataset(root=opt.root, name=opt.name, mode='pairedVGG'),
            batch_size=1,
            shuffle=False,
            num_workers=0,
    )
    pbar = tqdm(total=len(testloader))
    # Inference only: disable autograd to avoid building graphs for every image.
    with torch.no_grad():
        for k, data in enumerate(testloader):
            # data[1] is the face image, data[0] the style image.
            Maps = []
            face_feat = model.VGG(data[1].cuda())
            style_feat = model.VGG(data[0].cuda())

            # Build one modified (gain) map per VGG layer.
            for i in range(len(model.VGG.layers)):
                Maps += [ModifyMap(style_feat[i], face_feat[i], opt)]

            out = model.net_forward(Maps)
            temp_image = make_grid(torch.clamp(DN(out[0]).unsqueeze(0), 0, 1), nrow=1, padding=0, normalize=False)
            # CHW float in [0,1] -> HWC in [0,255] for OpenCV; channel order
            # swapped because cv2.imwrite expects BGR.
            ok = cv2.imwrite(
                "./checkpoints/img/%s/%d.png" % (opt.outf, k),
                cv2.cvtColor(
                    cv2.resize(255 * temp_image.cpu().detach().numpy().transpose(1, 2, 0), (500, 660)),
                    cv2.COLOR_BGR2RGB
                )
            )
            if not ok:
                # Bug fix: the return value was silently discarded before.
                print("warning: failed to write ./checkpoints/img/%s/%d.png" % (opt.outf, k))
            # Bug fix: the progress bar was created but never advanced.
            pbar.update(1)
    pbar.close()
Пример #2
0
def convert_photo(message):
    """Handle an incoming Telegram photo: resize, run the model, reply.

    Downloads the highest-resolution photo from ``message``, optionally
    scales it by the user's stored factor from ``user_dict``, caps its area
    at SIZE*SIZE, runs the style model under ``no_grad``, and sends the
    resulting JPEG back to the user. Returns 0 when the message carries no
    usable photo.
    """
    try:
        fileID = message.photo[-1].file_id
    # Bug fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    # Only a missing/empty/odd photo payload should trigger the error reply.
    except (AttributeError, IndexError, TypeError):
        bot.send_message(message.from_user.id,
                         'Фото некорректно. Повторите попытку')
        return 0
    file = bot.get_file(fileID)
    image = bot.download_file(file.file_path)
    image = Image.open(io.BytesIO(image))
    # Apply the user's stored scale factor, if any (1 means "no scaling").
    # PIL's image.size is (width, height); transforms.Resize takes (h, w),
    # hence the swapped indices below.
    if message.from_user.id in user_dict and user_dict[
            message.from_user.id] != 1:
        size = (int(user_dict[message.from_user.id] * image.size[1]),
                int(user_dict[message.from_user.id] * image.size[0]))
    else:
        size = (image.size[1], image.size[0])
    s = size[0] * size[1]
    if s > SIZE * SIZE:
        # Shrink both sides proportionally so the area is at most SIZE*SIZE.
        size = (int(size[0] * SIZE / sqrt(s)), int(size[1] * SIZE / sqrt(s)))
        bot.send_message(
            message.from_user.id,
            f'Площадь фотографии более {SIZE}x{SIZE}, размеры были уменьшены')
    image = transforms.Resize(size)(image)
    image = transform(image).unsqueeze(0)
    bot.send_message(message.from_user.id, 'Обработка займет некоторое время')

    # Inference only: no autograd graph needed.
    with no_grad():
        image = transforms.ToPILImage()(dataset.de_norm(
            model(image).detach()[0]))
    # Serialize the PIL image to an in-memory JPEG for sending.
    bio = io.BytesIO()
    bio.name = 'image.jpeg'
    image.save(bio, 'JPEG')
    bio.seek(0)
    bot.send_photo(message.from_user.id, photo=bio)
    bot.send_message(message.from_user.id, 'Можете отправить следующее фото')

    del image
    del bio
Пример #3
0
def modifedStyleTransfer(opt):
    """Portrait style transfer driven by modified (gain-map) VGG features.

    Loads a pretrained VGGRC model, builds gain feature maps for each
    style/input pair, decodes an initial output, then iteratively optimizes
    either the output image (LBFGS, when 'image' is in ``opt.optimode``) or
    the feature maps themselves (Adam, 300x more iterations) against the
    gain + style losses. Intermediate results go to ``train_writer`` and
    are periodically saved as PNGs.

    NOTE(review): ``train_writer`` is assumed to be a module-level
    SummaryWriter defined elsewhere in this file — confirm before reuse.
    """
    print('loading VGG models......')
    model = VGGRC(opt).cuda()
    model.load_state_dict(
        torch.load('/content/PortraitST/weights/model_%d.pth' % (opt.start)))
    DN = de_norm()
    os.makedirs("/content/log/%s/" % opt.outf, exist_ok=True)
    os.makedirs("/content/checkpoints/%s/" % opt.outf, exist_ok=True)

    dataloader = DataLoader(
        ST_dataset(root=opt.root, name=opt.name, mode='no_align'),
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=0,
    )
    # Counts processed pairs; starts at -1 so the first increment yields 0.
    images = -1

    print('start processing.')
    for k, data in enumerate(dataloader):
        # data[0]: style image, data[1]: input (face) image.
        style_feats = model.VGG(data[0].cuda())
        input_feats = model.VGG(data[1].cuda())
        input_feats_copy = model.VGG(data[1].cuda())  # NOTE(review): appears unused below
        totalLoss = OverAllLoss(opt)
        Maps = FeatureMap(opt)
        Maps.mapload(input_feats, style_feats, len(model.VGG.layers))
        #FeatureMaps = FeatureMap(opt)
        #FeatureMaps.mapload(input_feats, style_feats, len(model.VGG.layers))
        Maplist = Maps.featureList

        # Decode an initial output image from the modified feature maps.
        out = model.net_forward(Maplist)
        temp_image = make_grid(torch.clamp(DN(out[0]).unsqueeze(0), 0, 1),
                               nrow=1,
                               padding=0,
                               normalize=False)
        train_writer.add_image('outimg', temp_image, k)

        # Visualize the first feature layer: reshape channels into a 4-wide
        # grid and take the first 3 channels of each slice as an RGB image.
        index = 0
        view_shape = (4, int(Maplist[index].shape[1] / 4),
                      Maplist[index].shape[2], Maplist[index].shape[3])
        print(view_shape)
        temp_image = make_grid(Maplist[index].reshape(view_shape)[:, :3, :, :],
                               nrow=4,
                               padding=0,
                               normalize=True)
        train_writer.add_image('Gain Map', temp_image, 0)
        temp_image = make_grid(
            style_feats[index].reshape(view_shape)[:, :3, :, :],
            nrow=4,
            padding=0,
            normalize=True)
        train_writer.add_image('style_feat_4', temp_image, 0)
        temp_image = make_grid(
            input_feats[index].reshape(view_shape)[:, :3, :, :],
            nrow=4,
            padding=0,
            normalize=True)
        train_writer.add_image('input_feat_4', temp_image, 0)

        images += 1
        total_iter = opt.iter + 1

        # Set optimizer.
        if 'image' in opt.optimode:
            # Initialize the output.
            # Optimize the decoded image pixels directly with LBFGS.
            output = out.cuda()
            #output.requires_grad = True
            output = torch.nn.Parameter(output, requires_grad=True)
            optimizer = torch.optim.LBFGS([output], lr=opt.lr)
        else:
            # Optimize the feature maps themselves; Adam needs far more steps.
            total_iter *= 300
            for layer in range(5):
                Maplist[layer] = torch.nn.Parameter(Maplist[layer],
                                                    requires_grad=True)
            optimizer = torch.optim.Adam(Maplist, lr=1e-1)

        optimizer.zero_grad()

        # define total iteration number.
        pbar = tqdm(total=total_iter)

        for iters in range(total_iter):

            def closure():
                # LBFGS closure: recompute the features of the current output
                # and accumulate gain + style losses over all VGG layers.
                # (Only used on the 'image' optimization path.)
                input_feats = model.VGG(output)
                optimizer.zero_grad()
                Loss_gain = 0
                Loss_style = 0
                Loss = 0
                for i in range(len(model.VGG.layers)):
                    loss_gain_item, loss_style_item = totalLoss.forward(
                        style_feats[i],
                        input_feats[i],
                        Map=Maps.featureList[i],
                        mode=model.VGG.layers[i])

                    Loss_gain += loss_gain_item
                    Loss_style += loss_style_item
                # loss term
                Loss = Loss_gain + Loss_style
                Loss.backward(retain_graph=True)
                return Loss

            # Updates.
            #Loss.backward(retain_graph=True)
            if 'image' in opt.optimode:
                optimizer.step(closure)
                if iters % opt.iter_show == 0:
                    # record result pics.
                    temp_image = make_grid(torch.clamp(
                        DN(output[0]).unsqueeze(0), 0, 1),
                                           nrow=opt.batch_size,
                                           padding=0,
                                           normalize=False)
                    train_writer.add_image('temp result', temp_image,
                                           iters + images * opt.iter)

                if iters % (opt.iter_show) == 0:
                    save_image(
                        temp_image,
                        "/content/PortraitST/test/result/%d_%d.png" %
                        (k, iters))
            else:
                # Feature-map path: compute the same losses on the current
                # (trainable) Maplist and take one Adam step per iteration.
                optimizer.zero_grad()
                input_feats = Maplist
                Loss_gain = 0
                Loss_style = 0
                Loss = 0
                for i in range(len(model.VGG.layers)):
                    loss_gain_item, loss_style_item = totalLoss.forward(
                        style_feats[i],
                        input_feats[i],
                        Map=Maps.featureList[i],
                        mode=model.VGG.layers[i])

                    Loss_gain += loss_gain_item
                    Loss_style += loss_style_item
                # loss term
                Loss = Loss_gain + Loss_style
                Loss.backward(retain_graph=True)
                optimizer.step()
                if iters % (opt.iter_show * 10) == 0:
                    # record result pics.
                    # Decode the optimized maps back into an image for logging.
                    output = model.net_forward(Maplist)
                    temp_image = make_grid(output[0].unsqueeze(0),
                                           nrow=opt.batch_size,
                                           padding=0,
                                           normalize=True)
                    train_writer.add_image('temp result', temp_image,
                                           iters + images * total_iter)
                    print(Loss_gain.item(), Loss_style.item())

                if iters % (100) == 0:
                    save_image(
                        temp_image, "/content/checkpoints/%s/%d_%d.png" %
                        (opt.outf, k, iters))

            #optimizer.zero_grad()
            pbar.update(1)

        pbar.close()
Пример #4
0
def StyleTransfer(opt):
    """Gain-map style transfer by direct LBFGS optimization of the image.

    Builds modified (gain) feature maps for each style/input pair from a
    plain ``myVGG`` feature extractor, then optimizes a copy of the input
    image against gain + style losses for ``opt.iter`` iterations.

    Modes:
      * ``opt.optimode == 'dataset'`` — iterates an RC_dataset of paired
        samples, processing two face images per item and saving results into
        per-image subfolders ``0/`` and ``1/``.
      * otherwise — processes one ST_dataset pair per batch.

    When ``opt.gabor`` is set, an additional Gabor-wavelet loss on the
    decolorized output is optimized with a separate Adam optimizer.
    """
    print('loading VGG models......')
    model = myVGG(layers=opt.layers.split(',')).cuda()

    totalLoss = OverAllLoss(opt)
    DN = de_norm()
    os.makedirs("./log/%s/" % opt.outf, exist_ok=True)
    os.makedirs("./checkpoints/%s/" % opt.outf, exist_ok=True)

    modefactor = 1  # number of face images
    counter = 0  #count for dataset.

    if opt.optimode == 'dataset':
        os.makedirs("./checkpoints/%s/0/" % opt.outf, exist_ok=True)
        os.makedirs("./checkpoints/%s/1/" % opt.outf, exist_ok=True)
        dataloader = DataLoader(
            RC_dataset(root=opt.root, name='train', mode='no'),
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=0,
        )
        modefactor = 2

    else:
        dataloader = DataLoader(
            ST_dataset(root=opt.root, name=opt.name, mode='no'),
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=0,
        )

    # Counts processed images; starts at -1 so the first increment yields 0.
    images = -1

    # Trigger of the gabor wavelet loss.
    if opt.gabor == True:
        Gabor = GaborWavelet().cuda()

    print('start processing.')
    for epoch in range(opt.epoch):
        for k, data in enumerate(dataloader):

            # In 'dataset' mode process both face images (imgs = 0 and 1).
            for imgs in range(0, modefactor):

                if opt.optimode == 'dataset':
                    input_feats = model(data[imgs].cuda())
                    style_feats = model(data[2].cuda())
                    # Initialize the output.
                    output = data[imgs].clone().cuda()
                    output = torch.nn.Parameter(output, requires_grad=True)
                else:
                    # data[0]: style image, data[1]: input (face) image.
                    input_feats = model(data[1].cuda())
                    style_feats = model(data[0].cuda())
                    output = data[1].clone().cuda()
                    output = torch.nn.Parameter(output, requires_grad=True)

                    if opt.gabor == True:
                        # Target Gabor response of the decolorized input.
                        Gabor_target = Gabor(
                            decolor(255 * DN(data[1][0].cuda()).unsqueeze(0)) /
                            255)
                        print('wow', Gabor_target)
                        optimizer_Gabor = torch.optim.Adam([output], lr=1e-5)
                        optimizer_Gabor.zero_grad()

                # Build one modified (gain) map per VGG layer.
                Maps = []
                for i in range(len(model.layers)):
                    Maps += [ModifyMap(style_feats[i], input_feats[i], opt)]

                # Visualize the first feature layer: reshape channels into a
                # 4-wide grid, first 3 channels of each slice as RGB.
                index = 0
                view_shape = (4, int(Maps[index].shape[1] / 4),
                              Maps[index].shape[2], Maps[index].shape[3])
                print(view_shape)
                temp_image = make_grid(
                    Maps[index].reshape(view_shape)[:, :3, :, :],
                    nrow=4,
                    padding=0,
                    normalize=True)
                # train_writer.add_image('Gain Map', temp_image, 0)
                temp_image = make_grid(
                    style_feats[index].reshape(view_shape)[:, :3, :, :],
                    nrow=4,
                    padding=0,
                    normalize=True)
                # train_writer.add_image('style_feat_4', temp_image, 0)
                temp_image = make_grid(
                    input_feats[index].reshape(view_shape)[:, :3, :, :],
                    nrow=4,
                    padding=0,
                    normalize=True)
                # train_writer.add_image('input_feat_4', temp_image, 0)

                # Initialize the output.
                #output = data[1].cuda()
                #output.requires_grad = True
                images += 1
                # Set optimizer.
                optimizer = torch.optim.LBFGS([output], lr=opt.lr)
                optimizer.zero_grad()

                # Iteration for 300 times.
                pbar = tqdm(total=opt.iter)

                for iters in range(opt.iter + 1):

                    def closure():
                        # LBFGS closure: recompute features of the current
                        # output and accumulate gain + style (+ optional
                        # histogram) losses over all layers.
                        input_feats = model(output)
                        optimizer.zero_grad()
                        Loss_gain = 0
                        Loss_style = 0
                        Loss_hist = 0
                        Loss = 0
                        for i in range(len(model.layers)):
                            loss_gain_item, loss_style_item = totalLoss.forward(
                                style_feats[i],
                                input_feats[i],
                                Map=Maps[i],
                                mode=model.layers[i])
                            Loss_gain += loss_gain_item
                            Loss_style += loss_style_item
                            #if 'conv4_1' in model.layers[i]: #or 'conv1_1' in model.layers[i]:
                            #   Loss_hist += 15*totalLoss.compare(input_feats[i], histogramMatch(input_feats[i], style_feats[i]))
                        # loss term
                        Loss = Loss_gain + Loss_style + Loss_hist
                        #print(Loss_gain, Loss_style, Loss_hist)
                        Loss.backward(retain_graph=True)

                        return Loss

                    if iters % opt.iter_show == 0 and iters != 0:
                        # record result pics.
                        temp_image = make_grid(torch.clamp(
                            DN(output[0]).unsqueeze(0), 0, 1),
                                               nrow=opt.batch_size,
                                               padding=0,
                                               normalize=False)
                        # train_writer.add_image('temp result', temp_image, iters+images*opt.iter)

                        if iters % (20) == 0 and iters != 0:
                            if opt.optimode == 'dataset':
                                print(imgs)
                                # Save into the subfolder matching the face
                                # index; counter advances only after pair 1.
                                if imgs == 0:
                                    save_image(
                                        temp_image,
                                        "./checkpoints/%s/0/%d.png" %
                                        (opt.outf, counter))
                                else:
                                    save_image(
                                        temp_image,
                                        "./checkpoints/%s/1/%d.png" %
                                        (opt.outf, counter))
                                    counter += 1
                            else:
                                save_image(
                                    temp_image, "./checkpoints/%s/%d_%d.png" %
                                    (opt.outf, k, iters))

                    # Updates.
                    #Loss.backward(retain_graph=True)
                    if opt.gabor == True:
                        optimizer_Gabor.zero_grad()
                        # decoloriation
                        Loss_gabor = totalLoss.L1(
                            Gabor(
                                decolor(255 * DN(output[0]).unsqueeze(0)) /
                                255), Gabor_target)
                        #print(Loss_gabor)
                        Loss_gabor.backward()
                        optimizer_Gabor.step()

                    optimizer.step(closure)
                    pbar.update(1)

                pbar.close()
Пример #5
0
from tqdm import tqdm
from VGG import myVGG
from dataset import ST_dataset, RC_dataset, de_norm, decolor, make_trans
sys.path.insert(1, '/content/PortraitST/options')
sys.path.insert(1, '/content/PortraitST/refs')
from histogram_match import match_histograms
from gainmap_option import FeatureOptions
from torch.utils.data import DataLoader
from torchvision.utils import make_grid, save_image
from PIL import Image

import torch.nn as nn
import torch.nn.functional as F
import numpy as np

# Module-level helpers shared by the functions below.
# DN presumably maps normalized tensors back toward image range — its outputs
# are clamped to [0, 1] before logging/saving elsewhere in this file. Trans is
# a dataset transform pipeline from the project's dataset module — TODO confirm.
DN = de_norm()
Trans = make_trans()


def histogramMatch(img, reference):
    """Match the per-channel histogram of `img` to that of `reference`.

    Both arguments are batched CHW tensors; only the first item of each
    batch is used. Returns a 1xCxHxW float tensor on the GPU.
    """
    # Tensors -> HWC numpy arrays for the histogram-matching routine.
    source_np = img[0].detach().cpu().numpy().transpose(1, 2, 0)
    reference_np = reference[0].detach().cpu().numpy().transpose(1, 2, 0)
    matched = match_histograms(source_np, reference_np, multichannel=True)
    # Back to a batched CHW float tensor on the GPU.
    result = torch.from_numpy(matched.transpose(2, 0, 1))
    return result.unsqueeze(0).float().cuda()


def window_stdev(X, window_size, kernel):
    X = F.pad(X, [
Пример #6
0
def trainRC(opt):
    """Train the FeatureRC reconstruction network.

    Resumes from ``checkpoints/rec/<outf>/model_<start>.pth`` when
    ``opt.start >= 1``, trains with Adam + StepLR, periodically prints the
    loss and a qualitative test reconstruction, and saves one checkpoint
    per epoch.
    """
    print('loading VGG models......')
    DN = de_norm()
    model = FeatureRC(opt).cuda()
    if opt.start >= 1:
        model.load_state_dict(torch.load('checkpoints/rec/%s/model_%d.pth' % (opt.outf, opt.start)))
    os.makedirs("./log/rec/%s/" % opt.outf, exist_ok=True)
    os.makedirs("./checkpoints/rec/%s/" % opt.outf, exist_ok=True)

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2, weight_decay=1e-8)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=500, gamma=0.1)
    traindataset = RC_dataset(root=opt.root, name='train', mode='unpaired')
    dataloader = DataLoader(
            traindataset,
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=0,
        )

    testloader = DataLoader(
            RC_dataset(root=opt.root, name='test', mode='unpaired'),
            batch_size=1,
            shuffle=False,
            num_workers=0,
    )

    # Resume the iteration counter as if the skipped epochs had run.
    iters = 0 + opt.start * len(traindataset)
    print('start processing.')
    for epoch in range(opt.start, opt.epoch):
        pbar = tqdm(total=len(dataloader))
        for k, data in enumerate(dataloader):
            iters += opt.batch_size
            # The model returns its own loss alongside the reconstruction.
            Loss, out = model(data[0].cuda(), data[1].cuda())

            if iters % 50 == 0:
                print("total_loss", Loss.item(), iters)
                # train_writer.add_scalar("total_loss", Loss.item(), iters)

            if iters % 100 == 0:
                # Qualitative check on the first test pair only; no_grad so
                # evaluation does not build autograd graphs.
                with torch.no_grad():
                    for k2, test in enumerate(testloader):
                        if k2 == 0:
                            _, out = model(test[0].cuda(), test[1].cuda())

                            temp_image = make_grid(torch.clamp(DN(test[1][0]).unsqueeze(0), 0, 1), nrow=1, padding=0, normalize=False)
                            print('style', temp_image, iters+k2)
                            # train_writer.add_image('style', temp_image, iters+k2)

                            temp_image = make_grid(torch.clamp(DN(test[0][0]).unsqueeze(0), 0, 1), nrow=1, padding=0, normalize=False)
                            print('face', temp_image, iters+k2)
                            # train_writer.add_image('face', temp_image, iters+k2)

                            temp_image = make_grid(torch.clamp(DN(out[0]).unsqueeze(0), 0, 1), nrow=1, padding=0, normalize=False)
                            print('out', temp_image, iters+k2)
                            # train_writer.add_image('out', temp_image, iters+k2)

            Loss.backward(retain_graph=True)
            optimizer.step()
            optimizer.zero_grad()
            # Bug fix: the StepLR scheduler was created but never stepped, so
            # the learning rate never decayed. step_size=500 counts batches.
            scheduler.step()

            pbar.update(1)

        pbar.close()
        # Move to CPU for a portable checkpoint, then back to the GPU.
        torch.save(model.cpu().state_dict(), 'checkpoints/rec/%s/model_%d.pth' % (opt.outf, epoch))
        model.cuda()
Пример #7
0
def trainCN(opt):
    """Train the CoordinateNet model.

    Resumes from ``checkpoints/rec/<outf>/model_<start>.pth`` when
    ``opt.start >= 1``, trains with Adam + StepLR on a paired CN_dataset,
    logs intermediate images to TensorBoard, and saves one checkpoint per
    epoch.
    """
    print('loading models......')
    CoorNet = CoordinateNet(opt)

    DN = de_norm()
    if opt.start >= 1:
        CoorNet.model.load_state_dict(
            torch.load('checkpoints/rec/%s/model_%d.pth' %
                       (opt.outf, opt.start)))
    os.makedirs("./log/rec/%s/" % opt.outf, exist_ok=True)
    os.makedirs("./checkpoints/rec/%s/" % opt.outf, exist_ok=True)

    dataloader = DataLoader(
        CN_dataset(root=os.path.join(opt.root, opt.name),
                   name='train',
                   mode='paired',
                   length=1200),
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=0,
    )

    train_writer = tensorboardX.SummaryWriter("./log/rec/%s/" % opt.outf)
    optimizer = torch.optim.Adam(CoorNet.model.parameters(),
                                 lr=1e-5,
                                 weight_decay=1e-8)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=500, gamma=0.1)
    # Resume the iteration counter as if the skipped epochs had run
    # (dataset length is fixed to 1200 above).
    iters = 0 + opt.start * 1200
    print('start processing.')

    for epoch in range(opt.start, opt.epoch):
        #pbar = tqdm(total=len(dataloader))
        for k, data in enumerate(dataloader):
            iters += opt.batch_size
            res = CoorNet.get_result(data[0].cuda(), data[3].cuda())
            Loss = CoorNet.loss_func(data, res)

            Loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            # Bug fix: the StepLR scheduler was created but never stepped, so
            # the learning rate never decayed. step_size=500 counts batches.
            scheduler.step()
            #pbar.update(1)

            if iters % (opt.iter_show * 5) == 0:
                # record result pics.
                print(Loss.item())
                temp_image = make_grid(torch.clamp(DN(res[0]), 0, 1),
                                       nrow=opt.batch_size,
                                       padding=0,
                                       normalize=False)
                train_writer.add_image('temp result', temp_image, iters)
                temp_image = make_grid(torch.clamp(DN(data[0][0]), 0, 1),
                                       nrow=opt.batch_size,
                                       padding=0,
                                       normalize=False)
                train_writer.add_image('example', temp_image, iters)
                temp_image = make_grid(torch.clamp(DN(data[2][0]), 0, 1),
                                       nrow=opt.batch_size,
                                       padding=0,
                                       normalize=False)
                train_writer.add_image('target style', temp_image, iters)

        #pbar.close()
        # Move to CPU for a portable checkpoint, then back to the GPU.
        torch.save(CoorNet.model.cpu().state_dict(),
                   'checkpoints/rec/%s/model_%d.pth' % (opt.outf, epoch))
        CoorNet.model.cuda()