Example #1
def alphamatting():
    netG = NetG(False).cuda()
    netG.load_state_dict(t.load(MODEL_DIR, map_location=t.device('cpu')))
    netG.eval()

    img_root = '/data1/zzl/dataset/alphamatting/input_lowers'
    trimap_root = '/data1/zzl/dataset/alphamatting/trimap_lowres'

    img_name = os.listdir(img_root)

    current_path = os.getcwd()

    for name in tqdm.tqdm(img_name):
        for i in range(1, 4):
            trimap_floder = 'Trimap' + str(i)

            img = cv2.imread(os.path.join(img_root, name))
            trimap = cv2.imread(os.path.join(trimap_root, trimap_floder,
                                             name))[:, :, 0]

            pred_mattes = inference_img_whole(netG, img, trimap)
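            # inference_img_whole is a repo helper (not shown here); it is
            # assumed to run netG on the full image/trimap pair and return an
            # alpha matte already scaled to the 0-255 range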

            pred_mattes = pred_mattes.astype(np.uint8)

            save_path = os.path.join(current_path, trimap_floder)
            if not os.path.exists(save_path):
                os.mkdir(save_path)
            cv2.imwrite(os.path.join(save_path, name), pred_mattes)
Example #2
def clip_input():
    with t.no_grad():
        net_G = NetG().cuda()
        net_G.eval()
        net_G.load_state_dict(t.load(MODEL_DIR, map_location=t.device('cpu')))

        img_root = '/data1/zzl/dataset/matting/alphamatting/input_lowers'
        trimap_root = '/data1/zzl/dataset/matting/alphamatting/trimap_lowres'

        img_name = os.listdir(img_root)

        for name in tqdm.tqdm(img_name):
            for i in range(1, 4):

                trimap_floder = 'Trimap' + str(i)

                img = Image.open(os.path.join(img_root, name))
                print('img_size', img.size)
                print('img_shape', np.shape(img))
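                # padding_img and clip_img are repo helpers (not shown here):
                # padding_img is assumed to pad the image so both sides become
                # multiples of 320 and to return the number of 320x320 tiles
                # along each axis; clip_img then cuts the padded image into
                # those tiles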
                img, w_clip, h_clip = padding_img(img)
                print('img.shape', np.shape(img))
                # print('img', w_clip, h_clip)

                crop_img = clip_img(img, h_clip, w_clip)

                img = transform(img)

                trimap = Image.open(
                    os.path.join(trimap_root, trimap_floder, name))
                (h_r, w_r) = np.shape(trimap)
                trimap, w_clip, h_clip = padding_img(trimap)

                # print('trimap', w_clip, h_clip)

                crop_tri = clip_img(trimap, h_clip, w_clip)
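                # concatenate each RGB tile with its trimap tile along the
                # channel axis (4-channel input) and run the generator on the
                # whole tile batch; combination() (another repo helper)
                # presumably stitches the per-tile alphas back into one image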

                input_img = t.cat((crop_img, crop_tri), dim=1)
                input_img = input_img.cuda()

                fake_alpha = net_G(input_img)

                com_fake = combination(fake_alpha, h_clip, w_clip)

                vis.images(com_fake.cpu().numpy(), win='fake_alpha')
                vis.images(img.numpy() * 0.5 + 0.5, win='input')
                # print(fake_alpha[0].size())
                # print(com_fake.size())
                save_alpha = to_pil(com_fake.cpu())
                save_alpha = save_alpha.convert('L')
                print('fake_alpha.shape', np.shape(save_alpha))
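                # crop away the padded border so the saved alpha matches the
                # original (w_r, h_r) trimap resolution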
                box = (0, 0, w_r, h_r)
                save_alpha = save_alpha.crop(box)

                if not os.path.exists(trimap_floder):
                    os.mkdir(trimap_floder)
                print('save_alpha.shape', np.shape(save_alpha))
                save_alpha.save(trimap_floder + '/' + name)
    return
Example #3
def whole_adobe():
    netG = NetG(False).cuda()
    netG.load_state_dict(t.load(MODEL_DIR, map_location=t.device('cpu')))
    netG.eval()
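    # Tester is a project-specific helper; it is assumed to run net_G on the
    # images under test_root and return a dict mapping metric names to values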
    tester = Tester(
        net_G=netG,
        test_root=
        '/data1/zzl/dataset/Combined_Dataset/Test_set/Adobe-licensed_images',
        device='cuda:0')
    test_result = tester.test()
    for k, v in test_result.items():
        print(k, v)
Example #4
def main():
    netG = nn.DataParallel(NetG()).cuda()
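    # the checkpoint is loaded onto the CPU and then copied into the (already
    # CUDA) DataParallel wrapper; this assumes the saved state dict carries the
    # matching 'module.'-prefixed keys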
    netG.load_state_dict(t.load(MODEL_DIR, map_location=t.device('cpu')))
    netG.eval()

    img_root = 'examples/image/spring-289527_1920_15.png'
    # alternative: '/home/zzl/dataset/Combined_Dataset/Test_set/Adobe-licensed_images/image/antique-honiton-lace-1182740_1920_0.png'
    trimap_root = 'examples/trimap/spring-289527_1920_15.png'
    # alternative: '/home/zzl/dataset/Combined_Dataset/Test_set/Adobe-licensed_images/trimaps/antique-honiton-lace-1182740_1920_0.png'

    image = cv2.imread(img_root)
    trimap = cv2.imread(trimap_root)[:, :, 0]

    pred_mattes = inference_img_whole(netG, image, trimap)

    pred_mattes = pred_mattes.astype(np.uint8)
    # pred_mattes[trimap == 255] = 255
    # pred_mattes[trimap == 0] = 0

    cv2.imwrite('result.png', pred_mattes)
Example #5
def resize_input():
    with t.no_grad():
        net_G = NetG().cuda()
        net_G.eval()
        net_G.load_state_dict(t.load(MODEL_DIR, map_location=t.device('cpu')))

        img_root = '/data1/zzl/dataset/matting/alphamatting/input_lowers'
        trimap_root = '/data1/zzl/dataset/matting/alphamatting/trimap_lowres'

        img_name = os.listdir(img_root)

        for name in tqdm.tqdm(img_name):
            for i in range(1, 4):

                trimap_floder = 'Trimap' + str(i)

                img = Image.open(os.path.join(img_root, name))
                (w, h) = img.size
                w_large = w // 320 + 1
                h_large = h // 320 + 1
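                # round both sides up to the next multiple of 320, presumably
                # so the encoder's downsampling stride divides the input size
                # (unlike clip_input above, this stretches the image instead of
                # padding it)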

                img = img.resize((w_large * 320, h_large * 320))

                img = transform(img)

                trimap = Image.open(
                    os.path.join(trimap_root, trimap_floder, name))
                trimap = trimap.resize((w_large * 320, h_large * 320))
                # np.shape() returns (rows, cols); keep these under separate
                # names so (w, h) still holds the original image size
                (h_t, w_t) = np.shape(trimap)
                trimap = np.reshape(trimap, (h_t, w_t, 1))
                trimap = transform(trimap)

                input_img = t.cat((img, trimap), dim=0)
                input_img = input_img[None]
                input_img = input_img.cuda()

                fake_alpha = net_G(input_img)
                vis.images(fake_alpha.cpu().numpy(), win='fake_alpha')
                vis.images(img.numpy() * 0.5 + 0.5, win='input')
                # print(fake_alpha[0].size())
                save_alpha = to_pil(fake_alpha.cpu()[0])
                save_alpha = save_alpha.convert('L')
                # the prediction is at the enlarged (multiple-of-320) size;
                # scale it back down to the original (w, h) so the saved matte
                # matches the input resolution
                save_alpha = save_alpha.resize((w, h))
                if not os.path.exists(trimap_floder):
                    os.mkdir(trimap_floder)
                print(np.shape(save_alpha))
                save_alpha.save(trimap_floder + '/' + name)
Example #6
def alphamatting():
    netG = nn.DataParallel(NetG()).cuda()
    netG.load_state_dict(t.load(MODEL_DIR, map_location=t.device('cpu')))
    netG.eval()

    img_root = '/data0/zzl/dataset/matting/alphamatting/input_lowers'
    trimap_root = '/data0/zzl/dataset/matting/alphamatting/trimap_lowres'

    img_name = os.listdir(img_root)

    for name in tqdm.tqdm(img_name):
        for i in range(1, 4):
            trimap_floder = 'Trimap' + str(i)

            img = cv2.imread(os.path.join(img_root, name))
            trimap = cv2.imread(os.path.join(trimap_root, trimap_floder,
                                             name))[:, :, 0]

            pred_mattes = inference_img_whole(netG, img, trimap)

            pred_mattes = pred_mattes.astype(np.uint8)

            # cv2.imwrite fails silently if the target directory is missing
            if not os.path.exists(trimap_floder):
                os.mkdir(trimap_floder)
            cv2.imwrite(trimap_floder + '/' + name, pred_mattes)
Example #7
def adobe():
    netG = nn.DataParallel(NetG()).cuda()
    netG.load_state_dict(t.load(MODEL_DIR, map_location=t.device('cpu')))
    netG.eval()

    ROOT = '/home/zzl/dataset/Combined_Dataset/Test_set/Adobe-licensed_images'
    img_root = os.path.join(ROOT, 'image')
    trimap_root = os.path.join(ROOT, 'trimaps')
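    # image and trimap files are assumed to share the same filenames, so one
    # name indexes both directories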

    img_names = sorted(os.listdir(img_root))

    out_root = '/home/zzl/result'
    # cv2.imwrite fails silently if the output directory does not exist
    os.makedirs(out_root, exist_ok=True)

    for name in img_names:
        img_path = os.path.join(img_root, name)
        trimap_path = os.path.join(trimap_root, name)

        img = cv2.imread(img_path)
        trimap = cv2.imread(trimap_path)[:, :, 0]

        pred_mattes = inference_img_whole(netG, img, trimap)

        pred_mattes = pred_mattes.astype(np.uint8)
        cv2.imwrite(out_root + '/' + name, pred_mattes)
Example #8
def main():
    netG = NetG(False).cuda()
    netG.load_state_dict(t.load(MODEL_DIR, map_location=t.device('cpu')))
    netG.eval()

    img_root = './examples/images'
    trimap_root = './examples/trimaps'
    save_root = './result'
    images = os.listdir(img_root)

    for img in images:

        image = cv2.imread(os.path.join(img_root, img))
        trimap = cv2.imread(os.path.join(trimap_root, img))[:, :, 0]

        pred_mattes = inference_img_whole(netG, image, trimap)

        pred_mattes = pred_mattes.astype(np.uint8)
        # pred_mattes[trimap == 255] = 255
        # pred_mattes[trimap == 0] = 0
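        # (if uncommented, the two lines above would clamp the prediction to
        # the regions the trimap already marks as pure foreground/background)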
        if not os.path.exists(save_root):
            os.mkdir(save_root)
        cv2.imwrite(os.path.join(save_root, img), pred_mattes)
Example #9
import torchvision.transforms as transforms
import numpy as np
import torch as t
import os
import math
from model.AlphaGAN import NetG
from visualize import Visualizer
from PIL import Image

device = t.device('cuda:0')
vis = Visualizer('alphaGAN_test')
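# Visualizer appears to wrap a visdom client; the string is assumed to be the
# environment name used by the vis.images(...) calls in the test functions
# shown earlier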

net_G = NetG()
net_G.load_state_dict(t.load('/home/zzl/model/alphaGAN/netG/new_aspp/netG_5.pth'))

net_G.to(device)

transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
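# ToTensor maps PIL images to [0, 1] tensors; Normalize with mean 0.5 and
# std 0.5 then shifts them to [-1, 1] (the `* 0.5 + 0.5` in the visualization
# calls above undoes this for display)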


def clip_img(img, h_clip, w_clip):
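    # split the padded image into an h_clip x w_clip grid of 320x320 tiles;
    # each tile is passed through the normalization transform above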

    img_list = []
    for x in range(w_clip):
        for y in range(h_clip):
            region = (x*320, y*320, x*320+320, y*320+320)
            crop_img = img.crop(region)
            crop_img = transform(crop_img)