Example no. 1
Score: 0
def run_bulk(config):
    """Apply WCT2 style transfer to every matching content/style image pair.

    The content and style directories must contain identically named .png
    files; only the shared names are processed. When both segment
    directories are given, a name must also exist in both of those.
    Results are written to ``config.output`` with a suffix encoding the
    unpool option and the set of transfer points used.
    """
    device = 'cpu' if config.cpu or not torch.cuda.is_available() else 'cuda:0'
    device = torch.device(device)

    # Feature levels at which the whitening/coloring transfer is applied.
    transfer_at = set()
    if config.transfer_at_encoder:
        transfer_at.add('encoder')
    if config.transfer_at_decoder:
        transfer_at.add('decoder')
    if config.transfer_at_skip:
        transfer_at.add('skip')

    # The filenames of the content and style pair should match.
    fnames = set(os.listdir(config.content)) & set(os.listdir(config.style))

    if config.content_segment and config.style_segment:
        fnames &= set(os.listdir(config.content_segment))
        fnames &= set(os.listdir(config.style_segment))

    # The model depends only on the config, not on the image being
    # processed, so build it once up front instead of once per file
    # (the original reconstructed it inside the loop).
    wct2 = None
    if not config.transfer_all:
        wct2 = WCT2(transfer_at=transfer_at,
                    option_unpool=config.option_unpool,
                    device=device,
                    verbose=config.verbose)

    for fname in tqdm.tqdm(fnames):
        if not fname.endswith('.png'):
            print('invalid file (should end with .png), ', fname)
            continue
        _content = os.path.join(config.content, fname)
        _style = os.path.join(config.style, fname)
        _content_segment = os.path.join(config.content_segment, fname) if config.content_segment else None
        _style_segment = os.path.join(config.style_segment, fname) if config.style_segment else None
        _output = os.path.join(config.output, fname)

        content = open_image(_content, config.image_size).to(device)
        style = open_image(_style, config.image_size).to(device)
        content_segment = load_segment(_content_segment, config.image_size)
        style_segment = load_segment(_style_segment, config.image_size)

        if not config.transfer_all:
            with Timer('Elapsed time in whole WCT: {}', config.verbose):
                postfix = '_'.join(sorted(transfer_at))
                fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))
                print('------ transfer:', _output)
                with torch.no_grad():
                    img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha)
                save_image(img.clamp_(0, 1), fname_output, padding=0)
        else:
            # Try every combination of transfer points. A fresh model is
            # built per combination (kept inside the loop to bound memory).
            for _transfer_at in get_all_transfer():
                with Timer('Elapsed time in whole WCT: {}', config.verbose):
                    postfix = '_'.join(sorted(_transfer_at))
                    fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))
                    # Originally printed the bare fname here; use _output
                    # for consistency with the single-transfer branch.
                    print('------ transfer:', _output)
                    wct2_variant = WCT2(transfer_at=_transfer_at,
                                        option_unpool=config.option_unpool,
                                        device=device,
                                        verbose=config.verbose)
                    with torch.no_grad():
                        img = wct2_variant.transfer(content, style, content_segment, style_segment, alpha=config.alpha)
                    save_image(img.clamp_(0, 1), fname_output, padding=0)
Example no. 2
Score: 0
def _prepare_image(path, image_size, device, scale_255=False):
    """Load an image and return it as a 1xCxHxW normalized tensor on *device*.

    Resizes with ``transforms.Resize(image_size)``, center-crops both
    spatial dims down to the nearest multiple of 16 (presumably to match
    the network's pooling stages -- TODO confirm), converts to a tensor
    and applies ImageNet normalization. When ``scale_255`` is true the
    normalized tensor is additionally multiplied by 255, matching the
    original content-image pipeline.
    """
    image = transforms.Resize(image_size)(Image.open(path))
    w, h = image.size
    steps = [
        transforms.CenterCrop((h // 16 * 16, w // 16 * 16)),
        transforms.ToTensor(),
        transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ]
    if scale_255:
        steps.append(transforms.Lambda(lambda x: x * 255))
    return transforms.Compose(steps)(image).unsqueeze(0).to(device)


def run_bulk(config):
    """Apply WCT2 style transfer to every matching content/style image pair.

    The content and style directories must contain identically named image
    files; only the shared names are processed. When both segment
    directories are given, a name must also exist in both of those.
    Results are written to ``config.output`` with a suffix encoding the
    unpool option and the set of transfer points used.
    """
    device = 'cpu' if config.cpu or not torch.cuda.is_available() else 'cuda:0'
    device = torch.device(device)

    # Feature levels at which the whitening/coloring transfer is applied.
    transfer_at = set()
    if config.transfer_at_encoder:
        transfer_at.add('encoder')
    if config.transfer_at_decoder:
        transfer_at.add('decoder')
    if config.transfer_at_skip:
        transfer_at.add('skip')

    # The filenames of the content and style pair should match.
    fnames = set(os.listdir(config.content)) & set(os.listdir(config.style))

    if config.content_segment and config.style_segment:
        fnames &= set(os.listdir(config.content_segment))
        fnames &= set(os.listdir(config.style_segment))

    for fname in tqdm.tqdm(fnames):
        if not is_image_file(fname):
            print('invalid file (is not image), ', fname)
            continue
        _content = os.path.join(config.content, fname)
        _style = os.path.join(config.style, fname)
        _content_segment = os.path.join(
            config.content_segment, fname) if config.content_segment else None
        _style_segment = os.path.join(
            config.style_segment, fname) if config.style_segment else None
        _output = os.path.join(config.output, fname)

        # NOTE(review): content is scaled by 255 after normalization while
        # style is not -- this asymmetry is inherited from the original
        # pipeline; verify it is intended.
        content = _prepare_image(_content, config.image_size, device,
                                 scale_255=True)
        style = _prepare_image(_style, config.image_size, device)

        content_segment = load_segment(_content_segment, config.image_size)
        style_segment = load_segment(_style_segment, config.image_size)
        _, ext = os.path.splitext(fname)

        if not config.transfer_all:
            with Timer('Elapsed time in whole WCT: {}', config.verbose):
                postfix = '_'.join(sorted(transfer_at))
                # BUGFIX: ext already contains the leading dot; the original
                # '.{}' format produced doubled-dot names such as
                # 'foo_cat5_decoder..png'.
                fname_output = _output.replace(
                    ext, '_{}_{}{}'.format(config.option_unpool, postfix, ext))
                print('------ transfer:', _output)
                wct2 = WCT2(transfer_at=transfer_at,
                            option_unpool=config.option_unpool,
                            device=device,
                            verbose=config.verbose)
                with torch.no_grad():
                    img = wct2.transfer(content,
                                        style,
                                        content_segment,
                                        style_segment,
                                        alpha=config.alpha)
                save_image(img.clamp_(0, 1), fname_output, padding=0)
        else:
            # Try every combination of transfer points.
            for _transfer_at in get_all_transfer():
                with Timer('Elapsed time in whole WCT: {}', config.verbose):
                    postfix = '_'.join(sorted(_transfer_at))
                    # Same doubled-dot fix as in the single-transfer branch.
                    fname_output = _output.replace(
                        ext, '_{}_{}{}'.format(config.option_unpool, postfix,
                                               ext))
                    # Originally printed the bare fname here; use _output
                    # for consistency with the single-transfer branch.
                    print('------ transfer:', _output)
                    wct2 = WCT2(transfer_at=_transfer_at,
                                option_unpool=config.option_unpool,
                                device=device,
                                verbose=config.verbose)
                    with torch.no_grad():
                        img = wct2.transfer(content,
                                            style,
                                            content_segment=content_segment,
                                            style_segment=style_segment,
                                            alpha=config.alpha)
                        # NOTE(review): only the all-transfer path
                        # re-normalizes the batch -- inherited from the
                        # original; verify this is intended.
                        img = utils_.normalize_batch(img)
                    save_image(img.clamp_(0, 1), fname_output, padding=0)
def run_bulk(config):
    """Apply Lap_Sob_Gaus style transfer to every matching image pair.

    The content and style directories must contain identically named image
    files; only the shared names are processed. When both segment
    directories are given, a name must also exist in both of those.
    Results are written to ``config.output`` with a suffix encoding the
    unpool option and the set of transfer points used.
    """
    device = 'cpu' if config.cpu or not torch.cuda.is_available() else 'cuda:0'
    device = torch.device(device)

    # Feature levels at which the transfer is applied.
    transfer_at = set()
    if config.transfer_at_encoder:
        transfer_at.add('encoder')
    if config.transfer_at_decoder:
        transfer_at.add('decoder')
    if config.transfer_at_skip:
        transfer_at.add('skip')

    # The filenames of the content and style pair should match.
    fnames = set(os.listdir(config.content)) & set(os.listdir(config.style))

    if config.content_segment and config.style_segment:
        fnames &= set(os.listdir(config.content_segment))
        fnames &= set(os.listdir(config.style_segment))

    # The model depends only on the config, not on the image being
    # processed, so build it once up front instead of once per file
    # (the original reconstructed it inside the loop).
    model = None
    if not config.transfer_all:
        model = Lap_Sob_Gaus(transfer_at=transfer_at,
                             option_unpool=config.option_unpool,
                             device=device,
                             verbose=config.verbose)

    for fname in tqdm.tqdm(fnames):
        if not is_image_file(fname):
            print('invalid file (is not image), ', fname)
            continue
        _content = os.path.join(config.content, fname)
        _style = os.path.join(config.style, fname)
        _content_segment = os.path.join(
            config.content_segment, fname) if config.content_segment else None
        _style_segment = os.path.join(
            config.style_segment, fname) if config.style_segment else None
        _output = os.path.join(config.output, fname)

        content = open_image(_content, config.image_size).to(device)
        style = open_image(_style, config.image_size).to(device)
        content_segment = load_segment(_content_segment, config.image_size)
        style_segment = load_segment(_style_segment, config.image_size)
        _, ext = os.path.splitext(fname)

        if not config.transfer_all:
            with Timer('Elapsed time in whole WCT: {}', config.verbose):
                postfix = '_'.join(sorted(transfer_at))
                # BUGFIX: ext already contains the leading dot; the original
                # '.{}' format produced doubled-dot names such as
                # 'foo_cat5_decoder..png'.
                fname_output = _output.replace(
                    ext, '_{}_{}{}'.format(config.option_unpool, postfix, ext))
                print('------ transfer:', _output)
                with torch.no_grad():
                    img = model.transfer(content,
                                         style,
                                         content_segment,
                                         style_segment,
                                         alpha=config.alpha)
                save_image(img.clamp_(0, 1), fname_output, padding=0)
        else:
            # Try every combination of transfer points. A fresh model is
            # built per combination (kept inside the loop to bound memory).
            for _transfer_at in get_all_transfer():
                with Timer('Elapsed time in whole WCT: {}', config.verbose):
                    postfix = '_'.join(sorted(_transfer_at))
                    # Same doubled-dot fix as in the single-transfer branch.
                    fname_output = _output.replace(
                        ext, '_{}_{}{}'.format(config.option_unpool, postfix,
                                               ext))
                    # Originally printed the bare fname here; use _output
                    # for consistency with the single-transfer branch.
                    print('------ transfer:', _output)
                    model_variant = Lap_Sob_Gaus(
                        transfer_at=_transfer_at,
                        option_unpool=config.option_unpool,
                        device=device,
                        verbose=config.verbose)
                    with torch.no_grad():
                        img = model_variant.transfer(
                            content,
                            style,
                            content_segment=content_segment,
                            style_segment=style_segment,
                            alpha=config.alpha,
                            fname_output=None)
                    save_image(img.clamp_(0, 1), fname_output, padding=0)