Example #1
def run_bulk(config):
    device = 'cpu' if config.cpu or not torch.cuda.is_available() else 'cuda:0'
    device = torch.device(device)

    transfer_at = set()
    if config.transfer_at_encoder:
        transfer_at.add('encoder')
    if config.transfer_at_decoder:
        transfer_at.add('decoder')
    if config.transfer_at_skip:
        transfer_at.add('skip')

    # The filenames of the content and style pair should match
    fnames = set(os.listdir(config.content)) & set(os.listdir(config.style))

    if config.content_segment and config.style_segment:
        fnames &= set(os.listdir(config.content_segment))
        fnames &= set(os.listdir(config.style_segment))

    for fname in tqdm.tqdm(fnames):
        if not fname.endswith('.png'):
            print('invalid file (should end with .png):', fname)
            continue
        _content = os.path.join(config.content, fname)
        _style = os.path.join(config.style, fname)
        _content_segment = os.path.join(config.content_segment, fname) if config.content_segment else None
        _style_segment = os.path.join(config.style_segment, fname) if config.style_segment else None
        _output = os.path.join(config.output, fname)

        content = open_image(_content, config.image_size).to(device)
        style = open_image(_style, config.image_size).to(device)
        content_segment = load_segment(_content_segment, config.image_size)
        style_segment = load_segment(_style_segment, config.image_size)

        if not config.transfer_all:
            with Timer('Elapsed time in whole WCT: {}', config.verbose):
                postfix = '_'.join(sorted(list(transfer_at)))
                fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))
                print('------ transfer:', _output)
                wct2 = WCT2(transfer_at=transfer_at, option_unpool=config.option_unpool, device=device, verbose=config.verbose)
                with torch.no_grad():
                    img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha)
                save_image(img.clamp_(0, 1), fname_output, padding=0)
        else:
            for _transfer_at in get_all_transfer():
                with Timer('Elapsed time in whole WCT: {}', config.verbose):
                    postfix = '_'.join(sorted(list(_transfer_at)))
                    fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))
                    print('------ transfer:', fname)
                    wct2 = WCT2(transfer_at=_transfer_at, option_unpool=config.option_unpool, device=device, verbose=config.verbose)
                    with torch.no_grad():
                        img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha)
                    save_image(img.clamp_(0, 1), fname_output, padding=0)
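Example #1 reads a `config` namespace carrying the content/style/output paths, image size, alpha, unpooling option, per-level transfer flags, and device settings. The sketch below is a minimal argparse setup that would produce a compatible object; the flag names and defaults are inferred from the attributes the function accesses, not taken from the original CLI.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--content', type=str, default='./examples/content')
parser.add_argument('--content_segment', type=str, default=None)
parser.add_argument('--style', type=str, default='./examples/style')
parser.add_argument('--style_segment', type=str, default=None)
parser.add_argument('--output', type=str, default='./outputs')
parser.add_argument('--image_size', type=int, default=512)
parser.add_argument('--alpha', type=float, default=1.0)
parser.add_argument('--option_unpool', type=str, default='cat5')  # 'sum' or 'cat5' in WCT2 (assumption)
parser.add_argument('--transfer_at_encoder', action='store_true')
parser.add_argument('--transfer_at_decoder', action='store_true')
parser.add_argument('--transfer_at_skip', action='store_true')
parser.add_argument('--transfer_all', action='store_true')
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--verbose', action='store_true')

config = parser.parse_args()
run_bulk(config)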
Example #2
def run_bulk(args):
    device = 'cuda:0' if args.is_cuda else 'cpu'
    device = torch.device(device)

    # The filenames of the content and style pair should match
    fnames = set(os.listdir(args.content)) & set(os.listdir(args.style))

    for fname in fnames:

        _content = os.path.join(args.content, fname)
        _style = os.path.join(args.style, fname)

        content = open_image(_content, args.image_size).to(device)
        style = open_image(_style, args.image_size).to(device)
        # segmentation maps are not used in this example; None is passed to transfer() below
        for _transfer_at in get_all_transfer():
            postfix = '_'.join(sorted(list(_transfer_at)))
            wct2 = WCT2(transfer_at=_transfer_at,
                        option_unpool=args.option_unpool,
                        device=device)

            with torch.no_grad():
                img = wct2.transfer(content,
                                    style,
                                    None,
                                    None,
                                    alpha=args.alpha)
            import torchvision
            # clamp() is not in-place, so assign the result back (or use clamp_);
            # otherwise the saved image is never clipped to [0, 1]
            img = img.clamp(0, 1)
            torchvision.utils.save_image(img, './imgs/' + postfix + 't.png')
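Example #2 skips segmentation entirely and always loops over every transfer configuration, so `args` only needs `is_cuda`, `content`, `style`, `image_size`, `option_unpool`, and `alpha`. A hypothetical way to drive it without a CLI (e.g. from a notebook) is a plain namespace; note that the hard-coded ./imgs/ directory must exist beforehand, since torchvision's save_image does not create directories.

import os
from types import SimpleNamespace

import torch

args = SimpleNamespace(
    is_cuda=torch.cuda.is_available(),  # use the GPU only when one is present
    content='./examples/content',       # hypothetical content directory
    style='./examples/style',           # hypothetical style directory
    image_size=512,
    option_unpool='cat5',               # assumed unpooling option
    alpha=1.0,
)
os.makedirs('./imgs', exist_ok=True)    # the function writes into ./imgs/
run_bulk(args)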
Example #3
def run_bulk(config):
    device = 'cpu' if config.cpu or not torch.cuda.is_available() else 'cuda:0'
    device = torch.device(device)

    transfer_at = set()
    if config.transfer_at_encoder:
        transfer_at.add('encoder')
    if config.transfer_at_decoder:
        transfer_at.add('decoder')
    if config.transfer_at_skip:
        transfer_at.add('skip')

    # The filenames of the content and style pair should match
    fnames = set(os.listdir(config.content)) & set(os.listdir(config.style))

    if config.content_segment and config.style_segment:
        fnames &= set(os.listdir(config.content_segment))
        fnames &= set(os.listdir(config.style_segment))

    for fname in tqdm.tqdm(fnames):
        if not is_image_file(fname):
            print('invalid file (not an image):', fname)
            continue
        _content = os.path.join(config.content, fname)
        _style = os.path.join(config.style, fname)
        _content_segment = os.path.join(
            config.content_segment, fname) if config.content_segment else None
        _style_segment = os.path.join(config.style_segment,
                                      fname) if config.style_segment else None
        _output = os.path.join(config.output, fname)

        content = open_image(_content, config.image_size).to(device)
        style = open_image(_style, config.image_size).to(device)
        content_segment = load_segment(_content_segment, config.image_size)
        style_segment = load_segment(_style_segment, config.image_size)
        _, ext = os.path.splitext(fname)

        if not config.transfer_all:
            with Timer('Elapsed time in whole WCT: {}', config.verbose):
                postfix = '_'.join(sorted(list(transfer_at)))
                # ext already includes the leading dot, so don't add another one
                fname_output = _output.replace(
                    ext, '_{}_{}{}'.format(config.option_unpool, postfix,
                                           ext))
                print('------ transfer:', _output)
                LapSobGaus = Lap_Sob_Gaus(transfer_at=transfer_at,
                                          option_unpool=config.option_unpool,
                                          device=device,
                                          verbose=config.verbose)
                with torch.no_grad():
                    img = LapSobGaus.transfer(content,
                                              style,
                                              content_segment,
                                              style_segment,
                                              alpha=config.alpha)
                save_image(img.clamp_(0, 1), fname_output, padding=0)
        else:
            for _transfer_at in get_all_transfer():
                with Timer('Elapsed time in whole WCT: {}', config.verbose):
                    postfix = '_'.join(sorted(list(_transfer_at)))
                    # ext already includes the leading dot, so don't add another one
                    fname_output = _output.replace(
                        ext, '_{}_{}{}'.format(config.option_unpool, postfix,
                                               ext))
                    print('------ transfer:', fname)
                    LapSobGaus = Lap_Sob_Gaus(
                        transfer_at=_transfer_at,
                        option_unpool=config.option_unpool,
                        device=device,
                        verbose=config.verbose)
                    with torch.no_grad():
                        # content = content * 255
                        # style = style * 255
                        img = LapSobGaus.transfer(
                            content,
                            style,
                            content_segment=content_segment,
                            style_segment=style_segment,
                            alpha=config.alpha,
                            fname_output=None)
                        # img = utils_.normalize_batch(img)
                        #img = img.data.cpu()[0] * 0.225 + 0.45
                    save_image(img.clamp_(0, 1), fname_output, padding=0)
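All three examples call a `get_all_transfer()` helper that is not shown here. A plausible sketch, assuming it simply enumerates every non-empty combination of the three transfer points used above ('encoder', 'decoder', 'skip'):

from itertools import combinations

def get_all_transfer():
    # Sketch: every non-empty subset of the transfer points used by the examples.
    points = ['encoder', 'decoder', 'skip']
    return [set(c)
            for r in range(1, len(points) + 1)
            for c in combinations(points, r)]

Each returned set is sorted and joined into the output filename postfix, so {'decoder', 'skip'} yields files tagged decoder_skip.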