Example 1
    def __init__(self,
                 model_path='./model_checkpoints',
                 transfer_at=('encoder', 'skip', 'decoder'),
                 option_unpool='cat5',
                 device='cuda:0',
                 verbose=False):
        """Load the pretrained wavelet encoder/decoder pair and configure
        where the style transfer is applied.

        Args:
            model_path: Directory holding the ``wave_{encoder,decoder}_*.pth``
                checkpoints.
            transfer_at: Iterable of locations to apply feature transfer;
                any subset of {'encoder', 'skip', 'decoder'}. A tuple default
                is used instead of a list to avoid the shared mutable-default
                pitfall; ``set()`` accepts any iterable, so list arguments
                from existing callers keep working.
            option_unpool: Unpooling variant; also selects the checkpoint file.
            device: torch device string, e.g. 'cuda:0' or 'cpu'.
            verbose: When True, progress messages are printed.
        """
        # Deduplicate and validate the requested transfer locations.
        self.transfer_at = set(transfer_at)
        assert not (self.transfer_at - set(['encoder', 'decoder', 'skip'])
                    ), 'invalid transfer_at: {}'.format(transfer_at)
        assert self.transfer_at, 'empty transfer_at'

        self.device = torch.device(device)
        self.verbose = verbose
        # Build the networks and move them to the target device.
        self.encoder = WaveEncoder(option_unpool).to(self.device)
        self.decoder = WaveDecoder(option_unpool).to(self.device)
        # map_location keeps tensors on CPU during deserialization; the
        # modules were already placed on self.device above.
        self.encoder.load_state_dict(
            torch.load(os.path.join(
                model_path, 'wave_encoder_{}_l4.pth'.format(option_unpool)),
                       map_location=lambda storage, loc: storage))
        self.decoder.load_state_dict(
            torch.load(os.path.join(
                model_path, 'wave_decoder_{}_l4.pth'.format(option_unpool)),
                       map_location=lambda storage, loc: storage))
Example 2
    def __init__(self,
                 model_path='./model_checkpoints',
                 transfer_at=('encoder', 'skip', 'decoder'),
                 option_unpool='cat5',
                 device='cuda:0',
                 verbose=False):
        """Load the pretrained wavelet encoder/decoder pair.

        Args:
            model_path: Directory holding the ``wave_{encoder,decoder}_*.pth``
                checkpoints.
            transfer_at: Iterable of locations to apply feature transfer;
                any subset of {'encoder', 'skip', 'decoder'}. The default is
                a tuple rather than a list to avoid the mutable-default
                pitfall; callers passing lists are unaffected.
            option_unpool: Unpooling variant; also selects the checkpoint file.
            device: torch device string, e.g. 'cuda:0' or 'cpu'.
            verbose: When True, progress messages are printed.
        """
        # Deduplicate and validate the requested transfer locations.
        self.transfer_at = set(transfer_at)
        assert not (self.transfer_at - set(['encoder', 'decoder', 'skip'])), 'invalid transfer_at: {}'.format(transfer_at)
        assert self.transfer_at, 'empty transfer_at'

        self.device = torch.device(device)
        self.verbose = verbose
        self.encoder = WaveEncoder(option_unpool).to(self.device)
        self.decoder = WaveDecoder(option_unpool).to(self.device)
        # This variant maps checkpoint tensors straight onto self.device.
        self.encoder.load_state_dict(torch.load(os.path.join(model_path, 'wave_encoder_{}_l4.pth'.format(option_unpool)), map_location=self.device))
        self.decoder.load_state_dict(torch.load(os.path.join(model_path, 'wave_decoder_{}_l4.pth'.format(option_unpool)), map_location=self.device))
Example 3
    def __init__(self,
                 model_path='./model_checkpoints',
                 transfer_at=('encoder', 'skip', 'decoder'),
                 option_unpool='cat5',
                 device='cuda:0',
                 verbose=False):
        """Load the pretrained wavelet encoder/decoder pair.

        Args:
            model_path: Directory holding the ``wave_{encoder,decoder}_*.pth``
                checkpoints.
            transfer_at: Iterable of transfer locations; expected to be a
                subset of {'encoder', 'skip', 'decoder'}. NOTE(review): this
                variant performs no validation of the values — confirm callers
                always pass valid entries. The default is a tuple rather than
                a list to avoid the shared mutable-default pitfall.
            option_unpool: Unpooling variant; also selects the checkpoint file.
            device: torch device string, e.g. 'cuda:0' or 'cpu'.
            verbose: When True, progress messages are printed.
        """
        # Deduplicate the requested transfer locations.
        self.transfer_at = set(transfer_at)

        self.device = torch.device(device)
        self.verbose = verbose
        self.encoder = WaveEncoder(option_unpool).to(self.device)
        self.decoder = WaveDecoder(option_unpool).to(self.device)
        # map_location keeps tensors on CPU during deserialization; the
        # modules were already placed on self.device above.
        self.encoder.load_state_dict(
            torch.load(os.path.join(
                model_path, 'wave_encoder_{}_l4.pth'.format(option_unpool)),
                       map_location=lambda storage, loc: storage))
        self.decoder.load_state_dict(
            torch.load(os.path.join(
                model_path, 'wave_decoder_{}_l4.pth'.format(option_unpool)),
                       map_location=lambda storage, loc: storage))
Example 4
class WCT2:
    """Photorealistic style transfer using whitening-and-coloring transforms
    (WCT) applied at configurable points of a wavelet encoder/decoder pair.

    The transfer locations are chosen at construction time via
    ``transfer_at`` and may be any subset of {'encoder', 'skip', 'decoder'}.
    """

    def __init__(self,
                 model_path='./model_checkpoints',
                 transfer_at=('encoder', 'skip', 'decoder'),
                 option_unpool='cat5',
                 device='cuda:0',
                 verbose=False):
        """Load the pretrained wavelet encoder/decoder checkpoints.

        Args:
            model_path: Directory holding the ``wave_{encoder,decoder}_*.pth``
                checkpoints.
            transfer_at: Iterable of transfer locations; any subset of
                {'encoder', 'skip', 'decoder'}. A tuple default replaces the
                original mutable list default; ``set()`` accepts any
                iterable, so existing callers passing lists are unaffected.
            option_unpool: Unpooling variant; also selects the checkpoint file.
            device: torch device string, e.g. 'cuda:0' or 'cpu'.
            verbose: When True, progress messages are printed via print_().
        """
        # Deduplicate and validate the requested transfer locations.
        self.transfer_at = set(transfer_at)
        assert not (self.transfer_at - set(['encoder', 'decoder', 'skip'])
                    ), 'invalid transfer_at: {}'.format(transfer_at)
        assert self.transfer_at, 'empty transfer_at'

        self.device = torch.device(device)
        self.verbose = verbose
        self.encoder = WaveEncoder(option_unpool).to(self.device)
        self.decoder = WaveDecoder(option_unpool).to(self.device)
        # map_location keeps tensors on CPU during deserialization; the
        # modules were already placed on self.device above.
        self.encoder.load_state_dict(
            torch.load(os.path.join(
                model_path, 'wave_encoder_{}_l4.pth'.format(option_unpool)),
                       map_location=lambda storage, loc: storage))
        self.decoder.load_state_dict(
            torch.load(os.path.join(
                model_path, 'wave_decoder_{}_l4.pth'.format(option_unpool)),
                       map_location=lambda storage, loc: storage))

    def print_(self, msg):
        """Print ``msg`` only when verbose mode is enabled."""
        if self.verbose:
            print(msg)

    def encode(self, x, skips, level):
        """Run one encoder level on ``x``; wavelet components go into ``skips``."""
        return self.encoder.encode(x, skips, level)

    def decode(self, x, skips, level):
        """Run one decoder level on ``x``, consuming components from ``skips``."""
        return self.decoder.decode(x, skips, level)

    def get_all_feature(self, x):
        """Encode and partially decode ``x``, collecting intermediate features.

        Returns:
            (feats, skips) where ``feats`` maps 'encoder'/'decoder' to dicts
            keyed by level, and ``skips`` holds the wavelet skip components
            produced by the encoder.
        """
        skips = {}
        feats = {'encoder': {}, 'decoder': {}}
        # Full encoder pass; record each level's output when transferring
        # at the encoder.
        for level in [1, 2, 3, 4]:
            x = self.encode(x, skips, level)
            if 'encoder' in self.transfer_at:
                feats['encoder'][level] = x

        # If the encoder features are unused, keep the bottleneck as the
        # level-4 decoder feature instead.
        if 'encoder' not in self.transfer_at:
            feats['decoder'][4] = x
        # Partial decoder pass; the output of decoding level L is the input
        # feature of level L-1, hence the key shift below.
        for level in [4, 3, 2]:
            x = self.decode(x, skips, level)
            if 'decoder' in self.transfer_at:
                feats['decoder'][level - 1] = x
        return feats, skips

    def transfer(self,
                 content,
                 style,
                 content_segment,
                 style_segment,
                 alpha=1):
        """Stylize ``content`` with ``style`` using segment-aware WCT.

        Args:
            content: Content image tensor.
            style: Style image tensor.
            content_segment: Segmentation map for the content image (may be
                empty/None depending on compute_label_info's contract).
            style_segment: Segmentation map for the style image.
            alpha: Blend strength passed through to feature_wct.

        Returns:
            The stylized feature map after the final decoder level.
        """
        label_set, label_indicator = compute_label_info(
            content_segment, style_segment)
        content_feat, content_skips = content, {}
        # Pre-compute every style feature and the style wavelet skips.
        style_feats, style_skips = self.get_all_feature(style)

        # Levels at which each kind of transfer is allowed.
        wct2_enc_level = [1, 2, 3, 4]
        wct2_dec_level = [1, 2, 3, 4]
        wct2_skip_level = ['pool1', 'pool2', 'pool3']

        # Encoder pass with optional per-level WCT against style features.
        for level in [1, 2, 3, 4]:
            content_feat = self.encode(content_feat, content_skips, level)
            if 'encoder' in self.transfer_at and level in wct2_enc_level:
                content_feat = feature_wct(content_feat,
                                           style_feats['encoder'][level],
                                           content_segment,
                                           style_segment,
                                           label_set,
                                           label_indicator,
                                           alpha=alpha,
                                           device=self.device)
                self.print_('transfer at encoder {}'.format(level))
        # Optionally transfer each high-frequency wavelet skip component.
        if 'skip' in self.transfer_at:
            for skip_level in wct2_skip_level:
                for component in [0, 1, 2]:  # component: [LH, HL, HH]
                    content_skips[skip_level][component] = feature_wct(
                        content_skips[skip_level][component],
                        style_skips[skip_level][component],
                        content_segment,
                        style_segment,
                        label_set,
                        label_indicator,
                        alpha=alpha,
                        device=self.device)
                self.print_('transfer at skip {}'.format(skip_level))

        # Decoder pass; transfer only at levels for which a style decoder
        # feature was recorded by get_all_feature.
        for level in [4, 3, 2, 1]:
            if 'decoder' in self.transfer_at and level in style_feats[
                    'decoder'] and level in wct2_dec_level:
                content_feat = feature_wct(content_feat,
                                           style_feats['decoder'][level],
                                           content_segment,
                                           style_segment,
                                           label_set,
                                           label_indicator,
                                           alpha=alpha,
                                           device=self.device)
                self.print_('transfer at decoder {}'.format(level))
            content_feat = self.decode(content_feat, content_skips, level)
        return content_feat