Example #1
0
def get_system(name, args, schema=None, timed=False, model_path=None):
    """Factory: build and return the dialogue system registered under `name`.

    Args:
        name: one of 'rulebased', 'hybrid', 'cmd', 'pt-neural'.
        args: namespace with price_tracker_model / templates / policy paths.
        schema: domain schema, required by the neural-backed systems.
        timed: whether the returned system runs timed sessions.
        model_path: checkpoint path for neural-backed systems.

    Raises:
        ValueError: if `name` is not a known system.
    """
    from core.price_tracker import PriceTracker
    lexicon = PriceTracker(args.price_tracker_model)

    if name == 'rulebased':
        from rulebased_system import RulebasedSystem
        from model.generator import Templates, Generator
        from model.manager import Manager
        templates = Templates.from_pickle(args.templates)
        generator = Generator(templates)
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    elif name == 'hybrid':
        # Fix: this branch previously used Templates, Generator, and
        # PytorchNeuralSystem without importing them (they were only
        # imported inside other branches), raising NameError whenever
        # the 'hybrid' system was requested.
        from hybrid_system import HybridSystem
        from neural_system import PytorchNeuralSystem
        from model.generator import Templates, Generator
        templates = Templates.from_pickle(args.templates)
        manager = PytorchNeuralSystem(args, schema, lexicon, model_path, timed)
        generator = Generator(templates)
        return HybridSystem(lexicon, generator, manager, timed)
    elif name == 'cmd':
        from cmd_system import CmdSystem
        return CmdSystem()
    elif name == 'pt-neural':
        from neural_system import PytorchNeuralSystem
        assert model_path
        return PytorchNeuralSystem(args, schema, lexicon, model_path, timed)
    else:
        raise ValueError('Unknown system %s' % name)
Example #2
0
    def get_generators(self, len_val, src_vocab, text_embedding, args):
        """Build the fertility, pointer-state, and (optional) gate generators."""
        # The gate generator only exists when slot gating is enabled.
        slot_gate = Generator(len(GATES), scope_name="gate") if args['slot_gating'] else None

        fertility = Generator(len_val + 1, scope_name="fertility")
        # NOTE(review): 'get_embedding_wieght' matches the embedding
        # object's (misspelled) method name — verify against its class.
        vocab_state = Generator(src_vocab,
                                text_embedding.get_embedding_wieght(),
                                scope_name='state')
        pointer_state = PointerGenerator(state_generator=vocab_state,
                                         scope_name='point_generator')

        return fertility, pointer_state, slot_gate
Example #3
0
 def load_generators(self):
     """Load the generator dictionary from GENERATORS_PATH.

     Each CSV row maps a generator name to its weight map. On any
     failure, falls back to a single default "Custom" generator.
     """
     self.generators = {}
     try:
         table = pd.read_csv(GENERATORS_PATH, index_col=0, keep_default_na=False)
         for _, row in table.iterrows():
             weights = dict(zip(table.columns, row))
             name = weights.pop('generator_name')
             self.generators[name] = Generator(weights)
     except Exception as e:
         # Best-effort load: report and keep a usable default.
         print(f"Unable to load file '{GENERATORS_PATH}', aborting: {e}")
         self.generators = {"Custom": Generator()}
Example #4
0
def get_system(name, args, schema=None, timed=False, model_path=None):
    """Factory: build and return the negotiation system registered under `name`.

    Args:
        name: 'rulebased', 'cmd', 'ranker-*', 'neural-gen', or 'neural-sel'.
        args: namespace with model/template/index paths and retriever settings.
        schema: domain schema for the ranker/neural systems.
        timed: whether the returned system runs timed sessions.
        model_path: checkpoint path for neural-backed systems.

    Raises:
        ValueError: if `name` (or a 'ranker-*' suffix) is not recognized.
    """
    lexicon = PriceTracker(args.price_tracker_model)
    if name == 'rulebased':
        templates = Templates.from_pickle(args.templates)
        generator = Generator(templates)
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    elif name == 'cmd':
        return CmdSystem()
    elif name.startswith('ranker'):
        retriever = Retriever(args.index, context_size=args.retriever_context_len, num_candidates=args.num_candidates)
        if name == 'ranker-ir':
            return IRRankerSystem(schema, lexicon, retriever)
        elif name == 'ranker-ir1':
            # Fix: retriever1/retriever2 were referenced but never built
            # (their construction was commented out), raising NameError.
            retriever1 = Retriever(args.index + '-1', context_size=args.retriever_context_len, num_candidates=args.num_candidates)
            return IRRankerSystem(schema, lexicon, retriever1)
        elif name == 'ranker-ir2':
            retriever2 = Retriever(args.index + '-2', context_size=args.retriever_context_len, num_candidates=args.num_candidates)
            return IRRankerSystem(schema, lexicon, retriever2)
        elif name == 'ranker-neural':
            return NeuralRankerSystem(schema, lexicon, retriever, model_path, args.mappings)
        else:
            # Fix: was a bare `raise ValueError` with no message.
            raise ValueError('Unknown ranker system %s' % name)
    elif name in ('neural-gen', 'neural-sel'):
        assert model_path
        return NeuralSystem(schema, lexicon, model_path, args.mappings, args.decoding, index=args.index, num_candidates=args.num_candidates, retriever_context_len=args.retriever_context_len, timed_session=timed)
    else:
        raise ValueError('Unknown system %s' % name)
def main(args):
    """Reconstruct a .wav file from every .mel file in args.input_folder.

    Loads a Generator checkpoint, runs its inference path on each mel
    spectrogram, and writes `<name>_reconstructed_epochNNNN.wav` beside it.
    """
    checkpoint = torch.load(args.checkpoint_path)
    if args.config is not None:
        hp = HParam(args.config)
    else:
        # Fall back to the hyperparameters serialized in the checkpoint.
        hp = load_hparam_str(checkpoint['hp_str'])

    model = Generator(hp.audio.n_mel_channels).cuda()
    model.load_state_dict(checkpoint['model_g'])
    # Fix: was eval(inference=False), which contradicted the
    # model.inference() call below; sibling scripts in this file call
    # eval(inference=True) before using the inference path.
    model.eval(inference=True)

    with torch.no_grad():
        for melpath in tqdm.tqdm(
                glob.glob(os.path.join(args.input_folder, '*.mel'))):
            mel = torch.load(melpath)
            if len(mel.shape) == 2:
                # Add a batch dimension for a single spectrogram.
                mel = mel.unsqueeze(0)
            mel = mel.cuda()

            audio = model.inference(mel)
            audio = audio.cpu().detach().numpy()

            out_path = melpath.replace(
                '.mel', '_reconstructed_epoch%04d.wav' % checkpoint['epoch'])
            write(out_path, hp.audio.sampling_rate, audio)
Example #6
0
def get_system(name, args, schema=None, timed=False, model_path=None):
    """Return the dialogue system registered under `name`.

    Raises:
        ValueError: if `name` is not a known system.
    """
    if name in ('rulebased', 'neural'):
        # Both systems share an entity lexicon; the realizer maps
        # canonical entities back to surface forms.
        lexicon = Lexicon(schema,
                          args.learned_lex,
                          stop_words=args.stop_words,
                          lexicon_path=args.lexicon)
        realizer = (InverseLexicon.from_file(args.inverse_lexicon)
                    if args.inverse_lexicon else DefaultInverseLexicon())
    if name == 'rulebased':
        generator = Generator(Templates.from_pickle(args.templates))
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    elif name == 'neural':
        assert args.model_path
        return NeuralSystem(schema,
                            lexicon,
                            args.model_path,
                            args.fact_check,
                            args.decoding,
                            realizer=realizer)
    elif name == 'cmd':
        return CmdSystem()
    else:
        raise ValueError('Unknown system %s' % name)
Example #7
0
def load_generator(g_params=None, is_g_clone=False, ckpt_dir=None):
    """Build a label-conditioned Generator, optionally restoring from ckpt_dir."""
    from model.generator import Generator

    if g_params is None:
        # Default configuration (resolutions up to 1024).
        g_params = {
            'z_dim': 512,
            'w_dim': 512,
            'labels_dim': 254,
            'n_mapping': 8,
            'resolutions': [4, 8, 16, 32, 64, 128, 256, 512, 1024],
            'featuremaps': [512, 512, 512, 512, 512, 256, 128, 64, 32],
        }

    # A dummy forward pass creates the model's variables before restore.
    test_labels = tf.ones((1, g_params['labels_dim']), dtype=tf.float32)
    generator = Generator(g_params)
    generator(test_labels)

    if ckpt_dir is not None:
        # The checkpoint key must match the name used when saving.
        key = 'g_clone' if is_g_clone else 'generator'
        ckpt = tf.train.Checkpoint(**{key: generator})
        manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=1)
        ckpt.restore(manager.latest_checkpoint).expect_partial()
        if manager.latest_checkpoint:
            print(f'Generator restored from {manager.latest_checkpoint}')
    return generator
Example #8
0
def main(args):
    """Vocode every .mel file in args.input_folder back to a .wav file."""
    checkpoint = torch.load(args.checkpoint_path)
    hp = HParam(args.config) if args.config is not None else load_hparam_str(checkpoint['hp_str'])

    model = Generator(hp.audio.n_mel_channels).cuda()
    model.load_state_dict(checkpoint['model_g'])
    model.eval()

    mel_paths = glob.glob(os.path.join(args.input_folder, '*.mel'))
    with torch.no_grad():
        for melpath in tqdm.tqdm(mel_paths):
            mel = torch.load(melpath)
            if len(mel.shape) == 2:
                mel = mel.unsqueeze(0)  # add a batch dimension
            mel = mel.cuda()

            # pad input mel with zeros to cut artifact
            # see https://github.com/seungwonpark/melgan/issues/8
            pad = torch.full((1, hp.audio.n_mel_channels, 10), -11.5129).cuda()
            mel = torch.cat((mel, pad), axis=2)

            audio = model(mel)
            audio = audio.squeeze()  # collapse all dimensions except time
            audio = audio[:-(hp.audio.hop_length * 10)]  # drop the padded tail
            audio = (MAX_WAV_VALUE * audio).clamp(min=-MAX_WAV_VALUE,
                                                  max=MAX_WAV_VALUE)
            audio = audio.short().cpu().detach().numpy()

            out_path = melpath.replace('.mel', '_reconstructed_epoch%04d.wav' % checkpoint['epoch'])
            write(out_path, hp.audio.sampling_rate, audio)
Example #9
0
    def __init__(self,
                 width,
                 height,
                 ichan,
                 ochan,
                 l1_weight=100.,
                 lr=0.0002,
                 beta1=0.5):
        # def __init__(self, width, height, ichan, ochan, l1_weight=1000., lr=0.0002, beta1=0.5): # I enlarge the l1_weight to see what will happen
        """
            Build a conditional GAN graph (TF1 static graph) with an L1 term.

            width: image width in pixel.
            height: image height in pixel.
            ichan: number of channels used by input images.
            ochan: number of channels used by output images.
            l1_weight: L1 loss weight.
            lr: learning rate for ADAM optimizer.
            beta1: beta1 parameter for ADAM optimizer.
        """
        # Train/test switch fed to both networks (e.g. for normalization layers).
        self._is_training = tf.placeholder(tf.bool)

        # Generator input plus the paired discriminator inputs
        # (a = condition image with ichan channels, b = target with ochan).
        self._g_inputs = tf.placeholder(tf.float32,
                                        [None, width, height, ichan])
        self._d_inputs_a = tf.placeholder(tf.float32,
                                          [None, width, height, ichan])
        self._d_inputs_b = tf.placeholder(tf.float32,
                                          [None, width, height, ochan])
        self._g = Generator(self._g_inputs, self._is_training, ochan)
        # Real pair: (condition, target). Fake pair: (condition, G output).
        # 'cl9' appears to be the generator's final decoder layer — TODO confirm.
        self._real_d = Discriminator(
            tf.concat([self._d_inputs_a, self._d_inputs_b], axis=3),
            self._is_training)
        self._fake_d = Discriminator(tf.concat(
            [self._d_inputs_a, self._g._decoder['cl9']['fmap']], axis=3),
                                     self._is_training,
                                     reuse=True)  # shares weights with _real_d

        # Generator loss: adversarial term on the fake pair plus a
        # weighted L1 reconstruction term against the target image.
        self._g_loss = -tf.reduce_mean(
            tf.log(self._fake_d._discriminator['l5']['fmap'])
        ) + l1_weight * tf.reduce_mean(
            tf.abs(self._d_inputs_b - self._g._decoder['cl9']['fmap']))
        # Discriminator loss: log-likelihood on real vs. fake pairs.
        self._d_loss = -tf.reduce_mean(
            tf.log(self._real_d._discriminator['l5']['fmap']) +
            tf.log(1.0 - self._fake_d._discriminator['l5']['fmap']))
        # self._g_loss = tf.reduce_mean(tf.square(self._d_inputs_b - self._g._decoder['cl9']['fmap'])) # set g_loss = mse loss !!!!!
        # self._d_loss = -tf.reduce_mean(tf.log(self._real_d._discriminator['l5']['fmap']) + tf.log(1.0 - self._fake_d._discriminator['l5']['fmap']))

        # Run the G optimizer under scope-'G' UPDATE_OPS (typically
        # normalization statistics) and restrict it to G's variables.
        g_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='G')
        with tf.control_dependencies(g_update_ops):
            self._g_train_step = tf.train.AdamOptimizer(
                lr, beta1=beta1).minimize(self._g_loss,
                                          var_list=tf.get_collection(
                                              tf.GraphKeys.TRAINABLE_VARIABLES,
                                              scope='G'))

        # Same for the discriminator, under scope 'D'.
        d_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='D')
        with tf.control_dependencies(d_update_ops):
            self._d_train_step = tf.train.AdamOptimizer(
                lr, beta1=beta1).minimize(self._d_loss,
                                          var_list=tf.get_collection(
                                              tf.GraphKeys.TRAINABLE_VARIABLES,
                                              scope='D'))
Example #10
0
def main():
    """Set up data, models, and optimizers, then run GAN training.

    Fix: the entire body was wrapped in an `if __name__ == '__main__':`
    guard *inside* the function, which made main() a silent no-op when
    called from an importing module; that guard belongs at module level
    around the call to main(), not in its body.
    """
    writer = tensorboard.SummaryWriter(log_dir='./logs')
    device = torch.device(
        'cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    vggloss = VGGLosses(device=device).to(device)

    dataset = Dataset(root='dataset/Shinkai',
                      style_transform=transform,
                      smooth_transform=transform)

    dataloader = DataLoader(dataset, batch_size=16, shuffle=True)

    G = Generator().to(device)
    D = PatchDiscriminator().to(device)

    G.apply(weights_init)
    D.apply(weights_init)

    optimizer_G = optim.Adam(G.parameters(), lr=0.0001)  # Based on paper
    optimizer_D = optim.Adam(D.parameters(), lr=0.0004)  # Based on paper

    # Warm-up (content-only) phase, then full adversarial training.
    init_train(20, lr=0.1, con_weight=1.0)
    train(epoch=10, con_weight=1.2, gra_weight=2., col_weight=10.)
Example #11
0
def transformer(args):
    """Assemble a GAT-encoder / Transformer-decoder EncoderDecoder from args."""
    d_model, d_hidden = args.d_model, args.d_hidden
    n_heads, n_layers = args.n_heads, args.n_layers
    src_vocab, trg_vocab = args.src_vocab, args.trg_vocab
    input_dp, model_dp = args.input_drop_ratio, args.drop_ratio
    enc_dp, dec_dp = args.enc_dp, args.dec_dp

    # Embedding + positional-encoding stacks for source and target.
    src_embed = nn.Sequential(Embeddings(d_model, src_vocab),
                              PositionalEncoding(d_model, input_dp))
    tgt_embed = nn.Sequential(Embeddings(d_model, trg_vocab),
                              PositionalEncoding(d_model, input_dp))

    encoder = GATEncoder(d_model, d_hidden, n_heads, enc_dp, args.n_enclayers)
    decoder = Decoder(clone(DecoderLayer(d_model, n_heads, d_hidden, dec_dp), n_layers))
    generator = Generator(d_model, trg_vocab)
    model = EncoderDecoder(encoder, decoder, src_embed, tgt_embed, generator)

    # Optional weight tying between embeddings and output projection.
    if args.share_vocab:
        model.src_embed[0].lut.weight = model.tgt_embed[0].lut.weight
    if args.share_embed:
        model.generator.proj.weight = model.tgt_embed[0].lut.weight

    return model
def test(config):
    """Run the generator over the test set and save source/fake comparison plots.

    Args:
        config: namespace with image_size, test_dir, load_model, result_path.
    """
    print(config)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    transform = transforms.Compose([
        transforms.Resize((config.image_size, config.image_size)),
        transforms.ToTensor(),
    ])

    test_dataset = TestDataset(test_dir=config.test_dir, transforms=transform)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1)

    generator = Generator(image_size=config.image_size).to(device)

    if config.load_model:
        generator.load_state_dict(torch.load(config.load_model))

    # Fix: the result directory was created twice (duplicated exists/makedirs
    # blocks); exist_ok also removes the check-then-create race.
    os.makedirs(config.result_path, exist_ok=True)

    generator.eval()
    for i, data in enumerate(test_loader):
        source = data['source'].to(device).float()
        fake = generator(source)
        # Side-by-side plot: input on the left, generated image on the right.
        plt.subplot(121)
        plt.imshow(tensor2image(source))
        plt.title('source')
        plt.subplot(122)
        plt.imshow(tensor2image(fake))
        plt.title('fake')
        plt.savefig(join(config.result_path, 'step-%d.png' % (i + 1)))
    generator.train()
Example #13
0
def make_model(src_vocab,
               tgt_vocab,
               N=6,
               d_model=512,
               d_ff=2048,
               h=8,
               dropout=0.1):
    """Build the encoder-decoder Transformer on args.device (module-level args)."""
    dc = copy.deepcopy
    dev = args.device
    attn = MultiHeadedAttention(h, d_model).to(dev)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout).to(dev)
    position = PositionalEncoding(d_model, dropout).to(dev)

    # Each layer gets its own deep copy of the shared sublayers.
    enc = Encoder(EncoderLayer(d_model, dc(attn), dc(ff), dropout).to(dev),
                  N).to(dev)
    dec = Decoder(DecoderLayer(d_model, dc(attn), dc(attn), dc(ff),
                               dropout).to(dev), N).to(dev)
    src_embed = nn.Sequential(Embeddings(d_model, src_vocab).to(dev), dc(position))
    tgt_embed = nn.Sequential(Embeddings(d_model, tgt_vocab).to(dev), dc(position))
    model = Transformer(enc, dec, src_embed, tgt_embed,
                        Generator(d_model, tgt_vocab)).to(dev)

    # This was important from their code:
    # initialize parameters with Glorot / fan_avg.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model.to(dev)
Example #14
0
def main(args):
    """Vocode one saved mel spectrogram (.npy) into a .wav file."""
    checkpoint = torch.load(args.checkpoint_path)
    hp = HParam(args.config) if args.config is not None else load_hparam_str(checkpoint['hp_str'])

    model = Generator(hp.audio.n_mel_channels).cuda()
    model.load_state_dict(checkpoint['model_g'])
    model.eval(inference=True)

    with torch.no_grad():
        mel = torch.from_numpy(np.load(args.input))
        if len(mel.shape) == 2:
            mel = mel.unsqueeze(0)  # add a batch dimension
        mel = mel.cuda()

        audio = model.inference(mel)
        # For multi-band inference
        print(audio.shape)
        audio = audio.squeeze(0)  # collapse all dimension except time axis
        if args.d:
            # Optional denoising pass over the raw waveform.
            denoiser = Denoiser(model).cuda()
            audio = denoiser(audio, 0.1)
        audio = audio.squeeze()
        audio = audio[:-(hp.audio.hop_length * 10)]
        audio = (MAX_WAV_VALUE * audio).clamp(min=-MAX_WAV_VALUE,
                                              max=MAX_WAV_VALUE - 1)
        audio = audio.short().cpu().detach().numpy()

        out_path = args.input.replace(
            '.npy', '_hifi_GAN_epoch%04d.wav' % checkpoint['epoch'])
        write(out_path, hp.audio.sampling_rate, audio)
Example #15
0
    def load_gan(self):
        """Restore both halves of the GAN from their saved state."""
        # Restored generator; its latent size drives our sampling.
        self.G = Generator(0, 0, restore=True)
        self.latent_size = self.G.z_length
        # Restored discriminator.
        self.D = Discriminator(0, restore=True)
Example #16
0
    def __init__(self, **kwargs):
        """Initialize the widget and the managers that operate on its field."""
        super(Prawler, self).__init__(**kwargs)

        self.toolbox = None
        # All managers share the same Field instance.
        self.field = Field()
        shared_field = self.field
        self.generator = Generator(field=shared_field)
        self.relation_manager = RelationManager(field=shared_field)
        self.drag_manager = DragManager(field=shared_field)
    def prepare_models(config):
        """Create and initialize the discriminator, generator, and combined GAN."""
        model_cfg = config["model"]

        discriminator = Discriminator.create_model(model_cfg["discriminator"])
        ModelInitializator._init_model(discriminator, "discriminator", config)

        generator = Generator.create_model(model_cfg["generator"])
        ModelInitializator._init_model(generator, "generator", config)

        # The GAN wraps the two sub-models built above.
        gan = Gan.create_model(discriminator, generator, config)
        ModelInitializator._init_model(gan, "gan", config)

        return generator, discriminator, gan
Example #18
0
def melgan(model_name='nvidia_tacotron2_LJ11_epoch3200', pretrained=True, progress=True):
    """torch.hub entry point: build a vocoder Generator, optionally pretrained."""
    params = model_params[model_name]
    model = Generator(params['mel_channel'])

    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(params['model_url'],
                                                        progress=progress)
        model.load_state_dict(checkpoint['model_g'])

    # Switch the generator into its inference configuration.
    model.eval(inference=True)
    return model
Example #19
0
    def restore(self):
        """Rebuild the generator and discriminator from saved checkpoints."""
        print('Restoring model...')

        generator = Generator(self.resolution, self.batch_size)
        discriminator = Discriminator(self.resolution)
        generator.restore()
        discriminator.restore()

        self.restored = True
        self.initialize(generator, discriminator)
Example #20
0
def get_system(name, args, schema, model_path=None, timed=False):
    """Return the dialogue system registered under `name`.

    Args:
        name: 'rulebased' or 'cmd'.
        args: namespace with lexicon/templates/policy pickle paths.
        schema: domain schema (unused here; kept for interface parity).
        model_path: unused here; kept for interface parity.
        timed: whether the returned system runs timed sessions.

    Raises:
        ValueError: if `name` is not a known system.
    """
    lexicon = Lexicon.from_pickle(args.lexicon)
    if name == 'rulebased':
        # Fix: templates were unpickled twice (once unconditionally before
        # the branch, and once here); load them only where they are used.
        templates = Templates.from_pickle(args.templates)
        generator = Generator(templates)
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    elif name == 'cmd':
        return CmdSystem()
    else:
        raise ValueError('Unknown system %s' % name)
Example #21
0
    def __init__(self, model_dir, g_optimizer, d_optimizer, lr, warmup,
                 max_iters):
        """Set up a face-swap GAN trainer: dirs, logging, nets, optimizers.

        Args:
            model_dir: subdirectory under checkpoints/ for logs and weights.
            g_optimizer / d_optimizer: optimizer factories (called with
                parameters, lr, betas).
            lr: base learning rate for both optimizers.
            warmup: warmup iteration count (stored for later use).
            max_iters: total training iterations.
        """
        super().__init__()
        self.model_dir = model_dir
        # Fix: exist_ok replaces the racy exists()-then-makedirs pattern.
        os.makedirs(f'checkpoints/{model_dir}', exist_ok=True)
        self.logs_dir = f'checkpoints/{model_dir}/logs'
        os.makedirs(self.logs_dir, exist_ok=True)
        self.writer = SummaryWriter(self.logs_dir)

        # Face-recognition networks kept in eval mode with pretrained
        # weights — presumably used for identity losses; confirm in the
        # training step.
        self.arcface = ArcFaceNet(50, 0.6, 'ir_se').cuda()
        self.arcface.eval()
        self.arcface.load_state_dict(torch.load(
            'checkpoints/model_ir_se50.pth', map_location='cuda'),
                                     strict=False)

        self.mobiface = MobileFaceNet(512).cuda()
        self.mobiface.eval()
        self.mobiface.load_state_dict(torch.load(
            'checkpoints/mobilefacenet.pth', map_location='cuda'),
                                      strict=False)

        self.generator = Generator().cuda()
        self.discriminator = Discriminator().cuda()

        # Loss-term weights.
        self.adversarial_weight = 1
        self.src_id_weight = 5
        self.tgt_id_weight = 1
        self.attributes_weight = 10
        self.reconstruction_weight = 10

        self.lr = lr
        self.warmup = warmup
        self.g_optimizer = g_optimizer(self.generator.parameters(),
                                       lr=lr,
                                       betas=(0, 0.999))
        self.d_optimizer = d_optimizer(self.discriminator.parameters(),
                                       lr=lr,
                                       betas=(0, 0.999))

        # Mixed-precision wrapping (NVIDIA apex, opt level O1).
        self.generator, self.g_optimizer = amp.initialize(self.generator,
                                                          self.g_optimizer,
                                                          opt_level="O1")
        self.discriminator, self.d_optimizer = amp.initialize(
            self.discriminator, self.d_optimizer, opt_level="O1")

        # Stored as a Parameter so the iteration counter travels with
        # the module's state dict.
        self._iter = nn.Parameter(torch.tensor(1), requires_grad=False)
        self.max_iters = max_iters

        if torch.cuda.is_available():
            self.cuda()
Example #22
0
    def new(self):
        """Build fresh (untrained) networks with RMSprop optimizers."""
        print('Creating a new model...')

        generator = Generator(self.resolution, self.batch_size, depth=12)
        discriminator = Discriminator(self.resolution, depth=12)

        learning_rate = 5e-5
        generator.model.optimizer = keras.optimizers.RMSprop(learning_rate)
        discriminator.model.optimizer = keras.optimizers.RMSprop(learning_rate)

        self.restored = False
        self.initialize(generator, discriminator)
Example #23
0
def get_system(name, args, schema=None, timed=False):
    """Return the dialogue system registered under `name`."""
    lexicon = Lexicon(schema.values['owner'])
    if name == 'rulebased':
        generator = Generator(Templates.from_pickle(args.templates))
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    if name == 'cmd':
        return CmdSystem()
    raise ValueError('Unknown system %s' % name)
Example #24
0
 def __init__(self, properties: SimulationProperties):
     """Wire up sources, devices, and the buffer for one simulation run."""
     self.__stats = Stats(properties.num_sources, properties.num_devices)
     # One request generator per source arrival rate (lambda).
     self.__generators = [
         Generator(idx, rate, self.__stats)
         for idx, rate in enumerate(properties.source_lambdas)
     ]
     # One device per service time (tau).
     self.__devices = [
         Device(idx, tau, self.__stats)
         for idx, tau in enumerate(properties.device_taus)
     ]
     self.__buffer = Buffer(properties.buffer_capacity, self.__stats)
     self.__max_requests = properties.max_requests
     self.__global_time = 0
Example #25
0
def generate_fakes(config_path, num_fake_batches, checkpoint_dir, generated_images_dir):
    """Sample fake images from every checkpoint and save one plot per checkpoint.

    Args:
        config_path: path to the model config file.
        num_fake_batches: number of generator batches to sample per checkpoint.
        checkpoint_dir: directory holding the tf checkpoints to iterate.
        generated_images_dir: output directory for the saved plots.
    """
    cfg = ModelConfig(config_path)

    discriminator = Discriminator(cfg)
    generator = Generator(cfg)

    # Dummy forward passes so the models' variables exist before restoring.
    generator(tf.ones([cfg.batch_size, cfg.latent_size]), tf.ones([cfg.batch_size, cfg.labels_size]))
    discriminator(tf.ones([cfg.batch_size, cfg.resolution, cfg.resolution,
                           cfg.num_channels]), tf.ones([cfg.batch_size, cfg.labels_size]))

    # Initialize checkpoint and manager
    checkpoint = tf.train.Checkpoint(discriminator=discriminator, generator=generator)
    manager = tf.train.CheckpointManager(
        checkpoint, checkpoint_dir, max_to_keep=10)

    # Fix: removed a dead pre-loop fake_labels/random_input computation
    # whose results were always recomputed (shadowed) inside the loop.
    # Fix: create the output directory once, hoisted out of the loop.
    if not os.path.exists(generated_images_dir):
        os.makedirs(generated_images_dir)

    for checkpoint_path in manager.checkpoints:
        print('Restoring checkpoint from {}'.format(checkpoint_path))
        checkpoint.restore(checkpoint_path).assert_consumed()
        fake_image_batches = []
        fake_labels_batches = []
        for _ in range(num_fake_batches):
            if cfg.label_conditioning:
                # One-hot random class label per sample in the batch.
                fake_labels_batch = np.zeros([cfg.batch_size, cfg.labels_size], dtype=np.float32)
                fake_labels_batch[np.arange(cfg.batch_size), np.random.randint(
                    cfg.labels_size, size=cfg.batch_size)] = 1.0
            else:
                fake_labels_batch = None
            random_input = tf.random.normal([cfg.batch_size, cfg.latent_size])

            fake_images_batch = generator(random_input, fake_labels_batch)

            fake_image_batches.append(fake_images_batch)
            fake_labels_batches.append(fake_labels_batch)

        fake_images = np.concatenate(fake_image_batches, axis=0)
        if cfg.label_conditioning:
            fake_labels = np.concatenate(fake_labels_batches, axis=0)
        else:
            fake_labels = None

        plot_batch(fake_images, fake_labels, generated_images_dir + '/checkpoint-' +
                   checkpoint_path.split('-')[-1], cfg.label_conditioning)
Example #26
0
    def init_single_layer_gan(self):
        """Create the (discriminator, generator) pair for the current scale."""
        cfg = self.config

        generator = Generator(cfg).to(cfg.device)
        generator.apply(weights_init)
        if cfg.generator_path is not None:
            generator.load_state_dict(torch.load(cfg.generator_path))

        # The discriminator is conditioned on the real image at this scale.
        scale_real = self.reals[len(self.Gs)]
        discriminator = ACMDiscriminator(cfg, cfg.num_heads, scale_real).to(cfg.device)
        discriminator.apply(weights_init)
        if cfg.discriminator_path is not None:
            discriminator.load_state_dict(torch.load(cfg.discriminator_path))
        print(discriminator)

        return discriminator, generator
Example #27
0
def get_system(name, args, schema=None, timed=False, model_path=None):
    """Return the dialogue system registered under `name`."""
    lexicon = Lexicon(schema.values['item'])

    if name == 'rulebased':
        generator = Generator(Templates.from_pickle(args.templates))
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    if name == 'hybrid':
        assert model_path
        templates = Templates.from_pickle(args.templates)
        manager = PytorchNeuralSystem(args, schema, lexicon, model_path, timed)
        return HybridSystem(lexicon, Generator(templates), manager, timed)
    if name == 'cmd':
        return CmdSystem()
    if name == 'fb-neural':
        assert model_path
        return FBNeuralSystem(model_path, args.temperature, timed_session=timed, gpu=False)
    if name == 'pt-neural':
        assert model_path
        return PytorchNeuralSystem(args, schema, lexicon, model_path, timed)
    raise ValueError('Unknown system %s' % name)
Example #28
0
    def build_base_component(self):
        """Build the base TF1 graph: placeholders, generator, discriminator,
        losses, and tensorboard summaries."""
        # hyper params
        self.lr = tf.placeholder(tf.float32, shape=(),
                                 name='lr')  # learning rate
        self.dropout = tf.placeholder(tf.float32, shape=(), name='dropout')

        # input of the graph
        self.img = tf.placeholder(tf.uint8,
                                  shape=(None, None, None, 1),
                                  name='img')  # (N, H, W, C); C=1 because inputs are grayscale
        self.formula = tf.placeholder(tf.int32,
                                      shape=(None, None),
                                      name='formula')  # (N, formula_tokens)
        self.formula_length = tf.placeholder(tf.int32,
                                             shape=(None, ),
                                             name='formula_length')  # (N, 1)

        # self.pred_train, self.pred_test
        # TensorFlow (1.x) only has static computation graphs, so both the
        # train and test parts of the graph must be built up front.
        self.generator = Generator(self._config, self._vocab.n_tok,
                                   self._vocab.id_end)
        train, test = self.generator(self.img, self.formula, self.dropout)
        self.pred_train = train
        self.pred_test = test

        # Discriminator scores the generator's test-time token ids against
        # the ground-truth formula.
        self.discriminator = Discriminator(self._config, self._vocab.n_tok)
        self.D_loss = self.discriminator(self.pred_test.ids, self.formula,
                                         self.dropout)
        self.D_optimizer = tf.train.AdamOptimizer().minimize(self.D_loss)

        # self.loss: the generator's first-stage loss
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.pred_train, labels=self.formula)
        # Mask out positions past each formula's true length.
        mask = tf.sequence_mask(self.formula_length)
        losses = tf.boolean_mask(losses, mask)
        self.loss = tf.reduce_mean(losses)

        # to compute perplexity for test
        self.ce_words = tf.reduce_sum(losses)  # sum of CE for each word
        self.n_words = tf.reduce_sum(self.formula_length)  # number of words

        # tensorboard
        tf.summary.image("img", self.img)
        tf.summary.scalar("learning_rate", self.lr)
        tf.summary.scalar("dropout", self.dropout)
        tf.summary.scalar("G_loss", self.loss)
        tf.summary.scalar("D_loss", self.D_loss)
        tf.summary.scalar("sum_of_CE_for_each_word", self.ce_words)
        tf.summary.scalar("number_of_words", self.n_words)
Example #29
0
def init(config, checkpoint_path, device="cuda"):
    """Load a multi-band generator checkpoint; return (hparams, model)."""
    checkpoint = torch.load(checkpoint_path)
    # Prefer an explicit config file; otherwise use the hparams string
    # serialized inside the checkpoint.
    hp = HParam(config) if config is not None else load_hparam_str(checkpoint['hp_str'])

    model = Generator(hp.audio.n_mel_channels,
                      hp.model.n_residual_layers,
                      ratios=hp.model.generator_ratio,
                      mult=hp.model.mult,
                      out_band=hp.model.out_channels).to(device)
    model.load_state_dict(checkpoint['model_g'])
    model.eval(inference=True)
    return hp, model
Example #30
0
    def init_models(self):
        """Create the generator/discriminator pair, optionally loading weights."""
        cfg = self.config

        generator = Generator(cfg).to(cfg.device)
        generator.apply(weights_init)
        if cfg.generator_path is not None:
            generator.load_state_dict(torch.load(cfg.generator_path))

        discriminator = Discriminator(cfg).to(cfg.device)
        discriminator.apply(weights_init)
        if cfg.discriminator_path is not None:
            discriminator.load_state_dict(torch.load(cfg.discriminator_path))

        return discriminator, generator