Example #1
    def __init__(self,
                 num_emb,
                 emb_dim,
                 hidden_dim,
                 seq_len,
                 batch_size,
                 use_cuda,
                 test_mode=False):
        super(Generator_attention, self).__init__()
        # Constants Initialization
        self.SOS_Index = 0
        self.EOS_Index = 1
        self.PAD_Index = 2

        # Embeddings
        self.emb = nn.Embedding(num_emb, emb_dim)
        self.model = transformer.Transformer(self.emb,
                                             self.PAD_Index,
                                             self.emb.num_embeddings,
                                             max_seq_len=max(seq_len, seq_len))
        self.test_mode = test_mode
        if not test_mode:
            self.data_loader = GenDataIter('inshorts_test/real.data',
                                           batch_size)
            self.data_loader.reset()
        """
    def calc_bleu(self):
        data_file = '../seq_gan_with_attention/real.data'

        # metadata = load_vocab(self.CHECKPOINT_PATH + '/seq_gan/metadata.data')
        # idx_to_word = metadata['idx_to_word']
        train_data_iter = GenDataIter(data_file, 1)
        reference = self.get_references(train_data_iter)

        # pred = self.test_predict(self.seq_gan, train_data_iter, self.seq_gan_metadata)
        train_data_iter = GenDataIter(data_file, 1)
        candidates_ga = self.test_predict(self.gen_attention, train_data_iter,
                                          self.seq_gan_metadata)

        train_data_iter = GenDataIter(data_file, 1)
        candidates_sg = self.test_predict(self.seq_gan, train_data_iter,
                                          self.seq_gan_metadata)

        # train_data_iter = GenDataIter(data_file, 1)

        candidates_ao = []
        for sentence in reference:
            s = " ".join(sentence)

            # s = "kindergarten is great"
            sent = test_attention_only.test(s)
            candidates_ao.append(sent)

        # candidates_sg = self.test_predict(self.seq_gan, train_data_iter, self.seq_gan_metadata)

        # CALC BLEU

        # references =
        # candidates_words = []
        # for id in candidates:
        #     candidates_words.append(idx_to_word[])
        # load
        # candidatates_id to words
        # [0, 12] => [['a', 'aaa']]

        sen_score = 0
        for sent in candidates_ga:
            sen_score += sentence_bleu(reference, sent)
        print("@@@@@@@@@@@@@@@@", sen_score)  # / len(candidates_ga))

        # print('Individual 3-gram: %f' % corpus_bleu(reference, candidates_ga, weights=(1, 0, 0, 0)))
        print('Individual 3-gram: %f' % sen_score)
        # print('Individual 4-gram: %f' % corpus_bleu(reference, candidates_ga, weights=(1, 0, 0, 0)))
        print('Individual 3-gram: %f' %
              corpus_bleu(reference, candidates_sg, weights=(1, 0, 0, 0)))
        # print('Individual 4-gram: %f' % corpus_bleu(reference, candidates_sg, weights=(1, 0, 0, 0)))
        print('Individual 3-gram: %f' %
              corpus_bleu(reference, candidates_ao, weights=(1, 0, 0, 0)))
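
# Note on the NLTK BLEU API used above: weights=(1, 0, 0, 0) scores unigram
# overlap only, and corpus_bleu expects one *list of reference token-lists* per
# hypothesis. A minimal, self-contained illustration with toy sentences (not
# project data):
from nltk.translate.bleu_score import sentence_bleu, corpus_bleu

_refs = [['the', 'cat', 'sat'], ['a', 'cat', 'was', 'sitting']]
_hyp = ['the', 'cat', 'was', 'sitting']
print(sentence_bleu(_refs, _hyp, weights=(1, 0, 0, 0)))    # unigram BLEU, one sentence
print(corpus_bleu([_refs], [_hyp], weights=(1, 0, 0, 0)))  # corpus API: nested one level deeper
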
def MLE(epochs, generator_):

    generator_loss_mle = []
    true_loss_mle = []
    generator = copy.deepcopy(generator_)
    if opt.cuda:
        generator = generator.cuda()

    gen_criterion = nn.NLLLoss(reduction='sum')
    gen_optimizer = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()

    for epoch in range(epochs):
        loss = train_epoch(generator, gen_data_iter, gen_criterion,
                           gen_optimizer)
        print('Epoch [%d] Model Loss: %f' % (epoch, loss))
        generator_loss_mle.append(loss)
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
        loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
        true_loss_mle.append(loss)
        print('Epoch [%d] True Loss: %f' % (epoch, loss))

    return true_loss_mle, generator_loss_mle, generator
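
# train_epoch and eval_epoch are helpers that are not shown in this snippet.
# A minimal sketch of what an MLE training epoch over GenDataIter typically
# looks like; the iterator contract (reset(), batches of (data, target)
# LongTensors) is inferred from how it is used above, the rest is an assumption:
def train_epoch_sketch(model, data_iter, criterion, optimizer):
    total_loss, total_words = 0.0, 0
    data_iter.reset()
    for data, target in data_iter:
        target = target.contiguous().view(-1)
        pred = model.forward(data)          # expected shape: (batch * seq_len, vocab)
        loss = criterion(pred, target)
        total_loss += loss.item()
        total_words += target.numel()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return total_loss / total_words
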
Example #4
def demo():
    idx_to_word, word_to_idx, VOCAB_SIZE = load_vocab(CHECKPOINT_PATH)
    test_iter = GenDataIter(TEST_FILE, BATCH_SIZE)
    generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
    generator = generator.cuda()
    generator.load_state_dict(torch.load(CHECKPOINT_PATH + 'generator.model'))
    test_predict(generator, test_iter, idx_to_word)
    def demo(self, model, metadata, test_sentence):
        # idx_to_word, word_to_idx, VOCAB_SIZE = load_vocab(CHECKPOINT_PATH)
        self.test_sentence = test_sentence
        self.max_test_sentence_len = 10

        # Get padded sentences
        self.padded_sentence = pad_sentences(self.test_sentence,
                                             self.max_test_sentence_len)
        # print(self.padded_sentence)

        # Get ids of padded sentence
        padded_sent_ids = get_ids(self.padded_sentence,
                                  metadata['idx_to_word'],
                                  metadata['word_to_idx'],
                                  metadata['vocab_size'])
        # print(padded_sent_ids)

        # print("&"*80, padded_sent_ids)
        # return [["NONE"]]
        # Write to temporary file
        out_file = self.TEST_FILE
        with open(out_file, "w") as fp:
            fp.writelines(["%s " % item for item in padded_sent_ids])

        test_iter = GenDataIter(self.TEST_FILE, 1)
        return self.test_predict(model, test_iter, metadata)
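
# pad_sentences and get_ids are helpers that are not shown in this snippet.
# One plausible minimal version of each is sketched below; the '<pad>' token
# and the unk_id fallback are illustrative assumptions, not the project's
# actual behaviour.
def pad_sentences_sketch(sentence, max_len, pad_token='<pad>'):
    # Truncate or pad a whitespace-tokenised sentence to exactly max_len tokens.
    tokens = sentence.split()[:max_len]
    return tokens + [pad_token] * (max_len - len(tokens))

def get_ids_sketch(tokens, idx_to_word, word_to_idx, vocab_size, unk_id=0):
    # Map tokens to ids, falling back to unk_id for out-of-vocabulary words.
    return [word_to_idx.get(t, unk_id) for t in tokens]
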
Example #6
def demo():
    idx_to_word, word_to_idx, VOCAB_SIZE = load_vocab(CHECKPOINT_PATH)
    test_iter = GenDataIter(TEST_FILE, 1)
    generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, g_sequence_len,
                          BATCH_SIZE, opt.cuda)
    generator = generator.cuda()
    generator.load_state_dict(
        torch.load(CHECKPOINT_PATH + 'generator.model',
                   map_location={'cuda:1': 'cpu'}))
    return test_predict(generator, test_iter, idx_to_word)
Example #7
def demo():
    metadata = load_vocab(CHECKPOINT_PATH)
    idx_to_word = metadata['idx_to_word']
    word_to_idx = metadata['word_to_idx']
    VOCAB_SIZE = metadata['vocab_size']
    test_iter = GenDataIter(TEST_FILE, BATCH_SIZE)
    generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
    if opt.cuda:
        generator = generator.cuda()
    generator.load_state_dict(torch.load(CHECKPOINT_PATH + 'generator_seqgan.model'))
    #test_predict(generator, test_iter, idx_to_word)
    generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE)

    show_some_generated_sequences(idx_to_word, 100, NEGATIVE_FILE)
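
# generate_samples is used throughout these examples but never shown. A rough
# sketch of the usual SeqGAN-style implementation: sample sequences from the
# generator and write them as space-separated token ids, one sequence per line
# (the format GenDataIter reads back). The seq_len default is illustrative.
def generate_samples_sketch(model, batch_size, generated_num, output_file, seq_len=20):
    samples = []
    for _ in range(int(generated_num / batch_size)):
        batch = model.sample(batch_size, seq_len).cpu().tolist()
        samples.extend(batch)
    with open(output_file, 'w') as fout:
        for s in samples:
            fout.write(' '.join(map(str, s)) + '\n')
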
def training_fucntion_generator(conf_data):
    """Training Process for generator network.
    
    Parameters
    ----------
    conf_data: dict
        Dictionary containing all parameters and objects.       

    Returns
    -------
    conf_data: dict
        Dictionary containing all parameters and objects.       

    """
    PRE_EPOCH_NUM = 2
    seq = conf_data['GAN_model']['seq']
    BATCH_SIZE = 64
    GENERATED_NUM = 10000 
    EVAL_FILE = 'eval.data'
    POSITIVE_FILE = 'real.data'
    NEGATIVE_FILE = 'gene.data'
    
    classes = int(conf_data['GAN_model']['classes'])
    w_loss = int(conf_data['GAN_model']['w_loss'])
    g_loss_func = conf_data['generator_loss']
    
    epoch = conf_data['epoch']
    epochs = conf_data['epochs']

    generator = conf_data['generator_model']
    discriminator = conf_data['discriminator_model']
    optimizer_G = conf_data['generator_optimizer']
    mini_batch_size = (conf_data['GAN_model']['mini_batch_size'])

    optimizer_G.zero_grad()

    # Generate a batch of images
    if seq == 0:
        valid = conf_data['valid']
        gen_imgs = conf_data['gen_imgs']
        z = conf_data['noise']
        if classes <= 0:
            #gen_imgs = generator(z)
            # Loss measures generator's ability to fool the discriminator
            validity = discriminator(gen_imgs)
        elif classes > 0:
            gen_labels = conf_data['gen_labels']
            #gen_imgs = generator(z,gen_labels)
            validity = discriminator(gen_imgs, gen_labels)
           
        if w_loss == 1:
            g_loss = -g_loss_func.loss(validity,valid)
        elif w_loss == 0:
            g_loss = g_loss_func.loss(validity,valid) 
        conf_data['g_loss'] = g_loss
        g_loss.backward()
        optimizer_G.step()
    elif seq == 1:
        #print ("Reached Here 3 ---------> ")
        gen_gan_loss = GANLoss()
        rollout = conf_data['rollout']
        target_lstm = conf_data['target_lstm']
        for it in range(1):
            samples = generator.sample(mini_batch_size, conf_data['generator']['sequece_length'])
            # construct the input to the generator, add zeros before samples and delete the last column
            zeros = torch.zeros((mini_batch_size, 1)).type(torch.LongTensor)
            if samples.is_cuda:
                zeros = zeros.cuda()
            inputs = Variable(torch.cat([zeros, samples.data], dim = 1)[:, :-1].contiguous())
            targets = Variable(samples.data).contiguous().view((-1,))
            # calculate the reward
            rewards = rollout.get_reward(samples, 16, discriminator)
            rewards = Variable(torch.Tensor(rewards))
            if conf_data['cuda']:
                rewards = torch.exp(rewards.cuda()).contiguous().view((-1,))
            prob = generator.forward(inputs)
            rewards = rewards.contiguous().view(-1,)
            loss = gen_gan_loss(prob, targets, rewards)
            optimizer_G.zero_grad()
            loss.backward()
            optimizer_G.step()
        #TODO : Change back. Uncomment and indent till line above to rollout
        #if total_batch % 1 == 0 or total_batch == TOTAL_BATCH - 1:
        generate_samples(generator, mini_batch_size, GENERATED_NUM, EVAL_FILE,conf_data)
        #print ("Reached Here 4 ---------> ")
        eval_iter = GenDataIter(EVAL_FILE, mini_batch_size)
        #print ("Reached Here 5 ---------> ")
        loss = eval_epoch(target_lstm, eval_iter, g_loss_func,conf_data)
        conf_data['g_loss']= loss
        #print ("Reached Here 6 ---------> ")
       #print('Batch [%d] True Loss: %f' % (total_batch, loss))
        rollout.update_params()

    #g_loss = g_loss_func.loss(validity, valid)

    # print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" % (epoch, epochs, conf_data['iterator'], 5,
    #                                                 conf_data['d_loss'].item(), g_loss.item()))
    if seq == 0:
        print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" % (epoch, epochs, conf_data['iterator'], len(conf_data['data_learn']),
                                                       conf_data['d_loss'].item(), g_loss.item()))
    elif seq == 1:
        print("[Epoch %d/%d] [Batch %d] [D loss: %f] [G loss: %f]"% (epoch, epochs, conf_data['iterator'],
                                                       conf_data['d_loss'], conf_data['g_loss']))
    #print ("Done")

    conf_data['generator_model'] = generator
    conf_data['generator_optimizer'] = optimizer_G

    conf_data['discriminator_model'] = discriminator
    conf_data['generator_loss'] = g_loss_func
    if seq == 1:
        conf_data['rollout'] = rollout
    return conf_data
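
# GANLoss is the policy-gradient (REINFORCE) objective of SeqGAN: the negative
# log-likelihood of each sampled token, weighted by its rollout reward. A
# minimal sketch, assuming prob holds per-token log-probabilities of shape
# (batch * seq_len, vocab) and target / reward are flattened to (batch * seq_len,):
import torch
import torch.nn as nn

class GANLossSketch(nn.Module):
    def forward(self, prob, target, reward):
        one_hot = torch.zeros(prob.size(), device=prob.device)
        one_hot.scatter_(1, target.view(-1, 1), 1.0)   # mark the sampled token per row
        log_p = (prob * one_hot).sum(dim=1)            # log-prob of each sampled token
        return -(log_p * reward).sum()                 # reward-weighted NLL
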
Example #9
class Generator_attention(nn.Module):
    """Generator """
    def __init__(self,
                 num_emb,
                 emb_dim,
                 hidden_dim,
                 seq_len,
                 batch_size,
                 use_cuda,
                 test_mode=False):
        super(Generator_attention, self).__init__()
        # Constants Initialization
        self.SOS_Index = 0
        self.EOS_Index = 1
        self.PAD_Index = 2

        # Embeddings
        self.emb = nn.Embedding(num_emb, emb_dim)
        self.model = transformer.Transformer(self.emb,
                                             self.PAD_Index,
                                             self.emb.num_embeddings,
                                             max_seq_len=max(seq_len, seq_len))
        self.test_mode = test_mode
        if not test_mode:
            self.data_loader = GenDataIter('inshorts_test/real.data',
                                           batch_size)
            self.data_loader.reset()
        """
        self.num_emb = num_emb
        self.emb_dim = emb_dim
        self.hidden_dim = hidden_dim
        self.use_cuda = use_cuda
        self.emb = nn.Embedding(num_emb, emb_dim)
        self.lstm = nn.LSTM(emb_dim, hidden_dim, batch_first=True)
        self.lin = nn.Linear(hidden_dim, num_emb)
        self.softmax = nn.LogSoftmax()
        self.init_params()
        """

    def forward(self, x):
        """
        Args:
            x: (batch_size, seq_len), sequence of tokens generated by generator
        """
        return self.model(x, x)
        """
        emb = self.emb(x)
        h0, c0 = self.init_hidden(x.size(0))
        output, (h, c) = self.lstm(emb, (h0, c0))
        pred = self.softmax(self.lin(output.contiguous().view(-1, self.hidden_dim)))
        return pred
        """

    def step(self, x, h, c):
        """
        Args:
            x: (batch_size,  1), sequence of tokens generated by generator
            h: (1, batch_size, hidden_dim), lstm hidden state
            c: (1, batch_size, hidden_dim), lstm cell state
        """
        emb = self.emb(x)
        output, (h, c) = self.lstm(emb, (h, c))
        pred = F.softmax(self.lin(output.view(-1, self.hidden_dim)), dim=1)
        return pred, h, c

    def init_hidden(self, batch_size):
        h = Variable(torch.zeros((1, batch_size, self.hidden_dim)))
        c = Variable(torch.zeros((1, batch_size, self.hidden_dim)))
        if self.use_cuda:
            h, c = h.cuda(), c.cuda()
        return h, c

    def init_params(self):
        for param in self.parameters():
            param.data.uniform_(-0.05, 0.05)

    def sample(self, batch_size, seq_len, x=torch.tensor([])):
        if self.test_mode:
            print('In Test mode')
            return None

        if self.data_loader.idx >= self.data_loader.data_num:
            self.data_loader.reset()
        if len(x.shape) > 1:
            input_seq = x
        else:
            input_seq = self.data_loader.next()[0]
        input_seq = input_seq.cuda()
        sampled_output = transformer.sample_output(self.model, input_seq,
                                                   self.EOS_Index,
                                                   self.PAD_Index,
                                                   input_seq.shape[1])
        return sampled_output
        """
def PG_BLEU(generator_):
    def read_file(data_file):
        with open(data_file, 'r') as f:
            lines = f.readlines()
        lis = []
        for line in lines:
            l = line.strip().split(' ')
            l = [int(s) for s in l]
            lis.append(l)
        return lis

    lines = read_file(POSITIVE_FILE)
    train_dataset = AmazonReviewGloveDataset('train_small.csv')
    vocab = train_dataset.indexer.index_to_word[1::]
    del train_dataset

    refrences = []
    for line in lines:
        phrase = []
        for char in line:
            phrase.append(vocab[char])

        #refrences.append(' '.join(phrase))
        refrences.append(phrase)

    hypotheses = []
    for line in lines[:3]:
        phrase = []
        for char in line:
            phrase.append(vocab[char])

        #hypotheses.append(' '.join(phrase))
        hypotheses.append(phrase)

    BLEUscore = nltk.translate.bleu_score.sentence_bleu(
        refrences, hypotheses[0])

    nb_batch_per_epoch = int(GENERATED_NUM / BATCH_SIZE)

    # Track losses for this PG-BLEU run
    perf_dict_pgbleu = {}
    true_loss = []
    generator_loss = []
    disc_loss = []

    # Define Networks
    generator = copy.deepcopy(generator_)
    discriminator = Discriminator(d_num_class, VOCAB_SIZE, d_emb_dim,
                                  d_filter_sizes, d_num_filters, d_dropout)
    target_lstm = TargetLSTM(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
    if opt.cuda:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        target_lstm = target_lstm.cuda()
    # Generate toy data using target lstm
    #print('Generating data ...')
    #generate_samples(target_lstm, BATCH_SIZE, GENERATED_NUM, POSITIVE_FILE)

    # Load data from file
    gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)

    # Pretrain Generator using MLE
    gen_criterion = nn.NLLLoss(reduction='sum')
    gen_optimizer = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    print('Pretrain with MLE ...')
    for epoch in range(40):
        loss = train_epoch(generator, gen_data_iter, gen_criterion,
                           gen_optimizer)
        print('Epoch [%d] Model Loss: %f' % (epoch, loss))
        generator_loss.append(loss)
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
        loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
        true_loss.append(loss)
        print('Epoch [%d] True Loss: %f' % (epoch, loss))

    # Pretrain Discriminator
    dis_criterion = nn.NLLLoss(reduction='sum')
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    print('Pretrain Discriminator ...')
    for epoch in range(1):
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE)
        dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
        for _ in range(1):
            loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                               dis_optimizer)
            disc_loss.append(loss)
            print('Epoch [%d], loss: %f' % (epoch, loss))
    # Adversarial Training
    rollout = bleu_Rollout(generator, 0.8, refrences, vocab)
    print('#####################################################')
    print('Start Adversarial Training...\n')
    gen_gan_loss = GANLoss()
    gen_gan_optm = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_gan_loss = gen_gan_loss.cuda()

    for total_batch in range(TOTAL_BATCH):
        ## Train the generator for one step
        #nb_batch_per_epoch
        for it in range(1):
            #print(it)
            samples = generator.sample(BATCH_SIZE, g_sequence_len)
            # construct the input to the generator, add zeros before samples and delete the last column
            zeros = torch.zeros((BATCH_SIZE, 1)).type(torch.LongTensor)
            if samples.is_cuda:
                zeros = zeros.cuda()
            inputs = Variable(
                torch.cat([zeros, samples.data], dim=1)[:, :-1].contiguous())
            targets = Variable(samples.data).contiguous().view((-1, ))
            # calculate the reward
            rewards = rollout.get_reward(samples, 2)
            rewards = Variable(torch.Tensor(rewards))
            rewards = torch.exp(rewards).contiguous().view((-1, ))
            if opt.cuda:
                rewards = rewards.cuda()
            prob = generator.forward(inputs)
            loss = gen_gan_loss(prob, targets, rewards)
            gen_gan_optm.zero_grad()
            loss.backward()
            gen_gan_optm.step()

        if total_batch % 1 == 0 or total_batch == TOTAL_BATCH - 1:
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
            eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
            loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
            true_loss.append(loss)
            print('Batch [%d] True Loss: %f' % (total_batch, loss))
            loss_gen = eval_epoch(generator, gen_data_iter, gen_criterion)
            print('Epoch [%d] Model Loss: %f' % (total_batch, loss_gen))
            generator_loss.append(loss_gen)
        rollout.update_params()

        for _ in range(4):
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM,
                             NEGATIVE_FILE)
            dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE,
                                        BATCH_SIZE)
            for _ in range(2):
                loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                                   dis_optimizer)
                disc_loss.append(loss)

    perf_dict_pgbleu['true_loss'] = true_loss
    perf_dict_pgbleu['generator_loss'] = generator_loss
    perf_dict_pgbleu['disc_loss'] = disc_loss
    np.save('perf_dict_pgbleu', perf_dict_pgbleu)
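
# bleu_Rollout above replaces the discriminator reward with a BLEU score against
# the reference corpus. A rough sketch of the per-sequence reward such a rollout
# might compute; decoding ids through vocab is an assumption about its internals.
def bleu_reward_sketch(samples, refrences, vocab):
    from nltk.translate.bleu_score import sentence_bleu
    rewards = []
    for seq in samples.cpu().tolist():
        hyp = [vocab[tok] for tok in seq]
        rewards.append(sentence_bleu(refrences, hyp))
    return rewards
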
Example #11
    dis_adversarial_train_acc = []
    dis_adversarial_eval_loss = []
    dis_adversarial_eval_acc = []

    # Generate toy data using target LSTM
    print('#####################################################')
    print('Generating data ...')
    print('#####################################################\n\n')
    generate_samples(target_lstm, args.batch_size, args.n_samples,
                     POSITIVE_FILE)

    # Pre-train generator using MLE
    print('#####################################################')
    print('Start pre-training generator with MLE...')
    print('#####################################################\n')
    gen_data_iter = GenDataIter(POSITIVE_FILE, args.batch_size)
    for i in range(args.g_pretrain_steps):
        print("G-Step {}".format(i))
        train_generator_MLE(generator, gen_data_iter, nll_loss, gen_optimizer,
                            args.gk_epochs, gen_pretrain_train_loss, args)
        generate_samples(generator, args.batch_size, args.n_samples,
                         NEGATIVE_FILE)
        eval_iter = GenDataIter(NEGATIVE_FILE, args.batch_size)
        gen_loss = eval_generator(target_lstm, eval_iter, nll_loss, args)
        gen_pretrain_eval_loss.append(gen_loss)
        print("eval loss: {:.5f}\n".format(gen_loss))
    print('#####################################################\n\n')

    # Pre-train discriminator
    print('#####################################################')
    print('Start pre-training discriminator...')
Example #12
def INIT_TRAIN():
    opinion = str(input("expected opinion: e.g. pos neg neu \n"))
    REF_SENT = str(input("enter one sentence representing the basic information of the generated paragraph \n"))
    logging.basicConfig(level=logging.DEBUG, filename='train_init.log')
    VOCAB_SIZE, word_index_dict, index_word_dict,g_sequence_len = text_process(TRAIN_LOC)
    # Define Networks
    generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim)
    discriminator_general = DiscriminatorGeneral(d_emb_dim, VOCAB_SIZE, general_D_hidden_dim_top, general_D_hidden_dim_base, d_dropout, BATCH_SIZE,general_D_hidden_dim_out)
    discriminator_special = DiscriminatorSpecial(feature_vec_len,hidden_dim_mlp,hidden_dim_out_layer)
    if torch.cuda.is_available():
        generator = generator.cuda()
        discriminator_general = discriminator_general.cuda()
        discriminator_special = discriminator_special.cuda()
    
    # Load data from file
    gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)
    print('Pretrain Generator:')
    for epoch in range(PRE_EPOCH_NUM):
        time_before = time.time()
        loss = pretrain_generator_epoch(generator, gen_data_iter)
        time_behind = time.time()
        print('Pretrain Generator: Epoch [%d] ; Loss: %f ; Time: %f'% (epoch, loss, time_behind-time_before))
        logging.info('Pretrain Generator: Epoch [%d] ; Loss: %f ; Time: %f'% (epoch, loss, time_behind-time_before))

    print('Pretrain Discriminator-General:')
    for epoch in range(3):
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE,index_word_dict,g_sequence_len,"general")
        dis_data_iter = DisGenDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
        for _ in range(3):
            time_before = time.time()
            loss = discriminator_gen_epoch(discriminator_general, dis_data_iter)
            time_behind = time.time()
            print('Pretrain Discriminator-General: Epoch [%d] ; Loss: %f ; Time: %f' % (epoch, loss, time_behind-time_before))
            logging.info('Pretrain Discriminator-General: Epoch [%d] ; Loss: %f ; Time: %f' % (epoch, loss, time_behind-time_before))           
    
    print('Pretrain Discriminator-Special:')
    for epoch in range(5):
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE_SPE,index_word_dict,g_sequence_len,"special")
        dis_data_iter = DisSpeDataIter(NEGATIVE_FILE_SPE, BATCH_SIZE, REF_SENT, g_sequence_len, opinion, length_control, synthetic_num)
        time_before = time.time()
        loss = discriminator_spe_epoch(discriminator_special, dis_data_iter)
        time_behind = time.time()
        print('Pretrain Discriminator-Special: Epoch [%d] ; Loss: %f ; Time: %f' % (epoch, loss, time_behind-time_before))
        logging.info('Pretrain Discriminator-Special: Epoch [%d] ; Loss: %f ; Time: %f' % (epoch, loss, time_behind-time_before))

    # Adversarial Training
    rollout = Rollout(generator, 0.8, theta)
    print('#####################################################')
    print('Start Adversarial Training\n')
    gen_gan_loss = GANLoss()
    gen_gan_optm = optim.Adam(generator.parameters())
    if torch.cuda.is_available():
        gen_gan_loss = gen_gan_loss.cuda()
    gen_criterion = nn.NLLLoss(reduction='sum')
    if torch.cuda.is_available():
        gen_criterion = gen_criterion.cuda()
    (k,distance_left,distance_right) = dis_data_iter.get_para()
    for total_batch in range(TOTAL_TRAIN_NUM):
        ## Train the generator for one step
        for it in range(1):
            samples = generator.sample(BATCH_SIZE, g_sequence_len)
            # construct the input to the generator, add zeros before samples and delete the last column
            zeros = torch.zeros((BATCH_SIZE, 1)).type(torch.LongTensor)
            if samples.is_cuda:
                zeros = zeros.cuda()
            inputs = Variable(torch.cat([zeros, samples.data], dim = 1)[:, :-1].contiguous())
            targets = Variable(samples.data).contiguous().view((-1,))
            # calculate the reward
            time_before = time.time()
            rewards = rollout.get_reward(samples, 16, discriminator_general, discriminator_special,k,distance_left,distance_right,REF_SENT,index_word_dict)
            rewards = Variable(torch.Tensor(rewards))
            rewards = torch.exp(rewards).contiguous().view((-1,))
            if torch.cuda.is_available():
                rewards = rewards.cuda()
            prob = generator.forward(inputs)
            loss = gen_gan_loss(prob, targets, rewards)
            time_behind = time.time()
            print("Train Generator: Epoch [%d] ; Loss: %f ; Time: %f"%(total_batch,loss, time_behind-time_before))
            logging.info('Train Generator: Epoch [%d] ; Loss: %f ; Time: %f' % (total_batch, loss, time_behind-time_before))            
            gen_gan_optm.zero_grad()
            loss.backward()
            gen_gan_optm.step()

        rollout.update_params()
        
        for epoch in range(2):
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE,index_word_dict,g_sequence_len,"general")
            dis_data_iter = DisGenDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
            for _ in range(2):
                time_before = time.time()
                loss = discriminator_gen_epoch(discriminator_general, dis_data_iter)
                time_behind = time.time()
                print('Train Discriminator-General: Epoch [%d] ; Loss: %f ; Time: %f' % (epoch, loss, time_behind-time_before))
                logging.info('Train Discriminator-General: Epoch [%d] ; Loss: %f ; Time: %f' % (epoch, loss, time_behind-time_before))

        for epoch in range(1):
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE_SPE,index_word_dict,g_sequence_len,"special")
            dis_data_iter = DisSpeDataIter(NEGATIVE_FILE_SPE, BATCH_SIZE, REF_SENT, g_sequence_len, opinion, length_control, synthetic_num)
            time_before = time.time()
            loss = discriminator_spe_epoch(discriminator_special, dis_data_iter)
            time_behind = time.time()
            print('Train Discriminator-Special: Epoch [%d] ; Loss: %f ; Time: %f' % (epoch, loss, time_behind-time_before))
            logging.info('Train Discriminator-Special: Epoch [%d] ; Loss: %f ; Time: %f' % (epoch, loss, time_behind-time_before))
    torch.save(discriminator_general, 'DiscriminatorGeneral.pkl')    
Example #13
def main():
    random.seed(SEED)
    np.random.seed(SEED)
    calc_bleu([1, 10, 12])
    exit()
    # Build up dataset
    s_train, s_test = load_from_big_file('../data/train_data_obama.txt')
    # idx_to_word: List of id to word
    # word_to_idx: Dictionary mapping word to id
    idx_to_word, word_to_idx = fetch_vocab(s_train, s_train, s_test)
    # TODO: 1. Prepare data for attention model
    # input_seq, target_seq = prepare_data(DATA_GERMAN, DATA_ENGLISH, word_to_idx)

    global VOCAB_SIZE
    VOCAB_SIZE = len(idx_to_word)

    save_vocab(CHECKPOINT_PATH + 'metadata.data', idx_to_word, word_to_idx,
               VOCAB_SIZE, g_emb_dim, g_hidden_dim, g_sequence_len)

    print('VOCAB SIZE:', VOCAB_SIZE)
    # Define Networks
    generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, g_sequence_len,
                          BATCH_SIZE, opt.cuda)
    discriminator = Discriminator(d_num_class, VOCAB_SIZE, d_emb_dim,
                                  d_filter_sizes, d_num_filters, d_dropout)
    target_lstm = TargetLSTM(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
    if opt.cuda:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        target_lstm = target_lstm.cuda()
    # Generate toy data using target lstm
    print('Generating data ...')
    generate_real_data('../data/train_data_obama.txt', BATCH_SIZE,
                       GENERATED_NUM, idx_to_word, word_to_idx, POSITIVE_FILE,
                       TEST_FILE)
    # Create Test data iterator for testing
    test_iter = GenDataIter(TEST_FILE, BATCH_SIZE)
    # generate_samples(target_lstm, BATCH_SIZE, GENERATED_NUM, POSITIVE_FILE, idx_to_word)

    # Load data from file
    gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)

    # Pretrain Generator using MLE
    # gen_criterion = nn.NLLLoss(size_average=False)
    gen_criterion = nn.CrossEntropyLoss()
    gen_optimizer = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    print('Pretrain with MLE ...')
    for epoch in range(PRE_EPOCH_NUM):
        loss = train_epoch(generator, gen_data_iter, gen_criterion,
                           gen_optimizer)
        print('Epoch [%d] Model Loss: %f' % (epoch, loss))
        print('Training Output')
        test_predict(generator, test_iter, idx_to_word, train_mode=True)

        sys.stdout.flush()
        # TODO: 2. Flags to ensure dimension of model input is handled
        # generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        """
        eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
        print('Iterator Done')
        loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
        print('Epoch [%d] True Loss: %f' % (epoch, loss))
        """
    print('OUTPUT AFTER PRE-TRAINING')
    test_predict(generator, test_iter, idx_to_word, train_mode=True)

    # Pretrain Discriminator
    dis_criterion = nn.NLLLoss(reduction='sum')
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    print('Pretrain Discriminator ...')
    for epoch in range(3):
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE)
        dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
        for _ in range(3):
            loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                               dis_optimizer)
            print('Epoch [%d], loss: %f' % (epoch, loss))
            sys.stdout.flush()
    # Adversarial Training
    rollout = Rollout(generator, 0.8)
    print('#####################################################')
    print('Start Adversarial Training...\n')
    gen_gan_loss = GANLoss()

    gen_gan_optm = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_gan_loss = gen_gan_loss.cuda()
    gen_criterion = nn.NLLLoss(reduction='sum')
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    dis_criterion = nn.NLLLoss(reduction='sum')
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    real_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)
    for total_batch in range(TOTAL_BATCH):
        ## Train the generator for one step
        for it in range(1):
            if real_iter.idx >= real_iter.data_num:
                real_iter.reset()
            inputs = real_iter.next()[0]
            inputs = inputs.cuda()
            samples = generator.sample(BATCH_SIZE, g_sequence_len, inputs)
            samples = samples.cpu()
            rewards = rollout.get_reward(samples, 16, discriminator)
            rewards = Variable(torch.Tensor(rewards))
            if opt.cuda:
                rewards = torch.exp(rewards.cuda()).contiguous().view((-1, ))
            prob = generator.forward(inputs)
            mini_batch = prob.shape[0]
            prob = torch.reshape(
                prob,
                (prob.shape[0] * prob.shape[1], -1))  #prob.view(-1, g_emb_dim)
            targets = copy.deepcopy(inputs).contiguous().view((-1, ))
            loss = gen_gan_loss(prob, targets, rewards)
            gen_gan_optm.zero_grad()
            loss.backward()
            gen_gan_optm.step()
            """
            samples = generator.sample(BATCH_SIZE, g_sequence_len)
            # construct the input to the genrator, add zeros before samples and delete the last column
            zeros = torch.zeros((BATCH_SIZE, 1)).type(torch.LongTensor)
            if samples.is_cuda:
                zeros = zeros.cuda()
            inputs = Variable(torch.cat([zeros, samples.data], dim = 1)[:, :-1].contiguous())
            targets = Variable(samples.data).contiguous().view((-1,))
            print('', inputs.shape, targets.shape)
            print(inputs, targets)
            # calculate the reward
            rewards = rollout.get_reward(samples, 16, discriminator)
            rewards = Variable(torch.Tensor(rewards))
            if opt.cuda:
                rewards = torch.exp(rewards.cuda()).contiguous().view((-1,))
            prob = generator.forward(inputs)
            mini_batch = prob.shape[0]
            prob = torch.reshape(prob, (prob.shape[0] * prob.shape[1], -1)) #prob.view(-1, g_emb_dim)
            loss = gen_gan_loss(prob, targets, rewards)
            gen_gan_optm.zero_grad()
            loss.backward()
            gen_gan_optm.step()
            """
        print('Batch [%d] True Loss: %f' % (total_batch, loss))

        if total_batch % 1 == 0 or total_batch == TOTAL_BATCH - 1:
            # generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
            # eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
            # loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
            if len(prob.shape) > 2:
                prob = torch.reshape(prob, (prob.shape[0] * prob.shape[1], -1))
            predictions = torch.max(prob, dim=1)[1]
            predictions = predictions.view(mini_batch, -1)
            for each_sen in list(predictions):
                print('Train Output:',
                      generate_sentence_from_id(idx_to_word, each_sen))

            test_predict(generator, test_iter, idx_to_word, train_mode=True)
            torch.save(generator.state_dict(),
                       CHECKPOINT_PATH + 'generator.model')
            torch.save(discriminator.state_dict(),
                       CHECKPOINT_PATH + 'discriminator.model')
        rollout.update_params()

        for _ in range(4):
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM,
                             NEGATIVE_FILE)
            dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE,
                                        BATCH_SIZE)
            for _ in range(2):
                loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                                   dis_optimizer)
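
# DisDataIter pairs real and generated sequences with binary labels for the
# discriminator. A rough sketch of the data it is expected to build (real
# sequences labelled 1, generated sequences labelled 0); shuffling and batching
# are omitted and the exact construction is an assumption:
def build_dis_pairs_sketch(positive_file, negative_file):
    def read_ids(path):
        with open(path) as f:
            return [[int(tok) for tok in line.split()] for line in f]
    real, fake = read_ids(positive_file), read_ids(negative_file)
    data = real + fake
    labels = [1] * len(real) + [0] * len(fake)
    return data, labels
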
Example #14
def main():
    random.seed(SEED)
    np.random.seed(SEED)
    
    # Build up dataset
    s_train, s_test = load_from_big_file('../data/train_data_obama.txt')
    # idx_to_word: List of id to word
    # word_to_idx: Dictionary mapping word to id
    idx_to_word, word_to_idx = fetch_vocab(s_train, s_train, s_test)
    # TODO: 1. Prepare data for attention model
    # input_seq, target_seq = prepare_data(DATA_GERMAN, DATA_ENGLISH, word_to_idx)

    global VOCAB_SIZE
    VOCAB_SIZE = len(idx_to_word)

    print('VOCAB SIZE:' , VOCAB_SIZE)
    # Define Networks
    generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, g_sequence_len, BATCH_SIZE, opt.cuda)
    discriminator = Discriminator(d_num_class, VOCAB_SIZE, d_emb_dim, d_filter_sizes, d_num_filters, d_dropout)
    target_lstm = TargetLSTM(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
    if opt.cuda:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        target_lstm = target_lstm.cuda()
    # Generate toy data using target lstm
    print('Generating data ...')
    generate_samples(target_lstm, BATCH_SIZE, GENERATED_NUM, POSITIVE_FILE, idx_to_word)
    
    # Load data from file
    gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)

    # Pretrain Generator using MLE
    gen_criterion = nn.NLLLoss(reduction='sum')
    gen_optimizer = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    print('Pretrain with MLE ...')
    for epoch in range(PRE_EPOCH_NUM):
        loss = train_epoch(generator, gen_data_iter, gen_criterion, gen_optimizer)
        print('Epoch [%d] Model Loss: %f'% (epoch, loss))
        # TODO: 2. Flags to ensure dimension of model input is handled
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        print('Gen sampled')
        eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
        print('Iterator Done')
        loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
        print('Epoch [%d] True Loss: %f' % (epoch, loss))

    # Pretrain Discriminator
    dis_criterion = nn.NLLLoss(reduction='sum')
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    print('Pretrain Discriminator ...')
    for epoch in range(3):
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE)
        dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
        for _ in range(3):
            loss = train_epoch(discriminator, dis_data_iter, dis_criterion, dis_optimizer)
            print('Epoch [%d], loss: %f' % (epoch, loss))
    # Adversarial Training 
    rollout = Rollout(generator, 0.8)
    print('#####################################################')
    print('Start Adversarial Training...\n')
    gen_gan_loss = GANLoss()
    gen_gan_optm = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_gan_loss = gen_gan_loss.cuda()
    gen_criterion = nn.NLLLoss(reduction='sum')
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    dis_criterion = nn.NLLLoss(reduction='sum')
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    for total_batch in range(TOTAL_BATCH):
        ## Train the generator for one step
        for it in range(1):
            samples = generator.sample(BATCH_SIZE, g_sequence_len)
            # construct the input to the generator, add zeros before samples and delete the last column
            zeros = torch.zeros((BATCH_SIZE, 1)).type(torch.LongTensor)
            if samples.is_cuda:
                zeros = zeros.cuda()
            inputs = Variable(torch.cat([zeros, samples.data], dim = 1)[:, :-1].contiguous())
            targets = Variable(samples.data).contiguous().view((-1,))
            # calculate the reward
            rewards = rollout.get_reward(samples, 16, discriminator)
            rewards = Variable(torch.Tensor(rewards))
            if opt.cuda:
                rewards = torch.exp(rewards.cuda()).contiguous().view((-1,))
            prob = generator.forward(inputs)
            prob = torch.reshape(prob, (prob.shape[0] * prob.shape[1], -1)) #prob.view(-1, g_emb_dim)
            loss = gen_gan_loss(prob, targets, rewards)
            gen_gan_optm.zero_grad()
            loss.backward()
            gen_gan_optm.step()

        print('Batch [%d] True Loss: %f' % (total_batch, loss))

        if total_batch % 10 == 0 or total_batch == TOTAL_BATCH - 1:
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
            eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
            loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
            
            predictions = torch.max(prob, dim=1)[1]
            predictions = predictions.view(BATCH_SIZE, -1)
            # print('PRED SHAPE:' , predictions.shape)
            for each_sen in list(predictions):
                print('Sample Output:', generate_sentence_from_id(idx_to_word, each_sen, DEBUG_FILE))
            sys.stdout.flush()

            torch.save(generator.state_dict(), './experiment_3_10000/generator.model')
            torch.save(discriminator.state_dict(), './experiment_3_10000/discriminator.model')
        rollout.update_params()
        
        for _ in range(4):
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE)
            dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
            for _ in range(2):
                loss = train_epoch(discriminator, dis_data_iter, dis_criterion, dis_optimizer)
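
# GenDataIter is the MLE data iterator used in all of these examples: it reads
# space-separated token ids from a file and yields (data, target) batches where
# data is the sequence with a 0 prepended and target is the sequence with a 0
# appended (teacher forcing). A sketch of one such batch; only the shapes are
# grounded in the code above, the construction details are assumed:
import torch

def make_gen_batch_sketch(batch):
    # batch: LongTensor of shape (batch_size, seq_len) holding token ids
    zeros = torch.zeros(batch.size(0), 1, dtype=torch.long)
    data = torch.cat([zeros, batch], dim=1)    # (batch_size, seq_len + 1)
    target = torch.cat([batch, zeros], dim=1)  # (batch_size, seq_len + 1)
    return data, target
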
Example #15
def train_GAN(conf_data):
    """Training Process for GAN.
    
    Parameters
    ----------
    conf_data: dict
        Dictionary containing all parameters and objects.       

    Returns
    -------
    conf_data: dict
        Dictionary containing all parameters and objects.       

    """
    seq = conf_data['GAN_model']['seq']
    if seq == 1:
        pre_epoch_num = conf_data['generator']['pre_epoch_num']
        GENERATED_NUM = 10000
        EVAL_FILE = 'eval.data'
        POSITIVE_FILE = 'real.data'
        NEGATIVE_FILE = 'gene.data'
    temp = 1  # TODO: determines how many times the discriminator is updated; take this as a config input
    epochs = int(conf_data['GAN_model']['epochs'])
    if seq == 0:
        dataloader = conf_data['data_learn']
    mini_batch_size = int(conf_data['GAN_model']['mini_batch_size'])
    data_label = int(conf_data['GAN_model']['data_label'])
    cuda = conf_data['cuda']
    g_latent_dim = int(conf_data['generator']['latent_dim'])
    classes = int(conf_data['GAN_model']['classes'])

    w_loss = int(conf_data['GAN_model']['w_loss'])

    clip_value = float(conf_data['GAN_model']['clip_value'])
    n_critic = int(conf_data['GAN_model']['n_critic'])

    lambda_gp = int(conf_data['GAN_model']['lambda_gp'])

    log_file = open(conf_data['performance_log'] + "/log.txt", "w+")
    # Convert these to parameters of the config data
    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    conf_data['Tensor'] = Tensor
    LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
    conf_data['LongTensor'] = LongTensor
    FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    conf_data['FloatTensor'] = FloatTensor

    conf_data['epochs'] = epochs

    #print ("Just before training")
    if seq == 1:  #TODO: Change back to 1
        target_lstm = TargetLSTM(conf_data['GAN_model']['vocab_size'],
                                 conf_data['generator']['embedding_dim'],
                                 conf_data['generator']['hidden_dim'],
                                 conf_data['cuda'])
        if cuda == True:
            target_lstm = target_lstm.cuda()
        conf_data['target_lstm'] = target_lstm
        gen_data_iter = GenDataIter('real.data', mini_batch_size)
        generator = conf_data['generator_model']
        discriminator = conf_data['discriminator_model']
        g_loss_func = conf_data['generator_loss']
        d_loss_func = conf_data['discriminator_loss']
        optimizer_D = conf_data['discriminator_optimizer']
        optimizer_G = conf_data['generator_optimizer']
        #print('Pretrain with MLE ...')
        for epoch in range(pre_epoch_num):  #TODO: Change the range
            loss = train_epoch(generator, gen_data_iter, g_loss_func,
                               optimizer_G, conf_data, 'g')
            print('Epoch [%d] Model Loss: %f' % (epoch, loss))
            generate_samples(generator, mini_batch_size, GENERATED_NUM,
                             EVAL_FILE, conf_data)
            eval_iter = GenDataIter(EVAL_FILE, mini_batch_size)
            loss = eval_epoch(target_lstm, eval_iter, g_loss_func, conf_data)
            print('Epoch [%d] True Loss: %f' % (epoch, loss))

        dis_criterion = d_loss_func
        dis_optimizer = optimizer_D
        #TODO: Understand why the two commented-out lines below were there.
        # if conf_data['cuda']:
        #     dis_criterion = dis_criterion.cuda()

        #print('Pretrain Discriminator ...')
        dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE,
                                    mini_batch_size)
        for epoch in range(5):  #TODO: change back 5
            generate_samples(generator, mini_batch_size, GENERATED_NUM,
                             NEGATIVE_FILE, conf_data)
            dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE,
                                        mini_batch_size)
            for _ in range(3):  #TODO: change back 3
                loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                                   dis_optimizer, conf_data, 'd')
                print('Epoch [%d], loss: %f' % (epoch, loss))
        conf_data['generator_model'] = generator
        conf_data['discriminator_model'] = discriminator
        torch.save(conf_data['generator_model'].state_dict(),
                   conf_data['save_model_path'] + '/Seq/' + 'pre_generator.pt')
        torch.save(
            conf_data['discriminator_model'].state_dict(),
            conf_data['save_model_path'] + '/Seq/' + 'pre_discriminator.pt')

        conf_data['rollout'] = Rollout(generator, 0.8)

    for epoch in range(epochs):
        conf_data['epoch'] = epoch
        if seq == 0:
            to_iter = dataloader
        elif seq == 1:  #TODO: Change this back to 1
            to_iter = [1]

        for i, iterator in enumerate(to_iter):
            optimizer_D = conf_data['discriminator_optimizer']
            optimizer_G = conf_data['generator_optimizer']

            generator = conf_data['generator_model']
            discriminator = conf_data['discriminator_model']

            g_loss_func = conf_data['generator_loss']
            d_loss_func = conf_data['discriminator_loss']

            # if aux = 1:

            #print ("Reached here --------------> ")
            conf_data['iterator'] = i
            if seq == 0:

                if data_label == 1:
                    imgs, labels = iterator
                else:
                    imgs = iterator
                # Adversarial ground truths
                valid = Variable(Tensor(imgs.size(0), 1).fill_(1.0),
                                 requires_grad=False)
                conf_data['valid'] = valid
                fake = Variable(Tensor(imgs.size(0), 1).fill_(0.0),
                                requires_grad=False)
                conf_data['fake'] = fake
                # Configure input
                real_imgs = Variable(imgs.type(Tensor))

                if data_label == 1:
                    labels = Variable(labels.type(LongTensor))
                # Sample noise as generator input
                z = Variable(
                    Tensor(
                        np.random.normal(0, 1, (imgs.shape[0], g_latent_dim))))
                if classes > 0:
                    gen_labels = Variable(
                        LongTensor(np.random.randint(0, classes,
                                                     imgs.shape[0])))
                    conf_data['gen_labels'] = gen_labels
            # elif seq == 1: #If yes seqGAN
            #     # samples = generator.sample(mini_batch_size,conf_data['generator']['sequece_length'])
            #     # zeros = torch.zeros((mini_batch_size,1)).type(LongTensor)
            #     # imgs = Variable(torch.cat([zeros,samples.data]),dim=1)[:,:-1].contiguous() #TODO: change imgs to inps all, to make more sense of the code
            #     # targets = Variable(sample.data).contiguous().view((-1,))
            #     # rewards = rollout.get_reward(sample,16,discriminator)
            #     # rewards = Variable(Tensor(rewards))
            #     # prob = generator.forward(inputs)
            #     # loss = gen_gan_loss(prob)
            #     pass
            #     #optimizer_G

            # ---------------------
            #  Train Discriminator
            # ---------------------
            optimizer_D.zero_grad()
            if seq == 1:  #TODO change this back to 1
                dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE,
                                            mini_batch_size)
            # TODO: make the number of discriminator updates (temp) a config parameter.
            # Note: the stored models are re-read from conf_data inside this loop; it is
            # unclear whether that is needed.
            for d_step in range(temp):
                optimizer_D = conf_data['discriminator_optimizer']
                optimizer_G = conf_data['generator_optimizer']

                generator = conf_data['generator_model']
                discriminator = conf_data['discriminator_model']

                g_loss_func = conf_data['generator_loss']
                d_loss_func = conf_data['discriminator_loss']
                if classes <= 0:
                    #print ("Reached here 2 --------------> ")
                    if seq == 0:
                        gen_imgs = generator(z)
                        # Measure discriminator's ability to classify real from generated samples
                        #Real images
                        real_validity = discriminator(real_imgs)
                        #Fake images
                        fake_validity = discriminator(gen_imgs.detach())
                    if seq == 1:
                        generate_samples(generator, mini_batch_size,
                                         GENERATED_NUM, NEGATIVE_FILE,
                                         conf_data)
                        dis_data_iter = DisDataIter(POSITIVE_FILE,
                                                    NEGATIVE_FILE,
                                                    mini_batch_size)
                        loss = train_epoch(discriminator, dis_data_iter,
                                           d_loss_func, optimizer_D, conf_data,
                                           'd')
                        conf_data['d_loss'] = loss
                        #exit()

                else:
                    if seq == 0:
                        gen_imgs = generator(z, gen_labels)
                        real_validity = discriminator(real_imgs, labels)
                        fake_validity = discriminator(gen_imgs.detach(),
                                                      labels)

                if seq == 0:
                    conf_data['gen_imgs'] = gen_imgs
                if seq == 0:
                    if w_loss == 0:
                        real_loss = d_loss_func.loss(real_validity, valid)
                        fake_loss = d_loss_func.loss(fake_validity, fake)
                        d_loss = (real_loss + fake_loss) / 2
                    elif w_loss == 1:
                        d_loss = -d_loss_func.loss(real_validity,
                                                   valid) + d_loss_func.loss(
                                                       fake_validity, fake)
                        if lambda_gp > 0:
                            conf_data['real_data_sample'] = real_imgs.data
                            conf_data['fake_data_sample'] = gen_imgs.data
                            conf_data = compute_gradient_penalty(conf_data)
                            gradient_penalty = conf_data['gradient_penalty']
                            d_loss = d_loss + lambda_gp * gradient_penalty
                    conf_data['d_loss'] = d_loss
                    d_loss.backward()
                    optimizer_D.step()

                if clip_value > 0:
                    # Clip weights of discriminator
                    for p in discriminator.parameters():
                        p.data.clamp_(-clip_value, clip_value)

            # -----------------
            #  Train Generator
            # -----------------
            conf_data['generator_model'] = generator
            conf_data['discriminator_model'] = discriminator

            # The next 4 lines were added recently and may need to be removed.
            conf_data['optimizer_G'] = optimizer_G
            conf_data['optimizer_D'] = optimizer_D
            conf_data['generator_loss'] = g_loss_func
            conf_data['discriminator_loss'] = d_loss_func
            if seq == 0:
                conf_data['noise'] = z

            if n_critic <= 0:
                conf_data = training_fucntion_generator(conf_data)
            elif n_critic > 0:
                # Train the generator every n_critic iterations
                if i % n_critic == 0:
                    conf_data = training_fucntion_generator(conf_data)
            #exit()

        # print ("------------------ Here (train_GAN.py)")

            if seq == 0:
                batches_done = epoch * len(dataloader) + i
                if batches_done % int(conf_data['sample_interval']) == 0:
                    if classes <= 0:
                        # print ("Here")
                        # print (type(gen_imgs.data[:25]))
                        # print (gen_imgs.data[:25].shape)
                        save_image(gen_imgs.data[:25],
                                   conf_data['result_path'] +
                                   '/%d.png' % batches_done,
                                   nrow=5,
                                   normalize=True)
                    elif classes > 0:
                        sample_image(10, batches_done, conf_data)
        if seq == 0:
            log_file.write("[Epoch %d/%d] [D loss: %f] [G loss: %f] \n" %
                           (epoch, epochs, conf_data['d_loss'].item(),
                            conf_data['g_loss'].item()))
        elif seq == 1:
            # print ("Done")
            log_file.write(
                "[Epoch %d/%d] [D loss: %f] [G loss: %f] \n" %
                (epoch, epochs, conf_data['d_loss'], conf_data['g_loss']))
    conf_data['generator_model'] = generator
    conf_data['discriminator_model'] = discriminator
    conf_data['log_file'] = log_file
    return conf_data
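
# For reference, the configuration keys that train_GAN and
# training_fucntion_generator read from conf_data. The values below are
# placeholders for illustration, not recommended settings:
example_conf = {
    'GAN_model': {
        'seq': 1,              # 1 = SeqGAN path, 0 = image-GAN path
        'epochs': 10,
        'mini_batch_size': 64,
        'data_label': 0,
        'classes': 0,
        'w_loss': 0,
        'clip_value': 0.0,
        'n_critic': 0,
        'lambda_gp': 0,
        'vocab_size': 5000,
    },
    'generator': {
        'pre_epoch_num': 2,
        'embedding_dim': 32,
        'hidden_dim': 32,
        'latent_dim': 100,
        'sequece_length': 20,  # key spelled as in the code above
    },
    'cuda': False,
}
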
Example #16
def main():
    random.seed(SEED)
    np.random.seed(SEED)

    # Define Networks
    generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
    discriminator = Discriminator(d_num_class, VOCAB_SIZE, d_emb_dim,
                                  d_filter_sizes, d_num_filters, d_dropout)
    if opt.cuda:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
    # Generate toy data using target lstm, i.e. build a new dataset and treat it as the real data
    print('Ahh')

    # Load data from file
    # Each iteration yields (data, target), where data is the sequence with a 0 prepended and target is the sequence with a 0 appended
    gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)

    # Pretrain Generator using MLE
    gen_criterion = nn.NLLLoss(
        reduction='sum'
    )  #You may use CrossEntropyLoss instead, if you prefer not to add an extra LogSoftmax layer .
    gen_optimizer = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    print('Pretrain with MLE ...')
    for epoch in range(PRE_EPOCH_NUM):  #PRE_EPOCH_NUM =120
        loss = train_epoch(generator, gen_data_iter, gen_criterion,
                           gen_optimizer)  # update the generator's parameters to fit gen_data_iter
        print('Epoch [%d] Model Loss: %f' % (epoch, loss))  # Sep 1

    # Pretrain Discriminator
    dis_criterion = nn.NLLLoss(reduction='sum')
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    print('Pretrain Discriminator ...')
    for epoch in range(5):
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE)
        dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
        for _ in range(3):
            loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                               dis_optimizer)
            print('Epoch [%d], loss: %f' % (epoch, loss))
    # Adversarial Training
    rollout = Rollout(generator, 0.8)
    print('#####################################################')
    print('Start Adversarial Training...\n')
    gen_gan_loss = GANLoss()
    gen_gan_optm = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_gan_loss = gen_gan_loss.cuda()
    gen_criterion = nn.NLLLoss(reduction='sum')
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    dis_criterion = nn.NLLLoss(reduction='sum')
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    for total_batch in range(TOTAL_BATCH):
        ## Train the generator for one step
        for it in range(1):
            samples = generator.sample(BATCH_SIZE, g_sequence_len)
            # construct the input to the generator, add zeros before samples and delete the last column
            zeros = torch.zeros((BATCH_SIZE, 1)).type(torch.LongTensor)
            if samples.is_cuda:
                zeros = zeros.cuda()
            inputs = Variable(
                torch.cat([zeros, samples.data], dim=1)[:, :-1].contiguous())
            targets = Variable(samples.data).contiguous().view(
                (-1, ))  # note: inputs keeps shape (batch_size, seq_len) while targets is flattened to 1-D
            # calculate the reward: the rollout resamples completions so that partial sequences
            # can be scored, which the generator's own sample() cannot do directly
            rewards = rollout.get_reward(
                samples, 16, discriminator)  # rewards:(batch_size,seq_len)
            rewards = Variable(torch.Tensor(rewards))
            rewards = torch.exp(rewards).contiguous().view(
                (-1, ))  # exponentiate because the Discriminator's last layer is a log-softmax
            if opt.cuda:
                rewards = rewards.cuda()
            prob = generator.forward(inputs)
            loss = gen_gan_loss(prob, targets, rewards)  # the crucial step: reward-weighted policy-gradient loss
            gen_gan_optm.zero_grad()
            loss.backward()
            gen_gan_optm.step()  # does this also update the ori_model held inside the rollout?

        rollout.update_params()  # once this step is understood, the rest is straightforward

        for _ in range(4):
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM,
                             NEGATIVE_FILE)
            dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE,
                                        BATCH_SIZE)
            for _ in range(2):
                loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                                   dis_optimizer)

        print('Adversarial Training %d complete \n' % (total_batch))

    print('Saving the generator model')
    torch.save(generator.state_dict(), PATH_GPU)
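
For reference, a small self-contained sketch of the input/target construction used in the generator step above: the sampled ids are shifted right by one position behind a column of zeros, so inputs keeps shape (batch_size, seq_len) while targets holds the same ids flattened for the loss. The batch size and sequence length below are illustrative placeholders.

import torch

samples = torch.randint(1, 100, (4, 5))              # stand-in for generator.sample(...): token ids
zeros = torch.zeros((4, 1), dtype=torch.long)        # start-of-sequence padding column
inputs = torch.cat([zeros, samples], dim=1)[:, :-1]  # (4, 5): token t-1 is used to predict token t
targets = samples.contiguous().view(-1)              # (20,): flattened ids, matching NLLLoss/GANLoss
assert inputs.shape == samples.shape
assert targets.shape == (4 * 5,)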
Пример #17
0
def main(generator_, discriminator, model_index):
    #random.seed(SEED)
    #np.random.seed(SEED)

    perf_dict = {}
    true_loss = []
    generator_loss = []
    disc_loss = []
    nb_batch_per_epoch = int(GENERATED_NUM / BATCH_SIZE)

    # Define Networks
    generator = copy.deepcopy(generator_)

    #target_lstm = TargetLSTM(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
    if opt.cuda:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        #target_lstm = target_lstm.cuda()
    # Generate toy data using target lstm
    #print('Generating data ...')
    #generate_samples(target_lstm, BATCH_SIZE, GENERATED_NUM, POSITIVE_FILE)

    # Load data from file
    #gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)

    # Pretrain Generator using MLE
    gen_criterion = nn.NLLLoss(reduction='sum')
    gen_optimizer = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    print('Pretrain with MLE ...')
    for epoch in range(PRE_EPOCH_NUM):
        loss = train_epoch(generator, gen_data_iter, gen_criterion,
                           gen_optimizer)
        print('Epoch [%d] Model Loss: %f' % (epoch, loss))
        generator_loss.append(loss)
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
        loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
        true_loss.append(loss)
        print('Epoch [%d] True Loss: %f' % (epoch, loss))

    # Pretrain Discriminator
    dis_criterion = nn.NLLLoss(reduction='sum')
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    print('Pretrain Discriminator ...')
    for epoch in range(5):
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE)
        dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
        for _ in range(3):
            loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                               dis_optimizer)
            disc_loss.append(loss)
            print('Epoch [%d], loss: %f' % (epoch, loss))
    # Adversarial Training
    rollout = Rollout(generator, 0.8)
    print('#####################################################')
    print('Start Adversarial Training...\n')
    gen_gan_loss = GANLoss()
    gen_gan_optm = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_gan_loss = gen_gan_loss.cuda()

    # gen_criterion = nn.NLLLoss(reduction='sum')
    # if opt.cuda:
    #     gen_criterion = gen_criterion.cuda()
    # dis_criterion = nn.NLLLoss(reduction='sum')
    # dis_optimizer = optim.Adam(discriminator.parameters())
    # if opt.cuda:
    #     dis_criterion = dis_criterion.cuda()

    for total_batch in range(TOTAL_BATCH):
        ## Train the generator for one step
        for it in range(1):
            print(it)
            samples = generator.sample(BATCH_SIZE, g_sequence_len)
            # construct the input to the generator, add zeros before samples and delete the last column
            zeros = torch.zeros((BATCH_SIZE, 1)).type(torch.LongTensor)
            if samples.is_cuda:
                zeros = zeros.cuda()
            inputs = Variable(
                torch.cat([zeros, samples.data], dim=1)[:, :-1].contiguous())
            targets = Variable(samples.data).contiguous().view((-1, ))
            # calculate the reward
            rewards = rollout.get_reward(samples, 16, discriminator)
            rewards = Variable(torch.Tensor(rewards))
            rewards = torch.exp(rewards).contiguous().view((-1, ))
            if opt.cuda:
                rewards = rewards.cuda()
            prob = generator.forward(inputs)
            loss = gen_gan_loss(prob, targets, rewards)
            gen_gan_optm.zero_grad()
            loss.backward()
            gen_gan_optm.step()

        if total_batch % 1 == 0 or total_batch == TOTAL_BATCH - 1:
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
            eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
            loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
            true_loss.append(loss)
            print('Batch [%d] True Loss: %f' % (total_batch, loss))
            loss_gen = eval_epoch(generator, gen_data_iter, gen_criterion)
            print('Epoch [%d] Model Loss: %f' % (total_batch, loss_gen))
            generator_loss.append(loss_gen)
        rollout.update_params()

        for _ in range(4):
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM,
                             NEGATIVE_FILE)
            dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE,
                                        BATCH_SIZE)
            for _ in range(2):
                loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                                   dis_optimizer)
                disc_loss.append(loss)

    perf_dict['true_loss'] = true_loss
    perf_dict['generator_loss'] = generator_loss
    perf_dict['disc_loss'] = disc_loss
    np.save('perf_dict' + str(model_index), perf_dict)
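
The Rollout class itself is not shown in this listing; the sketch below is only an assumption of what rollout.get_reward(samples, 16, discriminator) computes, namely Monte Carlo completions of each prefix scored by the discriminator. The complete_from_prefix helper is hypothetical.

import torch

def monte_carlo_rewards(samples, num_rollouts, discriminator, complete_from_prefix):
    # samples: (batch, seq_len) token ids produced by the generator.
    batch_size, seq_len = samples.size()
    rewards = torch.zeros(batch_size, seq_len)
    for t in range(1, seq_len + 1):
        for _ in range(num_rollouts):
            finished = complete_from_prefix(samples[:, :t], seq_len)  # (batch, seq_len) completions
            scores = discriminator(finished)                          # assumed log-softmax over {fake, real}
            rewards[:, t - 1] += scores[:, 1]                         # log P(real) for each sequence
    return rewards / num_rollouts  # (batch, seq_len); the training loop above applies torch.exp afterwards


# Toy usage with stand-ins: a fixed completion and a random scorer.
toy = monte_carlo_rewards(torch.zeros(2, 4, dtype=torch.long), 3,
                          lambda x: torch.log_softmax(torch.randn(x.size(0), 2), dim=1),
                          lambda prefix, L: torch.zeros(prefix.size(0), L, dtype=torch.long))
print(toy.shape)  # torch.Size([2, 4])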
Пример #18
0
        if prob.is_cuda:
            one_hot = one_hot.cuda()
        loss = torch.masked_select(prob, one_hot)
        loss = loss * reward
        loss = -torch.sum(loss)
        return loss
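
The fragment above looks like the tail of a policy-gradient loss. The self-contained sketch below shows how such a GANLoss forward pass is commonly written; the one_hot construction ahead of the shown lines is an assumption, not the original code.

import torch
import torch.nn as nn
import torch.nn.functional as F

class GANLoss(nn.Module):
    """Sketch of a REINFORCE-style loss: -sum(log p(sampled token) * reward)."""

    def forward(self, prob, target, reward):
        # prob:   (batch * seq_len, vocab_size) log-probabilities from the generator
        # target: (batch * seq_len,) sampled token ids
        # reward: (batch * seq_len,) per-token rewards from the rollout
        one_hot = F.one_hot(target, num_classes=prob.size(1)).bool().to(prob.device)
        loss = torch.masked_select(prob, one_hot)  # log-probability of each sampled token
        loss = loss * reward                       # weight it by its reward (policy gradient)
        return -torch.sum(loss)


# Toy check: 6 positions over a 10-token vocabulary.
prob = torch.log_softmax(torch.randn(6, 10), dim=1)
target = torch.randint(0, 10, (6,))
reward = torch.rand(6)
print(GANLoss()(prob, target, reward))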


target_lstm = TargetLSTM(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
if opt.cuda:
    target_lstm = target_lstm.cuda()
# Generate toy data using target lstm
print('Generating data ...')
generate_samples(target_lstm, BATCH_SIZE, GENERATED_NUM, POSITIVE_FILE)
# Load data from file
gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)
original_generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)


def main(generator_, discriminator, model_index):
    #random.seed(SEED)
    #np.random.seed(SEED)

    perf_dict = {}
    true_loss = []
    generator_loss = []
    disc_loss = []
    nb_batch_per_epoch = int(GENERATED_NUM / BATCH_SIZE)

    # Define Networks
    generator = copy.deepcopy(generator_)
Пример #19
0
def main():
    random.seed(SEED)
    np.random.seed(SEED)

    # Define Networks
    generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
    discriminator = Discriminator(d_num_class, VOCAB_SIZE, d_emb_dim,
                                  d_filter_sizes, d_num_filters, d_dropout)
    target_lstm = TargetLSTM(VOCAB_SIZE, g_emb_dim, g_hidden_dim,
                             opt.cuda)  # differs from the Generator class only in its sample method
    if opt.cuda:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        target_lstm = target_lstm.cuda()
    # Generate toy data using target lstm, i.e. create a new dataset and pretend it is the real data
    print('Generating data ...')
    generate_samples(target_lstm, BATCH_SIZE, GENERATED_NUM,
                     POSITIVE_FILE)  # with no training at all, the randomly initialized LSTM directly emits a 9984 x 20 matrix of token ids

    # Load data from file
    # Each iteration yields a (data, target) pair: data is the sequence with a 0 prepended, target is the sequence with a 0 appended
    gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)

    # Pretrain Generator using MLE
    gen_criterion = nn.NLLLoss(
        reduction='sum'
    )  # You may use CrossEntropyLoss instead if you prefer not to add an extra LogSoftmax layer.
    gen_optimizer = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    print('Pretrain with MLE ...')
    for epoch in range(PRE_EPOCH_NUM):  #PRE_EPOCH_NUM =120
        loss = train_epoch(generator, gen_data_iter, gen_criterion,
                           gen_optimizer)  # update the generator's parameters to fit gen_data_iter
        print('Epoch [%d] Model Loss: %f' % (epoch, loss))  # Sept 1
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
        loss = eval_epoch(target_lstm, eval_iter,
                          gen_criterion)  # is the generator moving closer to target_lstm?
        print('Epoch [%d] True Loss: %f' % (epoch, loss))

    # Pretrain Discriminator
    dis_criterion = nn.NLLLoss(reduction='sum')
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    print('Pretrain Discriminator ...')
    for epoch in range(5):
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE)
        dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
        for _ in range(3):
            loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                               dis_optimizer)
            print('Epoch [%d], loss: %f' % (epoch, loss))
    # Adversarial Training (the pretraining above warm-starts both networks before the GAN phase)
    rollout = Rollout(generator, 0.8)
    print('#####################################################')
    print('Start Adversarial Training...\n')
    gen_gan_loss = GANLoss()
    gen_gan_optm = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_gan_loss = gen_gan_loss.cuda()
    gen_criterion = nn.NLLLoss(reduction='sum')
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    dis_criterion = nn.NLLLoss(reduction='sum')
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    for total_batch in range(TOTAL_BATCH):
        ## Train the generator for one step
        for it in range(1):
            samples = generator.sample(BATCH_SIZE, g_sequence_len)
            # construct the input to the generator, add zeros before samples and delete the last column
            zeros = torch.zeros((BATCH_SIZE, 1)).type(torch.LongTensor)
            if samples.is_cuda:
                zeros = zeros.cuda()
            inputs = Variable(
                torch.cat([zeros, samples.data], dim=1)[:, :-1].contiguous())
            targets = Variable(samples.data).contiguous().view(
                (-1, ))  # note: inputs keeps shape (batch_size, seq_len) while targets is flattened to 1-D
            # calculate the reward
            rewards = rollout.get_reward(
                samples, 16, discriminator)  # rewards:(batch_size,seq_len)
            rewards = Variable(torch.Tensor(rewards))
            rewards = torch.exp(rewards).contiguous().view((-1, ))
            if opt.cuda:
                rewards = rewards.cuda()
            prob = generator.forward(inputs)
            loss = gen_gan_loss(prob, targets, rewards)
            gen_gan_optm.zero_grad()
            loss.backward()
            gen_gan_optm.step()

        if total_batch % 1 == 0 or total_batch == TOTAL_BATCH - 1:
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
            eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
            loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
            print('Batch [%d] True Loss: %f' % (total_batch, loss))
        rollout.update_params()  # once this step is understood, the rest is straightforward

        for _ in range(4):
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM,
                             NEGATIVE_FILE)
            dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE,
                                        BATCH_SIZE)
            for _ in range(2):
                loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                                   dis_optimizer)
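
The MLE pretraining in this example notes that CrossEntropyLoss can replace NLLLoss plus an explicit LogSoftmax. A small self-contained check of that equivalence, with toy sizes standing in for the real batch and vocabulary:

import torch
import torch.nn as nn

logits = torch.randn(8, 5000)                  # raw scores, e.g. (batch * seq_len, VOCAB_SIZE)
targets = torch.randint(0, 5000, (8,))

nll = nn.NLLLoss(reduction='sum')
ce = nn.CrossEntropyLoss(reduction='sum')

loss_nll = nll(torch.log_softmax(logits, dim=1), targets)  # NLLLoss expects log-probabilities
loss_ce = ce(logits, targets)                              # CrossEntropyLoss applies log-softmax internally
assert torch.allclose(loss_nll, loss_ce, atol=1e-5)
print(loss_nll.item(), loss_ce.item())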
Пример #20
0
def main():
    random.seed(SEED)
    np.random.seed(SEED)
    track_blue = []
    # Build up dataset
    s_train, s_test = load_from_big_file('obama_speech',g_sequence_len)
    # idx_to_word: List of id to word
    # word_to_idx: Dictionary mapping word to id
    idx_to_word, word_to_idx = fetch_vocab(s_train, s_train, s_test)
    # input_seq, target_seq = prepare_data(DATA_GERMAN, DATA_ENGLISH, word_to_idx)

    global VOCAB_SIZE
    VOCAB_SIZE = len(idx_to_word)
    save_vocab(CHECKPOINT_PATH + 'metadata.data', idx_to_word, word_to_idx, VOCAB_SIZE, g_emb_dim, g_hidden_dim)

    print('VOCAB SIZE:', VOCAB_SIZE)
    # Define Networks
    generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
    discriminator = Discriminator(d_num_class, VOCAB_SIZE, d_emb_dim, d_filter_sizes, d_num_filters, d_dropout)
    if opt.cuda:
        generator = generator.cuda()
        discriminator = discriminator.cuda()

    # Generate toy data using target lstm
    print('Generating data ...')

    # Generate samples either from sentences file or lstm
    # Sentences file will be structured input sentences
    # LSTM based is BOG approach
    generate_real_data('obama_speech', BATCH_SIZE, GENERATED_NUM, idx_to_word, word_to_idx,
                       POSITIVE_FILE, TEST_FILE)
    # generate_samples(target_lstm, BATCH_SIZE, GENERATED_NUM, POSITIVE_FILE, idx_to_word)
    # generate_samples(target_lstm, BATCH_SIZE, 10, TEST_FILE, idx_to_word)
    # Create Test data iterator for testing
    test_iter = GenDataIter(TEST_FILE, BATCH_SIZE)
    #test_predict(generator, test_iter, idx_to_word, train_mode=True)

    # Load data from file
    gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)
    lines = read_file(POSITIVE_FILE)

    refrences = []
    for line in lines:
        phrase = []
        for char in line:
            phrase.append(idx_to_word[char])

        refrences.append(' '.join(phrase))
        #refrences.append(phrase)



    # Pretrain Generator using MLE
    gen_criterion = nn.NLLLoss(size_average=False)
    gen_optimizer = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    print('Pretrain with MLE ...')
    for epoch in range(PRE_EPOCH_NUM):
        loss = train_epoch(generator, gen_data_iter, gen_criterion, gen_optimizer)
        print('Epoch [%d] Model Loss: %f' % (epoch, loss))
        sys.stdout.flush()
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        if track_training:
            lines = read_file(EVAL_FILE)
            hypotheses = []
            for line in lines:
                phrase = []
                for char in line:
                    phrase.append(idx_to_word[char])

                hypotheses.append(' '.join(phrase))
                #hypotheses.append(phrase)

            bleu_score = get_moses_multi_bleu(hypotheses, refrences, lowercase=True)
            track_blue.append(bleu_score)
            print(track_blue)

        # generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        # eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
        # loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
        # print('Epoch [%d] True Loss: %f' % (epoch, loss))

    # Pretrain Discriminator
    dis_criterion = nn.NLLLoss(size_average=False)
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    print('Pretrain Discriminator ...')
    for epoch in range(5):
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE)
        dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
        for _ in range(3):
            loss = train_epoch(discriminator, dis_data_iter, dis_criterion, dis_optimizer)
            print('Epoch [%d], loss: %f' % (epoch, loss))
            #sys.stdout.flush()

    # Adversarial Training
    rollout = Rollout(generator, 0.8)
    print('#####################################################')
    print('Start Adversarial Training...\n')
    gen_gan_loss = GANLoss()
    gen_gan_optm = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_gan_loss = gen_gan_loss.cuda()
    gen_criterion = nn.NLLLoss(size_average=False)
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    dis_criterion = nn.NLLLoss(size_average=False)
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    for total_batch in range(TOTAL_BATCH):
        ## Train the generator for one step
        for it in range(1):
            samples = generator.sample(BATCH_SIZE, g_sequence_len)
            # construct the input to the generator, add zeros before samples and delete the last column
            zeros = torch.zeros((BATCH_SIZE, 1)).type(torch.LongTensor)
            if samples.is_cuda:
                zeros = zeros.cuda()
            inputs = Variable(torch.cat([zeros, samples.data], dim=1)[:, :-1].contiguous())
            targets = Variable(samples.data).contiguous().view((-1,))
            # calculate the reward
            rewards = rollout.get_reward(samples, 16, discriminator)
            rewards = Variable(torch.Tensor(rewards))
            rewards = torch.exp(rewards).contiguous().view((-1,))
            if opt.cuda:
                rewards = rewards.cuda()
            prob = generator.forward(inputs)
            # print('SHAPE: ', prob.shape, targets.shape, rewards.shape)
            loss = gen_gan_loss(prob, targets, rewards)
            gen_gan_optm.zero_grad()
            loss.backward()
            gen_gan_optm.step()
            # print('GEN PRED DIM: ', prob.shape)

        if total_batch % 1 == 0 or total_batch == TOTAL_BATCH - 1:
            # generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
            # eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
            # loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
            # print('Batch [%d] True Loss: %f' % (total_batch, loss))

            # predictions = torch.max(prob, dim=1)[1]
            # predictions = predictions.view(BATCH_SIZE, -1)
            # # print('PRED SHAPE:' , predictions.shape)
            # for each_sen in list(predictions):
            #     print('Training Output:', generate_sentence_from_id(idx_to_word, each_sen, DEBUG_FILE))
            #
            # test_predict(generator, test_iter, idx_to_word, train_mode=True)
            loss_gen = eval_epoch(generator, gen_data_iter, gen_criterion)
            print('Epoch [%d] Model Loss: %f' % (total_batch, loss_gen))
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
            #show_some_generated_sequences(idx_to_word, 10, EVAL_FILE)
            sys.stdout.flush()
            if track_training:
                lines = read_file(EVAL_FILE)
                hypotheses = []
                for line in lines:
                    phrase = []
                    for char in line:
                        phrase.append(idx_to_word[char])

                    hypotheses.append(' '.join(phrase))
                    # hypotheses.append(phrase)

                bleu_score = get_moses_multi_bleu(hypotheses, refrences, lowercase=True)
                track_blue.append(bleu_score)
                print(track_blue)

            torch.save(generator.state_dict(), CHECKPOINT_PATH + 'generator_seqgan.model')
            torch.save(discriminator.state_dict(), CHECKPOINT_PATH + 'discriminator_seqgan.model')
        rollout.update_params()

        for _ in range(4):
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE)
            dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
            for _ in range(2):
                loss = train_epoch(discriminator, dis_data_iter, dis_criterion, dis_optimizer)

    track_blue = np.array(track_blue)
    np.save(ROOT_PATH + 'track_blue_seqgan2.npy', track_blue)

    plt.plot(track_blue)
    plt.show()
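
This example tracks BLEU with get_moses_multi_bleu over detokenized strings. An alternative sketch using NLTK's corpus_bleu on the same id-to-word mapping is below; the one-reference-per-hypothesis pairing and the toy vocabulary are assumptions.

from nltk.translate.bleu_score import corpus_bleu

def ids_to_tokens(id_lines, idx_to_word):
    # Same mapping as the loops above: each line of token ids becomes a list of words.
    return [[idx_to_word[i] for i in line] for line in id_lines]

def unigram_bleu(reference_ids, hypothesis_ids, idx_to_word):
    # Assumes one reference line per hypothesis line, aligned by position.
    references = [[tokens] for tokens in ids_to_tokens(reference_ids, idx_to_word)]
    hypotheses = ids_to_tokens(hypothesis_ids, idx_to_word)
    return corpus_bleu(references, hypotheses, weights=(1, 0, 0, 0))


# Toy usage with a two-word vocabulary.
vocab = ['hello', 'world']
print(unigram_bleu([[0, 1]], [[0, 1]], vocab))  # 1.0: hypothesis identical to its reference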
Пример #21
0
def main():
    random.seed(SEED)
    np.random.seed(SEED)

    # Define Networks
    generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
    discriminator = Discriminator(d_num_class, VOCAB_SIZE, d_emb_dim,
                                  d_filter_sizes, d_num_filters, d_dropout)
    target_lstm = TargetLSTM(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
    if opt.cuda:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        target_lstm = target_lstm.cuda()
    # Generate toy data using target lstm
    print('Generating data ...')
    generate_samples(target_lstm, BATCH_SIZE, GENERATED_NUM, POSITIVE_FILE)

    # Load data from file
    gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)

    # Pretrain Generator using MLE
    gen_criterion = nn.NLLLoss(size_average=False)
    gen_optimizer = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    print('Pretrain with MLE ...')
    for epoch in range(PRE_EPOCH_NUM):
        loss = train_epoch(generator, gen_data_iter, gen_criterion,
                           gen_optimizer)
        print('Epoch [%d] Model Loss: %f' % (epoch, loss))
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
        loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
        print('Epoch [%d] True Loss: %f' % (epoch, loss))

    # Pretrain Discriminator
    dis_criterion = nn.NLLLoss(size_average=False)
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    print('Pretrain Discriminator ...')
    for epoch in range(5):
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE)
        dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
        for _ in range(3):
            loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                               dis_optimizer)
            print('Epoch [%d], loss: %f' % (epoch, loss))
    # Adversarial Training
    rollout = Rollout(generator, 0.8)
    print('#####################################################')
    print('Start Adversarial Training...\n')
    gen_gan_loss = GANLoss()
    gen_gan_optm = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_gan_loss = gen_gan_loss.cuda()
    gen_criterion = nn.NLLLoss(size_average=False)
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    dis_criterion = nn.NLLLoss(size_average=False)
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    for total_batch in range(TOTAL_BATCH):
        # Train the generator for one step
        for it in range(1):
            samples = generator.sample(BATCH_SIZE, g_sequence_len)
            # construct the input to the generator, add zeros before samples and delete the last column
            zeros = torch.zeros((BATCH_SIZE, 1)).type(torch.LongTensor)
            if samples.is_cuda:
                zeros = zeros.cuda()
            inputs = Variable(
                torch.cat([zeros, samples.data], dim=1)[:, :-1].contiguous())
            targets = Variable(samples.data).contiguous().view((-1, ))
            # calculate the reward
            rewards = rollout.get_reward(samples, 16, discriminator)
            rewards = Variable(torch.Tensor(rewards))
            if opt.cuda:
                rewards = torch.exp(rewards.cuda()).contiguous().view((-1, ))
            prob = generator.forward(inputs)
            loss = gen_gan_loss(prob, targets, rewards)
            gen_gan_optm.zero_grad()
            loss.backward()
            gen_gan_optm.step()

        if total_batch % 1 == 0 or total_batch == TOTAL_BATCH - 1:
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
            eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
            loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
            print('Batch [%d] True Loss: %f' % (total_batch, loss))
        rollout.update_params()

        for _ in range(4):
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM,
                             NEGATIVE_FILE)
            dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE,
                                        BATCH_SIZE)
            for _ in range(2):
                loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                                   dis_optimizer)


def main():
    random.seed(SEED)
    np.random.seed(SEED)
    track_blue = []

    # Build up dataset
    s_train, s_test = load_from_big_file('obama_speech', g_sequence_len)
    # idx_to_word: List of id to word
    # word_to_idx: Dictionary mapping word to id
    idx_to_word, word_to_idx = fetch_vocab(s_train, s_train, s_test)
    # input_seq, target_seq = prepare_data(DATA_GERMAN, DATA_ENGLISH, word_to_idx)

    global VOCAB_SIZE
    VOCAB_SIZE = len(idx_to_word)
    save_vocab(CHECKPOINT_PATH + 'metadata.data', idx_to_word, word_to_idx,
               VOCAB_SIZE, g_emb_dim, g_hidden_dim)

    print('VOCAB SIZE:', VOCAB_SIZE)
    # Define Networks
    generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)

    if opt.cuda:
        generator = generator.cuda()

    # Generate toy data using target lstm
    print('Generating data ...')

    # Generate samples either from sentences file or lstm
    # Sentences file will be structured input sentences
    # LSTM based is BOG approach
    generate_real_data('obama_speech', BATCH_SIZE, GENERATED_NUM, idx_to_word,
                       word_to_idx, POSITIVE_FILE, TEST_FILE)
    # generate_samples(target_lstm, BATCH_SIZE, GENERATED_NUM, POSITIVE_FILE, idx_to_word)
    # generate_samples(target_lstm, BATCH_SIZE, 10, TEST_FILE, idx_to_word)
    # Create Test data iterator for testing
    test_iter = GenDataIter(TEST_FILE, BATCH_SIZE)
    #test_predict(generator, test_iter, idx_to_word, train_mode=True)

    # Load data from file
    gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)
    lines = read_file(POSITIVE_FILE)

    refrences = []
    for line in lines:
        phrase = []
        for char in line:
            phrase.append(idx_to_word[char])

        refrences.append(' '.join(phrase))
        #refrences.append(phrase)

    # Pretrain Generator using MLE
    gen_criterion = nn.NLLLoss(size_average=False)
    gen_optimizer = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    print('Pretrain with MLE ...')
    for epoch in range(PRE_EPOCH_NUM):
        loss = train_epoch(generator, gen_data_iter, gen_criterion,
                           gen_optimizer)
        print('Epoch [%d] Model Loss: %f' % (epoch, loss))
        sys.stdout.flush()
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        if track_training:
            lines = read_file(EVAL_FILE)
            hypotheses = []
            for line in lines:
                phrase = []
                for char in line:
                    phrase.append(idx_to_word[char])

                hypotheses.append(' '.join(phrase))
                #hypotheses.append(phrase)

            bleu_score = get_moses_multi_bleu(hypotheses,
                                              refrences,
                                              lowercase=True)
            track_blue.append(bleu_score)
            print(track_blue)

    torch.save(generator.state_dict(), CHECKPOINT_PATH + 'generator_mle.model')
    track_blue = np.array(track_blue)
    np.save(ROOT_PATH + 'track_blue_mle3.npy', track_blue)

    plt.plot(track_blue)
    plt.show()
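
The MLE run above ends by saving generator.state_dict(). A hedged usage sketch for reloading that checkpoint and printing a few samples; it reuses names defined earlier in this script (Generator, CHECKPOINT_PATH, idx_to_word, BATCH_SIZE and the g_* hyperparameters), and the eval/no_grad pattern is standard PyTorch rather than code from the original file.

import torch

generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, False)  # same signature as above, on CPU
state = torch.load(CHECKPOINT_PATH + 'generator_mle.model', map_location='cpu')
generator.load_state_dict(state)
generator.eval()

with torch.no_grad():
    samples = generator.sample(BATCH_SIZE, g_sequence_len)  # (BATCH_SIZE, g_sequence_len) token ids
    for row in samples.tolist():
        print(' '.join(idx_to_word[i] for i in row))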