Example #1
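A Transformer-based generator for SeqGAN-style text generation, followed by a training driver: MLE pretraining of the generator, discriminator pretraining, and adversarial training with rollout-based rewards.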
# Assumed imports for this example: the standard library and torch modules used
# below; the project-local helpers (transformer, GenDataIter, DisDataIter,
# Rollout, GANLoss, etc.) are assumed to be importable from this project.
import copy
import random
import sys

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable


class Generator_attention(nn.Module):
    """Generator """
    def __init__(self,
                 num_emb,
                 emb_dim,
                 hidden_dim,
                 seq_len,
                 batch_size,
                 use_cuda,
                 test_mode=False):
        super(Generator_attention, self).__init__()
        # Constants Initialization
        self.SOS_Index = 0
        self.EOS_Index = 1
        self.PAD_Index = 2

        # Embeddings
        self.emb = nn.Embedding(num_emb, emb_dim)
        self.model = transformer.Transformer(self.emb,
                                             self.PAD_Index,
                                             self.emb.num_embeddings,
                                             max_seq_len=seq_len)
        self.test_mode = test_mode
        if not test_mode:
            self.data_loader = GenDataIter('inshorts_test/real.data',
                                           batch_size)
            self.data_loader.reset()
        """
        self.num_emb = num_emb
        self.emb_dim = emb_dim
        self.hidden_dim = hidden_dim
        self.use_cuda = use_cuda
        self.emb = nn.Embedding(num_emb, emb_dim)
        self.lstm = nn.LSTM(emb_dim, hidden_dim, batch_first=True)
        self.lin = nn.Linear(hidden_dim, num_emb)
        self.softmax = nn.LogSoftmax()
        self.init_params()
        """

    def forward(self, x):
        """
        Args:
            x: (batch_size, seq_len), sequence of tokens generated by generator
        """
        # Run the transformer with x as both the source and the target sequence.
        return self.model(x, x)
        # Disabled LSTM forward path, kept for reference:
        # emb = self.emb(x)
        # h0, c0 = self.init_hidden(x.size(0))
        # output, (h, c) = self.lstm(emb, (h0, c0))
        # pred = self.softmax(self.lin(output.contiguous().view(-1, self.hidden_dim)))
        # return pred

    def step(self, x, h, c):
        """
        Args:
            x: (batch_size,  1), sequence of tokens generated by generator
            h: (1, batch_size, hidden_dim), lstm hidden state
            c: (1, batch_size, hidden_dim), lstm cell state
        """
        # NOTE: this method uses the LSTM layers that are commented out in __init__.
        emb = self.emb(x)
        output, (h, c) = self.lstm(emb, (h, c))
        pred = F.softmax(self.lin(output.view(-1, self.hidden_dim)), dim=-1)
        return pred, h, c

    def init_hidden(self, batch_size):
        h = Variable(torch.zeros((1, batch_size, self.hidden_dim)))
        c = Variable(torch.zeros((1, batch_size, self.hidden_dim)))
        if self.use_cuda:
            h, c = h.cuda(), c.cuda()
        return h, c

    def init_params(self):
        for param in self.parameters():
            param.data.uniform_(-0.05, 0.05)

    def sample(self, batch_size, seq_len, x=torch.tensor([])):
        if self.test_mode:
            print('In Test mode')
            return None

        if self.data_loader.idx >= self.data_loader.data_num:
            self.data_loader.reset()
        if len(x.shape) > 1:
            input_seq = x
        else:
            input_seq = self.data_loader.next()[0]
        if self.use_cuda:
            input_seq = input_seq.cuda()
        sampled_output = transformer.sample_output(self.model, input_seq,
                                                   self.EOS_Index,
                                                   self.PAD_Index,
                                                   input_seq.shape[1])
        return sampled_output
        """
def main():
    random.seed(SEED)
    np.random.seed(SEED)
    # BLEU-only mode: the early exit() below skips the rest of the training pipeline.
    calc_bleu([1, 10, 12])
    exit()
    # Build up dataset
    s_train, s_test = load_from_big_file('../data/train_data_obama.txt')
    # idx_to_word: List of id to word
    # word_to_idx: Dictionary mapping word to id
    idx_to_word, word_to_idx = fetch_vocab(s_train, s_train, s_test)
    # TODO: 1. Prepare data for attention model
    # input_seq, target_seq = prepare_data(DATA_GERMAN, DATA_ENGLISH, word_to_idx)

    global VOCAB_SIZE
    VOCAB_SIZE = len(idx_to_word)

    save_vocab(CHECKPOINT_PATH + 'metadata.data', idx_to_word, word_to_idx,
               VOCAB_SIZE, g_emb_dim, g_hidden_dim, g_sequence_len)

    print('VOCAB SIZE:', VOCAB_SIZE)
    # Define Networks
    generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, g_sequence_len,
                          BATCH_SIZE, opt.cuda)
    discriminator = Discriminator(d_num_class, VOCAB_SIZE, d_emb_dim,
                                  d_filter_sizes, d_num_filters, d_dropout)
    target_lstm = TargetLSTM(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
    if opt.cuda:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        target_lstm = target_lstm.cuda()
    # Build the positive (real) and test data files from the training corpus
    print('Generating data ...')
    generate_real_data('../data/train_data_obama.txt', BATCH_SIZE,
                       GENERATED_NUM, idx_to_word, word_to_idx, POSITIVE_FILE,
                       TEST_FILE)
    # Create Test data iterator for testing
    test_iter = GenDataIter(TEST_FILE, BATCH_SIZE)
    # generate_samples(target_lstm, BATCH_SIZE, GENERATED_NUM, POSITIVE_FILE, idx_to_word)

    # Load data from file
    gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)

    # Pretrain Generator using MLE
    # gen_criterion = nn.NLLLoss(size_average=False)
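    # CrossEntropyLoss expects raw logits, whereas the commented-out NLLLoss
    # variant would expect log-probabilities.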
    gen_criterion = nn.CrossEntropyLoss()
    gen_optimizer = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    print('Pretrain with MLE ...')
    for epoch in range(PRE_EPOCH_NUM):
        loss = train_epoch(generator, gen_data_iter, gen_criterion,
                           gen_optimizer)
        print('Epoch [%d] Model Loss: %f' % (epoch, loss))
        print('Training Output')
        test_predict(generator, test_iter, idx_to_word, train_mode=True)

        sys.stdout.flush()
        # TODO: 2. Flags to ensure dimension of model input is handled
        # generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        """
        eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
        print('Iterator Done')
        loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
        print('Epoch [%d] True Loss: %f' % (epoch, loss))
        """
    print('OUTPUT AFTER PRE-TRAINING')
    test_predict(generator, test_iter, idx_to_word, train_mode=True)

    # Pretrain Discriminator
    dis_criterion = nn.NLLLoss(reduction='sum')
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    print('Pretrain Discriminator ...')
    for epoch in range(3):
        generate_samples(generator, BATCH_SIZE, GENERATED_NUM, NEGATIVE_FILE)
        dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE, BATCH_SIZE)
        for _ in range(3):
            loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                               dis_optimizer)
            print('Epoch [%d], loss: %f' % (epoch, loss))
            sys.stdout.flush()
    # Adversarial Training
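    # Rollout keeps a copy of the generator for reward estimation; 0.8 is
    # presumably its parameter update rate, as in standard SeqGAN implementations.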
    rollout = Rollout(generator, 0.8)
    print('#####################################################')
    print('Start Adversarial Training...\n')
    gen_gan_loss = GANLoss()

    gen_gan_optm = optim.Adam(generator.parameters())
    if opt.cuda:
        gen_gan_loss = gen_gan_loss.cuda()
    gen_criterion = nn.NLLLoss(reduction='sum')
    if opt.cuda:
        gen_criterion = gen_criterion.cuda()
    dis_criterion = nn.NLLLoss(reduction='sum')
    dis_optimizer = optim.Adam(discriminator.parameters())
    if opt.cuda:
        dis_criterion = dis_criterion.cuda()
    real_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)
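    # Each adversarial iteration: one policy-gradient update of the generator
    # using rollout rewards, then several discriminator training epochs below.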
    for total_batch in range(TOTAL_BATCH):
        ## Train the generator for one step
        for it in range(1):
            if real_iter.idx >= real_iter.data_num:
                real_iter.reset()
            inputs = real_iter.next()[0]
            inputs = inputs.cuda()
            samples = generator.sample(BATCH_SIZE, g_sequence_len, inputs)
            samples = samples.cpu()
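            # Monte Carlo rollouts: each sampled sequence is completed 16 times
            # and scored by the discriminator to obtain per-token rewards.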
            rewards = rollout.get_reward(samples, 16, discriminator)
            rewards = Variable(torch.Tensor(rewards))
            if opt.cuda:
                rewards = rewards.cuda()
            # Apply the same exp + flatten on CPU and GPU.
            rewards = torch.exp(rewards).contiguous().view((-1, ))
            prob = generator.forward(inputs)
            mini_batch = prob.shape[0]
            prob = torch.reshape(
                prob,
                (prob.shape[0] * prob.shape[1], -1))  # flatten to (batch * seq_len, vocab_size)
            targets = copy.deepcopy(inputs).contiguous().view((-1, ))
            loss = gen_gan_loss(prob, targets, rewards)
            gen_gan_optm.zero_grad()
            loss.backward()
            gen_gan_optm.step()
            """
            samples = generator.sample(BATCH_SIZE, g_sequence_len)
            # construct the input to the genrator, add zeros before samples and delete the last column
            zeros = torch.zeros((BATCH_SIZE, 1)).type(torch.LongTensor)
            if samples.is_cuda:
                zeros = zeros.cuda()
            inputs = Variable(torch.cat([zeros, samples.data], dim = 1)[:, :-1].contiguous())
            targets = Variable(samples.data).contiguous().view((-1,))
            print('', inputs.shape, targets.shape)
            print(inputs, targets)
            # calculate the reward
            rewards = rollout.get_reward(samples, 16, discriminator)
            rewards = Variable(torch.Tensor(rewards))
            if opt.cuda:
                rewards = torch.exp(rewards.cuda()).contiguous().view((-1,))
            prob = generator.forward(inputs)
            mini_batch = prob.shape[0]
            prob = torch.reshape(prob, (prob.shape[0] * prob.shape[1], -1)) #prob.view(-1, g_emb_dim)
            loss = gen_gan_loss(prob, targets, rewards)
            gen_gan_optm.zero_grad()
            loss.backward()
            gen_gan_optm.step()
            """
        print('Batch [%d] True Loss: %f' % (total_batch, loss))

        if total_batch % 1 == 0 or total_batch == TOTAL_BATCH - 1:  # currently logs and checkpoints every batch
            # generate_samples(generator, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
            # eval_iter = GenDataIter(EVAL_FILE, BATCH_SIZE)
            # loss = eval_epoch(target_lstm, eval_iter, gen_criterion)
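            # Decode the argmax token at every position and print the generated sentences.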
            if len(prob.shape) > 2:
                prob = torch.reshape(prob, (prob.shape[0] * prob.shape[1], -1))
            predictions = torch.max(prob, dim=1)[1]
            predictions = predictions.view(mini_batch, -1)
            for each_sen in list(predictions):
                print('Train Output:',
                      generate_sentence_from_id(idx_to_word, each_sen))

            test_predict(generator, test_iter, idx_to_word, train_mode=True)
            torch.save(generator.state_dict(),
                       CHECKPOINT_PATH + 'generator.model')
            torch.save(discriminator.state_dict(),
                       CHECKPOINT_PATH + 'discriminator.model')
        rollout.update_params()

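        # Retrain the discriminator: regenerate negative samples 4 times and
        # train 2 epochs on each fresh positive/negative split.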
        for _ in range(4):
            generate_samples(generator, BATCH_SIZE, GENERATED_NUM,
                             NEGATIVE_FILE)
            dis_data_iter = DisDataIter(POSITIVE_FILE, NEGATIVE_FILE,
                                        BATCH_SIZE)
            for _ in range(2):
                loss = train_epoch(discriminator, dis_data_iter, dis_criterion,
                                   dis_optimizer)