Example No. 1
def train_model(model, lr, epochs, train_loader, val_loader, patience):
    optimizer = Adagrad(model.parameters(), lr)
    criterion = nn.MSELoss()

    best_rmse = 100
    rounds_no_improve = 0
    for epoch in range(epochs):
        for users, items, x, y in train_loader:
            y_pred = model(users, items, x)
            loss = criterion(y_pred.reshape(-1), y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        logging.info('Last train loss: {0:.3f}'.format(
            loss.detach().cpu().numpy().tolist()))
        with torch.no_grad():
            errors = np.array([])
            for users, items, x, y in val_loader:
                y_pred = model(users, items, x)
                group_errors = (y_pred - y).reshape(-1).cpu().numpy()
                errors = np.concatenate([errors, group_errors])
            rmse = (errors**2).mean()**0.5
            logging.info('Validation RMSE: {0:.3f}'.format(rmse))
            if rmse < best_rmse:
                best_rmse = rmse
                rounds_no_improve = 0
            else:
                rounds_no_improve += 1
            if rounds_no_improve >= patience:
                return model
    return model
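A minimal usage sketch for the train_model function above. Everything below (the toy recommender, the synthetic data, the hyperparameters) is hypothetical and only illustrates the (users, items, x, y) batch layout and the early-stopping call; it assumes train_model and its own imports are already in scope.

import logging

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

logging.basicConfig(level=logging.INFO)

class ToyRecommender(nn.Module):
    """Hypothetical model whose forward takes (users, items, x), as the loop above expects."""
    def __init__(self, n_users, n_items, n_features, emb_dim=8):
        super().__init__()
        self.user_emb = nn.Embedding(n_users, emb_dim)
        self.item_emb = nn.Embedding(n_items, emb_dim)
        self.head = nn.Linear(2 * emb_dim + n_features, 1)

    def forward(self, users, items, x):
        h = torch.cat([self.user_emb(users), self.item_emb(items), x], dim=1)
        return self.head(h)

# Synthetic data, just to exercise the training loop.
n, n_features = 256, 4
users = torch.randint(0, 10, (n,))
items = torch.randint(0, 20, (n,))
x = torch.randn(n, n_features)
y = torch.randn(n)
loader = DataLoader(TensorDataset(users, items, x, y), batch_size=32, shuffle=True)

model = train_model(ToyRecommender(10, 20, n_features), lr=0.05, epochs=20,
                    train_loader=loader, val_loader=loader, patience=3)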
Example No. 2
def demo_pytorch_vae_mnist(hidden_sizes=[200, 200],
                           latent_dim=5,
                           distribution_type='bernoulli',
                           minibatch_size=20,
                           checkpoints=100,
                           n_epochs=20):

    cp = Checkpoints(checkpoints)

    model = VAEModel(
        encoder=make_mlp_encoder(visible_dim=784,
                                 hidden_sizes=hidden_sizes,
                                 latent_dim=latent_dim),
        decoder=make_mlp_decoder(latent_dim=latent_dim,
                                 hidden_sizes=hidden_sizes,
                                 visible_dim=784,
                                 dist_type=distribution_type),
        latent_dim=latent_dim,
    )
    # optimizer = Adam(params = model.parameters())
    # optimizer = RMSprop(params = model.parameters())
    # optimizer = Adamax(params = model.parameters())
    optimizer = Adagrad(params=model.parameters())
    # optimizer = SGD(lr=0.001, params = model.parameters())

    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=True,
        download=True,
        transform=transforms.Compose([transforms.ToTensor()])),
                                               batch_size=minibatch_size,
                                               shuffle=True)

    for epoch in range(n_epochs):
        for batch_idx, (x, y) in enumerate(train_loader):

            epoch_pt = epoch + batch_idx / len(train_loader)

            optimizer.zero_grad()
            loss = -model.elbo(x.flatten(1)).sum()
            loss.backward()
            optimizer.step()

            rate = measure_global_rate('training')

            if cp():

                print(f'Mean Rate at Epoch {epoch_pt:.2g}: {rate:.3g}iter/s')
                z_samples = model.prior().sample((64, ))
                x_dist = model.decode(z_samples)
                dbplot(x_dist.mean.reshape(-1, 28, 28),
                       'Sample Means',
                       title=f'Sample Means at epoch {epoch_pt:.2g}')
Example No. 3
    def fit(self, seq_list: List, objective='cross_entropy',
            print_freq=1000, num_epochs=10, sgd_kwargs={}):
        ''' train LSTM using DataLoader

        Parameters
        ----------
        seq_list : list
            each element corresponds to a sequence
        objective : str
            objective function
        print_freq : int
            how frequently loss is printed
        num_epochs : int
            the number of training epochs
        sgd_kwargs : dict
            keywords fed into SGD
        '''
        if objective == 'cross_entropy':
            criterion = nn.CrossEntropyLoss()
        elif objective == 'mse':
            criterion = nn.MSELoss()
        elif objective == 'nll': # nll stands for negative log-likelihood
            criterion = nn.NLLLoss()
        else:
            raise NotImplementedError

        optimizer = Adagrad(self.parameters(), **sgd_kwargs)
        i = 0
        running_loss = 0
        for epoch in range(num_epochs):
            for each_idx in range(0, len(seq_list), self.batch_size):
                each_seq = torch.stack(
                    seq_list[each_idx:each_idx + self.batch_size], dim=1)
                seq = Variable(each_seq, requires_grad=False)

                optimizer.zero_grad()
                pred_seq = self.forward(seq[:-1])
                loss = criterion(pred_seq, seq[1:])  # next-step targets
                loss.backward()
                optimizer.step()
                self.init_hidden()

                # print statistics
                running_loss += loss.item()  # loss.data[0] is no longer supported in modern PyTorch
                i += 1
                if i % print_freq == print_freq-1:
                    print('epoch: {}\t total batches: {}\t loss: {}'.format(
                        epoch + 1, i + 1, running_loss / print_freq))
                    running_loss = 0.0

        print('Finished Training')
Example No. 4
def train(inputs, outputs, model, l1, l2, lr=1e-5, epochs=10000):
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = Adagrad(model.parameters(), lr=lr)

    log = []
    for _ in range(epochs):
        prediction = model(inputs)
        acc = tn((prediction.max(1)[1] == outputs).float().mean())
        original_loss = criterion(prediction, outputs)
        penalty = model.penalty(l1, l2)
        error = original_loss + penalty
        optimizer.zero_grad()
        error.backward()
        log.append(
            (tn(original_loss), tn(penalty), tn(get_sparsity(model)), acc))
        optimizer.step()

    return np.array(log)
Example No. 5
    def train(self, device):
        set_random_seed()
        self.loaded_data.negative_sample()
        # Compose Graph NN
        gnn_channel = GNNChannel(self.sr_ent_num, self.tg_ent_num, self.dim,
                                 self.layer_num, self.drop_out, self.channels)
        self.gnn_channel = gnn_channel
        gnn_channel.to(device)
        gnn_channel.train()

        # Prepare optimizer
        optimizer = Adagrad(filter(lambda p: p.requires_grad,
                                   gnn_channel.parameters()),
                            lr=self.learning_rate,
                            weight_decay=self.l2_regularization)
        criterion = AlignLoss(self.margin_gamma)

        best_hit_at_1 = 0
        best_epoch_num = 0

        for epoch_num in range(1, self.epoch_num + 1):
            gnn_channel.train()
            optimizer.zero_grad()
            sr_seed_hid, tg_seed_hid, _, _ = gnn_channel.forward(
                self.loaded_data.train_sr_ent_seeds,
                self.loaded_data.train_tg_ent_seeds)
            loss = criterion(sr_seed_hid, tg_seed_hid)
            loss.backward()
            optimizer.step()
            if epoch_num % self.nega_sample_freq == 0:
                if str(self.directory).find('DWY100k') >= 0:
                    self.loaded_data.negative_sample()
                else:
                    self.negative_sample()
                hit_at_1 = self.evaluate(epoch_num,
                                         gnn_channel,
                                         print_info=False,
                                         device=device)
                if hit_at_1 > best_hit_at_1:
                    best_hit_at_1 = hit_at_1
                    best_epoch_num = epoch_num
        print('Model best Hit@1 on valid set is %.2f at %d epoch.' %
              (best_hit_at_1, best_epoch_num))
        return best_hit_at_1, best_epoch_num
Example No. 6
def train(inputs, outputs, model, l1, l2, lr=1e-5, epochs=10000):
    criterion = torch.nn.MSELoss()
    optimizer = Adagrad(model.parameters(), lr=lr)

    # Everyone starts the same way
    for p in model.parameters():
        # p.data.fill_(1.0)
        pass

    log = []
    for _ in range(epochs):
        original_loss = criterion(model(inputs), outputs)
        penalty = model.penalty(l1, l2)
        error = original_loss + penalty
        optimizer.zero_grad()
        error.backward()
        log.append((tn(original_loss), tn(penalty), tn(get_sparsity(model))))
        optimizer.step()

    return np.array(log)
Example No. 7
def run(dim, ds, epochs, attempts, lrs, reg_coef):
    losses = pd.DataFrame(columns=['lr', 'epoch', 'attempt', 'loss'])
    total_epochs = len(lrs) * len(attempts) * len(epochs)
    with tqdm(total=total_epochs, desc='lr = NA, attempt = NA, epoch = NA, loss = NA', unit='epochs',
              ncols=140) as pbar:
        for lr in lrs:
            for attempt in attempts:
                x = torch.empty(dim, requires_grad=True, dtype=torch.double)
                torch.nn.init.normal_(x)
                opt = Adagrad([x], lr=lr)

                for epoch in epochs:
                    train_loss = 0
                    for X, y in DataLoader(ds, shuffle=True, batch_size=1):
                        opt.zero_grad()

                        if y.item() == 0:
                            score = -torch.dot(X[0, :], x)
                        else:
                            score = torch.dot(X[0, :], x)

                        loss = torch.log1p(torch.exp(score)) + (reg_coef / 2) * torch.dot(x, x)
                        loss.backward()

                        train_loss += loss.item()
                        opt.step()

                    train_loss /= len(ds)
                    # DataFrame.append was removed in pandas 2.x; use pd.concat instead
                    losses = pd.concat([losses, pd.DataFrame.from_dict(
                        {'loss': [train_loss],
                         'epoch': [epoch],
                         'lr': [lr],
                         'attempt': [attempt]})], sort=True)
                    pbar.update()
                    pbar.set_description(desc=f'lr = {lr}, attempt = {attempt}, epoch = {epoch}, loss = {train_loss}')
    return losses
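A hypothetical call to run() above. The synthetic dataset and parameter values are placeholders chosen only to match what the inner loop expects: double-precision feature rows, 0/1 integer labels, and iterables for epochs, attempts and lrs.

import torch
from torch.utils.data import TensorDataset

dim = 8
X = torch.randn(200, dim, dtype=torch.double)   # double, matching the parameter vector x
y = (X[:, 0] > 0).long()                        # 0/1 labels, read via y.item()
ds = TensorDataset(X, y)

losses = run(dim=dim, ds=ds, epochs=range(5), attempts=range(3),
             lrs=[0.1, 0.01], reg_coef=0.1)
print(losses)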
Example No. 8
class Trainer:
    def __init__(self, config):
        self.config = config
        self.step = 0
        self.vocab = Vocab(config.vocab_file, config.vocab_size)
        self.train_data = CNNDMDataset('train', config.data_path, config,
                                       self.vocab)
        self.validate_data = CNNDMDataset('val', config.data_path, config,
                                          self.vocab)
        # self.model = Model(config).to(device)
        # self.optimizer = None
        self.setup(config)

    def setup(self, config):

        model = Model(config)
        checkpoint = None
        if config.train_from != '':
            logging('Train from %s' % config.train_from)
            checkpoint = torch.load(config.train_from, map_location='cpu')
            model.load_state_dict(checkpoint['model'])
            self.step = checkpoint['step']

        self.model = model.to(device)
        self.optimizer = Adagrad(model.parameters(),
                                 lr=config.learning_rate,
                                 initial_accumulator_value=config.initial_acc)
        if checkpoint is not None:
            self.optimizer.load_state_dict(checkpoint['optimizer'])

    def train_one(self, batch):

        config = self.config
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, config, device)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, device)

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        step_losses = []
        for di in range(max_dec_len):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1,
                                      target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage),
                                               1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)
        return loss

    def train(self):

        config = self.config
        train_loader = DataLoader(self.train_data,
                                  batch_size=config.batch_size,
                                  shuffle=True,
                                  collate_fn=Collate())

        running_avg_loss = 0
        self.model.train()

        for e in range(config.train_epoch):
            for batch in train_loader:
                self.step += 1
                self.optimizer.zero_grad()
                loss = self.train_one(batch)
                loss.backward()
                clip_grad_norm_(self.model.parameters(), config.max_grad_norm)
                self.optimizer.step()
                #print(loss.item())
                running_avg_loss = calc_running_avg_loss(
                    loss.item(), running_avg_loss)

                if self.step % config.report_every == 0:
                    logging("Step %d Train loss %.3f" %
                            (self.step, running_avg_loss))
                if self.step % config.validate_every == 0:
                    self.validate()
                if self.step % config.save_every == 0:
                    self.save(self.step)
                if self.step % config.test_every == 0:
                    pass

    @torch.no_grad()
    def validate(self):
        self.model.eval()
        validate_loader = DataLoader(self.validate_data,
                                     batch_size=self.config.batch_size,
                                     shuffle=False,
                                     collate_fn=Collate())
        losses = []
        for batch in validate_loader:
            loss = self.train_one(batch)
            losses.append(loss.item())
        self.model.train()
        ave_loss = sum(losses) / len(losses)
        logging('Validate loss : %f' % ave_loss)

    def save(self, step):
        state = {
            'model': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'step': step
        }
        save_path = os.path.join(self.config.model_path, 'model_s%d.pt' % step)
        logging('Saving model step %d to %s...' % (step, save_path))
        torch.save(state, save_path)
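The Trainer above only reads plain attributes from its config argument. A hypothetical sketch of such a config (field names taken from the code above, values are arbitrary placeholders) could look like this; constructing the Trainer itself still requires the project's Vocab, CNNDMDataset, Model and batching helpers.

from types import SimpleNamespace

config = SimpleNamespace(
    vocab_file='data/vocab', vocab_size=50000, data_path='data/chunked',
    train_from='',                     # set to a checkpoint path to resume training
    learning_rate=0.15, initial_acc=0.1,
    batch_size=8, train_epoch=10, max_grad_norm=2.0,
    eps=1e-12, is_coverage=False, cov_loss_wt=1.0,
    report_every=100, validate_every=1000, save_every=5000, test_every=5000,
    model_path='checkpoints/',
)

trainer = Trainer(config)
trainer.train()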
Example No. 9
class VAE(BaseEstimator, TransformerMixin):
    """
    :param decoder: Type of neural network used in the decoder. Possible values
                    are 'bernoulli' and 'gaussian'.
    """
    def __init__(self,
                 nohiddens: int = 400,
                 nolatents: int = 20,
                 nosamples: int = 1,
                 noepochs: int = 15,
                 batch_size: int = 100,
                 show_every: int = 100,
                 decoder: str = 'bernoulli',
                 outdir: str = 'output/'):

        super().__init__()

        makedirs(outdir, exist_ok=True)

        self.noinputs = None
        self.nohiddens = nohiddens
        self.nolatents = nolatents
        self.nosamples = nosamples
        self.noepochs = noepochs
        self.batch_size = batch_size
        self.show_every = show_every
        self.outdir = outdir

        if decoder == 'bernoulli':
            self.fit = self.__fit_bernoulli
        elif decoder == 'gaussian':
            self.fit = self.__fit_gaussian
        else:
            raise ValueError(f'Unknown decoder type: "{decoder}".')

        self.logger = getLogger(__name__)
        self.model = None
        self.opt = None

    def fit(self, X):
        """Method fit is overloaded during construction of VAE estimator. See
        constructor for details.
        """

    def transform(self, X: Tensor) -> LatentVariable:
        latvar = LatentVariable(self.model, *self.model.encode(X))
        return latvar

    def inverse_transform(self, X: Tensor) -> Tensor:
        origin = self.model.decode(X)

        if isinstance(origin, tuple):
            return origin[0]
        else:
            return origin

    def __fit(self, dataset: Tensor, model: VAEBase):
        it = DataLoader(dataset, batch_size=self.batch_size, shuffle=True)

        dur = self.noepochs * ceil(len(dataset) / self.batch_size)
        history = History(zeros(dur), zeros(dur), zeros(dur), zeros(dur),
                          zeros(dur))

        hooks = CombinedHook()
        hooks.add(LossHook)
        hooks.add(RekonstruktHook, dataset[:10, :])
        hooks.add(LatentSamplerHook, self.nolatents)
        hooks.prehook(self, history)

        self.model = model
        self.noinputs = model.noinputs
        self.opt = Adagrad(self.model.parameters(), lr=0.01)  # See Section 5.

        for epoch in range(self.noepochs):
            for i, x in enumerate(it):
                self.opt.zero_grad()

                # Apply model in the following steps:
                # (a) encode datapoint into latent space;
                # (b) sample points from latent space;
                # (c) decode sampled points from latent space.
                mu, logsigma2 = self.model.encode(x)
                z = self.model.sample(mu, logsigma2)
                X = self.model.decode(z)

                # Estimate KL-divergence and reconstruction error (RE).
                kl = self.model.kl(mu, logsigma2)
                re = self.model.re(x, X)

                # Do error backpropagation.
                loss = kl + re
                loss.backward()
                self.opt.step()

                # Aggregate runtime statistics.
                history.append(epoch=epoch,
                               batch=i,
                               kl=float(kl / self.batch_size),
                               re=float(re / self.batch_size))

                if i % self.show_every == 0:
                    hooks.hook(self, history)

        # Print status before exit.
        hooks.posthook(self, history)

        # Return self to allow call chaining.
        return self

    def __fit_bernoulli(self, dataset: Tensor):
        params = self.get_params()
        params['noinputs'] = dataset.shape[1]
        model = VAEBernoulliDecoder(**params)
        return self.__fit(dataset, model)

    def __fit_gaussian(self, dataset: Tensor):
        params = self.get_params()
        params['noinputs'] = dataset.shape[1]
        model = VAEGaussianDecoder(**params)
        return self.__fit(dataset, model)
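A hypothetical usage sketch for the VAE estimator above, on random binary data. VAEBernoulliDecoder, the hook classes and the History container come from the surrounding project and are not shown here; the shapes and hyperparameters below are placeholders.

import torch

X = (torch.rand(1000, 784) > 0.5).float()   # toy binarised inputs

vae = VAE(nohiddens=400, nolatents=20, noepochs=2, batch_size=100,
          decoder='bernoulli', outdir='output/')
vae.fit(X)                                  # dispatches to __fit_bernoulli

latent = vae.transform(X[:5])               # LatentVariable wrapping the encoder output
z = torch.randn(5, 20)                      # points in the 20-dimensional latent space
decoded = vae.inverse_transform(z)          # decoder output for those points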
Example No. 10
class trainW2V:

    """ To train a word2vec model on a text obtained from pubmed scraping. """

    def __init__(self, text, windowSize=5, negWords=15, embedDim=200, vocabSize=None, 
                 nOccur=10, phMinCount=5, phThresh=10, phDepth=2,
                 wInit='scaled-uniform', epochs=50, batchSize= 1024, 
                 optimizer='SGD', lr=0.01, patience=5, epsilon=1e-5, raw=False, 
                 tShuff=False, saveFreq=-1, restoreBest=True, outPath='./'):

        """ Args:
                text (nested list): input text as list of sentences.
                windowSize (int): size of the context window.
                negWords (int): number of negative words used in training.
                embedDim (int): dimensionality of the embedded space (default 200).
                vocabSize (int): size of the vocabulary (default None).
                nOccur (int): minimum number of occurrences to keep a word in the dictionary,
                          can be overwritten by vocabSize (default 10).
                phMinCount (int): minimum number of occurrences to keep a phrase (default 5).
                phThresh (float): minimum score to keep a phrase (default 10).
                phDepth (int): number of recursions during phrase search (1 = bi-grams, default 2).
                wInit (string): distribution from which to draw initial node weights (only 'scaled-uniform'
                        and 'xavier' are currently available, default 'scaled-uniform').
                epochs (int): number of epochs  (default 50).
                batchSize (int): size of batches (default 1024).
                optimizer (str): optimizer choice, 'SGD' and 'Adagrad' only
                        (default 'SGD').
                lr (float): learning rate (default .01).
                patience (int): early stop patience (default 5).
                epsilon (float): early stop epsilon (default 1e-5).
                raw (bool): if True, clean the input text (default False).
                tShuff (bool): shuffle training set at each epoch (default False).
                saveFreq (int): frequency of model checkpoints, if < 0 don't save checkpoints (default -1).
                restoreBest (bool): restore and save best model by early stopping.
                outPath (string): path to directory where to save the trained models.
            """

        """ Set up training dataset and batches. """

        self.trainDs = textDataset(text, windowSize, negWords, vocabSize=vocabSize, nOccur=nOccur,
                                    phMinCount=phMinCount, phThresh=phThresh, phDepth=phDepth,  raw=raw)
        self.trainBatch = DataLoader(self.trainDs, batch_size = batchSize, shuffle = tShuff)
        
        """ Set up model """

        self.model = skipGram(int(self.trainDs.wDict.shape[0]), embedDim, wInit)

        """ Send model to GPU if available. """

        if torch.cuda.is_available():
            self.model.cuda()

        self.epochs = epochs
        

        if optimizer == 'SGD':
             # no momentum allowed with sparse matrices :(
            self.optimizer = SGD(self.model.parameters(), lr=lr)

        elif optimizer == 'Adagrad':
            self.optimizer = Adagrad(self.model.parameters(), lr=lr)

        else:
            print ('ERROR: '+optimizer+' is not available, please select SGD or Adagrad.')
            sys.exit(1)


        self.losses = []

        """ Set up early stopping. """

        self.earlStop = EarlyStopping(patience=patience, epsilon=epsilon, keepBest=True)
        self.restoreBest = restoreBest

        self.saveFreq = saveFreq
        if self.saveFreq < 0:
            self.saveFreq = self.epochs + 1 


        self.outPath = outPath
        if not os.path.exists(self.outPath):
            os.makedirs(self.outPath)


    def train(self):

        """ Run the training of the model. """    
            
        for epoch in tqdm(range(self.epochs), desc='Epoch'):
      
            pBarB = tqdm(enumerate(self.trainBatch), total=len(self.trainBatch),  desc='Batch')
            for batchNum, batch in pBarB:
        
                wordBatch = batch[0]
                contBatch = batch[1]
                negaBatch = batch[2]

                """ Move batches to GPU if available. """

                if torch.cuda.is_available():
                    wordBatch = wordBatch.cuda()
                    contBatch = contBatch.cuda()
                    negaBatch = negaBatch.cuda()

                """ Core of training. """

                self.optimizer.zero_grad()
                loss = self.model(wordBatch, contBatch, negaBatch)
                loss.backward()
                self.optimizer.step()    


                pBarB.set_postfix({'loss' : '{:.5f}'.format(loss.item())})
        
            """ Store loss. """

            self.losses.append(loss.item())

            """ Save checkpoint model every n-th epoch. """ 
            
            if epoch > 0 and epoch%self.saveFreq == 0:

                self.saveModel(name='_{:d}_{:.5f}'.format(epoch, loss.item()))

            """ Early stop check. """

            self.earlStop(loss, self.model)

            if self.earlStop.earlyStop:

                print('Limit loss improvement reached, stopping the training.')

                break

        """ Restore and save best model. """

        if self.restoreBest:

            self.model = self.earlStop.bestModel        


    def saveModel(self, name):

        """ Saves any model and its dictionary. 

        Args:
            name (string): file name.
        """

        torch.save({'model_state_dict': self.model.state_dict(), 
                    'word_to_ix': self.trainDs.wDict['word'].to_dict()
                    },                  
                    os.path.join(self.outPath, 'model_'+name+'.pt'))


    def getEmbedded(self):

        """ Returns the embedding layer weights, equivalent to the word vectors in 
            the embedded space.

        Returns:
            (numpy array): the embedding layer weights.
        """

        return self.model.getEmbedded()
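A hypothetical usage sketch for the trainW2V class above. The toy corpus and hyperparameters are placeholders; textDataset, skipGram and EarlyStopping are assumed to come from the surrounding project.

corpus = [['deep', 'learning', 'for', 'biomedical', 'text', 'mining'],
          ['word', 'embeddings', 'trained', 'on', 'pubmed', 'abstracts']] * 100

w2v = trainW2V(corpus, windowSize=5, negWords=15, embedDim=50,
               optimizer='Adagrad', lr=0.01, epochs=10, outPath='./w2v_out')
w2v.train()
vectors = w2v.getEmbedded()   # embedding-layer weights as a numpy array
w2v.saveModel('final')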
Example No. 11
class Train(object):
    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path,
                               self.vocab,
                               mode='train',
                               batch_size=config.batch_size,
                               single_pass=False)
        time.sleep(5)

        if not os.path.exists(config.log_root):
            os.mkdir(config.log_root)

        self.model_dir = os.path.join(config.log_root, 'train_model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.train_log = os.path.join(config.log_root, 'train_log')
        if not os.path.exists(self.train_log):
            os.mkdir(self.train_log)
        self.summary_writer = tf.summary.FileWriter(self.train_log)

    def save_model(self, running_avg_loss, iter, mode):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        if mode == 'train':
            save_model_dir = self.model_dir
        else:
            best_model_dir = os.path.join(config.log_root, 'best_model')
            if not os.path.exists(best_model_dir):
                os.mkdir(best_model_dir)
            save_model_dir = best_model_dir

        if len(os.listdir(save_model_dir)) > 0:
            shutil.rmtree(save_model_dir)
            time.sleep(2)
            os.mkdir(save_model_dir)
        train_model_path = os.path.join(save_model_dir,
                                        'model_best_%d' % (iter))
        torch.save(state, train_model_path)
        return train_model_path

    def setup_train(self, model_file_path=None):
        self.model = Model(model_file_path)

        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params,
            lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc,
            weight_decay=config.L2_loss)

        start_iter, start_loss = 0, 0

        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()

        return start_iter, start_loss

    def train_one_batch(self, batch):
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()

        # encoder_outputs shape = (batch_size, max_seq_len, 2*hidden_size)
        # encoder_feature shape = (batch_size*max_seq_len, 2*hidden_size)
        # encoder_hidden[0] shape = (batch, 2, hidden_size)
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        # s_t_1[0] shape = (1, batch_size, hidden_size)
        s_t_1 = self.model.reduce_state(encoder_hidden)
        '''
        print('Actual enc_batch:')
        en_words = [self.vocab._id_to_word[idx] for idx in enc_batch[0].numpy()]
        print(en_words)
        print('Actual de_batch:')
        de_words = [self.vocab._id_to_word[idx] for idx in dec_batch[0].numpy()]
        print(de_words)
        print('Actual tar_batch:')
        tar_words = [self.vocab._id_to_word[idx] for idx in target_batch[0].numpy()]
        print(tar_words)
        '''

        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1,
                                      target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage),
                                               1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(),
                        config.max_grad_norm)

        self.optimizer.step()

        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        min_val_loss = np.inf
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer, iter)
            iter += 1

            if iter % config.print_interval == 0:
                tf.logging.info(
                    'steps %d, seconds for %d batch: %.2f , loss: %f, min_val_loss: %f'
                    % (iter, config.print_interval, time.time() - start, loss,
                       min_val_loss))
                start = time.time()
            if iter % config.save_model_iter == 0:
                self.summary_writer.flush()
                model_file_path = self.save_model(running_avg_loss,
                                                  iter,
                                                  mode='train')
                tf.logging.info('Evaluate the model %s at validation set....' %
                                model_file_path)
                evl_model = Evaluate(model_file_path)
                val_avg_loss = evl_model.run_eval()
                if val_avg_loss < min_val_loss:
                    min_val_loss = val_avg_loss
                    best_model_file_path = self.save_model(running_avg_loss,
                                                           iter,
                                                           mode='eval')
                    tf.logging.info('Save best model at %s' %
                                    best_model_file_path)
Example No. 12
class Train(object):
    def __init__(self, model_file_path=None):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path,
                               self.vocab,
                               mode='train',
                               batch_size=config.batch_size,
                               single_pass=False)
        time.sleep(15)

        if not model_file_path:
            train_dir = os.path.join(config.log_root,
                                     'train_%d' % (int(time.time())))
            if not os.path.exists(train_dir):
                os.mkdir(train_dir)
        else:
            train_dir = re.sub('/model/model.*', '', model_file_path)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.summary_writer = tf.summary.create_file_writer(train_dir)

    def save_model(self, running_avg_loss, iter):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(
            self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        self.model = Model(model_file_path)

        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params,
            lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)
        # self.optimizer = Adam(params)
        start_iter, start_loss = 0, 0

        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()

        return start_iter, start_loss

    def f(self, x, alpha):
        # # 1 - x ** alpha
        # k = utils.EPOCH / (utils.MAX_EPOCH / 2) - 1
        # return k * x + (1 - k)/2
        return 1 - x**alpha

    def get_loss_mask(self, src, tgt, absts, alpha=config.alpha):
        loss_mask = []
        for i in range(len(src)):

            # debug('src[i]',src[i])
            # debug('tgt[i]',src[i])
            # cnt = 0
            # tgt_i = [t for t in tgt[i] if t != 1]
            # src_i = set([s for s in src[i] if s != 1])
            # debug('src_i',src_i)
            # m = [t for t in tgt_i if t not in src_i ]
            # # for token in tgt_i:
            # #     if token not in src_i:
            # #         cnt += 1
            # cnt = len(m)
            # abst = round(cnt / len(tgt_i),4)
            abst = absts[i]
            loss_factor = self.f(abst, alpha)
            loss_mask.append(loss_factor)
        return torch.Tensor(loss_mask).cuda()

    def train_one_batch(self, batch):
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        # debug(batch.original_articles[0])
        # debug(batch.original_abstracts[0])
        loss_mask = self.get_loss_mask(enc_batch, dec_batch, batch.absts)
        # debug('loss_mask',loss_mask)
        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage, tau = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1,
                                      target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)

            # debug('enc_batch',enc_batch.size())
            # debug('dec_batch',dec_batch.size())
            # debug('final_dist', final_dist.size())
            # debug('target',target)
            # debug('gold_probs',gold_probs)

            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage),
                                               1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            # debug('step_loss_before',step_loss)
            # debug('config.loss_mask',config.loss_mask)
            if config.loss_mask:
                step_loss = step_loss * loss_mask
                # pass
            # debug('step_loss_after',step_loss)
            step_losses.append(step_loss)

            if config.DEBUG:
                # break
                pass

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        if not config.DEBUG:
            loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(),
                        config.max_grad_norm)

        self.optimizer.step()

        return loss.item(), tau

    def trainIters(self, n_iters, model_file_path=None):
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()

        start_iter = iter
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss, tau = self.train_one_batch(batch)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer, iter)
            iter += 1

            if config.DEBUG:
                debug('iter', iter)
                if iter - start_iter > config.BREAK_POINT:
                    break

            if iter % 100 == 0:
                self.summary_writer.flush()
            print_interval = 100
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                if config.adaptive_sparsemax:
                    print('tau + eps', [
                        round(e[0], 4)
                        for e in (tau +
                                  config.eps).detach().cpu().numpy().tolist()
                    ])
                start = time.time()
            if iter % 5000 == 0:
                self.save_model(running_avg_loss, iter)
Example No. 13
class Train(object):
    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        time.sleep(15)

        train_dir = os.path.join(config.log_root, 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.summary_writer = tf.summary.FileWriter(train_dir)

    def save_model(self, running_avg_loss, iter):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        self.model = Model(model_file_path)

        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(params, lr=initial_lr, initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0

        if model_file_path is not None:
            state = torch.load(model_file_path, map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()

        return start_iter, start_loss

    def train_one_batch(self, batch):
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)
        s_t_1_origin = s_t_1

        batch_size = batch.batch_size
        step_losses = []

        sample_idx = []
        sample_log_probs = Variable(torch.zeros(batch_size))
        baseline_idx = []

        for di in range(min(max_dec_len, config.max_dec_steps)):

            y_t_1 = dec_batch[:, di]  # Teacher forcing, shape [batch_size]
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(y_t_1, s_t_1,
                                                                                           encoder_outputs,
                                                                                           encoder_feature,
                                                                                           enc_padding_mask, c_t_1,
                                                                                           extra_zeros,
                                                                                           enc_batch_extend_vocab,
                                                                                           coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

            # sample
            if di == 0:  # use decoder input[0], which is <BOS>
                sample_t_1 = dec_batch[:, di]
                s_t_sample = s_t_1_origin
                c_t_sample = Variable(torch.zeros((batch_size, 2 * config.hidden_dim)))

            final_dist, s_t_sample, c_t_sample, attn_dist, p_gen, next_coverage = self.model.decoder(sample_t_1,
                                                                                                     s_t_sample,
                                                                                                     encoder_outputs,
                                                                                                     encoder_feature,
                                                                                                     enc_padding_mask,
                                                                                                     c_t_sample,
                                                                                                     extra_zeros,
                                                                                                     enc_batch_extend_vocab,
                                                                                                     coverage, di)
            # according to final_dist to sample
            # change sample_t_1
            dist = torch.distributions.Categorical(final_dist)
            sample_t_1 = Variable(dist.sample())
            # record sample idx
            sample_idx.append(sample_t_1)  # tensor list
            # compute sample probability
            sample_log_probs += torch.log(
                final_dist.gather(1, sample_t_1.view(-1, 1)).squeeze(1))  # gather the sampled-token probability along axis=1; squeeze to shape (batch_size,)

            # baseline
            if di == 0:  # use decoder input[0], which is <BOS>
                baseline_t_1 = dec_batch[:, di]
                s_t_baseline = s_t_1_origin
                c_t_baseline = Variable(torch.zeros((batch_size, 2 * config.hidden_dim)))

            final_dist, s_t_baseline, c_t_baseline, attn_dist, p_gen, next_coverage = self.model.decoder(baseline_t_1,
                                                                                                         s_t_baseline,
                                                                                                         encoder_outputs,
                                                                                                         encoder_feature,
                                                                                                         enc_padding_mask,
                                                                                                         c_t_baseline,
                                                                                                         extra_zeros,
                                                                                                         enc_batch_extend_vocab,
                                                                                                         coverage, di)
            # according to final_dist to get baseline
            # change baseline_t_1
            baseline_t_1 = final_dist.max(1)[1]  # greedy (argmax) token ids along axis=1
            # record baseline probability
            baseline_idx.append(baseline_t_1)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        # according to sample_idx and baseline_idx to compute RL loss
        # map sample/baseline_idx to string
        # compute rouge score
        # compute loss
        sample_idx = torch.stack(sample_idx, dim=1).squeeze()  # expect shape (batch_size, seq_len)
        baseline_idx = torch.stack(baseline_idx, dim=1).squeeze()
        rl_loss = torch.zeros(batch_size)
        for i in range(sample_idx.shape[0]):  # each example in a batch
            sample_y = data.outputids2words(sample_idx[i], self.vocab,
                                            (batch.art_oovs[i] if config.pointer_gen else None))
            baseline_y = data.outputids2words(baseline_idx[i], self.vocab,
                                              (batch.art_oovs[i] if config.pointer_gen else None))
            true_y = batch.original_abstracts[i]

            sample_score = rouge_l_f(sample_y, true_y)
            baseline_score = rouge_l_f(baseline_y, true_y)

            sample_score = Variable(sample_score)
            baseline_score = Variable(baseline_score)

            rl_loss[i] = baseline_score - sample_score
        rl_loss = torch.mean(rl_loss * sample_log_probs)  # reduce to a scalar so backward() works on the combined loss

        gamma = 0.9984
        loss = (1 - gamma) * loss + gamma * rl_loss

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)

        self.optimizer.step()

        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss, self.summary_writer, iter)
            iter += 1

            if iter % 100 == 0:
                self.summary_writer.flush()
            print_interval = 1000
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' % (iter, print_interval,
                                                                           time.time() - start, loss))
                start = time.time()
            if iter % 5000 == 0:
                self.save_model(running_avg_loss, iter)
Example No. 14
class Train:
    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path,
                               self.vocab,
                               mode='train',
                               batch_size=config.batch_size,
                               single_pass=False)
        time.sleep(15)

        train_dir = os.path.join(config.log_root,
                                 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.summary_writer = tf.summary.FileWriter(train_dir)

    def save_model(self, moving_avg_loss, iter):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': moving_avg_loss
        }
        model_save_path = os.path.join(
            self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        self.model = Model(model_file_path)

        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.do_coverage else config.lr
        self.optimizer = Adagrad(
            params,
            lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            # When training reaches the epoch where the coverage mechanism is switched on, a fresh optimizer state is needed; this controls when that switch happens.
            if not config.do_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()

        return start_iter, start_loss

    def train_one_batch(self, batch):
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, context_v, coverage = \
            get_encoder_variables(batch, use_cuda)
        # dec_lens_var: decoder target sequence lengths for one batch
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_decoder_variables(batch, use_cuda)

        self.optimizer.zero_grad()

        if 0 in enc_lens:
            print('=================')
            print(enc_batch.shape)
            print(enc_lens)
            print(enc_batch)
            print('=================')
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        d_hc = self.model.reduce_state(encoder_hidden)  # initial decoder (h, c)

        step_losses = []
        # for step in tqdm.tqdm(range(min(max_dec_len, config.max_dec_steps))):
        for step in range(min(max_dec_len, config.max_dec_steps)):
            d_inp = dec_batch[:, step]  # Teacher forcing
            final_dist, d_hc, context_v, attn_dist, p_gen, next_coverage = self.model.decoder(
                d_inp, d_hc, encoder_outputs, encoder_feature,
                enc_padding_mask, context_v, extra_zeros,
                enc_batch_extend_vocab, coverage, step)
            target = target_batch[:, step]
            # gather the predicted probability of the target id at each step
            gold_probs = torch.gather(final_dist, 1,
                                      target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.do_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage),
                                               1)  # cumulative encoder attention distribution as the coverage loss; see the original paper
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, step]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(),
                        config.max_grad_norm)

        self.optimizer.step()

        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        iter, moving_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        pbar = tqdm.tqdm(total=n_iters)
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)

            moving_avg_loss = calc_moving_avg_loss(loss, moving_avg_loss,
                                                   self.summary_writer, iter)
            iter += 1
            pbar.update(1)

            if iter % 100 == 0:
                self.summary_writer.flush()
            print_interval = 100
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                start = time.time()
            if iter % 5000 == 0:
                self.save_model(moving_avg_loss, iter)
        pbar.close()
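
# --- Standalone sketch (not part of the class above) ---
# `calc_moving_avg_loss` is not shown in this listing; a common choice (and the
# one assumed here) is an exponential moving average with decay 0.99, clipped so
# the logged curve stays readable. Treat this as a guess, not the actual helper.
def running_avg(loss, avg, decay=0.99, clip=12.0):
    avg = loss if avg == 0 else decay * avg + (1 - decay) * loss
    return min(avg, clip)

avg = 0
for step_loss in [6.0, 5.5, 5.8, 5.1]:
    avg = running_avg(step_loss, avg)
print(avg)
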
Exemplo n.º 15
0
class Train(object):
    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        time.sleep(15)
        stamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        train_dir = os.path.join(config.log_root, 'train_{}'.format(stamp))
        if not os.path.exists(train_dir):
            os.makedirs(train_dir)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.summary_writer = tf.compat.v1.summary.FileWriter(train_dir)

    def save_model(self, running_avg_loss, iter_step):
        """保存模型"""
        state = {
            'iter': iter_step,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        stamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        model_save_path = os.path.join(self.model_dir, 'model_{}_{}'.format(iter_step, stamp))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        """模型初始化或加载、初始化迭代次数、损失、优化器"""
        # 初始化模型
        self.model = Model(model_file_path)

        # 模型参数的列表
        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr  # lr_coverage和lr二选一
        # 定义优化器
        self.optimizer = Adagrad(params, lr=initial_lr, initial_accumulator_value=config.adagrad_init_acc)
        # 初始化迭代次数和损失
        start_iter, start_loss = 0, 0
        # 如果传入的已存在的模型路径,加载模型继续训练
        if model_file_path is not None:
            state = torch.load(model_file_path, map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if USE_CUDA:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.to(DEVICE)

        return start_iter, start_loss

    def train_one_batch(self, batch):
        """
        Train on one batch and return its loss. (Toy versions of these tensors are
        sketched at the end of this example.)
        enc_batch:             torch.Size([16, 400]), token ids of 16 articles; articles shorter than 400 words are padded with the PAD id, and OOV words are encoded as 0;
        enc_padding_mask:      torch.Size([16, 400]), 0 at PAD positions, 1 everywhere else;
        enc_lens:              numpy.ndarray, each element is the number of words in the corresponding article;
        enc_batch_extend_vocab:torch.Size([16, 400]), token ids of 16 articles, with OOV words mapped to ids beyond the vocabulary size;
        extra_zeros:           torch.Size([16, number of article OOV words]), zero tensor;
        c_t_1:                 torch.Size([16, 512]), zero tensor (initial context vector);
        coverage:              Variable(torch.zeros(batch_size, max_enc_seq_len)) if is_coverage==True else None; filled in at later steps in coverage mode;
        ----------------------------------------
        dec_batch:             torch.Size([16, 100]), summary ids including the START symbol and PAD;
        dec_padding_mask:      torch.Size([16, 100]), 0 at PAD positions, 1 everywhere else;
        max_dec_len:           scalar, number of summary words, excluding PAD;
        dec_lens_var:          torch.Size([16]), number of summary words per example;
        target_batch:          torch.Size([16, 100]), target summary ids including the STOP symbol and PAD
        """
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch)
        # TODO: the meaning of extra_zeros is not yet fully understood here

        self.optimizer.zero_grad()
        """
        # Remember to modify the Batch class to add a vocab attribute

        print("模型输入文章编码:", "*"*100)
        print("enc_batch:", enc_batch, enc_batch.size())
        print("enc_batch[-1]:", enc_batch[-1])
        # print("batch._id_to_word:", batch.vocab._id_to_word)
        print("enc_batch[-1]原文:", [batch.vocab.id2word(idx) for idx in enc_batch[-1].cpu().numpy()])
        print("-"*50)
        print("enc_padding_mask:", enc_padding_mask, enc_padding_mask.size())
        print("-"*50)
        print("enc_lens:", enc_lens, enc_lens.shape)
        print("-"*50)
        print("enc_batch_extend_vocab", enc_batch_extend_vocab, enc_batch_extend_vocab.size())
        print("enc_batch_extend_vocab[-1]:", enc_batch_extend_vocab[-1])
        print("enc_batch_extend_vocab[-1]的原文:", [batch.vocab.id2word(idx) if idx<50000 else '[UNK]+{}'.format(idx-50000) for idx in enc_batch_extend_vocab[-1].cpu().numpy()])
        print("-"*50)
        print("extra_zeros:", extra_zeros, extra_zeros.size())
        print("-"*50)
        print("c_t_1:", c_t_1, c_t_1.size())
        print("-"*50)
        print("coverage:", coverage)
        print("*"*100)

        print("模型输入摘要编码,包括源和目标:", "*"*100)
        print("dec_batch:", dec_batch, dec_batch.size())
        print("dec_batch[0]:", dec_batch[0])
        # print("batch._id_to_word:", batch.vocab._id_to_word)
        print("dec_batch[0]原文:", [batch.vocab.id2word(idx) for idx in dec_batch[0].cpu().numpy()])
        print("-"*50)
        print("dec_padding_mask:", dec_padding_mask, dec_padding_mask.size())
        print("-"*50)
        print("max_dec_len:", max_dec_len)
        print("-"*50)
        print("dec_lens_var", dec_lens_var, dec_lens_var.size())
        print("-"*50)
        print("target_batch:", target_batch, target_batch.size())
        print("-"*50)
        print("target_batch[0]:", target_batch[0], target_batch[0].size())
        print("target_batch[0]的原文:", [batch.vocab.id2word(idx) if idx<50000 else '[UNK]+{}'.format(idx-50000) for idx in target_batch[0].cpu().numpy()])
        print("*"*100)
        input("任意键继续>>>")
        """
        # [B, max(seq_lens), 2*hid_dim], [B*max(seq_lens), 2*hid_dim], tuple([2, B, hid_dim], [2, B, hid_dim])
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)  # (h,c) = ([3, B, hid_dim], [3, B, hid_dim])
        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # one summary token per example: the word id at this position in each sequence of the batch (teacher forcing)
            # print("y_t_1:", y_t_1, y_t_1.size())
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(y_t_1, s_t_1,
                                                                                           encoder_outputs,
                                                                                           encoder_feature,
                                                                                           enc_padding_mask, c_t_1,
                                                                                           extra_zeros,
                                                                                           enc_batch_extend_vocab,
                                                                                           coverage, di)
            target = target_batch[:, di]  # id of the next summary word (the prediction target)
            # print("target-iter:", target, target.size())
            # print("final_dist:", final_dist, final_dist.size())
            # input("go on>>")
            # final_dist holds a probability for every word in the extended vocabulary, i.e. it can be larger than the preset 50_000
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()  # pick out the probability of each target word
            step_loss = -torch.log(gold_probs + config.eps)  # maximizing gold_probs means minimizing step_loss (hence the negative sign)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)

        self.optimizer.step()

        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        # Training setup: model, optimizer, starting iteration and loss
        iter_step, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        while iter_step < n_iters:
            # Fetch the next batch of data
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss, self.summary_writer, iter_step)
            iter_step += 1

            if iter_step % 100 == 0:
                self.summary_writer.flush()

            print_interval = 100
            if iter_step % print_interval == 0:
                # lr = self.optimizer.state_dict()['param_groups'][0]['lr']
                logging.info('steps %d, seconds for %d steps: %.2f, loss: %f' %
                             (iter_step, print_interval, time.time() - start, loss))
                start = time.time()
            # Save the model every 50000 iterations
            if iter_step % 50000 == 0:
                logging.info("model saved = {}/{}".format(int(iter_step / 50000) + 1, int(config.max_iterations/50000) + 1))
                self.save_model(running_avg_loss, iter_step)
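
# --- Standalone sketch (not part of the class above) ---
# Toy versions of the tensors described in the train_one_batch docstring above
# (batch of 2 articles, max length 6, vocab size 10). All ids and sizes are made
# up; only the shapes and conventions mirror the description.
import torch

PAD, UNK = 0, 1
enc_batch = torch.tensor([[4, 7, 2, UNK, PAD, PAD],    # article 1: one OOV word, mapped to UNK
                          [5, 3, 8, 9, 6, 2]])         # article 2
enc_padding_mask = (enc_batch != PAD).float()          # 0 at PAD positions, 1 elsewhere
enc_lens = enc_padding_mask.sum(dim=1).long()          # words per article: [4, 6]

vocab_size = 10
enc_batch_extend_vocab = enc_batch.clone()
enc_batch_extend_vocab[0, 3] = vocab_size              # the same OOV word gets id vocab_size + 0
max_art_oovs = 1
extra_zeros = torch.zeros(enc_batch.size(0), max_art_oovs)  # extra probability slots for OOV words

c_t_1 = torch.zeros(enc_batch.size(0), 512)            # initial context vector
print(enc_padding_mask, enc_lens, extra_zeros.shape, c_t_1.shape)
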
Exemplo n.º 16
0
class WeightedHolE(nn.Module):
    def __init__(self, *args, **kwargs):
        super(WeightedHolE, self).__init__()
        # self.add_hyperparam('rparam', kwargs.pop('rparam', 0.0))

        self.learning_rate = kwargs.get('lr', _DEF_LEARNING_RATE)
        entity_dim, _, relation_dim = args[0]
        embed_dim = args[1]
        self._max_epochs = kwargs.get('max_epochs', _DEF_MAX_EPOCHS)
        
        init_relations = kwargs.get('init_relations')
        if init_relations is not None:
            self.R = nn.Parameter(init_relations)
        else:
            self.R = nn.Parameter(torch.FloatTensor(relation_dim, embed_dim).uniform_(-.1,.1))
        self.R.my_name = 'R'
        self.R.grad = torch.zeros_like(self.R)
        
        pretrained_ent = kwargs.get('pretrained_entities')
        if pretrained_ent is not None:
            self.E = nn.Parameter(pretrained_ent)
        else:
            self.E = nn.Parameter(torch.FloatTensor(entity_dim, embed_dim).uniform_(-.1, .1))
        self.E.my_name = 'E'
        self.E.grad = torch.zeros_like(self.E)
        
        self.loss_function = nn.SoftMarginLoss(reduction='sum')
        self.optim = Adagrad(list(self.parameters()), lr=self.learning_rate)
        
    def forward(self, xs, ys, minibatch_size):
        for loss, grads in self._optim(list(zip(xs, ys)), minibatch_size):
            yield loss, grads
        
    def _optim(self, xys, minibatch_size):
        for self._epoch in range(1, self._max_epochs+1):
            self.loss = 0
            self.optim.zero_grad()
            self.train()
            
            # shuffle training examples
            indices = list(range(len(xys)))
            shuffle(indices)
            
            # store epoch for callback
            self.epoch_start = timeit.default_timer()
            
            # process mini-batches
            lower_iter, upper_iter = count(0, minibatch_size), count(minibatch_size, minibatch_size) 
            for lower, upper in zip(lower_iter, upper_iter):
                # select indices for current batch
                if lower >= len(indices):
                    break

                batch_examples = [xys[idx] for idx in indices[lower:upper]]
                triples, ys = zip(*batch_examples)
                ss, ps, os = zip(*triples)
                ss, ps, os, ys = torch.LongTensor(ss), torch.LongTensor(ps), torch.LongTensor(os), torch.FloatTensor(ys)
                        
                yscores = self._scores(ss, ps, os) # see Holographic Embeddings, eq. 2
                self.loss = self.loss_function(yscores, ys)
                print('loss', self.loss)

                fs = -(ys * torch.sigmoid(-yscores)).unsqueeze(1)
                entity_grad, entity_idxs = self._fn_Entity_Grad(yscores, ss, os, ps, fs)
                relation_grad, relation_idxs = self._fn_Relation_Grad(yscores, ss, os, ps, fs)
                #print('grad rel', relation_grads.shape, torch.sum(relation_grads))
                
                for param in self.parameters():
                    if param.my_name == 'R':
                        self.R.grad = relation_grad
                    
                    if param.my_name == 'E':
                        for col,row_grads in zip(entity_idxs, entity_grad): # FIXME use index_put_
                            self.E.grad[col] = row_grads

                self.optim.step()
                
                #batch_loss, batch_grads = self._process_batch(bxys)
                #yield batch_loss, batch_grads


    def _fn_Entity_Grad(self, yscores, ss, os, ps, fs):
        sparse_indices, Sm, n = grad_sum_matrix(torch.cat((ss, os)))
        combined = torch.cat((fs * ccorr(self.R[ps], self.E[os]),
                              fs * cconv(self.E[ss], self.R[ps])),
                             dim=0)
        grads = torch.mm(Sm, combined) / n.unsqueeze(1)
        return grads, sparse_indices

    def _fn_Relation_Grad(self, yscores, ss, os, ps, fs):
        sparse_indices, Sm, n = grad_sum_matrix(ps)
        grads = torch.mm(Sm, fs * ccorr(self.E[ss], self.E[os])) / n
        return grads, sparse_indices
        
    def _scores(self, ss, ps, os):
        return torch.sum(self.R[ps] * ccorr(self.E[ss], self.E[os]), dim=1)

    def _update(self, g, idx=None):
        # Manual Adagrad-style update kept from the original numpy implementation
        # (self.p2 and self.param are not initialized anywhere in this torch port).
        self.p2[idx] += g * g
        H = np.maximum(np.sqrt(self.p2[idx]), 1e-7)
        self.param[idx] -= self.learning_rate * g / H
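
# --- Standalone sketch (not part of the class above) ---
# `ccorr`/`cconv` are not defined in this listing. A common FFT-based definition
# (assumed here) together with the HolE triple score
# sum(R[p] * ccorr(E[s], E[o]), dim=1) used by _scores above, on random toy embeddings.
import torch

def cconv(a, b):
    # circular convolution via FFT
    return torch.fft.irfft(torch.fft.rfft(a) * torch.fft.rfft(b), n=a.shape[-1])

def ccorr(a, b):
    # circular correlation via FFT (conjugate the first argument)
    return torch.fft.irfft(torch.conj(torch.fft.rfft(a)) * torch.fft.rfft(b), n=a.shape[-1])

embed_dim, n_entities, n_relations = 8, 5, 3
E = torch.randn(n_entities, embed_dim)
R = torch.randn(n_relations, embed_dim)

ss = torch.tensor([0, 2])   # subject ids
ps = torch.tensor([1, 0])   # predicate ids
os_ = torch.tensor([3, 4])  # object ids

scores = torch.sum(R[ps] * ccorr(E[ss], E[os_]), dim=1)
print(scores)
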
Exemplo n.º 17
0
class TrainSeq2Seq(object):
    def __init__(self, is_word_level=False, is_combined=False, alpha=0.3):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        # self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
        #                        batch_size=config.batch_size, single_pass=False)
        self.dataset = DailyMailDataset("train", self.vocab)
        #time.sleep(15)

        self.is_word_level = is_word_level
        self.is_combined = is_combined
        self.alpha = alpha

        if is_word_level:
            print("Using Word Level Policy Gradient")
        elif is_combined:
            print("Using Combined Policy Gradient w/ alpha = ", alpha)
        else:
            print("Using Sentence Level Policy Gradient")

        train_dir = './train_dumps'
        # train_dir = './train_dumps'
        if not os.path.exists(train_dir):
            #print('create dict')
            os.mkdir(train_dir)

        self.model_dir = os.path.join(
            train_dir, 'dumps_model_{:%m_%d_%H_%M}'.format(datetime.now()))
        if not os.path.exists(self.model_dir):
            #print('create folder')
            os.mkdir(self.model_dir)

    def save_model(self, running_avg_loss, iter):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(
            self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)
        return model_save_path

    def setup(self, seqseq_model, model_file_path):
        self.model = seqseq_model

        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params,
            lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)
        #self.optimizer = Adam(params, lr=initial_lr)

        start_iter, start_loss = 0, 0

        if model_file_path is not None:
            print("Loading checkpoint .... ")
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if config.use_gpu:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()

        return start_iter, start_loss

    def train_one_batch_nll(self, batch):
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, config.use_gpu)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, config.use_gpu)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1,
                                      target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage),
                                               1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(),
                        config.max_grad_norm)

        self.optimizer.step()

        return loss.item()

    def train_nll(self, n_iters, iter, running_avg_loss):
        start = time.time()
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch_nll(batch)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     iter)
            print("Iteration:", iter, "  loss:", loss, "  Running avg loss:",
                  running_avg_loss)
            iter += 1

            print_interval = 1000
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                start = time.time()
            if iter % 1000 == 0:
                self.save_model(running_avg_loss, iter)

    def train_pg(self,
                 n_iters,
                 start_iter,
                 start_running_avg_loss,
                 start_pg_losses,
                 start_run_avg_losses,
                 num_epochs=50):
        """
        The generator is trained using policy gradients, using the reward from the discriminator.
        Training runs for num_epochs epochs over the training dataloader.
        """

        dataloader = DataLoader(self.dataset,
                                batch_size=config.batch_size,
                                shuffle=True,
                                num_workers=1,
                                collate_fn=create_batch_collate(
                                    self.vocab, config.batch_size))
        # pg_batcher = Batcher(config.train_data_path, self.vocab, mode='train',
        #     batch_size=config.batch_size, single_pass=False)
        #
        # time.sleep(15)

        start = time.time()
        running_avg_loss = start_running_avg_loss
        pg_losses = start_pg_losses
        run_avg_losses = start_run_avg_losses
        iteration = start_iter

        for epoch in range(num_epochs):
            print("Epoch :", epoch + 1)
            for batch in dataloader:
                iteration += 1

                loss = self.train_one_batch_pg(batch)

                running_avg_loss = calc_running_avg_loss(
                    loss, running_avg_loss, iteration)
                print("Iteration:", iteration, "  PG loss:", loss,
                      "  Running avg loss:", running_avg_loss)
                pg_losses.append(loss)
                run_avg_losses.append(running_avg_loss)

                print_interval = 10
                if iteration % print_interval == 0:
                    print(
                        'steps %d, seconds for %d batch: %.2f , loss: %f' %
                        (iteration, print_interval, time.time() - start, loss))

                    start = time.time()

                if iteration % 10 == 0:
                    # Dump model and losses
                    model_file_path = self.save_model(running_avg_loss,
                                                      iteration)
                    pickle.dump(
                        pg_losses,
                        open(
                            os.path.join(
                                self.model_dir,
                                'train_pg_losses_{}.p'.format(iteration)),
                            'wb'))
                    pickle.dump(
                        run_avg_losses,
                        open(
                            os.path.join(
                                self.model_dir,
                                'train_run_avg_losses_{}.p'.format(iteration)),
                            'wb'))
                    # Run eval
                    eval_processor = Evaluate_pg(
                        model_file_path,
                        is_word_level=self.is_word_level,
                        is_combined=self.is_combined,
                        alpha=self.alpha)
                    eval_losses = eval_processor.run_eval(
                        self.model_dir, iteration)

                    # Check if we should stop
                    avg_eval_loss = np.mean(eval_losses)
                    if running_avg_loss < avg_eval_loss:
                        print("Stopping at iteration {}".format(iteration))
                        break

    def compute_policy_grads_using_rewards(self, sentence_rewards,
                                           word_rewards, sentence_losses,
                                           word_losses, word_to_sent_ind):
        if self.is_combined:
            pg_losses = [[(self.alpha * word_reward + (1 - self.alpha) *
                           sentence_rewards[i][word_to_sent_ind[i][j]]) *
                          word_losses[i][j]
                          for j, word_reward in enumerate(abstract_rewards)
                          if j < len(word_to_sent_ind[i])]
                         for i, abstract_rewards in enumerate(word_rewards)]
            pg_losses = [sum(pg) for pg in pg_losses]
        elif self.is_word_level:
            pg_losses = [[
                word_reward * word_losses[i][j]
                for j, word_reward in enumerate(abstract_rewards)
                if j < len(word_to_sent_ind[i])
            ] for i, abstract_rewards in enumerate(word_rewards)]
            pg_losses = [sum(pg) for pg in pg_losses]
        else:
            pg_losses = [[
                rs * sentence_losses[ri][rsi] for rsi, rs in enumerate(r)
            ] for ri, r in enumerate(sentence_rewards)]
            pg_losses = [sum(pg) for pg in pg_losses]
        return pg_losses
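
# --- Standalone sketch (not part of the class above) ---
# Plain-Python illustration of the combined word/sentence weighting above for a
# single abstract: each word loss is scaled by
# alpha * word_reward + (1 - alpha) * reward of the sentence the word belongs to.
# All numbers are made up.
alpha = 0.3
word_losses = [2.0, 1.5, 3.0, 0.5]          # per-word NLL
word_rewards = [0.1, 0.4, 0.2, 0.9]         # per-word reward
sentence_rewards = [0.6, 0.3]               # per-sentence reward
word_to_sent_ind = [0, 0, 1, 1]             # sentence index of each word

pg_loss = sum(
    (alpha * wr + (1 - alpha) * sentence_rewards[word_to_sent_ind[j]]) * word_losses[j]
    for j, wr in enumerate(word_rewards)
)
print(pg_loss)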

    def compute_pg_loss(self, orig, pred, sentence_losses, split_predictions,
                        word_losses, word_to_sent_ind):
        sentence_rewards = None
        word_rewards = None
        # First compute the rewards
        if not self.is_word_level or self.is_combined:
            sentence_rewards = get_sentence_rewards(orig, pred)

        if self.is_word_level or self.is_combined:
            word_rewards = get_word_level_rewards(orig, split_predictions)

        pg_losses = self.compute_policy_grads_using_rewards(
            sentence_rewards=sentence_rewards,
            word_rewards=word_rewards,
            sentence_losses=sentence_losses,
            word_losses=word_losses,
            word_to_sent_ind=word_to_sent_ind)

        return pg_losses

    def compute_batched_sentence_loss(self, word_losses, orig, pred):
        orig_sum = []
        new_pred = []
        pred_sum = []
        sentence_losses = []

        # Convert the original sum as one single string per article
        for i in range(len(orig)):
            orig_sum.append(' '.join(map(str, orig[i])))
            new_pred.append([])
            pred_sum.append([])
            sentence_losses.append([])

        batch_sent_indices = []
        for i in range(len(pred)):
            sentence = pred[i]
            losses = word_losses[i]
            sentence_indices = []
            count = 0
            while len(sentence) > 0:
                try:
                    idx = sentence.index(".")
                except ValueError:
                    idx = len(sentence)

                sentence_indices.extend([count for _ in range(idx)])

                if count > 0:
                    new_pred[i].append(new_pred[i][count - 1] +
                                       sentence[:idx + 1])
                else:
                    new_pred[i].append(sentence[:idx + 1])

                sentence_losses[i].append(sum(losses[:idx + 1]))

                sentence = sentence[idx + 1:]
                losses = losses[idx + 1:]
                count += 1
            batch_sent_indices.append(sentence_indices)

        for i in range(len(pred)):
            for j in range(len(new_pred[i])):
                pred_sum[i].append(' '.join(map(str, new_pred[i][j])))

        pg_losses = self.compute_pg_loss(orig_sum,
                                         pred_sum,
                                         sentence_losses,
                                         split_predictions=pred,
                                         word_losses=word_losses,
                                         word_to_sent_ind=batch_sent_indices)

        return pg_losses
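
# --- Standalone sketch (not part of the class above) ---
# Toy version of the "." splitting loop above: cut a predicted token list into
# sentences at every "." and sum the matching word losses per sentence.
pred = ["the", "cat", "sat", ".", "it", "slept", "."]
word_losses = [0.2, 0.3, 0.1, 0.05, 0.4, 0.6, 0.05]

sentence_losses, sent_index_per_word = [], []
sentence, losses, count = list(pred), list(word_losses), 0
while sentence:
    try:
        idx = sentence.index(".")
    except ValueError:
        idx = len(sentence)
    sent_index_per_word.extend([count] * idx)
    sentence_losses.append(sum(losses[:idx + 1]))
    sentence, losses = sentence[idx + 1:], losses[idx + 1:]
    count += 1

print(sentence_losses)        # [0.65, 1.05]
print(sent_index_per_word)    # [0, 0, 0, 1, 1]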

    def train_one_batch_pg(self, batch):
        batch_size = batch.batch_size

        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, config.use_gpu)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, config.use_gpu)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        step_losses = []
        output_ids = []
        # Begin with START symbol
        y_t_1 = torch.ones(batch_size, dtype=torch.long) * self.vocab.word2id(
            data.START_DECODING)
        if config.use_gpu:
            y_t_1 = y_t_1.cuda()

        for _ in range(batch_size):
            output_ids.append([])
            step_losses.append([])

        for di in range(min(max_dec_len, config.max_dec_steps)):
            #y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1,
                                      target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)  # NLL

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask

            # Move on to next token
            _, idx = torch.max(final_dist, 1)
            idx = idx.reshape(batch_size, -1).squeeze()
            y_t_1 = idx

            for i, pred in enumerate(y_t_1):
                if not pred.item() == data.PAD_TOKEN:
                    output_ids[i].append(pred.item())

            for i, tok_loss in enumerate(step_loss):
                step_losses[i].append(tok_loss)

        # Obtain the original and predicted summaries
        original_abstracts = batch.original_abstracts_sents
        predicted_abstracts = [
            data.outputids2words(ids, self.vocab, None) for ids in output_ids
        ]

        # Compute the batched loss
        batched_losses = self.compute_batched_sentence_loss(
            step_losses, original_abstracts, predicted_abstracts)
        #batched_losses = Variable(batched_losses, requires_grad=True)
        losses = torch.stack(batched_losses)
        losses = losses / dec_lens_var

        loss = torch.mean(losses)
        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(),
                        config.max_grad_norm)

        self.optimizer.step()

        return loss.item()
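
# --- Standalone sketch (not part of the class above) ---
# The PG batch above decodes free-running: instead of teacher forcing, the next
# decoder input is the argmax of the current output distribution. Toy tensors
# stand in for the real decoder.
import torch

batch_size, vocab = 3, 6
start_id = 2
y_t = torch.full((batch_size,), start_id, dtype=torch.long)   # begin with START

for _ in range(4):                       # a few decode steps
    final_dist = torch.softmax(torch.randn(batch_size, vocab), dim=1)  # stand-in for the decoder
    _, idx = torch.max(final_dist, 1)    # greedy pick
    y_t = idx                            # feed the prediction back in
    print(y_t.tolist())
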
Exemplo n.º 18
0
class Trainer:
    def __init__(self, config):

        self.config = config
        self.device = config['device']
        self.step = 0
        if os.path.exists('../vocab.pt'):
            self.vocab = torch.load('../vocab.pt')
        else:
            self.vocab = Vocab(config['vocab_file'], config['vocab_size'])
            torch.save(self.vocab, '../vocab.pt')
        self.train_data = CNNDMDataset('train', config['data_path'], config,
                                       self.vocab)
        self.validate_data = CNNDMDataset('val', config['data_path'], config,
                                          self.vocab)

        self.setup(config)

    def setup(self, config):

        self.model = Model(config).to(config['device'])
        self.optimizer = Adagrad(self.model.parameters(),
                                 lr=config['learning_rate'],
                                 initial_accumulator_value=0.1)
        # self.optimizer = Adam(self.model.parameters(),lr = config['learning_rate'],betas = config['betas'])
        checkpoint = None

        # Note: between two most_common() calls, Counter may output equal-frequency elements in a different order...!
        if config['train_from'] != '':
            logging('Train from %s' % config['train_from'])
            checkpoint = torch.load(config['train_from'], map_location='cpu')
            self.model.load_state_dict(checkpoint['model'])
            self.step = checkpoint['step']
            self.vocab = checkpoint['vocab']
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            # print('State dict parameters:')
            # for n in model.state_dict().keys():
            #     print(n)
        #self.optimizer = Adam(self.model.parameters(),lr = config['learning_rate'],betas = config['betas'])

    def train_one(self, batch):
        """ coverage not implemented """
        config = self.config
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros = \
            get_input_from_batch(batch, config, self.device)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, self.device)
        pred = self.model(enc_batch, dec_batch, enc_padding_mask,
                          dec_padding_mask, enc_batch_extend_vocab,
                          extra_zeros)
        # >>>>>>>> DEBUG Session <<<<<<<<<
        # print("ENC\n")
        # print(enc_batch)
        # print("DEC\n")
        # print(dec_batch)
        # print("TGT\n")
        # print(target_batch)
        # print("ENCP\n")
        # print(enc_padding_mask)
        # print("DECP\n")
        # print(dec_padding_mask)
        # encs = [self.vocab.id2word(int(v)) for v in enc_batch[:, 0]]
        # decs = [self.vocab.id2word(int(v)) for v in dec_batch[:, 0]]
        # print(' '.join(encs))
        # print(' '.join(decs))
        #print(pred.max(dim=-1)[1][:,0])    #
        #loss = self.model.nll_loss(pred, target_batch, dec_lens_var)
        loss = self.model.label_smoothing_loss(pred, target_batch)
        return loss
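
# --- Standalone sketch (not part of the class above) ---
# `label_smoothing_loss` is not shown in this listing; the assumption here is a
# standard smoothed cross-entropy, illustrated with PyTorch's built-in option
# (PyTorch >= 1.10). Toy logits and targets; PAD positions would normally be masked.
import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss(label_smoothing=0.1)

logits = torch.randn(4, 10)               # [tokens, vocab]
targets = torch.randint(0, 10, (4,))      # gold ids
print(criterion(logits, targets))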

    def train(self):

        config = self.config
        train_loader = DataLoader(self.train_data,
                                  batch_size=config['batch_size'],
                                  shuffle=True,
                                  collate_fn=Collate())

        running_avg_loss = 0
        self.model.train()

        for _ in range(config['train_epoch']):
            for batch in train_loader:
                self.step += 1

                loss = self.train_one(batch)
                running_avg_loss = calc_running_avg_loss(
                    loss.item(), running_avg_loss)
                loss.div(float(config['gradient_accum'])).backward()

                if self.step % config[
                        'gradient_accum'] == 0:  # gradient accumulation
                    clip_grad_norm_(self.model.parameters(),
                                    config['max_grad_norm'])
                    self.optimizer.step()
                    self.optimizer.zero_grad()

                if self.step % config['report_every'] == 0:
                    logging("Step %d Train loss %.3f" %
                            (self.step, running_avg_loss))
                if self.step % config['save_every'] == 0:
                    self.save()
                if self.step % config['validate_every'] == 0:
                    self.validate()
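
# --- Standalone sketch (not part of the class above) ---
# Minimal version of the gradient-accumulation pattern used in train(): scale each
# batch loss by 1/accum, call backward() every batch, but only clip and step the
# optimizer every `accum` batches. Toy model and data; Adagrad stands in for the real config.
import torch
import torch.nn as nn
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adagrad

model = nn.Linear(8, 1)
optimizer = Adagrad(model.parameters(), lr=0.1)
accum, max_grad_norm = 4, 2.0

for step in range(1, 9):
    x, y = torch.randn(16, 8), torch.randn(16, 1)
    loss = nn.functional.mse_loss(model(x), y)
    loss.div(float(accum)).backward()          # gradients keep accumulating
    if step % accum == 0:
        clip_grad_norm_(model.parameters(), max_grad_norm)
        optimizer.step()
        optimizer.zero_grad()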

    @torch.no_grad()
    def validate(self):
        self.model.eval()
        validate_loader = DataLoader(self.validate_data,
                                     batch_size=self.config['batch_size'],
                                     shuffle=False,
                                     collate_fn=Collate())
        losses = []
        for batch in tqdm(validate_loader):
            loss = self.train_one(batch)
            losses.append(loss.item())
        self.model.train()
        ave_loss = sum(losses) / len(losses)
        logging('Validate loss : %f' % ave_loss)

    def save(self):
        state = {
            'model': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'step': self.step,
            'vocab': self.vocab
        }
        save_path = os.path.join(self.config['model_path'],
                                 'model_s%d.pt' % self.step)
        logging('Saving model step %d to %s...' % (self.step, save_path))
        torch.save(state, save_path)
Exemplo n.º 19
0
class Train(object):
    def __init__(self):
        if config.is_hierarchical:
            raise Exception("Hierarchical PGN-AMI not supported!")

        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.pad_id = self.vocab.word2id(PAD_TOKEN)
        self.start_id = self.vocab.word2id(START_DECODING)
        self.stop_id = self.vocab.word2id(STOP_DECODING)

        self.print_interval = config.print_interval

        train_dir = config.train_dir
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)

        self.model_dir = train_dir
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

    def save_model(self, running_avg_loss, iter):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(self.model_dir,
                                       'iter{}.pt'.format(iter))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        self.model = Model(model_file_path)

        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params,
            lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0

        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()

        return start_iter, start_loss

    def train_one_batch(self, ami_data, idx):
        # enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
        #     get_ami_input_from_batch(batch, use_cuda)
        # dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
        #     get_ami_output_from_batch(batch, use_cuda)

        enc_pack, dec_pack = get_a_batch(ami_data,
                                         idx,
                                         self.vocab,
                                         config.batch_size,
                                         config.max_enc_steps,
                                         config.max_dec_steps,
                                         self.start_id,
                                         self.stop_id,
                                         self.pad_id,
                                         sum_type='short',
                                         use_cuda=use_cuda)
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = enc_pack
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = dec_pack

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state.forward1(encoder_hidden)

        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing

            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder.forward1(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, di)

            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1,
                                      target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage),
                                               1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        loss.backward()

        clip_grad_norm_(self.model.encoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(),
                        config.max_grad_norm)

        self.optimizer.step()

        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        iter, running_avg_loss = self.setup_train(model_file_path)
        sys.stdout.flush()

        ami_data = load_ami_data('train')
        valid_data = load_ami_data('valid')
        # bring the training data up to 100 by moving a few validation meetings into it
        random.shuffle(valid_data)
        ami_data.extend(valid_data[:6])
        valid_data = valid_data[6:]

        num_batches = len(ami_data)
        idx = 0

        # validation & stopping
        best_valid_loss = 1000000000
        stop_counter = 0

        while iter < n_iters:
            if idx == 0:
                print("shuffle training data")
                random.shuffle(ami_data)

            loss = self.train_one_batch(ami_data, idx)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     iter)

            iter += 1
            idx += config.batch_size
            if idx >= num_batches: idx = 0  # wrap around once the training data is exhausted

            if iter % self.print_interval == 0:
                print("[{}] iter {}, loss: {:.5f}".format(
                    str(datetime.now()), iter, loss))
                sys.stdout.flush()

            if iter % config.save_every == 0:
                self.save_model(running_avg_loss, iter)

            if iter % config.eval_every == 0:
                valid_loss = self.run_eval(valid_data)
                print("valid_loss = {:.5f}".format(valid_loss))
                if valid_loss < best_valid_loss:
                    stop_counter = 0
                    best_valid_loss = valid_loss
                    print("VALID better")
                else:
                    stop_counter += 1
                    print(
                        "VALID NOT better, counter = {}".format(stop_counter))
                    if stop_counter == config.stop_after:
                        print("Stop training")
                        return

        print("Finished training!")

    def eval_one_batch(self, eval_data, idx):

        enc_pack, dec_pack = get_a_batch(eval_data,
                                         idx,
                                         self.vocab,
                                         1,
                                         config.max_enc_steps,
                                         config.max_dec_steps,
                                         self.start_id,
                                         self.stop_id,
                                         self.pad_id,
                                         sum_type='short',
                                         use_cuda=use_cuda)

        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = enc_pack
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = dec_pack

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state.forward1(encoder_hidden)

        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder.forward1(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, di)

            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist,
                                      dim=1,
                                      index=target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage),
                                               1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_step_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_step_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        return loss.data.item()

    def run_eval(self, eval_data):
        running_avg_loss, iter = 0, 0
        batch_losses = []
        num_batches = len(eval_data)
        print("valid data size = {}".format(num_batches))
        for idx in range(num_batches):
            loss = self.eval_one_batch(eval_data, idx)
            batch_losses.append(loss)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     iter)
            print("#", end="")
            sys.stdout.flush()
        print()

        avg_loss = sum(batch_losses) / len(batch_losses)
        return avg_loss
Exemplo n.º 20
0
class Train(object):
    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path,
                               self.vocab,
                               mode='train',
                               batch_size=config.batch_size,
                               single_pass=False)
        time.sleep(15)

        train_dir = os.path.join(config.log_root,
                                 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.summary_writer = tf.summary.FileWriter(train_dir)

    def save_model(self, running_avg_loss, iter):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(
            self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        self.model = Model(model_file_path)

        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params,
            lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0

        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()

        return start_iter, start_loss

    def train_one_batch(self, batch):
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1,
                                      target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage),
                                               1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(),
                        config.max_grad_norm)

        self.optimizer.step()

        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        while iter < n_iters:
            batch = self.batcher.next_batch()

            loss = self.train_one_batch(batch)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer, iter)
            iter += 1

            if iter % 100 == 0:
                self.summary_writer.flush()
            print_interval = 50
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                start = time.time()
            if iter % 5000 == 0:
                self.save_model(running_avg_loss, iter)
Exemplo n.º 21
0
class Train(object):
    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)

        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        # print("MODE MUST BE train")
        # time.sleep(15)
        self.print_interval = config.print_interval

        train_dir = config.train_dir
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)

        self.model_dir = train_dir
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        # self.summary_writer = tf.compat.v1.summary.FileWriter(train_dir)

    def save_model(self, running_avg_loss, iter):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(self.model_dir, 'iter{}.pt'.format(iter))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        self.model = Model(model_file_path)

        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(params, lr=initial_lr, initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0

        if model_file_path is not None:
            state = torch.load(model_file_path, map_location= lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()

        return start_iter, start_loss

    def train_one_batch(self, batch):
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()

        if not config.is_hierarchical:
            encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(enc_batch, enc_lens)
            s_t_1 = self.model.reduce_state.forward1(encoder_hidden)

        else:
            stop_id = self.vocab.word2id('.')
            pad_id  = self.vocab.word2id('[PAD]')
            enc_sent_pos = get_sent_position(enc_batch, stop_id, pad_id)
            dec_sent_pos = get_sent_position(dec_batch, stop_id, pad_id)

            encoder_outputs, encoder_feature, encoder_hidden, sent_enc_outputs, sent_enc_feature, sent_enc_hidden, sent_enc_padding_mask, sent_lens, seq_lens2 = \
                                                                    self.model.encoder(enc_batch, enc_lens, enc_sent_pos)

            s_t_1, sent_s_t_1 = self.model.reduce_state(encoder_hidden, sent_enc_hidden)
        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            if not config.is_hierarchical:
                # start = datetime.now()

                final_dist, s_t_1,  c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder.forward1(y_t_1, s_t_1,
                                                            encoder_outputs, encoder_feature, enc_padding_mask, c_t_1,
                                                            extra_zeros, enc_batch_extend_vocab,
                                                                               coverage, di)
                # print('NO HIER Time: ',datetime.now() - start)
                # import pdb; pdb.set_trace()
            else:
                # start = datetime.now()
                max_doc_len = enc_batch.size(1)
                final_dist, sent_s_t_1,  c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(y_t_1, sent_s_t_1,
                                                            encoder_outputs, encoder_feature, enc_padding_mask, seq_lens2,
                                                            sent_s_t_1, sent_enc_outputs, sent_enc_feature, sent_enc_padding_mask,
                                                            sent_lens, max_doc_len,
                                                            c_t_1, extra_zeros, enc_batch_extend_vocab, coverage, di)
                # print('DO HIER Time: ',datetime.now() - start)
                # import pdb; pdb.set_trace()


            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses/dec_lens_var
        loss = torch.mean(batch_avg_loss)

        # start = datetime.now()
        loss.backward()
        # print('{} HIER Time: {}'.format(config.is_hierarchical ,datetime.now() - start))
        # import pdb; pdb.set_trace()

        clip_grad_norm_(self.model.encoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)

        self.optimizer.step()

        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        iter, running_avg_loss = self.setup_train(model_file_path)
        sys.stdout.flush()

        # data_path = "lib/data/batches_train.vocab50000.batch16.pk.bin"
        # with open(data_path, 'rb') as f:
        #     stored_batches = pickle.load(f, encoding="bytes")
        # print("loaded data: {}".format(data_path))
        # num_batches = len(stored_batches)

        while iter < n_iters:
            batch = self.batcher.next_batch()
            # batch_id = iter%num_batches
            # batch = stored_batches[batch_id]

            loss = self.train_one_batch(batch)

            # running_avg_loss = calc_running_avg_loss(loss, running_avg_loss, self.summary_writer, iter)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss, iter)

            iter += 1

            # if iter % 100 == 0:
            #     self.summary_writer.flush()

            if iter % self.print_interval == 0:
                print("[{}] iter {}, loss: {:.5f}".format(str(datetime.now()), iter, loss))
                sys.stdout.flush()

            if iter % config.save_every == 0:
                self.save_model(running_avg_loss, iter)

        print("Finished training!")
Exemplo n.º 22
0
class Train(object):
    def __init__(self, opt):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path,
                               self.vocab,
                               mode='train',
                               batch_size=config.batch_size,
                               single_pass=False)
        time.sleep(15)

        train_dir = os.path.join(config.log_root,
                                 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)
        self.opt = opt
        self.summary_writer = tf.summary.FileWriter(train_dir)

    def save_model(self, running_avg_loss, iter):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(
            self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):

        # Training setup: optionally resume from a previously saved model
        if self.opt.load_model is not None:
            model_file_path = os.path.join(self.model_dir, self.opt.load_model)
        else:
            model_file_path = None

        self.model = Model(model_file_path)

        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params,
            lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0

        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()

        return start_iter, start_loss

    def train_one_batch(self, batch):
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        if self.opt.train_mle == "yes":
            step_losses = []
            for di in range(min(max_dec_len, config.max_dec_steps)):
                y_t_1 = dec_batch[:, di]  # Teacher forcing
                final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                    y_t_1, s_t_1, encoder_outputs, encoder_feature,
                    enc_padding_mask, c_t_1, extra_zeros,
                    enc_batch_extend_vocab, coverage, di)
                target = target_batch[:, di]
                gold_probs = torch.gather(final_dist, 1,
                                          target.unsqueeze(1)).squeeze()
                step_loss = -torch.log(gold_probs + config.eps)
                if config.is_coverage:
                    step_coverage_loss = torch.sum(
                        torch.min(attn_dist, coverage), 1)
                    step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                    coverage = next_coverage

                step_mask = dec_padding_mask[:, di]
                step_loss = step_loss * step_mask
                step_losses.append(step_loss)

            sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
            batch_avg_loss = sum_losses / dec_lens_var
            mle_loss = torch.mean(batch_avg_loss)
        else:
            mle_loss = get_cuda(torch.FloatTensor([0]))
            # --------------RL training-----------------------------------------------------
        if self.opt.train_rl == "yes":  # perform reinforcement learning training
            # multinomial sampling
            sample_sents, RL_log_probs = self.train_batch_RL(
                encoder_outputs,
                encoder_hidden,
                enc_padding_mask,
                encoder_feature,
                enc_batch_extend_vocab,
                extra_zeros,
                c_t_1,
                batch.art_oovs,
                coverage,
                greedy=False)
            with torch.autograd.no_grad():
                # greedy sampling
                greedy_sents, _ = self.train_batch_RL(encoder_outputs,
                                                      encoder_hidden,
                                                      enc_padding_mask,
                                                      encoder_feature,
                                                      enc_batch_extend_vocab,
                                                      extra_zeros,
                                                      c_t_1,
                                                      batch.art_oovs,
                                                      coverage,
                                                      greedy=True)

            sample_reward = self.reward_function(sample_sents,
                                                 batch.original_abstracts)
            baseline_reward = self.reward_function(greedy_sents,
                                                   batch.original_abstracts)
            # if iter%200 == 0:
            #     self.write_to_file(sample_sents, greedy_sents, batch.original_abstracts, sample_reward, baseline_reward, iter)
            rl_loss = -(
                sample_reward - baseline_reward
            ) * RL_log_probs  # Self-critic policy gradient training (eq 15 in https://arxiv.org/pdf/1705.04304.pdf)
            rl_loss = torch.mean(rl_loss)

            batch_reward = torch.mean(sample_reward).item()
        else:
            rl_loss = get_cuda(torch.FloatTensor([0]))
            batch_reward = 0
        #loss.backward()
        (self.opt.mle_weight * mle_loss +
         self.opt.rl_weight * rl_loss).backward()
        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(),
                        config.max_grad_norm)

        self.optimizer.step()

        return mle_loss.item(), batch_reward

    def train_batch_RL(self, encoder_outputs, encoder_hidden, enc_padding_mask,
                       encoder_feature, enc_batch_extend_vocab, extra_zeros,
                       c_t_1, article_oovs, coverage, greedy):
        '''Generate sentences from the decoder entirely from sampled tokens. These sentences are used for ROUGE evaluation.
        Args
        :param encoder_outputs: Encoder outputs for all time steps (batch_size, length_input_sequence, 2*hidden_size)
        :param encoder_hidden: Tuple containing the final hidden state & cell state of the encoder. Shape of h & c: (batch_size, hidden_size)
        :param enc_padding_mask: Mask for the encoder input; tensor of size (batch_size, length_input_sequence) with 0 for pad tokens & 1 otherwise
        :param c_t_1: Encoder context vector for time_step=0 (eq 5 in https://arxiv.org/pdf/1705.04304.pdf)
        :param extra_zeros: Tensor used to extend the vocab distribution for the pointer mechanism
        :param enc_batch_extend_vocab: Input batch that stores in-article OOV ids
        :param article_oovs: Batch containing the list of OOVs in each example
        :param greedy: If True, performs greedy sampling, else multinomial sampling
        Returns:
        :decoded_strs: List of decoded sentences
        :log_probs: Log probabilities of the sampled words
        '''
        s_t_1 = self.model.reduce_state(
            encoder_hidden)  # Decoder hidden states
        y_t_1 = get_cuda(
            torch.LongTensor(len(encoder_outputs)).fill_(
                self.vocab.word2id(data.START_DECODING))
        )  # Input to the decoder: the [START] token for every example in the batch
        inds = []  # Stores sampled indices for each time step
        decoder_padding_mask = []  # Stores padding masks of the generated samples
        log_probs = []  # Stores log probabilities of generated samples
        mask = get_cuda(
            torch.LongTensor(len(encoder_outputs)).fill_(1)
        )  # Values that indicate whether [STOP] token has already been encountered; 1 => Not encountered, 0 otherwise

        for t in range(config.max_dec_steps):
            probs, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, t)
            if greedy is False:
                multi_dist = Categorical(probs)  # categorical distribution over the (extended) vocabulary
                y_t_1 = multi_dist.sample()  # perform multinomial sampling
                log_prob = multi_dist.log_prob(y_t_1)
                log_probs.append(log_prob)
            else:
                _, y_t_1 = torch.max(probs, dim=1)  # greedy sampling: take the highest-probability word
            y_t_1 = y_t_1.detach()
            inds.append(y_t_1)
            mask_t = get_cuda(torch.zeros(len(encoder_outputs)))  # Padding mask of the batch for the current time step
            mask_t[mask == 1] = 1  # mask_t = 1 if [STOP] was not encountered up to the previous time step, else 0
            mask[(mask == 1) + (y_t_1 == self.vocab.word2id(data.STOP_DECODING)) == 2] = 0  # If [STOP] was not encountered before and the current word is [STOP], set mask = 0
            decoder_padding_mask.append(mask_t)
            is_oov = (y_t_1 >= config.vocab_size).long()  # Mask indicating whether the sampled word is OOV
            y_t_1 = (1 - is_oov) * y_t_1 + is_oov * self.vocab.word2id(data.UNKNOWN_TOKEN)  # Replace OOVs with the [UNK] token

        inds = torch.stack(inds, dim=1)
        decoder_padding_mask = torch.stack(decoder_padding_mask, dim=1)
        if greedy is False:  # If multinomial sampling was used, compute log probabilities of the sampled words
            log_probs = torch.stack(log_probs, dim=1)
            log_probs = log_probs * decoder_padding_mask  # Ignore sampled words whose padding mask is 0
            lens = torch.sum(decoder_padding_mask, dim=1)  # Length of each sampled sentence
            log_probs = torch.sum(log_probs, dim=1) / lens  # (bs,) length-normalized log probability of each sentence
        decoded_strs = []
        for i in range(len(encoder_outputs)):
            id_list = inds[i].cpu().numpy()
            oovs = article_oovs[i]
            S = data.outputids2words(
                id_list, self.vocab,
                oovs)  # Generate sentence corresponding to sampled words
            try:
                end_idx = S.index(data.STOP_DECODING)
                S = S[:end_idx]
            except ValueError:
                pass  # no [STOP] token in the sampled sentence; keep it as it is
            if len(S) < 2:  # Sentences shorter than 2 words (e.g. ".") make ROUGE fail, so replace them with "xxx"
                S = ["xxx"]
            S = " ".join(S)
            decoded_strs.append(S)

        return decoded_strs, log_probs

    def reward_function(self, decoded_sents, original_sents):
        rouge = Rouge()
        try:
            scores = rouge.get_scores(decoded_sents, original_sents)
        except Exception:
            print(
                "Rouge failed for multi-sentence evaluation; falling back to scoring each pair individually"
            )
            scores = []
            for i in range(len(decoded_sents)):
                try:
                    score = rouge.get_scores(decoded_sents[i],
                                             original_sents[i])
                except Exception:
                    print("Error occured at:")
                    print("decoded_sents:", decoded_sents[i])
                    print("original_sents:", original_sents[i])
                    score = [{"rouge-1": {"p": 0.0}}]
                scores.append(score[0])
        rouge_l_p1 = [score["rouge-1"]["p"] for score in scores]
        rouge_l_p1 = get_cuda(torch.FloatTensor(rouge_l_p1))
        return rouge_l_p1

    def trainIters(self, n_iters, model_file_path=None):
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer, iter)
            iter += 1

            if iter % 50 == 0:
                self.summary_writer.flush()
            print_interval = 50
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                start = time.time()
            if iter % 100 == 0:
                self.save_model(running_avg_loss, iter)
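train_batch_RL and reward_function above implement self-critical sequence training (eq. 15 in https://arxiv.org/pdf/1705.04304.pdf): a multinomially sampled output and a greedy-decoded baseline are both scored with ROUGE-1 precision, and the reward difference scales the negative log-probability of the sampled sequence. The toy sketch below reproduces that loss for a single decoding step, with made-up rewards standing in for the ROUGE scores.

import torch
from torch.distributions import Categorical

def self_critical_loss(log_probs, sample_reward, baseline_reward):
    """rl_loss = -(r(sampled) - r(greedy)) * log P(sampled), averaged over the batch."""
    return torch.mean(-(sample_reward - baseline_reward) * log_probs)

if __name__ == '__main__':
    batch_size, vocab = 3, 10
    logits = torch.randn(batch_size, vocab, requires_grad=True)
    probs = torch.softmax(logits, dim=-1)

    dist = Categorical(probs)
    sampled = dist.sample()               # multinomial sampling (exploration)
    log_probs = dist.log_prob(sampled)    # log P of the sampled tokens
    greedy = torch.argmax(probs, dim=-1)  # greedy baseline, no gradient needed

    # stand-ins for the ROUGE rewards of the sampled and greedy outputs
    sample_reward = torch.tensor([0.42, 0.10, 0.55])
    baseline_reward = torch.tensor([0.35, 0.20, 0.50])

    loss = self_critical_loss(log_probs, sample_reward, baseline_reward)
    loss.backward()  # pushes probability toward samples that beat the greedy baseline
    print(loss.item())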
Exemplo n.º 23
0
    if torch.cuda.device_count():
        loss_weights = torch.tensor(cfg.LOSS_WEIGHTS,
                                    dtype=torch.float64).cuda()
    else:
        loss_weights = torch.tensor(cfg.LOSS_WEIGHTS, dtype=torch.float64)

    model.train()
    optimizer = Adagrad(model.parameters(),
                        lr=cfg.LEARNING_RATE,
                        weight_decay=cfg.WEIGHT_DECAY)

    for epoch in range(cfg.MAX_EPOCHS):
        train_loss = 0

        for _iter in tqdm(range(cfg.EPOCH_ITERS)):
            batch_iterator = iter(dataloader)
            # zero the gradient buffers
            optimizer.zero_grad()

            (gt, patch_2, patch_3) = next(batch_iterator)

            # Use CUDA if possible
            if torch.cuda.device_count():
                gt = gt.cuda()
                patch_2 = patch_2.cuda()
                patch_3 = patch_3.cuda()

            softmax_scores = model.forward(patch_2=Variable(patch_2),
                                           patch_3=Variable(patch_3))
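Note that the fragment above rebuilds iter(dataloader) on every inner-loop step, so each step draws a single batch from a freshly constructed (and, with shuffle=True, freshly shuffled) iterator. A more conventional pattern is sketched below with a dummy dataset and model; the tensors, constants and the tiny two-layer model are illustrative stand-ins, not the configuration used in the example above.

import torch
from torch.optim import Adagrad
from torch.utils.data import DataLoader, TensorDataset

# illustrative stand-ins for the cfg.* values and the patch dataset used above
LEARNING_RATE, WEIGHT_DECAY, MAX_EPOCHS = 0.01, 1e-4, 2
dataset = TensorDataset(torch.randint(0, 2, (64,)),    # gt labels
                        torch.randn(64, 3, 32, 32),    # patch_2
                        torch.randn(64, 3, 32, 32))    # patch_3
dataloader = DataLoader(dataset, batch_size=8, shuffle=True)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torch.nn.Sequential(torch.nn.Flatten(),
                            torch.nn.Linear(3 * 32 * 32, 2)).to(device)
optimizer = Adagrad(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
criterion = torch.nn.CrossEntropyLoss()

model.train()
for epoch in range(MAX_EPOCHS):
    for gt, patch_2, patch_3 in dataloader:  # one full pass over the data per epoch
        gt, patch_2, patch_3 = gt.to(device), patch_2.to(device), patch_3.to(device)
        optimizer.zero_grad()
        scores = model(patch_2)              # the real model consumes both patches
        loss = criterion(scores, gt)
        loss.backward()
        optimizer.step()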
Exemplo n.º 24
0
class Train(object):
    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path,
                               self.vocab,
                               mode='train',
                               batch_size=config.batch_size,
                               single_pass=False)
        time.sleep(15)

        train_dir = os.path.join(config.ouput_root,
                                 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.makedirs(train_dir)

        self.checkpoint_dir = os.path.join(train_dir, 'checkpoints')
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)

        self.train_summary_writer = tf.summary.create_file_writer(
            os.path.join(train_dir, 'log', 'train'))
        self.eval_summary_writer = tf.summary.create_file_writer(
            os.path.join(train_dir, 'log', 'eval'))

    def save_model(self, model_path, running_avg_loss, iter):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        torch.save(state, model_path)

    def setup_train(self, model_file_path=None):
        self.model = Model(device, model_file_path)

        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params,
            lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0

        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                for state in self.optimizer.state.values():
                    for k, v in state.items():
                        if torch.is_tensor(v):
                            state[k] = v.to(device)

        return start_iter, start_loss

    def train_one_batch(self, batch, forcing_ratio=1):
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, device)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, device)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        step_losses = []
        y_t_1_hat = None
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]
            # decide the next input
            if di == 0 or random.random() < forcing_ratio:
                x_t = y_t_1  # teacher forcing, use label from last time step as input
            else:
                # replace OOV ids (>= vocab size) with the id of UNK so they can be embedded
                y_t_1_hat[y_t_1_hat >= self.vocab.size()] = self.vocab.word2id(
                    UNKNOWN_TOKEN)
                x_t = y_t_1_hat.flatten()  # use the prediction from the last time step as input
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                x_t, s_t_1, encoder_outputs, encoder_feature, enc_padding_mask,
                c_t_1, extra_zeros, enc_batch_extend_vocab, coverage, di)
            _, y_t_1_hat = final_dist.data.topk(1)
            target = target_batch[:, di].unsqueeze(1)
            step_loss = cal_NLLLoss(target, final_dist)
            if config.is_coverage:  # if coverage is not used, coverage stays None
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage),
                                               1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:,
                                         di]  # padding in target should not count into loss
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(),
                        config.max_grad_norm)

        self.optimizer.step()

        return loss.item()

    def train(self, n_iters, init_model_path=None):
        iter, avg_loss = self.setup_train(init_model_path)
        start = time.time()
        cnt = 0
        best_model_path = None
        min_eval_loss = float('inf')
        while iter < n_iters:
            s = config.forcing_ratio
            k = config.decay_to_0_iter
            x = iter
            near_zero = 0.0001
            if config.forcing_decay_type:
                if x >= config.decay_to_0_iter:
                    forcing_ratio = 0
                elif config.forcing_decay_type == 'linear':
                    forcing_ratio = s * (k - x) / k
                elif config.forcing_decay_type == 'exp':
                    p = pow(near_zero, 1 / k)
                    forcing_ratio = s * (p**x)
                elif config.forcing_decay_type == 'sig':
                    r = math.log((1 / near_zero) - 1) / k
                    forcing_ratio = s / (1 + pow(math.e, r * (x - k / 2)))
                else:
                    raise ValueError('Unrecognized forcing_decay_type: ' +
                                     config.forcing_decay_type)
            else:
                forcing_ratio = config.forcing_ratio
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch, forcing_ratio=forcing_ratio)
            model_path = os.path.join(self.checkpoint_dir,
                                      'model_step_%d' % (iter + 1))
            avg_loss = calc_avg_loss(loss, avg_loss)

            if (iter + 1) % config.print_interval == 0:
                with self.train_summary_writer.as_default():
                    tf.summary.scalar(name='loss', data=loss, step=iter)
                self.train_summary_writer.flush()
                logger.info('steps %d, took %.2f seconds, train avg loss: %f' %
                            (iter + 1, time.time() - start, avg_loss))
                start = time.time()
            if config.eval_interval is not None and (
                    iter + 1) % config.eval_interval == 0:
                start = time.time()
                logger.info("Start Evaluation on model %s" % model_path)
                eval_processor = Evaluate(self.model, self.vocab)
                eval_loss = eval_processor.run_eval()
                logger.info(
                    "Evaluation finished, took %.2f seconds, eval loss: %f" %
                    (time.time() - start, eval_loss))
                with self.eval_summary_writer.as_default():
                    tf.summary.scalar(name='eval_loss',
                                      data=eval_loss,
                                      step=iter)
                self.eval_summary_writer.flush()
                if eval_loss < min_eval_loss:
                    logger.info(
                        "This is the best model so far, saving it to disk.")
                    min_eval_loss = eval_loss
                    best_model_path = model_path
                    self.save_model(model_path, eval_loss, iter)
                    cnt = 0
                else:
                    cnt += 1
                    if cnt > config.patience:
                        logger.info(
                            "Eval loss hasn't improved for %d consecutive evaluations, early stopping.\n"
                            "Best model: %s (eval loss: %f)" %
                            (config.patience, best_model_path, min_eval_loss))
                        break
                start = time.time()
            elif (iter + 1) % config.save_interval == 0:
                self.save_model(model_path, avg_loss, iter)
            iter += 1
        else:
            logger.info(
                "Training finished, best model: %s (eval loss: %f)" %
                (best_model_path, min_eval_loss))
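The forcing_ratio schedule in the training loop above implements scheduled sampling: the probability of teacher forcing decays from config.forcing_ratio toward zero over config.decay_to_0_iter iterations, either linearly, exponentially, or along a sigmoid. The standalone sketch below isolates the three schedules as a pure function of the iteration (the constants are illustrative) so their shapes are easier to compare.

import math

def forcing_ratio_at(x, s=1.0, k=10000, decay_type='linear', near_zero=1e-4):
    """Scheduled-sampling ratio at iteration x.

    s: initial forcing ratio; k: iteration at which the ratio reaches (almost) zero.
    """
    if x >= k:
        return 0.0
    if decay_type == 'linear':
        return s * (k - x) / k
    if decay_type == 'exp':
        p = near_zero ** (1.0 / k)  # per-iteration multiplicative decay
        return s * (p ** x)
    if decay_type == 'sig':
        r = math.log(1.0 / near_zero - 1.0) / k
        return s / (1.0 + math.exp(r * (x - k / 2)))
    raise ValueError('Unrecognized decay_type: ' + decay_type)

if __name__ == '__main__':
    for step in (0, 2500, 5000, 7500, 9999):
        print(step, [round(forcing_ratio_at(step, decay_type=d), 4)
                     for d in ('linear', 'exp', 'sig')])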
Exemplo n.º 25
0
class Train(object):
    def __init__(self):
        #config("print.vocab_path ",config.vocab_path)
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path,
                               self.vocab,
                               mode='train',
                               batch_size=config.batch_size,
                               single_pass=False)
        time.sleep(15)

        train_dir = os.path.join(config.log_root,
                                 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.summary_writer = tf.summary.FileWriter(train_dir)

    def save_model(self, running_avg_loss, iter):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(
            self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        self.model = Model(model_file_path)

        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        #print("params : ",params)
        #print("params collection is completed....")
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params,
            lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0

        #### Loading state where the training stopped earlier use that to train for future epoches ####
        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                ###### Making into GPU/server accessable Variables #####
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()
        return start_iter, start_loss

    def train_one_batch(self, batch):

        ########### The two calls below just unpack the encoder and decoder inputs (sizes, vocab, lengths, etc.) ###########
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()
        #print("train_one_batch function ......")
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(
            encoder_hidden
        )  ### The reduced encoder final hidden state becomes the initial decoder state at time step 0
        #print("s_t_1 : ",len(s_t_1),s_t_1[0].shape,s_t_1[1].shape)

        #print("steps.....")
        #print("max_dec_len = ",max_dec_len)
        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            ############ Training (teacher forcing) ############
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            #print("y_t_1 : ",len(y_t_1))
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, di)
            #print("attn_dist : ",len(attn_dist),attn_dist[0].shape)
            #print("final_dist : ",len(final_dist),final_dist[0].shape) ############## vocab_Size
            target = target_batch[:, di]
            #print("target = ",len(target))

            gold_probs = torch.gather(final_dist, 1,
                                      target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(
                gold_probs + config.eps
            )  # Eqn_6: negative log-likelihood of the gold token
            if config.is_coverage:
                step_coverage_loss = torch.sum(
                    torch.min(attn_dist, coverage),
                    1)  # Eqn_13a: coverage loss
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss  # Eqn_13b: add the weighted coverage loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(),
                        config.max_grad_norm)

        self.optimizer.step()

        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        print("trainIters__Started___model_file_path is : ", model_file_path)
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        print("Max iteration : n_iters = ", n_iters)
        print("going to start running iter NO : ", iter)
        print("\n******************************\n")
        while iter < n_iters:
            print("\n###################################\n")
            print("iter : ", iter)
            batch = self.batcher.next_batch()
            print("batch data loading : ", batch)
            loss = self.train_one_batch(batch)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer, iter)
            print("running_avg_loss : ", running_avg_loss)
            iter += 1
            if iter % 100 == 0:  ##100
                self.summary_writer.flush()
            print_interval = 100  #1000
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                start = time.time()
            if iter % 500 == 0:  ##5000
                self.save_model(running_avg_loss, iter)
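The Eqn_13a / Eqn_13b comments above refer to the coverage mechanism: the coverage vector accumulates the attention distributions of the previous decoder steps, the per-step coverage loss is the sum over source positions of min(attention, coverage), and it is added to the NLL with weight config.cov_loss_wt. A minimal tensor sketch of one such step follows; here the coverage update is written out explicitly, whereas the real decoder returns it as next_coverage.

import torch

def coverage_step(attn_dist, coverage, step_nll, cov_loss_wt=1.0):
    """One decoder step of the coverage mechanism.

    attn_dist: (batch, src_len) attention over source tokens at this step
    coverage:  (batch, src_len) sum of the attention distributions of earlier steps
    step_nll:  (batch,) negative log-likelihood of the gold token at this step
    Returns the combined step loss and the updated coverage vector.
    """
    step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), dim=1)  # penalize re-attending
    step_loss = step_nll + cov_loss_wt * step_coverage_loss
    next_coverage = coverage + attn_dist                                   # accumulate attention
    return step_loss, next_coverage

if __name__ == '__main__':
    B, S = 2, 5
    coverage = torch.zeros(B, S)
    for t in range(3):
        attn = torch.softmax(torch.randn(B, S), dim=1)
        nll = torch.rand(B)
        loss, coverage = coverage_step(attn, coverage, nll)
        print('step', t, loss)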
Exemplo n.º 26
0
class Train(object):
    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        time.sleep(15)

        train_dir = os.path.join(config.log_root, 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.summary_writer = tf.summary.FileWriter(train_dir)
        self.last_good_model_save_path = None 

    def save_model(self, running_avg_loss, iter):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        # save the path to the last model that was not nan
        if (not math.isnan(running_avg_loss)):
            self.last_good_model_save_path = model_save_path 
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        self.model = Model(model_file_path)
        self.last_good_model_save_path = model_file_path

        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(params, lr=initial_lr, initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0

        if model_file_path is not None:
            state = torch.load(model_file_path, map_location= lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()

        return start_iter, start_loss

    def train_one_batch(self, batch):
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1,  c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(y_t_1, s_t_1,
                                                        encoder_outputs, encoder_feature, enc_padding_mask, c_t_1,
                                                        extra_zeros, enc_batch_extend_vocab,
                                                                           coverage, di)

            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            # calculate copy loss
            vocab_zero = Variable(torch.zeros(self.model.decoder.vocab_dist_.shape, dtype=torch.float))
            if use_cuda:
                vocab_zero = vocab_zero.cuda()
            if extra_zeros is not None:
                vocab_zero = torch.cat([vocab_zero, extra_zeros], 1)
            attn_dist_ = (1 - p_gen) * attn_dist
            attn_expanded = vocab_zero.scatter_add(1, enc_batch_extend_vocab, attn_dist_)
            vocab_zero[:, self.vocab.word2id('[UNK]')] = 1.0
            # Not sure whether we want to add loss for the extra vocab indices
            #vocab_zero[:, config.vocab_size:] = 1.0
            y_unk_neg = 1.0 - vocab_zero
            copyloss = torch.bmm(y_unk_neg.unsqueeze(1), attn_expanded.unsqueeze(2)).squeeze()  # copy probability mass placed on non-UNK vocabulary words

            # add the copy loss, weighted by lambda_2 (config.copy_loss_wt)
            step_loss = step_loss + config.copy_loss_wt * copyloss

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)


        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses/dec_lens_var
        loss = torch.mean(batch_avg_loss)

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)

        self.optimizer.step()

        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss, self.summary_writer, iter)
            iter += 1

            if (math.isnan(running_avg_loss)):
                print('Found a nan loss return. Restarting the training at {}' \
                        .format(self.last_good_model_save_path))
                iter, running_avg_loss = self.setup_train(self.last_good_model_save_path)
                start = time.time()

            if iter % 100 == 0:
                self.summary_writer.flush()
            print_interval = 1000
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' % (iter, print_interval,
                                                                           time.time() - start, loss))
                start = time.time()
            if iter % 1000 == 0:
                self.save_model(running_avg_loss, iter)
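The copy-loss block above uses scatter_add to project the attention distribution, scaled by (1 - p_gen), onto the extended vocabulary; this is the same projection the pointer mechanism uses to assemble final_dist. The reduced sketch below shows only that projection on toy shapes; the ids, the p_gen value and the vocabulary sizes are made up for illustration.

import torch

batch, src_len, vocab_size, max_oovs = 2, 4, 6, 2
extended_size = vocab_size + max_oovs

# attention over source positions and the generation probability p_gen
attn_dist = torch.softmax(torch.randn(batch, src_len), dim=1)
p_gen = torch.full((batch, 1), 0.7)

# source token ids in the *extended* vocabulary (ids >= vocab_size are per-article OOVs)
enc_batch_extend_vocab = torch.tensor([[1, 3, 6, 2],
                                       [0, 7, 7, 5]])

# project the copy probabilities (1 - p_gen) * attention onto the extended vocabulary
copy_probs = torch.zeros(batch, extended_size)
copy_probs = copy_probs.scatter_add(1, enc_batch_extend_vocab, (1 - p_gen) * attn_dist)

# combined with the zero-padded generation distribution this yields final_dist
vocab_dist = torch.softmax(torch.randn(batch, vocab_size), dim=1)
extra_zeros = torch.zeros(batch, max_oovs)
final_dist = p_gen * torch.cat([vocab_dist, extra_zeros], dim=1) + copy_probs
print(final_dist.sum(dim=1))  # each row sums to 1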