示例#1
0
    def validation_step(self, batch, batch_idx):
        """Run one validation step: compute loss, perplexity and BLEU.

        Returns the loss together with progress-bar and logger metrics
        in the dict-based (old-style Lightning) return format.
        """
        txt, gls = batch.txt, batch.gls

        # Teacher forcing: the decoder sees the gloss sequence minus its
        # final token; the target is the sequence minus its first token.
        output = self.forward(txt, gls[:, :-1])
        vocab_size = output.shape[-1]

        # Flatten to (N, vocab) predictions vs (N,) targets for the loss.
        flat_pred = output.contiguous().view(-1, vocab_size)
        flat_trg = gls[:, 1:].contiguous().view(-1)

        loss = self.loss(flat_pred, flat_trg)
        ppl = math.exp(loss)

        # BLEU between decoded hypotheses and references.
        hyp = self.get_gls_hyp(output)
        ref = self.get_gls_ref(gls)
        bleu = bleu_score(hyp, ref)

        progress = {'ppl': ppl, 'bleu': bleu['bleu4']}
        log_metrics = {
            'valid_loss': loss,
            'valid_ppl': ppl,
            'bleu1': bleu['bleu1'],
            'bleu2': bleu['bleu2'],
            'bleu3': bleu['bleu3'],
            'bleu4': bleu['bleu4'],
        }
        return {'loss': loss, 'progress_bar': progress, 'log': log_metrics}
示例#2
0
    def training_step(self, batch, batch_idx):
        """Run one training step and return the loss.

        Handles two model flavors: 'onmt' (expects seq-first tensors) and
        'tutorial' (batch-first, teacher forcing with a shifted target).
        Logs loss, perplexity, BLEU-4 and the learning rate.
        """
        txt, txt_lengths = batch.txt  # bsz x seq
        gls, gls_lengths = batch.gls

        if self.model_name == 'onmt':
            # convert bsz x seq -> seq x bsz for the onmt-style model...
            gls = gls.transpose(1, 0)
            txt = txt.transpose(1, 0)
            output = self.forward(gls, txt, gls_lengths)
            output_dim = output.shape[-1]
            # ...and restore txt to bsz x seq for the target slicing below.
            txt = txt.transpose(1, 0)

        elif self.model_name == 'tutorial':
            # Teacher forcing: decoder input is the target without its last
            # token; the shifted target is built below.
            output = self.forward(gls, txt[:, :-1])
            output_dim = output.shape[-1]

        # Flatten to (N, vocab) predictions vs (N,) targets for the loss.
        pred = output.contiguous().view(-1, output_dim)
        trg_txt = txt[:, 1:].contiguous().view(-1)

        loss = self.loss(pred, trg_txt)
        ppl = math.exp(loss)

        # get BLEU between decoded hypotheses and references
        _txt_hyp = self.get_txt_hyp(output)
        _txt_ref = self.get_txt_ref(txt)
        bleu = bleu_score(_txt_hyp, _txt_ref)

        # logging — log the scalar BLEU-4, not the whole `bleu` dict:
        # self.log() expects a number/tensor, and the validation code
        # already reports bleu['bleu4'].
        self.log('train_loss', loss, prog_bar=False, logger=True)
        self.log('train_ppl', ppl, prog_bar=False, logger=True)
        self.log('train_bleu', bleu['bleu4'], prog_bar=False, logger=True)
        self.log('lr', self.lr, prog_bar=True, logger=False)

        return loss
示例#3
0
    def validation_step(self, batch, batch_idx):
        """Run one validation step and log loss, perplexity and BLEU-4.

        Handles two model flavors: 'onmt' (expects seq-first tensors) and
        'tutorial' (batch-first, teacher forcing with a shifted target).
        """
        txt, txt_lengths = batch.txt  # bsz x seq
        gls, gls_lengths = batch.gls

        if self.model_name == 'onmt':
            # convert bsz x seq -> seq x bsz for the onmt-style model...
            txt = txt.transpose(1, 0)
            gls = gls.transpose(1, 0)
            output = self.forward(txt, gls, txt_lengths)
            output_dim = output.shape[-1]
            # ...and restore gls to bsz x seq for the target slicing below.
            gls = gls.transpose(1, 0)

        elif self.model_name == 'tutorial':
            # Teacher forcing: decoder input is the target without its last
            # token; the shifted target is built below.
            output = self.forward(txt, gls[:, :-1])
            output_dim = output.shape[-1]

        # Flatten to (N, vocab) predictions vs (N,) targets for the loss.
        pred = output.contiguous().view(-1, output_dim)
        trg_gls = gls[:, 1:].contiguous().view(-1)

        loss = self.loss(pred, trg_gls)
        ppl = math.exp(loss)

        # get BLEU between decoded hypotheses and references
        _gls_hyp = self.get_gls_hyp(output)
        _gls_ref = self.get_gls_ref(gls)
        bleu = bleu_score(_gls_hyp, _gls_ref)

        # logging — log the scalar BLEU-4, not the whole `bleu` dict:
        # self.log() expects a number/tensor, not a dict of scores.
        self.log('valid_loss', loss, prog_bar=False, logger=True)
        self.log('valid_ppl', ppl, prog_bar=False, logger=True)
        self.log('valid_bleu', bleu['bleu4'], prog_bar=False, logger=True)