Example #1
	def beam_decode(self, inpute, inputc, src_pad_mask=None, context_mask=None, beam_size=8, max_len=512, length_penalty=0.0, return_all=False, clip_beam=False, fill_pad=False):

		bsize, seql = inpute.size()[:2]

		beam_size2 = beam_size * beam_size
		bsizeb2 = bsize * beam_size2
		real_bsize = bsize * beam_size

		sos_emb = self.get_sos_emb(inpute)
		isize = sos_emb.size(-1)
		sqrt_isize = sqrt(isize)

		if length_penalty > 0.0:
			lpv = sos_emb.new_ones(real_bsize, 1)
			lpv_base = 6.0 ** length_penalty

		out = sos_emb * sqrt_isize
		if self.pemb is not None:
			out = out + self.pemb.get_pos(0)

		if self.drop is not None:
			out = self.drop(out)

		states = {}

		for _tmp, net in enumerate(self.nets):
			out, _state = net(inpute, None, inputc, src_pad_mask, None, context_mask, out)
			states[_tmp] = _state

		if self.out_normer is not None:
			out = self.out_normer(out)

		out = self.lsm(self.classifier(out))

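		# scores: (bsize, 1, beam_size) => (bsize, beam_size)
		# wds, trans: (bsize * beam_size, 1)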
		scores, wds = out.topk(beam_size, dim=-1)
		scores = scores.squeeze(1)
		sum_scores = scores
		wds = wds.view(real_bsize, 1)
		trans = wds

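		# done_trans: (bsize, beam_size), True once a beam has emitted <eos> (token id 2 in this codebase)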
		done_trans = wds.view(bsize, beam_size).eq(2)

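		# tile the source encodings and masks per beam: (bsize, ...) => (bsize * beam_size, ...)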
		inpute = inpute.repeat(1, beam_size, 1).view(real_bsize, seql, isize)

		_src_pad_mask = None if src_pad_mask is None else src_pad_mask.repeat(1, beam_size, 1).view(real_bsize, 1, seql)
		_cbsize, _cseql = inputc[0].size()[:2]
		_creal_bsize = _cbsize * beam_size
		_context_mask = [None if cu is None else cu.repeat(1, beam_size, 1).view(_creal_bsize, 1, _cseql) for cu in context_mask]

		_inputc = [inputu.repeat(1, beam_size, 1).view(_creal_bsize, _cseql, isize) for inputu in inputc]

		for key, value in states.items():
			states[key] = repeat_bsize_for_beam_tensor(value, beam_size)

		for step in range(1, max_len):

			out = self.wemb(wds) * sqrt_isize
			if self.pemb is not None:
				out = out + self.pemb.get_pos(step)

			if self.drop is not None:
				out = self.drop(out)

			for _tmp, net in enumerate(self.nets):
				out, _state = net(inpute, states[_tmp], _inputc, _src_pad_mask, None, _context_mask, out)
				states[_tmp] = _state

			if self.out_normer is not None:
				out = self.out_normer(out)

			out = self.lsm(self.classifier(out)).view(bsize, beam_size, -1)

			_scores, _wds = out.topk(beam_size, dim=-1)
			_scores = (_scores.masked_fill(done_trans.unsqueeze(2).expand(bsize, beam_size, beam_size), 0.0) + sum_scores.unsqueeze(2).expand(bsize, beam_size, beam_size))

			if length_penalty > 0.0:
				lpv = lpv.masked_fill(~done_trans.view(real_bsize, 1), ((step + 6.0) ** length_penalty) / lpv_base)

			if clip_beam and (length_penalty > 0.0):
				scores, _inds = (_scores.view(real_bsize, beam_size) / lpv.expand(real_bsize, beam_size)).view(bsize, beam_size2).topk(beam_size, dim=-1)
				_tinds = (_inds + torch.arange(0, bsizeb2, beam_size2, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
				sum_scores = _scores.view(bsizeb2).index_select(0, _tinds).view(bsize, beam_size)
			else:
				scores, _inds = _scores.view(bsize, beam_size2).topk(beam_size, dim=-1)
				_tinds = (_inds + torch.arange(0, bsizeb2, beam_size2, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
				sum_scores = scores

			wds = _wds.view(bsizeb2).index_select(0, _tinds).view(real_bsize, 1)

			_inds = (_inds // beam_size + torch.arange(0, real_bsize, beam_size, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)

			trans = torch.cat((trans.index_select(0, _inds), wds.masked_fill(done_trans.view(real_bsize, 1), 0) if fill_pad else wds), 1)

			done_trans = (done_trans.view(real_bsize).index_select(0, _inds) | wds.eq(2).squeeze(1)).view(bsize, beam_size)

			_done = False
			if length_penalty > 0.0:
				lpv = lpv.index_select(0, _inds)
			elif (not return_all) and done_trans.select(1, 0).int().sum().item() == bsize:
				_done = True

			if _done or (done_trans.int().sum().item() == real_bsize):
				break

			for key, value in states.items():
				states[key] = value.index_select(0, _inds)

		if (not clip_beam) and (length_penalty > 0.0):
			scores = scores / lpv.view(bsize, beam_size)
			scores, _inds = scores.topk(beam_size, dim=-1)
			_inds = (_inds + torch.arange(0, real_bsize, beam_size, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
			trans = trans.view(real_bsize, -1).index_select(0, _inds).view(bsize, beam_size, -1)

		if return_all:

			return trans, scores
		else:

			return trans.view(bsize, beam_size, -1).select(1, 0)
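
The bookkeeping above is easy to lose in the reshapes: each of the k live beams proposes k continuations, the k * k candidate route scores are flattened per sentence, a single topk keeps the best k, and integer division over the selected indices recovers which parent beam each survivor extends. A minimal, self-contained sketch of that one step (the names are illustrative, not from this codebase):

import torch

bsize, beam_size = 2, 3
real_bsize = bsize * beam_size

sum_scores = torch.randn(bsize, beam_size)              # running score of each live beam
step_scores = torch.randn(bsize, beam_size, beam_size)  # log-prob of the k expansions per beam

# route score of every candidate, flattened to (bsize, k * k)
cand = (sum_scores.unsqueeze(2) + step_scores).view(bsize, beam_size * beam_size)
scores, inds = cand.topk(beam_size, dim=-1)

# inds // beam_size is the parent beam; the arange offset turns per-sentence
# beam indices into flat indices over the (bsize * beam_size) axis
parents = (inds // beam_size + torch.arange(0, real_bsize, beam_size).unsqueeze(1)).view(real_bsize)

trans = torch.arange(real_bsize).view(real_bsize, 1)  # stand-in translation history
trans = trans.index_select(0, parents)                # histories of the surviving beams
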
Example #2
    def beam_decode(self,
                    inpute,
                    src_pad_mask=None,
                    beam_size=8,
                    max_len=512,
                    length_penalty=0.0,
                    return_all=False,
                    clip_beam=False,
                    fill_pad=False):

        bsize, seql, isize = inpute[0].size()

        beam_size2 = beam_size * beam_size
        bsizeb2 = bsize * beam_size2
        real_bsize = bsize * beam_size

        sqrt_isize = sqrt(isize)

        if length_penalty > 0.0:
            # lpv: length penalty vector for each beam (bsize * beam_size, 1)
            lpv = inpute[0].new_ones(real_bsize, 1)
            lpv_base = 6.0**length_penalty

        states = {}

        outs = []

        for _inum, (model, inputu) in enumerate(zip(self.nets, inpute)):

            out = model.get_sos_emb(inputu) * sqrt_isize
            if model.pemb is not None:
                out = out + model.pemb.get_pos(0)

            if model.drop is not None:
                out = model.drop(out)

            states[_inum] = {}

            for _tmp, net in enumerate(model.nets):
                out, _state = net(inputu, None, src_pad_mask, out, 1)
                states[_inum][_tmp] = _state

            if model.out_normer is not None:
                out = model.out_normer(out)

            # outs: [(bsize, 1, nwd)]

            outs.append(model.classifier(out).softmax(dim=-1))

        out = torch.stack(outs).mean(0).log()

        # scores: (bsize, 1, beam_size) => (bsize, beam_size)
        # wds: (bsize * beam_size, 1)
        # trans: (bsize * beam_size, 1)

        scores, wds = out.topk(beam_size, dim=-1)
        scores = scores.squeeze(1)
        sum_scores = scores
        wds = wds.view(real_bsize, 1)
        trans = wds

        # done_trans: (bsize, beam_size)

        done_trans = wds.view(bsize, beam_size).eq(2)

        # inpute: (bsize, seql, isize) => (bsize * beam_size, seql, isize)

        inpute = [
            inputu.repeat(1, beam_size, 1).view(real_bsize, seql, isize)
            for inputu in inpute
        ]

        # _src_pad_mask: (bsize, 1, seql) => (bsize * beam_size, 1, seql)

        _src_pad_mask = None if src_pad_mask is None else src_pad_mask.repeat(
            1, beam_size, 1).view(real_bsize, 1, seql)

        # states[i][j]: (bsize, 1, isize) => (bsize * beam_size, 1, isize)

        for key, value in states.items():
            for _key, _value in value.items():
                value[_key] = repeat_bsize_for_beam_tensor(_value, beam_size)

        for step in range(2, max_len + 1):

            outs = []

            for _inum, (model, inputu) in enumerate(zip(self.nets, inpute)):

                out = model.wemb(wds) * sqrt_isize
                if model.pemb is not None:
                    out = out + model.pemb.get_pos(step - 1)

                if model.drop is not None:
                    out = model.drop(out)

                for _tmp, net in enumerate(model.nets):
                    out, _state = net(inputu, states[_inum][_tmp],
                                      _src_pad_mask, out, step)
                    states[_inum][_tmp] = _state

                if model.out_normer is not None:
                    out = model.out_normer(out)

                # outs: [(bsize, beam_size, nwd)...]

                outs.append(
                    model.classifier(out).softmax(dim=-1).view(
                        bsize, beam_size, -1))

            out = torch.stack(outs).mean(0).log()

            # find the top k ** 2 candidates and calculate route scores for them
            # _scores: (bsize, beam_size, beam_size)
            # done_trans: (bsize, beam_size)
            # scores: (bsize, beam_size)
            # _wds: (bsize, beam_size, beam_size)
            # mask_from_done_trans: (bsize, beam_size) => (bsize, beam_size * beam_size)
            # added_scores: (bsize, 1, beam_size) => (bsize, beam_size, beam_size)

            _scores, _wds = out.topk(beam_size, dim=-1)
            _scores = (_scores.masked_fill(
                done_trans.unsqueeze(2).expand(bsize, beam_size, beam_size),
                0.0) + sum_scores.unsqueeze(2).expand(bsize, beam_size, beam_size))

            if length_penalty > 0.0:
                lpv = lpv.masked_fill(~done_trans.view(real_bsize, 1),
                                      ((step + 5.0)**length_penalty) /
                                      lpv_base)

            # clip from k ** 2 candidate and remain the top-k for each path
            # scores: (bsize, beam_size * beam_size) => (bsize, beam_size)
            # _inds: indexes for the top-k candidate (bsize, beam_size)

            if clip_beam and (length_penalty > 0.0):
                scores, _inds = (_scores.view(real_bsize, beam_size) /
                                 lpv.expand(real_bsize, beam_size)).view(
                                     bsize, beam_size2).topk(beam_size, dim=-1)
                _tinds = (_inds + torch.arange(
                    0,
                    bsizeb2,
                    beam_size2,
                    dtype=_inds.dtype,
                    device=_inds.device).unsqueeze(1).expand_as(_inds)
                          ).view(real_bsize)
                sum_scores = _scores.view(bsizeb2).index_select(
                    0, _tinds).view(bsize, beam_size)
            else:
                scores, _inds = _scores.view(bsize, beam_size2).topk(beam_size,
                                                                     dim=-1)
                _tinds = (_inds + torch.arange(
                    0,
                    bsizeb2,
                    beam_size2,
                    dtype=_inds.dtype,
                    device=_inds.device).unsqueeze(1).expand_as(_inds)
                          ).view(real_bsize)
                sum_scores = scores

            # select the top-k candidate with higher route score and update translation record
            # wds: (bsize, beam_size, beam_size) => (bsize * beam_size, 1)

            wds = _wds.view(bsizeb2).index_select(0,
                                                  _tinds).view(real_bsize, 1)

            # reduces indexes in _inds from (beam_size ** 2) to beam_size
            # thus the fore path of the top-k candidate is pointed out
            # _inds: indexes for the top-k candidate (bsize, beam_size)

            _inds = (
                _inds // beam_size +
                torch.arange(0,
                             real_bsize,
                             beam_size,
                             dtype=_inds.dtype,
                             device=_inds.device).unsqueeze(1).expand_as(_inds)
            ).view(real_bsize)

            # select the corresponding translation history for the top-k candidate and update translation records
            # trans: (bsize * beam_size, nquery) => (bsize * beam_size, nquery + 1)

            trans = torch.cat(
                (trans.index_select(0, _inds),
                 wds.masked_fill(done_trans.view(real_bsize, 1), pad_id)
                 if fill_pad else wds), 1)

            done_trans = (done_trans.view(real_bsize).index_select(0, _inds)
                          | wds.eq(2).squeeze(1)).view(bsize, beam_size)

            # check early stop for beam search
            # done_trans: (bsize, beam_size)
            # scores: (bsize, beam_size)

            _done = False
            if length_penalty > 0.0:
                lpv = lpv.index_select(0, _inds)
            elif (not return_all) and all_done(done_trans.select(1, 0), bsize):
                _done = True

            # check beam states(done or not)

            if _done or all_done(done_trans, real_bsize):
                break

            # update the corresponding hidden states
            # states[i][j]: (bsize * beam_size, nquery, isize)
            # _inds: (bsize, beam_size) => (bsize * beam_size)

            for key, value in states.items():
                for _key, _value in value.items():
                    value[_key] = _value.index_select(0, _inds)

        # if length penalty is only applied in the last step, apply length penalty
        if (not clip_beam) and (length_penalty > 0.0):
            scores = scores / lpv.view(bsize, beam_size)
            scores, _inds = scores.topk(beam_size, dim=-1)
            _inds = (
                _inds +
                torch.arange(0,
                             real_bsize,
                             beam_size,
                             dtype=_inds.dtype,
                             device=_inds.device).unsqueeze(1).expand_as(_inds)
            ).view(real_bsize)
            trans = trans.view(real_bsize, -1).index_select(0, _inds).view(
                bsize, beam_size, -1)

        if return_all:

            return trans, scores
        else:

            return trans.view(bsize, beam_size, -1).select(1, 0)
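
Unlike Example #1, this decoder runs an ensemble: each member produces a distribution with softmax, the probabilities are averaged across members, and only then is the log taken (torch.stack(outs).mean(0).log()). The ordering matters; averaging log-probabilities instead would rank candidates by the geometric rather than the arithmetic mean of the member probabilities, as this small sketch shows:

import torch

torch.manual_seed(0)
logits_a, logits_b = torch.randn(1, 5), torch.randn(1, 5)

prob_mean = torch.stack([logits_a.softmax(-1), logits_b.softmax(-1)]).mean(0).log()
log_mean = torch.stack([logits_a.log_softmax(-1), logits_b.log_softmax(-1)]).mean(0)

print(prob_mean)  # what the ensemble step above computes
print(log_mean)   # geometric-mean alternative; generally different values
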
Example #3
    def beam_decode(self,
                    inpute,
                    src_pad_mask=None,
                    beam_size=8,
                    max_len=512,
                    length_penalty=0.0,
                    return_all=False,
                    clip_beam=False):

        bsize, seql = inpute.size()[:2]

        beam_size2 = beam_size * beam_size
        bsizeb2 = bsize * beam_size2
        real_bsize = bsize * beam_size

        sos_emb = self.get_sos_emb(inpute)
        isize = sos_emb.size(-1)
        sqrt_isize = sqrt(isize)

        if length_penalty > 0.0:
            # lpv: length penalty vector for each beam (bsize * beam_size, 1)
            lpv = sos_emb.new_ones(real_bsize, 1)
            lpv_base = 6.0**length_penalty

        out = sos_emb * sqrt_isize + self.pemb.get_pos(0)

        if self.drop is not None:
            out = self.drop(out)

        states = {}

        attns = []
        for _tmp, net in enumerate(self.nets):
            out, _attn, _state = net(inpute, None, src_pad_mask, None, out,
                                     True)
            states[_tmp] = _state
            attns.append(_attn)

        if self.out_normer is not None:
            out = self.out_normer(out)

        attns = torch.cat(attns, dim=1).permute(0, 2, 3, 1)
        _asize = attns.size()
        out = torch.cat([
            out,
            attns.contiguous().view(-1, _asize[-1]).mv(
                self.tattn_w.softmax(dim=0) if self.tattn_drop is None else
                self.tattn_drop(self.tattn_w).softmax(dim=0)).view(
                    _asize[:-1]).bmm(inpute)
        ],
                        dim=-1)

        # out: (bsize, 1, nwd)

        out = self.lsm(self.classifier(out))

        # scores: (bsize, 1, beam_size) => (bsize, beam_size)
        # wds: (bsize * beam_size, 1)
        # trans: (bsize * beam_size, 1)

        scores, wds = out.topk(beam_size, dim=-1)
        scores = scores.squeeze(1)
        sum_scores = scores
        wds = wds.view(real_bsize, 1)
        trans = wds

        # done_trans: (bsize, beam_size)

        done_trans = wds.view(bsize, beam_size).eq(2)

        # inpute: (bsize, seql, isize) => (bsize * beam_size, seql, isize)

        inpute = inpute.repeat(1, beam_size, 1).view(real_bsize, seql, isize)

        # _src_pad_mask: (bsize, 1, seql) => (bsize * beam_size, 1, seql)

        _src_pad_mask = None if src_pad_mask is None else src_pad_mask.repeat(
            1, beam_size, 1).view(real_bsize, 1, seql)

        # states[i]: (bsize, 1, isize) => (bsize * beam_size, 1, isize)

        for key, value in states.items():
            states[key] = repeat_bsize_for_beam_tensor(value, beam_size)

        for step in range(1, max_len):

            out = self.wemb(wds) * sqrt_isize + self.pemb.get_pos(step)

            if self.drop is not None:
                out = self.drop(out)

            attns = []
            for _tmp, net in enumerate(self.nets):
                out, _attn, _state = net(inpute, states[_tmp], _src_pad_mask,
                                         None, out, True)
                states[_tmp] = _state
                attns.append(_attn)

            if self.out_normer is not None:
                out = self.out_normer(out)

            attns = torch.cat(attns, dim=1).permute(0, 2, 3, 1)
            _asize = attns.size()
            out = torch.cat([
                out,
                attns.contiguous().view(-1, _asize[-1]).mv(
                    self.tattn_w.softmax(dim=0) if self.tattn_drop is None else
                    self.tattn_drop(self.tattn_w).softmax(dim=0)).view(
                        _asize[:-1]).bmm(inpute)
            ],
                            dim=-1)

            # out: (bsize, beam_size, nwd)

            out = self.lsm(self.classifier(out)).view(bsize, beam_size, -1)

            # find the top k ** 2 candidates and calculate route scores for them
            # _scores: (bsize, beam_size, beam_size)
            # done_trans: (bsize, beam_size)
            # scores: (bsize, beam_size)
            # _wds: (bsize, beam_size, beam_size)
            # mask_from_done_trans: (bsize, beam_size) => (bsize, beam_size * beam_size)
            # added_scores: (bsize, 1, beam_size) => (bsize, beam_size, beam_size)

            _scores, _wds = out.topk(beam_size, dim=-1)
            _scores = (
                _scores.masked_fill(
                    done_trans.unsqueeze(2).expand(bsize, beam_size,
                                                   beam_size), 0.0) +
                sum_scores.unsqueeze(2).expand(bsize, beam_size, beam_size))

            if length_penalty > 0.0:
                lpv = lpv.masked_fill(~done_trans.view(real_bsize, 1),
                                      ((step + 6.0)**length_penalty) /
                                      lpv_base)

            # clip from k ** 2 candidate and remain the top-k for each path
            # scores: (bsize, beam_size * beam_size) => (bsize, beam_size)
            # _inds: indexes for the top-k candidate (bsize, beam_size)

            if clip_beam and (length_penalty > 0.0):
                scores, _inds = (_scores.view(real_bsize, beam_size) /
                                 lpv.expand(real_bsize, beam_size)).view(
                                     bsize, beam_size2).topk(beam_size, dim=-1)
                _tinds = (_inds + torch.arange(
                    0,
                    bsizeb2,
                    beam_size2,
                    dtype=_inds.dtype,
                    device=_inds.device).unsqueeze(1).expand_as(_inds)
                          ).view(real_bsize)
                sum_scores = _scores.view(bsizeb2).index_select(
                    0, _tinds).view(bsize, beam_size)
            else:
                scores, _inds = _scores.view(bsize, beam_size2).topk(beam_size,
                                                                     dim=-1)
                _tinds = (_inds + torch.arange(
                    0,
                    bsizeb2,
                    beam_size2,
                    dtype=_inds.dtype,
                    device=_inds.device).unsqueeze(1).expand_as(_inds)
                          ).view(real_bsize)
                sum_scores = scores

            # select the top-k candidate with higher route score and update translation record
            # wds: (bsize, beam_size, beam_size) => (bsize * beam_size, 1)

            wds = _wds.view(bsizeb2).index_select(0,
                                                  _tinds).view(real_bsize, 1)

            # reduces indexes in _inds from (beam_size ** 2) to beam_size
            # thus the fore path of the top-k candidate is pointed out
            # _inds: indexes for the top-k candidate (bsize, beam_size)

            _inds = (
                _inds // beam_size +
                torch.arange(0,
                             real_bsize,
                             beam_size,
                             dtype=_inds.dtype,
                             device=_inds.device).unsqueeze(1).expand_as(_inds)
            ).view(real_bsize)

            # select the corresponding translation history for the top-k candidate and update translation records
            # trans: (bsize * beam_size, nquery) => (bsize * beam_size, nquery + 1)

            trans = torch.cat((trans.index_select(0, _inds), wds), 1)

            done_trans = (done_trans.view(real_bsize).index_select(0, _inds)
                          | wds.eq(2).squeeze(1)).view(bsize, beam_size)

            # check early stop for beam search
            # done_trans: (bsize, beam_size)
            # scores: (bsize, beam_size)

            _done = False
            if length_penalty > 0.0:
                lpv = lpv.index_select(0, _inds)
            elif (not return_all) and done_trans.select(
                    1, 0).sum().item() == bsize:
                _done = True

            # check beam states(done or not)

            if _done or (done_trans.sum().item() == real_bsize):
                break

            # update the corresponding hidden states
            # states[i]: (bsize * beam_size, nquery, isize)
            # _inds: (bsize, beam_size) => (bsize * beam_size)

            for key, value in states.items():
                states[key] = value.index_select(0, _inds)

        # if length penalty is only applied in the last step, apply length penalty
        if (not clip_beam) and (length_penalty > 0.0):
            scores = scores / lpv.view(bsize, beam_size)
            scores, _inds = scores.topk(beam_size, dim=-1)
            _inds = (
                _inds +
                torch.arange(0,
                             real_bsize,
                             beam_size,
                             dtype=_inds.dtype,
                             device=_inds.device).unsqueeze(1).expand_as(_inds)
            ).view(real_bsize)
            trans = trans.view(real_bsize, -1).index_select(0, _inds).view(
                bsize, beam_size, -1)

        if return_all:

            return trans, scores
        else:

            return trans.view(bsize, beam_size, -1).select(1, 0)
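
The length penalty in these methods matches the GNMT formula lp(Y) = ((5 + |Y|) / 6) ** alpha: dividing (step + 6.0) ** length_penalty by lpv_base = 6.0 ** length_penalty gives ((step + 6) / 6) ** alpha, and after step iterations the hypothesis holds step + 1 tokens (Example #2 counts steps from 2 and uses step + 5.0, which is the same quantity). A quick self-contained check:

alpha = 0.6
lpv_base = 6.0 ** alpha
for step in range(1, 5):
    hyp_len = step + 1  # tokens generated so far, counting the one from step 0
    gnmt = ((5.0 + hyp_len) / 6.0) ** alpha
    assert abs(((step + 6.0) ** alpha) / lpv_base - gnmt) < 1e-9
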
Example #4
	def repeat_buffer(self, beam_size):

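		# tile the cached cross-attention key/value buffers for beam search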
		if self.real_iK is not None:
			self.real_iK, self.real_iV = repeat_bsize_for_beam_tensor(self.real_iK, beam_size), repeat_bsize_for_beam_tensor(self.real_iV, beam_size)
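
repeat_bsize_for_beam_tensor itself is not shown here, but the hand-expanded equivalents in the decoders above (inpute.repeat(1, beam_size, 1).view(real_bsize, seql, isize)) pin down what it must do: tile the batch dimension so that the beam copies of each sentence stay adjacent. A sketch of that layout, assuming this reading:

import torch

bsize, seql, isize, beam_size = 2, 4, 3, 2
t = torch.randn(bsize, seql, isize)
tiled = t.repeat(1, beam_size, 1).view(bsize * beam_size, seql, isize)

# beam copies of a sentence are adjacent: rows 0..beam_size-1 are sentence 0
assert tiled[0].equal(tiled[1]) and tiled[2].equal(tiled[3])
assert tiled[0].equal(t[0]) and tiled[2].equal(t[1])
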
Example #5
    def beam_decode(self,
                    inpute,
                    inputh,
                    src_pad_mask=None,
                    beam_size=8,
                    max_len=512,
                    length_penalty=0.0,
                    return_all=False,
                    clip_beam=clip_beam_with_lp,
                    fill_pad=False):

        bsize, seql = inpute.size()[:2]

        beam_size2 = beam_size * beam_size
        bsizeb2 = bsize * beam_size2
        real_bsize = bsize * beam_size

        sos_emb = self.get_sos_emb(inpute)
        isize = sos_emb.size(-1)
        sqrt_isize = sqrt(isize)

        if length_penalty > 0.0:
            lpv = sos_emb.new_ones(real_bsize, 1)
            lpv_base = 6.0**length_penalty

        out = sos_emb * sqrt_isize
        if self.pemb is not None:
            out = out + self.pemb.get_pos(0)

        if self.drop is not None:
            out = self.drop(out)

        out = self.out_normer(out)

        states = {}

        for _tmp, (net, inputu, inputhu) in enumerate(
                zip(self.nets, inpute.unbind(dim=-1), inputh.unbind(dim=-1))):
            out, _state = net(inputu, inputhu, None, src_pad_mask, None, out,
                              True)
            states[_tmp] = _state

        out = self.lsm(self.classifier(out))

        scores, wds = out.topk(beam_size, dim=-1)
        scores = scores.squeeze(1)
        sum_scores = scores
        wds = wds.view(real_bsize, 1)
        trans = wds

        done_trans = wds.view(bsize, beam_size).eq(2)

        inputh = repeat_bsize_for_beam_tensor(inputh, beam_size)
        self.repeat_cross_attn_buffer(beam_size)

        _src_pad_mask = None if src_pad_mask is None else src_pad_mask.repeat(
            1, beam_size, 1).view(real_bsize, 1, seql)

        states = expand_bsize_for_beam(states, beam_size=beam_size)

        for step in range(1, max_len):

            out = self.wemb(wds) * sqrt_isize
            if self.pemb is not None:
                out = out + self.pemb.get_pos(step)

            if self.drop is not None:
                out = self.drop(out)

            out = self.out_normer(out)

            for _tmp, (net, inputu, inputhu) in enumerate(
                    zip(self.nets, inpute.unbind(dim=-1),
                        inputh.unbind(dim=-1))):
                out, _state = net(inputu, inputhu, states[_tmp], _src_pad_mask,
                                  None, out, True)
                states[_tmp] = _state

            out = self.lsm(self.classifier(out)).view(bsize, beam_size, -1)

            _scores, _wds = out.topk(beam_size, dim=-1)
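            # a finished beam keeps its frozen score on exactly one of its
            # beam_size copies; the other copies are filled with -inf below,
            # so topk cannot select duplicates of the same finished hypothesis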
            _done_trans_unsqueeze = done_trans.unsqueeze(2)
            _scores = (
                _scores.masked_fill(
                    _done_trans_unsqueeze.expand(bsize, beam_size, beam_size),
                    0.0) +
                sum_scores.unsqueeze(2).repeat(1, 1, beam_size).masked_fill_(
                    select_zero_(_done_trans_unsqueeze.repeat(1, 1, beam_size),
                                 -1, 0), -inf_default))

            if length_penalty > 0.0:
                lpv.masked_fill_(~done_trans.view(real_bsize, 1),
                                 ((step + 6.0)**length_penalty) / lpv_base)

            if clip_beam and (length_penalty > 0.0):
                scores, _inds = (_scores.view(real_bsize, beam_size) /
                                 lpv.expand(real_bsize, beam_size)).view(
                                     bsize, beam_size2).topk(beam_size, dim=-1)
                _tinds = (_inds + torch.arange(
                    0,
                    bsizeb2,
                    beam_size2,
                    dtype=_inds.dtype,
                    device=_inds.device).unsqueeze(1).expand_as(_inds)
                          ).view(real_bsize)
                sum_scores = _scores.view(bsizeb2).index_select(
                    0, _tinds).view(bsize, beam_size)
            else:
                scores, _inds = _scores.view(bsize, beam_size2).topk(beam_size,
                                                                     dim=-1)
                _tinds = (_inds + torch.arange(
                    0,
                    bsizeb2,
                    beam_size2,
                    dtype=_inds.dtype,
                    device=_inds.device).unsqueeze(1).expand_as(_inds)
                          ).view(real_bsize)
                sum_scores = scores

            wds = _wds.view(bsizeb2).index_select(0,
                                                  _tinds).view(real_bsize, 1)

            _inds = (
                _inds // beam_size +
                torch.arange(0,
                             real_bsize,
                             beam_size,
                             dtype=_inds.dtype,
                             device=_inds.device).unsqueeze(1).expand_as(_inds)
            ).view(real_bsize)

            trans = torch.cat(
                (trans.index_select(0, _inds),
                 wds.masked_fill(done_trans.view(real_bsize, 1), pad_id)
                 if fill_pad else wds), 1)

            done_trans = (done_trans.view(real_bsize).index_select(0, _inds)
                          | wds.eq(2).squeeze(1)).view(bsize, beam_size)

            _done = False
            if length_penalty > 0.0:
                lpv = lpv.index_select(0, _inds)
            elif (not return_all) and all_done(done_trans.select(1, 0), bsize):
                _done = True

            if _done or all_done(done_trans, real_bsize):
                break

            states = index_tensors(states, indices=_inds, dim=0)

        if (not clip_beam) and (length_penalty > 0.0):
            scores = scores / lpv.view(bsize, beam_size)
            scores, _inds = scores.topk(beam_size, dim=-1)
            _inds = (
                _inds +
                torch.arange(0,
                             real_bsize,
                             beam_size,
                             dtype=_inds.dtype,
                             device=_inds.device).unsqueeze(1).expand_as(_inds)
            ).view(real_bsize)
            trans = trans.view(real_bsize, -1).index_select(0, _inds)

        if return_all:

            return trans.view(bsize, beam_size, -1), scores
        else:

            return trans.view(bsize, beam_size, -1).select(1, 0)
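
Where Examples #1-#3 only freeze a finished beam's score by masking its expansions to 0.0, Example #5 additionally fills all but one copy of the frozen score with -inf, so each finished hypothesis survives as exactly one candidate rather than beam_size identical ones. A reduced sketch of that masking (illustrative names, one sentence and one finished beam):

import torch

beam_size = 3
done = torch.tensor([[True]])            # one finished beam
sum_score = torch.tensor([[-1.5]])       # its frozen route score
step_scores = torch.randn(1, beam_size)  # the would-be expansions

mask = done.repeat(1, beam_size)
mask[:, 0] = False                       # keep exactly the first copy
cand = (step_scores.masked_fill(done, 0.0)
        + sum_score.repeat(1, beam_size).masked_fill(mask, float("-inf")))
# cand is [-1.5, -inf, -inf]: the finished hypothesis enters topk once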