def beam_decode(self, inpute, inputh, src_pad_mask=None, chk_pad_mask=None, beam_size=8, max_len=512, length_penalty=0.0, return_all=False, clip_beam=clip_beam_with_lp, fill_pad=False):

    bsize, seql = inpute.size()[:2]
    beam_size2 = beam_size * beam_size
    bsizeb2 = bsize * beam_size2
    real_bsize = bsize * beam_size

    sos_emb = self.get_sos_emb(inpute)
    isize = sos_emb.size(-1)
    sqrt_isize = sqrt(isize)

    if length_penalty > 0.0:
        lpv = sos_emb.new_ones(real_bsize, 1)
        lpv_base = 6.0 ** length_penalty

    out = sos_emb * sqrt_isize
    if self.pemb is not None:
        out = out + self.pemb.get_pos(0)
    if self.drop is not None:
        out = self.drop(out)
    out = self.out_normer(out)

    states = {}
    for _tmp, (net, inputu, inputhu) in enumerate(zip(self.nets, inpute.unbind(dim=-1), inputh.unbind(dim=-1))):
        out, _state = net(inputu, inputhu, None, src_pad_mask, chk_pad_mask, None, out, True)
        states[_tmp] = _state

    out = self.lsm(self.classifier(out))

    scores, wds = out.topk(beam_size, dim=-1)
    scores = scores.squeeze(1)
    sum_scores = scores
    wds = wds.view(real_bsize, 1)
    trans = wds
    done_trans = wds.view(bsize, beam_size).eq(2)

    #inputh = repeat_bsize_for_beam_tensor(inputh, beam_size)
    self.repeat_cross_attn_buffer(beam_size)
    _src_pad_mask = None if src_pad_mask is None else src_pad_mask.repeat(1, beam_size, 1).view(real_bsize, 1, seql)
    _chk_pad_mask = None if chk_pad_mask is None else repeat_bsize_for_beam_tensor(chk_pad_mask, beam_size)

    states = expand_bsize_for_beam(states, beam_size=beam_size)

    for step in range(1, max_len):

        out = self.wemb(wds) * sqrt_isize
        if self.pemb is not None:
            out = out + self.pemb.get_pos(step)
        if self.drop is not None:
            out = self.drop(out)
        out = self.out_normer(out)

        for _tmp, (net, inputu, inputhu) in enumerate(zip(self.nets, inpute.unbind(dim=-1), inputh.unbind(dim=-1))):
            out, _state = net(inputu, inputhu, states[_tmp], _src_pad_mask, _chk_pad_mask, None, out, True)
            states[_tmp] = _state

        out = self.lsm(self.classifier(out)).view(bsize, beam_size, -1)

        _scores, _wds = out.topk(beam_size, dim=-1)
        _done_trans_unsqueeze = done_trans.unsqueeze(2)
        _scores = (_scores.masked_fill(_done_trans_unsqueeze.expand(bsize, beam_size, beam_size), 0.0) + sum_scores.unsqueeze(2).repeat(1, 1, beam_size).masked_fill_(select_zero_(_done_trans_unsqueeze.repeat(1, 1, beam_size), -1, 0), -inf_default))

        if length_penalty > 0.0:
            lpv.masked_fill_(~done_trans.view(real_bsize, 1), ((step + 6.0) ** length_penalty) / lpv_base)

        if clip_beam and (length_penalty > 0.0):
            scores, _inds = (_scores.view(real_bsize, beam_size) / lpv.expand(real_bsize, beam_size)).view(bsize, beam_size2).topk(beam_size, dim=-1)
            _tinds = (_inds + torch.arange(0, bsizeb2, beam_size2, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
            sum_scores = _scores.view(bsizeb2).index_select(0, _tinds).view(bsize, beam_size)
        else:
            scores, _inds = _scores.view(bsize, beam_size2).topk(beam_size, dim=-1)
            _tinds = (_inds + torch.arange(0, bsizeb2, beam_size2, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
            sum_scores = scores

        wds = _wds.view(bsizeb2).index_select(0, _tinds).view(real_bsize, 1)

        _inds = (_inds // beam_size + torch.arange(0, real_bsize, beam_size, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)

        trans = torch.cat((trans.index_select(0, _inds), wds.masked_fill(done_trans.view(real_bsize, 1), pad_id) if fill_pad else wds), 1)
        done_trans = (done_trans.view(real_bsize).index_select(0, _inds) | wds.eq(2).squeeze(1)).view(bsize, beam_size)

        _done = False
        if length_penalty > 0.0:
            lpv = lpv.index_select(0, _inds)
        elif (not return_all) and all_done(done_trans.select(1, 0), bsize):
            _done = True

        if _done or all_done(done_trans, real_bsize):
            break

        states = index_tensors(states, indices=_inds, dim=0)

    if (not clip_beam) and (length_penalty > 0.0):
        scores = scores / lpv.view(bsize, beam_size)
        scores, _inds = scores.topk(beam_size, dim=-1)
        _inds = (_inds + torch.arange(0, real_bsize, beam_size, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
        trans = trans.view(real_bsize, -1).index_select(0, _inds)

    if return_all:
        return trans.view(bsize, beam_size, -1), scores
    else:
        return trans.view(bsize, beam_size, -1).select(1, 0)
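# Note (added sketch, not part of the original decoder; helper name is hypothetical):
# the lpv / lpv_base bookkeeping above implements the GNMT-style length penalty
# lp = ((5 + n) / 6) ** length_penalty, since
# ((step + 6.0) ** length_penalty) / (6.0 ** length_penalty) equals
# ((step + 6) / 6) ** length_penalty, with n = step + 1 tokens decoded so far
# (the first token is produced before the loop). A minimal stand-alone version:
def gnmt_length_penalty(num_tokens, alpha):
    """Return the GNMT length penalty ((5 + num_tokens) / 6) ** alpha."""
    return ((5.0 + num_tokens) / 6.0) ** alpha

# Dividing a finished hypothesis' summed log-probability by
# gnmt_length_penalty(hypothesis_length, length_penalty) matches what the code
# above maintains incrementally in lpv.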
def beam_decode(self, inpute, src_pad_mask=None, beam_size=8, max_len=512, length_penalty=0.0, return_all=False, clip_beam=clip_beam_with_lp, fill_pad=False):

    bsize, seql, isize = inpute[0].size()
    beam_size2 = beam_size * beam_size
    bsizeb2 = bsize * beam_size2
    real_bsize = bsize * beam_size

    sqrt_isize = sqrt(isize)

    if length_penalty > 0.0:
        # lpv: length penalty vector for each beam (bsize * beam_size, 1)
        lpv = inpute[0].new_ones(real_bsize, 1)
        lpv_base = 6.0 ** length_penalty

    states = {}
    outs = []
    for _inum, (model, inputu) in enumerate(zip(self.nets, inpute)):
        out = model.get_sos_emb(inputu) * sqrt_isize + model.pemb.get_pos(0).view(1, 1, isize)
        if model.drop is not None:
            out = model.drop(out)
        states[_inum] = {}
        for _tmp, net in enumerate(model.nets):
            out, _state = net(inputu, None, src_pad_mask, None, out)
            states[_inum][_tmp] = _state
        if model.out_normer is not None:
            out = model.out_normer(out)
        # outs: [(bsize, 1, nwd)]
        outs.append(model.classifier(out).softmax(dim=-1))

    out = torch.stack(outs).mean(0).log()

    # scores: (bsize, 1, beam_size) => (bsize, beam_size)
    # wds: (bsize * beam_size, 1)
    # trans: (bsize * beam_size, 1)
    scores, wds = out.topk(beam_size, dim=-1)
    scores = scores.squeeze(1)
    sum_scores = scores
    wds = wds.view(real_bsize, 1)
    trans = wds

    # done_trans: (bsize, beam_size)
    done_trans = wds.view(bsize, beam_size).eq(2)

    # inpute: (bsize, seql, isize) => (bsize * beam_size, seql, isize)
    inpute = [inputu.repeat(1, beam_size, 1).view(real_bsize, seql, isize) for inputu in inpute]

    # _src_pad_mask: (bsize, 1, seql) => (bsize * beam_size, 1, seql)
    _src_pad_mask = None if src_pad_mask is None else src_pad_mask.repeat(1, beam_size, 1).view(real_bsize, 1, seql)

    # states[i][j]: (bsize, 1, isize) => (bsize * beam_size, 1, isize)
    states = expand_bsize_for_beam(states, beam_size=beam_size)

    for step in range(1, max_len):

        outs = []
        for _inum, (model, inputu) in enumerate(zip(self.nets, inpute)):
            out = model.wemb(wds) * sqrt_isize
            if model.pemb is not None:
                out = out + model.pemb.get_pos(step)
            if model.drop is not None:
                out = model.drop(out)
            for _tmp, net in enumerate(model.nets):
                out, _state = net(inputu, states[_inum][_tmp], _src_pad_mask, None, out)
                states[_inum][_tmp] = _state
            if model.out_normer is not None:
                out = model.out_normer(out)
            # outs: [(bsize, beam_size, nwd)...]
            outs.append(model.classifier(out).softmax(dim=-1).view(bsize, beam_size, -1))

        out = torch.stack(outs).mean(0).log()

        # find the top k ** 2 candidates and calculate route scores for them
        # _scores: (bsize, beam_size, beam_size)
        # done_trans: (bsize, beam_size)
        # scores: (bsize, beam_size)
        # _wds: (bsize, beam_size, beam_size)
        # mask_from_done_trans: (bsize, beam_size) => (bsize, beam_size * beam_size)
        # added_scores: (bsize, 1, beam_size) => (bsize, beam_size, beam_size)
        _scores, _wds = out.topk(beam_size, dim=-1)
        _scores = (_scores.masked_fill(done_trans.unsqueeze(2).expand(bsize, beam_size, beam_size), 0.0) + scores.unsqueeze(2).expand(bsize, beam_size, beam_size))

        if length_penalty > 0.0:
            lpv.masked_fill_(~done_trans.view(real_bsize, 1), ((step + 6.0) ** length_penalty) / lpv_base)

        # clip from the k ** 2 candidates and keep the top-k for each path
        # scores: (bsize, beam_size * beam_size) => (bsize, beam_size)
        # _inds: indexes for the top-k candidates (bsize, beam_size)
        if clip_beam and (length_penalty > 0.0):
            scores, _inds = (_scores.view(real_bsize, beam_size) / lpv.expand(real_bsize, beam_size)).view(bsize, beam_size2).topk(beam_size, dim=-1)
            _tinds = (_inds + torch.arange(0, bsizeb2, beam_size2, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
            sum_scores = _scores.view(bsizeb2).index_select(0, _tinds).view(bsize, beam_size)
        else:
            scores, _inds = _scores.view(bsize, beam_size2).topk(beam_size, dim=-1)
            _tinds = (_inds + torch.arange(0, bsizeb2, beam_size2, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
            sum_scores = scores

        # select the top-k candidates with the highest route scores and update the translation record
        # wds: (bsize, beam_size, beam_size) => (bsize * beam_size, 1)
        wds = _wds.view(bsizeb2).index_select(0, _tinds).view(real_bsize, 1)

        # reduce indexes in _inds from the (beam_size ** 2) range to beam_size,
        # so the parent path of each top-k candidate is identified
        # _inds: indexes for the top-k candidates (bsize, beam_size)
        _inds = (_inds // beam_size + torch.arange(0, real_bsize, beam_size, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)

        # select the corresponding translation history for the top-k candidates and update translation records
        # trans: (bsize * beam_size, nquery) => (bsize * beam_size, nquery + 1)
        trans = torch.cat((trans.index_select(0, _inds), wds.masked_fill(done_trans.view(real_bsize, 1), pad_id) if fill_pad else wds), 1)
        done_trans = (done_trans.view(real_bsize).index_select(0, _inds) | wds.eq(2).squeeze(1)).view(bsize, beam_size)

        # check early stop for beam search
        # done_trans: (bsize, beam_size)
        # scores: (bsize, beam_size)
        _done = False
        if length_penalty > 0.0:
            lpv = lpv.index_select(0, _inds)
        elif (not return_all) and all_done(done_trans.select(1, 0), bsize):
            _done = True

        # check beam states (done or not)
        if _done or all_done(done_trans, real_bsize):
            break

        # update the corresponding hidden states
        # states[i][j]: (bsize * beam_size, nquery, isize)
        # _inds: (bsize, beam_size) => (bsize * beam_size)
        states = index_tensors(states, indices=_inds, dim=0)

    # if the length penalty is only applied in the last step, apply it now
    if (not clip_beam) and (length_penalty > 0.0):
        scores = scores / lpv.view(bsize, beam_size)
        scores, _inds = scores.topk(beam_size, dim=-1)
        _inds = (_inds + torch.arange(0, real_bsize, beam_size, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
        trans = trans.view(real_bsize, -1).index_select(0, _inds)

    if return_all:
        return trans.view(bsize, beam_size, -1), scores
    else:
        return trans.view(bsize, beam_size, -1).select(1, 0)
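# Note (added sketch, hypothetical helper, not part of the original ensemble):
# the ensemble above averages the member models' output distributions in
# probability space and only then takes the log, i.e.
# log((1 / M) * sum_i softmax(logits_i)), which is what
# torch.stack(outs).mean(0).log() computes. A minimal stand-alone equivalent:
import torch

def ensemble_log_probs(logits_list):
    """Average per-model softmax distributions, then return log-probabilities."""
    probs = torch.stack([lg.softmax(dim=-1) for lg in logits_list])
    return probs.mean(0).log()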
def greedy_decode_clip(self, inpute, src_pad_mask=None, max_len=512, return_mat=True):

    bsize = inpute.size(0)

    sos_emb = self.get_sos_emb(inpute)
    sqrt_isize = sqrt(sos_emb.size(-1))

    # out: input to the decoder for the first step (bsize, 1, isize)
    out = sos_emb * sqrt_isize
    if self.pemb is not None:
        out = out + self.pemb.get_pos(0)
    if self.drop is not None:
        out = self.drop(out)

    states = {}
    for _tmp, net in enumerate(self.nets):
        out, _state = net(inpute, (None, None,), src_pad_mask, None, out)
        states[_tmp] = _state

    if self.out_normer is not None:
        out = self.out_normer(out)

    # out: (bsize, 1, nwd)
    out = self.lsm(self.classifier(out))
    # wds: (bsize, 1)
    wds = out.argmax(dim=-1)

    mapper = list(range(bsize))
    rs = [None for i in range(bsize)]
    trans = [wds]

    for i in range(1, max_len):

        out = self.wemb(wds) * sqrt_isize
        if self.pemb is not None:
            out = out + self.pemb.get_pos(i)
        if self.drop is not None:
            out = self.drop(out)

        for _tmp, net in enumerate(self.nets):
            out, _state = net(inpute, states[_tmp], src_pad_mask, None, out)
            states[_tmp] = _state

        if self.out_normer is not None:
            out = self.out_normer(out)

        # out: (bsize, 1, nwd)
        out = self.lsm(self.classifier(out))
        wds = out.argmax(dim=-1)
        trans.append(wds)

        # done_trans: (bsize)
        done_trans = wds.squeeze(1).eq(2)
        _ndone = done_trans.int().sum().item()
        if _ndone == bsize:
            for _iu, _tran in enumerate(torch.cat(trans, 1).unbind(0)):
                rs[mapper[_iu]] = _tran
            break
        elif _ndone > 0:
            _dind = done_trans.nonzero().squeeze(1)
            _trans = torch.cat(trans, 1)
            for _iu, _tran in zip(_dind.tolist(), _trans.index_select(0, _dind).unbind(0)):
                rs[mapper[_iu]] = _tran

            # reduce bsize for not finished decoding
            _ndid = (~done_trans).nonzero().squeeze(1)
            bsize = _ndid.size(0)
            wds = wds.index_select(0, _ndid)
            #inpute = inpute.index_select(0, _ndid)
            self.index_cross_attn_buffer(_ndid)
            if src_pad_mask is not None:
                src_pad_mask = src_pad_mask.index_select(0, _ndid)
            states = index_tensors(states, indices=_ndid, dim=0)
            # keep each step as a (bsize, 1) column so that the later torch.cat(trans, 1) still works
            trans = list(_trans.index_select(0, _ndid).split(1, dim=1))

            # update mapper
            for _ind, _iu in enumerate(_ndid.tolist()):
                mapper[_ind] = mapper[_iu]

    return torch.stack(pad_tensors(rs), 0) if return_mat else rs
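# Note (added sketch, hypothetical helper, not part of the original class): the
# "clip" variant above shrinks the running batch whenever some sentences emit
# the end-of-sequence token (id 2), and `mapper` remembers which original
# sentence each surviving row belongs to. A minimal version of that bookkeeping:
import torch

def clip_finished_rows(done_mask, mapper, *tensors):
    """Drop finished rows (done_mask: bool vector over the batch) from every
    tensor along dim 0 and compact the position mapper accordingly."""
    keep = (~done_mask).nonzero().squeeze(1)
    clipped = [t.index_select(0, keep) for t in tensors]
    new_mapper = [mapper[i] for i in keep.tolist()]
    return new_mapper, clipped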
def beam_decode_clip(self, inpute, src_pad_mask=None, beam_size=8, max_len=512, length_penalty=0.0, return_mat=True, return_all=False, clip_beam=clip_beam_with_lp):

    bsize, seql = inpute.size()[:2]
    beam_size2 = beam_size * beam_size
    bsizeb2 = bsize * beam_size2
    real_bsize = bsize * beam_size

    sos_emb = self.get_sos_emb(inpute)
    isize = sos_emb.size(-1)
    sqrt_isize = sqrt(isize)

    if length_penalty > 0.0:
        # lpv: length penalty vector for each beam (bsize * beam_size, 1)
        lpv = sos_emb.new_ones(real_bsize, 1)
        lpv_base = 6.0 ** length_penalty

    out = sos_emb * sqrt_isize
    if self.pemb is not None:
        out = out + self.pemb.get_pos(0)
    if self.drop is not None:
        out = self.drop(out)

    states = {}
    for _tmp, net in enumerate(self.nets):
        out, _state = net(inpute, (None, None,), src_pad_mask, None, out)
        states[_tmp] = _state

    if self.out_normer is not None:
        out = self.out_normer(out)

    # out: (bsize, 1, nwd)
    out = self.lsm(self.classifier(out))

    # scores: (bsize, 1, beam_size) => (bsize, beam_size)
    # wds: (bsize * beam_size, 1)
    # trans: (bsize * beam_size, 1)
    scores, wds = out.topk(beam_size, dim=-1)
    scores = scores.squeeze(1)
    sum_scores = scores
    wds = wds.view(real_bsize, 1)
    trans = wds

    # done_trans: (bsize, beam_size)
    done_trans = wds.view(bsize, beam_size).eq(2)

    # inpute: (bsize, seql, isize) => (bsize * beam_size, seql, isize)
    #inpute = inpute.repeat(1, beam_size, 1).view(real_bsize, seql, isize)
    self.repeat_cross_attn_buffer(beam_size)

    # _src_pad_mask: (bsize, 1, seql) => (bsize * beam_size, 1, seql)
    _src_pad_mask = None if src_pad_mask is None else src_pad_mask.repeat(1, beam_size, 1).view(real_bsize, 1, seql)

    # states[i]: (bsize, 1, isize) => (bsize * beam_size, 1, isize)
    states = expand_bsize_for_beam(states, beam_size=beam_size)

    mapper = list(range(bsize))
    rs = [None for i in range(bsize)]
    if return_all:
        rscore = [None for i in range(bsize)]

    for step in range(1, max_len):

        out = self.wemb(wds) * sqrt_isize
        if self.pemb is not None:
            out = out + self.pemb.get_pos(step)
        if self.drop is not None:
            out = self.drop(out)

        for _tmp, net in enumerate(self.nets):
            out, _state = net(inpute, states[_tmp], _src_pad_mask, None, out)
            states[_tmp] = _state

        if self.out_normer is not None:
            out = self.out_normer(out)

        # out: (bsize, beam_size, nwd)
        out = self.lsm(self.classifier(out)).view(bsize, beam_size, -1)

        # find the top k ** 2 candidates and calculate route scores for them
        # _scores: (bsize, beam_size, beam_size)
        # done_trans: (bsize, beam_size)
        # scores: (bsize, beam_size)
        # _wds: (bsize, beam_size, beam_size)
        # mask_from_done_trans_u: (bsize, beam_size) => (bsize, beam_size * beam_size)
        # added_scores: (bsize, 1, beam_size) => (bsize, beam_size, beam_size)
        _scores, _wds = out.topk(beam_size, dim=-1)
        _done_trans_unsqueeze = done_trans.unsqueeze(2)
        _scores = (_scores.masked_fill(_done_trans_unsqueeze.expand(bsize, beam_size, beam_size), 0.0) + sum_scores.unsqueeze(2).repeat(1, 1, beam_size).masked_fill_(select_zero_(_done_trans_unsqueeze.repeat(1, 1, beam_size), -1, 0), -inf_default))

        if length_penalty > 0.0:
            lpv.masked_fill_(~done_trans.view(real_bsize, 1), ((step + 6.0) ** length_penalty) / lpv_base)

        # clip from the k ** 2 candidates and keep the top-k for each path
        # scores: (bsize, beam_size * beam_size) => (bsize, beam_size)
        # _inds: indexes for the top-k candidates (bsize, beam_size)
        if clip_beam and (length_penalty > 0.0):
            scores, _inds = (_scores.view(real_bsize, beam_size) / lpv.expand(real_bsize, beam_size)).view(bsize, beam_size2).topk(beam_size, dim=-1)
            _tinds = (_inds + torch.arange(0, bsizeb2, beam_size2, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
            sum_scores = _scores.view(bsizeb2).index_select(0, _tinds).view(bsize, beam_size)
        else:
            scores, _inds = _scores.view(bsize, beam_size2).topk(beam_size, dim=-1)
            _tinds = (_inds + torch.arange(0, bsizeb2, beam_size2, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
            sum_scores = scores

        # select the top-k candidates with the highest route scores and update the translation record
        # wds: (bsize, beam_size, beam_size) => (bsize * beam_size, 1)
        wds = _wds.view(bsizeb2).index_select(0, _tinds).view(real_bsize, 1)

        # reduce indexes in _inds from the (beam_size ** 2) range to beam_size,
        # so the parent path of each top-k candidate is identified
        # _inds: indexes for the top-k candidates (bsize, beam_size)
        # note: older code used "_inds / beam_size" in case old pytorch did not support the "//" operation
        _inds = (_inds // beam_size + torch.arange(0, real_bsize, beam_size, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)

        # select the corresponding translation history for the top-k candidates and update translation records
        # trans: (bsize * beam_size, nquery) => (bsize * beam_size, nquery + 1)
        trans = torch.cat((trans.index_select(0, _inds), wds), 1)
        done_trans = (done_trans.view(real_bsize).index_select(0, _inds) | wds.eq(2).squeeze(1)).view(bsize, beam_size)

        # check early stop for beam search
        # done_trans: (bsize, beam_size)
        # scores: (bsize, beam_size)
        if length_penalty > 0.0:
            lpv = lpv.index_select(0, _inds)
            _done_trans_u = done_trans.sum(1).eq(beam_size)
        elif return_all:
            _done_trans_u = done_trans.sum(1).eq(beam_size)
        else:
            _done_trans_u = done_trans.select(1, 0)

        # check beam states (done or not)
        _ndone = _done_trans_u.int().sum().item()
        if _ndone == bsize:
            if (not clip_beam) and (length_penalty > 0.0):
                scores = scores / lpv.view(bsize, beam_size)
                scores, _inds = scores.topk(beam_size, dim=-1)
                _inds = (_inds + torch.arange(0, real_bsize, beam_size, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
                trans = trans.view(real_bsize, -1).index_select(0, _inds)
            if return_all:
                for _iu, (_tran, _score) in enumerate(zip(trans.view(bsize, beam_size, -1).unbind(0), scores.view(bsize, beam_size).unbind(0))):
                    _rid = mapper[_iu]
                    rs[_rid] = _tran
                    rscore[_rid] = _score
            else:
                for _iu, _tran in enumerate(trans.view(bsize, beam_size, -1).unbind(0)):
                    rs[mapper[_iu]] = _tran[0]
            break

        # update the corresponding hidden states
        # states[i]: (bsize * beam_size, nquery, isize)
        # _inds: (bsize, beam_size) => (bsize * beam_size)
        states = index_tensors(states, indices=_inds, dim=0)

        if _ndone > 0:
            _dind = _done_trans_u.nonzero().squeeze(1)
            _trans = trans.view(bsize, beam_size, -1)
            _trans_sel = _trans.index_select(0, _dind)

            if (not clip_beam) and (length_penalty > 0.0):
                _scores_sel = scores.index_select(0, _dind) / lpv.view(bsize, beam_size).index_select(0, _dind)
                _sel_bsize = _dind.size(0)
                _sel_real_bsize = _sel_bsize * beam_size
                _scores_sel, _inds = _scores_sel.topk(beam_size, dim=-1)
                _inds = (_inds + torch.arange(0, _sel_real_bsize, beam_size, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(_sel_real_bsize)
                _trans_sel = _trans_sel.view(_sel_real_bsize, -1).index_select(0, _inds).view(_sel_bsize, beam_size, -1)

            if return_all:
                for _iu, _tran, _score in zip(_dind.tolist(), _trans_sel.unbind(0), _scores_sel.unbind(0)):
                    _rid = mapper[_iu]
                    rs[_rid] = _tran
                    rscore[_rid] = _score
            else:
                for _iu, _tran in zip(_dind.tolist(), _trans_sel.unbind(0)):
                    rs[mapper[_iu]] = _tran[0]

            # reduce bsize for not finished decoding
            _ndid = (~_done_trans_u).nonzero().squeeze(1)
            _bsize = _ndid.size(0)
            bsizeb2 = _bsize * beam_size2
            _real_bsize = _bsize * beam_size

            wds = wds.view(bsize, beam_size).index_select(0, _ndid).view(_real_bsize, 1)
            #inpute = inpute.view(bsize, beam_size, seql, isize).index_select(0, _ndid).view(_real_bsize, seql, isize)
            for _m in self.modules():
                if isinstance(_m, (CrossAttn, MultiHeadAttn,)) and _m.real_iK is not None:
                    _m.real_iK, _m.real_iV = tuple(_vu.view(bsize, beam_size, *list(_vu.size()[1:])).index_select(0, _ndid).view(_real_bsize, *list(_vu.size()[1:])) for _vu in (_m.real_iK, _m.real_iV,))
            if _src_pad_mask is not None:
                _src_pad_mask = _src_pad_mask.view(bsize, beam_size, 1, seql).index_select(0, _ndid).view(_real_bsize, 1, seql)
            for k, value in states.items():
                states[k] = [_vu.view(bsize, beam_size, *list(_vu.size()[1:])).index_select(0, _ndid).view(_real_bsize, *list(_vu.size()[1:])) for _vu in value]
            sum_scores = sum_scores.index_select(0, _ndid)
            trans = _trans.index_select(0, _ndid).view(_real_bsize, -1)
            if length_penalty > 0.0:
                lpv = lpv.view(bsize, beam_size).index_select(0, _ndid).view(_real_bsize, 1)
            done_trans = done_trans.index_select(0, _ndid)
            bsize, real_bsize = _bsize, _real_bsize

            # update mapper
            for _ind, _iu in enumerate(_ndid.tolist()):
                mapper[_ind] = mapper[_iu]

    if return_mat:
        rs = torch.stack(pad_tensors(rs), 0)

    if return_all:
        return rs, torch.stack(rscore, 0)
    else:
        return rs
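# Note (added demonstration, stand-alone, with made-up values): the index
# arithmetic above maps a (bsize, beam_size * beam_size) top-k selection onto
# rows of (bsize * beam_size, ...) tensors. Adding
# torch.arange(0, bsize * beam_size ** 2, beam_size ** 2) turns per-sentence
# candidate indices into flat candidate indices (_tinds), while
# _inds // beam_size + torch.arange(0, bsize * beam_size, beam_size) recovers
# the flat row of each candidate's parent beam (used to gather trans / states).
import torch

bsize, beam_size = 2, 3
beam_size2 = beam_size * beam_size
_inds = torch.tensor([[0, 4, 7], [2, 3, 8]])  # per-sentence candidate indices in [0, beam_size2)
# flat candidate indices into a (bsize * beam_size2,) view
_tinds = (_inds + torch.arange(0, bsize * beam_size2, beam_size2).unsqueeze(1)).view(-1)
# flat parent-beam rows into (bsize * beam_size, ...) tensors such as trans
parents = (_inds // beam_size + torch.arange(0, bsize * beam_size, beam_size).unsqueeze(1)).view(-1)
print(_tinds.tolist())   # [0, 4, 7, 11, 12, 17]
print(parents.tolist())  # [0, 1, 2, 3, 4, 5]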
def beam_decode(self, inpute, src_pad_mask=None, beam_size=8, max_len=512, length_penalty=0.0, return_all=False, clip_beam=clip_beam_with_lp, fill_pad=False):

    bsize, seql = inpute.size()[:2]
    beam_size2 = beam_size * beam_size
    bsizeb2 = bsize * beam_size2
    real_bsize = bsize * beam_size

    sos_emb = self.get_sos_emb(inpute)
    isize = sos_emb.size(-1)
    sqrt_isize = sqrt(isize)

    if length_penalty > 0.0:
        # lpv: length penalty vector for each beam (bsize * beam_size, 1)
        lpv = sos_emb.new_ones(real_bsize, 1)
        lpv_base = 6.0 ** length_penalty

    out = sos_emb * sqrt_isize
    if self.pemb is not None:
        out = out + self.pemb.get_pos(0)
    if self.drop is not None:
        out = self.drop(out)

    states = {}
    for _tmp, net in enumerate(self.nets):
        out, _state = net(inpute, (None, None,), src_pad_mask, None, out)
        states[_tmp] = _state

    if self.out_normer is not None:
        out = self.out_normer(out)

    # out: (bsize, 1, nwd)
    out = self.lsm(self.classifier(out))

    # scores: (bsize, 1, beam_size) => (bsize, beam_size)
    # wds: (bsize * beam_size, 1)
    # trans: (bsize * beam_size, 1)
    scores, wds = out.topk(beam_size, dim=-1)
    scores = scores.squeeze(1)
    sum_scores = scores
    wds = wds.view(real_bsize, 1)
    trans = wds

    # done_trans: (bsize, beam_size)
    done_trans = wds.view(bsize, beam_size).eq(2)

    # instead of updating inpute: (bsize, seql, isize) => (bsize * beam_size, seql, isize) with the following line, we only update the cross-attention buffers.
    #inpute = inpute.repeat(1, beam_size, 1).view(real_bsize, seql, isize)
    self.repeat_cross_attn_buffer(beam_size)

    # _src_pad_mask: (bsize, 1, seql) => (bsize * beam_size, 1, seql)
    _src_pad_mask = None if src_pad_mask is None else src_pad_mask.repeat(1, beam_size, 1).view(real_bsize, 1, seql)

    # states[i]: (bsize, 1, isize) => (bsize * beam_size, 1, isize)
    states = expand_bsize_for_beam(states, beam_size=beam_size)

    for step in range(1, max_len):

        out = self.wemb(wds) * sqrt_isize
        if self.pemb is not None:
            out = out + self.pemb.get_pos(step)
        if self.drop is not None:
            out = self.drop(out)

        for _tmp, net in enumerate(self.nets):
            out, _state = net(inpute, states[_tmp], _src_pad_mask, None, out)
            states[_tmp] = _state

        if self.out_normer is not None:
            out = self.out_normer(out)

        # out: (bsize, beam_size, nwd)
        out = self.lsm(self.classifier(out)).view(bsize, beam_size, -1)

        # find the top k ** 2 candidates and calculate route scores for them
        # _scores: (bsize, beam_size, beam_size)
        # done_trans: (bsize, beam_size)
        # scores: (bsize, beam_size)
        # _wds: (bsize, beam_size, beam_size)
        # mask_from_done_trans: (bsize, beam_size) => (bsize, beam_size * beam_size)
        # added_scores: (bsize, 1, beam_size) => (bsize, beam_size, beam_size)
        _scores, _wds = out.topk(beam_size, dim=-1)
        _done_trans_unsqueeze = done_trans.unsqueeze(2)
        _scores = (_scores.masked_fill(_done_trans_unsqueeze.expand(bsize, beam_size, beam_size), 0.0) + sum_scores.unsqueeze(2).repeat(1, 1, beam_size).masked_fill_(select_zero_(_done_trans_unsqueeze.repeat(1, 1, beam_size), -1, 0), -inf_default))

        if length_penalty > 0.0:
            lpv.masked_fill_(~done_trans.view(real_bsize, 1), ((step + 6.0) ** length_penalty) / lpv_base)

        # clip from the k ** 2 candidates and keep the top-k for each path
        # scores: (bsize, beam_size * beam_size) => (bsize, beam_size)
        # _inds: indexes for the top-k candidates (bsize, beam_size)
        if clip_beam and (length_penalty > 0.0):
            scores, _inds = (_scores.view(real_bsize, beam_size) / lpv.expand(real_bsize, beam_size)).view(bsize, beam_size2).topk(beam_size, dim=-1)
            _tinds = (_inds + torch.arange(0, bsizeb2, beam_size2, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
            sum_scores = _scores.view(bsizeb2).index_select(0, _tinds).view(bsize, beam_size)
        else:
            scores, _inds = _scores.view(bsize, beam_size2).topk(beam_size, dim=-1)
            _tinds = (_inds + torch.arange(0, bsizeb2, beam_size2, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
            sum_scores = scores

        # select the top-k candidates with the highest route scores and update the translation record
        # wds: (bsize, beam_size, beam_size) => (bsize * beam_size, 1)
        wds = _wds.view(bsizeb2).index_select(0, _tinds).view(real_bsize, 1)

        # reduce indexes in _inds from the (beam_size ** 2) range to beam_size,
        # so the parent path of each top-k candidate is identified
        # _inds: indexes for the top-k candidates (bsize, beam_size)
        _inds = (_inds // beam_size + torch.arange(0, real_bsize, beam_size, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)

        # select the corresponding translation history for the top-k candidates and update translation records
        # trans: (bsize * beam_size, nquery) => (bsize * beam_size, nquery + 1)
        trans = torch.cat((trans.index_select(0, _inds), wds.masked_fill(done_trans.view(real_bsize, 1), pad_id) if fill_pad else wds), 1)
        done_trans = (done_trans.view(real_bsize).index_select(0, _inds) | wds.eq(2).squeeze(1)).view(bsize, beam_size)

        # check early stop for beam search
        # done_trans: (bsize, beam_size)
        # scores: (bsize, beam_size)
        _done = False
        if length_penalty > 0.0:
            lpv = lpv.index_select(0, _inds)
        elif (not return_all) and all_done(done_trans.select(1, 0), bsize):
            _done = True

        # check beam states (done or not)
        if _done or all_done(done_trans, real_bsize):
            break

        # update the corresponding hidden states
        # states[i]: (bsize * beam_size, nquery, isize)
        # _inds: (bsize, beam_size) => (bsize * beam_size)
        states = index_tensors(states, indices=_inds, dim=0)

    # if the length penalty is only applied in the last step, apply it now
    if (not clip_beam) and (length_penalty > 0.0):
        scores = scores / lpv.view(bsize, beam_size)
        scores, _inds = scores.topk(beam_size, dim=-1)
        _inds = (_inds + torch.arange(0, real_bsize, beam_size, dtype=_inds.dtype, device=_inds.device).unsqueeze(1).expand_as(_inds)).view(real_bsize)
        trans = trans.view(real_bsize, -1).index_select(0, _inds)

    if return_all:
        return trans.view(bsize, beam_size, -1), scores
    else:
        return trans.view(bsize, beam_size, -1).select(1, 0)
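# Note (added demonstration, stand-alone; the select_zero_ behaviour is inferred
# from its use above): masking the score update keeps each finished hypothesis
# alive exactly once. New-token scores of finished beams are zeroed, and all but
# the first of their beam_size expansions get -inf, so a finished hypothesis
# competes in the top-k with its cumulative score but never occupies more than
# one slot. The zero new_scores below are placeholders, not real log-probs.
import torch

inf_default = float("inf")
bsize, beam_size = 1, 3
done = torch.tensor([[False, True, False]])            # beam 1 already emitted <eos> (id 2)
new_scores = torch.zeros(bsize, beam_size, beam_size)  # placeholder per-expansion log-probs
cum = torch.tensor([[-1.0, -2.0, -3.0]])               # running hypothesis scores

mask = done.unsqueeze(2).repeat(1, 1, beam_size)
mask[..., 0] = False  # emulates select_zero_(mask, -1, 0): keep the first expansion slot
scores = new_scores.masked_fill(done.unsqueeze(2).expand_as(new_scores), 0.0) + cum.unsqueeze(2).repeat(1, 1, beam_size).masked_fill_(mask, -inf_default)
print(scores)  # finished beam 1 keeps one candidate at -2.0; its other expansions are -inf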