def initialize_state(self,
                     hidden,
                     feature=None,
                     attn_memory=None,
                     attn_mask=None,
                     memory_lengths=None,
                     knowledge=None):
    if self.feature_size is not None:
        assert feature is not None
    if self.attn_mode is not None:
        assert attn_memory is not None

    if memory_lengths is not None and attn_mask is None:
        max_len = attn_memory.size(1)
        attn_mask = sequence_mask(memory_lengths, max_len).eq(0)

    init_state = DecoderState(
        hidden=hidden,
        feature=feature,
        attn_memory=attn_memory,
        attn_mask=attn_mask,
        knowledge=knowledge,
    )
    return init_state
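# NOTE (sketch, not part of the original source): every variant in this file calls a
# `sequence_mask` helper that is not shown here. A minimal implementation consistent
# with how it is used -- `sequence_mask(lengths, max_len)` is True at valid positions,
# so `.eq(0)` marks padding -- might look like the following; the project's own helper
# may differ in detail.
import torch


def sequence_mask(lengths, max_len=None):
    """Return a bool mask of shape (*lengths.shape, max_len) that is True
    for positions t < lengths[...] and False for padded positions."""
    if max_len is None:
        max_len = int(lengths.max())
    positions = torch.arange(max_len, device=lengths.device)
    # Broadcasting compares each position index against every length, which also
    # covers multi-dimensional `lengths` (e.g. batch_size x sent_num).
    return positions < lengths.unsqueeze(-1)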
def initialize_state(
        self,
        hidden,
        attn_memory=None,
        attn_mask=None,
        memory_lengths=None,
        guide_score=None,
        topic_feature=None,
):
    """
    initialize_state
    """
    if self.attn_mode is not None:
        assert attn_memory is not None

    if memory_lengths is not None and attn_mask is None:
        max_len = attn_memory.size(1)
        attn_mask = sequence_mask(memory_lengths, max_len).eq(0)

    init_state = DecoderState(
        hidden=hidden,
        attn_memory=attn_memory,
        attn_mask=attn_mask,
        topic_feature=topic_feature.unsqueeze(1),
        # bridge_memory=bridge_memory,
        guide_score=guide_score,
        state_vector=None,
    )
    return init_state
def initialize_state(self,
                     hidden,
                     fact,
                     hist,
                     attn_fact=None,
                     attn_hist=None,
                     hist_mask=None,
                     fact_mask=None,
                     fact_lengths=None,
                     hist_lengths=None):
    """
    initialize_state
    """
    if self.attn_mode is not None:
        assert attn_fact is not None
        assert attn_hist is not None

    if hist_lengths is not None and hist_mask is None:
        max_len = attn_hist.size(1)
        hist_mask = sequence_mask(hist_lengths, max_len).eq(0)

    if fact_lengths is not None and fact_mask is None:
        sent_len = torch.max(fact_lengths)
        fact_mask = sequence_mask(fact_lengths, sent_len).eq(0).view(
            fact_lengths.size(0), -1)

    init_state = DecoderState(hidden=hidden,
                              fact=fact,
                              hist=hist,
                              attn_hist=attn_hist,
                              attn_fact=attn_fact,
                              hist_mask=hist_mask,
                              fact_mask=fact_mask)
    return init_state
def initialize_state(
        self,
        hidden,
        feature=None,
        attn_memory=None,
        attn_mask=None,
        memory_lengths=None,
        cue_attn_mask=None,
        cue_lengths=None,
        cue_enc_outputs=None,
        task_id=1,
):
    """
    initialize_state
    """
    if self.feature_size is not None:
        assert feature is not None
    if self.attn_mode is not None:
        assert attn_memory is not None

    if memory_lengths is not None and attn_mask is None:
        if task_id == 1:
            # Stage 2: attn_memory is (batch_size, src_len, 2 * rnn_hidden_size)
            max_len = attn_memory.size(1)
        else:
            # Stage 1: attn_memory is (batch_size, sent_num, num_enc_inputs, 2 * rnn_hidden_size)
            max_len = attn_memory.size(2)
        # Stage 1: attn_mask is (batch_size, sent_num, num_enc_inputs);
        # stage 2: attn_mask is (batch_size, num_enc_inputs)
        attn_mask = sequence_mask(memory_lengths, max_len).eq(0)

    if cue_lengths is not None and cue_attn_mask is None:
        # cue_enc_outputs is (batch_size, cue_len, 2 * rnn_hidden_size)
        cue_max_len = cue_enc_outputs.size(1)
        # cue_attn_mask is (batch_size, max_len - 2)
        cue_attn_mask = sequence_mask(cue_lengths, cue_max_len).eq(0)

    init_state = DecoderState(hidden=hidden,
                              feature=feature,
                              attn_memory=attn_memory,
                              attn_mask=attn_mask,
                              cue_attn_mask=cue_attn_mask,
                              cue_enc_outputs=cue_enc_outputs,
                              cue_lengths=cue_lengths,
                              memory_lengths=memory_lengths,
                              task_id=task_id)
    return init_state
def initialize_state(self,
                     hidden,
                     feature=None,
                     src_enc_outputs=None,
                     src_inputs=None,
                     src_lengths=None,
                     src_mask=None,
                     cue_enc_outputs=None,
                     cue_inputs=None,
                     cue_lengths=None,
                     cue_mask=None,
                     selected_cue_memory=None,
                     selected_cue_length=None,
                     selected_cue_mask=None,
                     knowledge=None):
    """
    initialize_state
    """
    if self.feature_size is not None:
        assert feature is not None
    if self.attn_mode is not None:
        assert src_enc_outputs is not None

    if src_lengths is not None and src_mask is None:
        max_len = src_enc_outputs.size(1)
        src_mask = sequence_mask(src_lengths, max_len).eq(0)

    if selected_cue_length is not None and selected_cue_mask is None:
        max_len = selected_cue_memory.size(1)
        selected_cue_mask = sequence_mask(selected_cue_length, max_len).eq(0)

    if cue_lengths is not None and cue_mask is None:
        max_len = cue_enc_outputs.size(2)
        cue_mask = sequence_mask(cue_lengths, max_len).eq(0)

    init_state = DecoderState(hidden=hidden,
                              feature=feature,
                              src_enc_outputs=src_enc_outputs,
                              src_inputs=src_inputs,
                              src_mask=src_mask,
                              cue_enc_outputs=cue_enc_outputs,
                              cue_inputs=cue_inputs,
                              cue_mask=cue_mask,
                              selected_cue_memory=selected_cue_memory,
                              selected_cue_mask=selected_cue_mask,
                              knowledge=knowledge)
    return init_state
def initialize_state(self,
                     hidden,
                     attn_memory=None,
                     input_feed=None,
                     mask=None):
    """
    initialize_state
    """
    if self.attn_mode is not None:
        assert attn_memory is not None

    init_state = DecoderState(hidden=hidden,
                              attn_memory=attn_memory,
                              input_feed=input_feed,
                              mask=mask)
    return init_state
def initialize_state(self,
                     hidden,
                     cue_inputs=None,
                     attn_memory=None,
                     attn_mask=None,
                     memory_lengths=None):
    """
    initialize_state
    """
    if self.attn_mode is not None:
        assert attn_memory is not None

    if memory_lengths is not None and attn_mask is None:
        max_len = attn_memory.size(1)
        attn_mask = sequence_mask(memory_lengths, max_len).eq(0)

    init_state = DecoderState(
        hidden=hidden,
        cue_inputs=cue_inputs,
        attn_memory=attn_memory,
        attn_mask=attn_mask,
    )
    return init_state
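# NOTE (sketch, not part of the original source): a typical call to the simplest variant,
# assuming an encoder pass has already produced `enc_outputs`, `enc_hidden`, and
# `lengths`; `decoder` is a hypothetical instance of the class these methods belong to,
# and `DecoderState` is the project's own container, constructed only indirectly here.
#   enc_outputs: (batch_size, src_len, hidden_size) attention memory
#   enc_hidden:  final encoder hidden state used to seed the decoder
#   lengths:     (batch_size,) true, unpadded lengths of the source sequences
dec_state = decoder.initialize_state(
    hidden=enc_hidden,
    attn_memory=enc_outputs,
    memory_lengths=lengths,  # attn_mask is derived from these lengths internally
)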