def _create_params(self):
    """Build all learnable ops: slot embeddings, fc projections and a
    forward/backward GRU pair over the item sequence."""
    ### embed
    # One embedding table per item/user slot plus the 'pos' feature.
    # Vocab size is padded by one extra row — presumably reserving id 0
    # for padding/unknown; TODO confirm against the data pipeline.
    slot_names = self.item_slot_names + self.user_slot_names + ['pos']
    self.dict_data_embed_op = {
        slot: default_embedding(
            [self.npz_config['embedding_size'][slot] + 1, self.embed_size],
            'embed_' + slot)
        for slot in slot_names
    }
    # Per-feature projections into the shared hidden space.
    self.user_feature_fc_op = default_fc(
        self.hidden_size, act='relu', name='user_feature_fc')
    self.item_fc_op = default_fc(
        self.hidden_size, act='relu', name='item_fc')
    # 3x hidden width — presumably the GRU gate layout expected by
    # default_drnn; verify against its implementation.
    self.item_gru_fc_op = default_fc(
        self.hidden_size * 3, act='relu', name='item_gru_fc')
    # Bi-directional encoding: one GRU per direction.
    self.item_gru_forward_op = default_drnn(
        self.hidden_size, name='item_gru_forward')
    self.item_gru_backward_op = default_drnn(
        self.hidden_size, name='item_gru_backward', is_reverse=True)
    # Two-layer click head ending in a 2-way softmax (click / no-click).
    self.out_click_fc1_op = default_fc(
        self.hidden_size, act='relu', name='out_click_fc1')
    self.out_click_fc2_op = default_fc(
        2, act='softmax', name='out_click_fc2')
def __init__(self, nf, name=''):
    """Set up the Q/K/V projection layers for multi-head attention.

    Args:
        nf: projection width shared by the Q, K and V fc layers.
        name: prefix used to build each layer's unique name.
    """
    super(MultiHeadAttention, self).__init__()
    self._nf = nf
    self._safe_eps = 1e-5  # numerical guard, presumably for normalization — confirm at use site
    # Q/K/V projections are identical except for the layer name; the
    # resulting names match '<name>_Q_fc', '<name>_K_fc', '<name>_V_fc'.
    for proj in ('Q', 'K', 'V'):
        fc_op = default_fc(nf,
                           num_flatten_dims=2,
                           act='relu',
                           name='%s_%s_fc' % (name, proj))
        setattr(self, '%s_fc_op' % proj, fc_op)
def _create_params(self):
    """Build all learnable ops: slot embeddings, fc projections, a GRU
    over items, and the attention scoring layer(s)."""
    ### embed
    # One embedding table per item/user slot plus the 'pos' feature.
    # Vocab size is padded by one extra row — presumably reserving id 0
    # for padding/unknown; TODO confirm against the data pipeline.
    slot_names = self.item_slot_names + self.user_slot_names + ['pos']
    self.dict_data_embed_op = {
        slot: default_embedding(
            [self.npz_config['embedding_size'][slot] + 1, self.embed_size],
            'embed_' + slot)
        for slot in slot_names
    }
    # Per-feature projections into the shared hidden space.
    self.user_feature_fc_op = default_fc(
        self.hidden_size, act='relu', name='user_feature_fc')
    self.item_fc_op = default_fc(
        self.hidden_size, act='relu', name='item_fc')
    self.atten_item_fc_op = default_fc(
        self.hidden_size, act='relu', name='atten_item_fc')
    self.candidate_encode_fc_op = default_fc(
        self.hidden_size, act='relu', name='candidate_encode_fc')
    # 3x hidden width — presumably the GRU gate layout expected by
    # default_drnn; verify against its implementation.
    self.item_gru_fc_op = default_fc(
        self.hidden_size * 3, act='relu', name='item_gru_fc')
    self.item_gru_op = default_drnn(self.hidden_size, name='item_gru')
    # No activation: linear projection of the hidden state.
    self.hidden_fc_op = default_fc(self.hidden_size, name='hidden_fc')
    # 'concat_fc' attention scores with an extra 1-unit linear layer;
    # other attention types need no additional parameters here.
    if self._attention_type == 'concat_fc':
        self.atten_fc_op = default_fc(1, act=None, name='atten_fc')
def _create_params(self):
    """Create all params here: slot embeddings, input projections,
    a stack of attention + feed-forward blocks, and the output head."""
    ### embed
    # One embedding table per item/user slot plus the 'pos' feature.
    # Vocab size is padded by one extra row — presumably reserving id 0
    # for padding/unknown; TODO confirm against the data pipeline.
    slot_names = self.item_slot_names + self.user_slot_names + ['pos']
    self.dict_data_embed_op = {
        slot: default_embedding(
            [self.npz_config['embedding_size'][slot] + 1, self.embed_size],
            'embed_' + slot)
        for slot in slot_names
    }
    ### embed fc
    self.user_feature_fc_op = default_fc(
        self.hidden_size, act='relu', name='user_feature_fc')
    self.item_fc_op = default_fc(
        self.hidden_size, act='relu', name='item_fc')
    self.input_embed_fc_op = default_fc(
        self.hidden_size, act='relu', name='input_embed_fc')
    ### blocks
    # Transformer-style stack: each block is attention + norm followed
    # by a two-layer feed-forward + norm. Ops are created interleaved,
    # in the same order as one would apply them.
    # NOTE(review): attention norm is named 'blk%d_norm' while the ffn
    # norm is 'blk%d_ffn_norm' — inconsistent, but renaming would break
    # saved parameters, so it is kept as-is.
    self.atten_ops = []
    self.atten_norm_ops = []
    self.ffn_fc1_ops = []
    self.ffn_fc2_ops = []
    self.ffn_norm_ops = []
    for blk in range(self._num_blocks):
        prefix = 'blk%d' % blk
        # attention sub-layer
        self.atten_ops.append(
            MultiHeadAttention(self.hidden_size, name=prefix + '_atten'))
        self.atten_norm_ops.append(
            default_batch_norm(name=prefix + '_norm'))
        # feed-forward sub-layer (expand to 2x hidden, project back)
        self.ffn_fc1_ops.append(
            default_fc(self.hidden_size * 2,
                       act='relu',
                       name=prefix + '_ffn_fc1'))
        self.ffn_fc2_ops.append(
            default_fc(self.hidden_size,
                       act='relu',
                       name=prefix + '_ffn_fc2'))
        self.ffn_norm_ops.append(
            default_batch_norm(name=prefix + '_ffn_norm'))
    ### output
    # Two-layer head ending in a 2-way softmax.
    self.output_fc1_op = default_fc(
        self.hidden_size, act='relu', name='output_fc1')
    self.output_fc2_op = default_fc(2, act='softmax', name='output_fc2')