def build_model(self):
    self.ctsm_model = ContinuousLSTM()
    with tf.variable_scope('ShortTermIntentEncoder', reuse=tf.AUTO_REUSE):
        time_aware_gru_net_input = tf.concat([
            self.behavior_list_embedding_dense,
        ], axis=2)
        self.short_term_intent_temp = self.ctsm_model.my_lstm_net(
            hidden_units=self.num_units,
            input_data=time_aware_gru_net_input,
            input_length=tf.add(self.seq_length, -1))
        # batch_size, max_len, num_units
        emb = gather_indexes(batch_size=self.now_bacth_data_size,
                             seq_length=self.max_len,
                             width=self.num_units,
                             sequence_tensor=self.short_term_intent_temp,
                             positions=self.mask_index - 1)
        # batch_size, num_units
        self.predict_behavior_emb = layer_norm(emb)  # batch_size, num_units
    self.output()
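# Hedged sketch: `gather_indexes` is not defined in this snippet. The version
# below is an assumed implementation modeled on BERT's helper of the same name,
# adapted to the explicit (batch_size, seq_length, width) keyword signature used
# above. For each sequence in the batch it picks the `width`-dim vector at the
# given position, returning a [batch_size, width] tensor when one position is
# supplied per sequence. The real helper in this repo may differ.
def gather_indexes(batch_size, seq_length, width, sequence_tensor, positions):
    # Offset of each sequence inside the flattened [batch * seq_length, width] view.
    flat_offsets = tf.reshape(
        tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
    flat_positions = tf.reshape(positions + flat_offsets, [-1])
    flat_sequence_tensor = tf.reshape(sequence_tensor,
                                      [batch_size * seq_length, width])
    # [batch_size * num_positions, width]
    return tf.gather(flat_sequence_tensor, flat_positions)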
def build_model(self):
    self.ctsm_model = ContinuousLSTM()
    last_time = tf.squeeze(gather_indexes(
        batch_size=self.now_bacth_data_size,
        seq_length=self.max_len,
        width=1,
        sequence_tensor=self.time_list,
        positions=self.mask_index - 1), axis=1)
    h_i, h_i_bar, delta_i = self.get_state()
    predict_target_lambda_emb = self.cal_ht(h_i, h_i_bar, delta_i,
                                            self.target[2] - last_time)
    self.predict_behavior_emb = predict_target_lambda_emb
    self.output()
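# Hedged sketch: `cal_ht` is defined elsewhere in the repo and is not shown
# here. Given its (h_i, h_i_bar, delta_i, duration) arguments and the
# continuous-time LSTM of the Neural Hawkes Process, a plausible form is an
# exponential decay from the post-event state h_i toward the steady state
# h_i_bar over the elapsed time; the actual implementation may differ.
def cal_ht(self, h_i, h_i_bar, delta_i, duration):
    # duration: [batch_size] elapsed time since the last observed event.
    duration = tf.expand_dims(tf.cast(duration, tf.float32), axis=1)  # [batch_size, 1]
    # h(t) = h_bar + (h - h_bar) * exp(-delta * (t - t_i))
    return h_i_bar + (h_i - h_i_bar) * tf.exp(-delta_i * duration)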
def build_model(self):
    self.ctsm_model = ContinuousLSTM()
    self.transformer_model = transformer_encoder()
    last_time = tf.squeeze(gather_indexes(batch_size=self.now_batch_size,
                                          seq_length=self.max_seq_len,
                                          width=1,
                                          sequence_tensor=self.time_lst,
                                          positions=self.mask_index - 1),
                           axis=1)
    o_i, c_i, c_i_bar, delta_i, h_i_minus = self.get_state(
        self.target_time_last_lst)
    predict_target_lambda_emb = self.cal_ht(o_i, c_i, c_i_bar, delta_i,
                                            self.target_time - last_time)

    # sims_time_lst: batch_size, sims_len
    predict_sims_emb = tf.zeros([self.now_batch_size, 1])
    self.test = tf.split(self.sims_time_lst, self.sims_len, 1)
    sims_time = tf.squeeze(tf.split(self.sims_time_lst, self.sims_len, 1), 2)
    for i in range(self.sims_len):
        # the i-th sampled time: batch_size, num_units
        cur_sims_emb = self.cal_ht(o_i, c_i, c_i_bar, delta_i,
                                   sims_time[i] - last_time)
        predict_sims_emb = tf.concat([predict_sims_emb, cur_sims_emb], axis=1)
    predict_sims_emb = predict_sims_emb[:, 1:]  # batch_size, sims_len * num_units
    predict_sims_emb = tf.reshape(predict_sims_emb,
                                  [-1, self.sims_len, self.num_units])  # batch_size, sims_len, num_units

    self.predict_target_emb = predict_target_lambda_emb
    self.predict_sims_emb = predict_sims_emb

    with tf.variable_scope('prepare_emb'):
        emb_for_type = self.predict_target_emb
        emb_for_time = h_i_minus

    with tf.variable_scope('intensity_calculation', reuse=tf.AUTO_REUSE):
        intensity_model = nhp_intensity_calculation()
        self.target_lambda = intensity_model.cal_target_intensity(
            hidden_emb=self.predict_target_emb,
            type_num=self.type_num)
        self.sims_lambda = intensity_model.cal_sims_intensity(
            hidden_emb=self.predict_sims_emb,
            sims_len=self.sims_len,
            type_num=self.type_num)

    with tf.variable_scope('type_time_calculation', reuse=tf.AUTO_REUSE):
        time_predictor = thp_time_predictor()
        self.predict_time = time_predictor.predict_time(
            emb=emb_for_time,
            num_units=self.num_units)
        type_predictor = thp_type_predictor()
        self.predict_type_prob = type_predictor.predict_type(
            emb=emb_for_type,
            num_units=self.num_units,
            type_num=self.type_num)
        # self.predict_type_prob = tf.matmul(self.predict_type_prob,
        #                                    self.embedding.type_emb_lookup_table[:-3, :],
        #                                    transpose_b=True)
    self.output()
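# Hedged sketch: `nhp_intensity_calculation` is not shown in this snippet. In
# the Neural Hawkes Process the type-k intensity is a softplus of a linear
# projection of the hidden state, so the class used above presumably does
# something along these lines. Layer names and the shared projection below are
# assumptions, not the repo's actual code.
class nhp_intensity_calculation(object):
    def cal_target_intensity(self, hidden_emb, type_num):
        # hidden_emb: [batch_size, num_units] -> lambda: [batch_size, type_num]
        raw = tf.layers.dense(hidden_emb, type_num, use_bias=False,
                              name='intensity_projection', reuse=tf.AUTO_REUSE)
        return tf.nn.softplus(raw)

    def cal_sims_intensity(self, hidden_emb, sims_len, type_num):
        # hidden_emb: [batch_size, sims_len, num_units]
        # -> intensity at each sampled time: [batch_size, sims_len, type_num]
        raw = tf.layers.dense(hidden_emb, type_num, use_bias=False,
                              name='intensity_projection', reuse=tf.AUTO_REUSE)
        return tf.nn.softplus(raw)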