Example #1
    def build_model(self):
        num_units = self.FLAGS.num_units

        gru_net_ins = GRU()

        self.sequence_embedding, self.positive_embedding, \
        self.behavior_embedding_result_dense, self.positive_embedding_result_dense, \
        self.mask_index, self.label_ids, \
        self.seq_length, user_embedding, self.time = self.embedding.get_embedding(num_units)

        with tf.variable_scope("EnhanceUserPreferenceIntentEncoder"):

            user_preference_temp = gru_net_ins.gru_net_initial(hidden_units=num_units,
                                                               input_length=self.mask_index,
                                                               input_data=self.behavior_embedding_result_dense,
                                                               initial_state=user_embedding)

            self.user_preference = gather_indexes(batch_size=self.now_bacth_data_size,
                                                  seq_length=self.FLAGS.max_len,
                                                  width=self.FLAGS.num_units,
                                                  sequence_tensor=user_preference_temp,
                                                  positions=tf.add(self.mask_index, -1))

        with tf.variable_scope("OutputLayer"):

            self.predict_behavior_emb = layer_norm(self.user_preference)

            # Disabled: this variant of get_embedding does not return negative
            # samples, so the pairwise ranking AUC below cannot be computed here.
            # self.mf_auc = tf.reduce_mean(tf.to_float((tf.reduce_sum(tf.multiply(tf.expand_dims(self.predict_behavior_emb, 1),
            #                                                                     tf.expand_dims(self.positive_embedding_result_dense, 1) - self.negative_embedding_result_dense), 2)) > 0))

            l2_norm = tf.add_n([
                tf.nn.l2_loss(self.sequence_embedding),
                tf.nn.l2_loss(self.positive_embedding)
            ])
            regulation_rate = self.FLAGS.regulation_rate

            # Score every item by dot product with the predicted behavior embedding,
            # then apply full-softmax cross-entropy over the item vocabulary.
            item_lookup_table_T = tf.transpose(self.embedding.item_emb_lookup_table)
            logits = tf.matmul(self.predict_behavior_emb, item_lookup_table_T)
            log_probs = tf.nn.log_softmax(logits)
            label_ids = tf.reshape(self.label_ids, [-1])
            # one-hot depth must equal the number of rows in item_emb_lookup_table
            one_hot_labels = tf.one_hot(label_ids, depth=self.embedding.item_count + 3, dtype=tf.float32)
            self.loss_origin = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
            lstur_loss = regulation_rate * l2_norm + tf.reduce_mean(self.loss_origin)

        with tf.name_scope("LearningtoRankLoss"):
            self.loss = lstur_loss
            tf.summary.scalar("l2_norm", l2_norm)
            tf.summary.scalar("Training Loss", self.loss)
            tf.summary.scalar("Learning_rate", self.learning_rate)

        trainable_params = tf.trainable_variables()
        gradients = tf.gradients(self.loss, trainable_params)

        clip_gradients, _ = tf.clip_by_global_norm(gradients, self.FLAGS.max_gradient_norm)

        self.train_op = self.opt.apply_gradients(zip(clip_gradients, trainable_params))

        self.summery()
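
All three examples rely on a gather_indexes helper that pulls, for each
sequence in the batch, the hidden state at one target position (here the
last valid step). The helper itself is not shown on this page; the sketch
below matches the call signature used above and assumes BERT-style
flatten-and-gather semantics, so the actual project code may differ.

import tensorflow as tf

def gather_indexes(batch_size, seq_length, width, sequence_tensor, positions):
    """Gather the vectors at `positions` from a [batch_size, seq_length, width]
    tensor, one position per example; returns [batch_size, width]."""
    # Normalize positions to [batch_size, 1] so both [batch] and [batch, 1]
    # inputs (e.g. mask_index) are handled the same way.
    positions = tf.reshape(tf.cast(positions, tf.int32), [-1, 1])
    # Offset each example's position by where its row starts once the
    # batch and time axes are flattened into one.
    flat_offsets = tf.reshape(
        tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
    flat_positions = tf.reshape(positions + flat_offsets, [-1])
    flat_sequence = tf.reshape(sequence_tensor, [batch_size * seq_length, width])
    return tf.gather(flat_sequence, flat_positions)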
Example #2
class LSTUR(Hybird_Baseline_Models):
    def build_model(self):
        self.gru_net_ins = GRU()
        with tf.variable_scope('ShortTermIntentEncoder'):
            self.short_term_intent_temp = self.gru_net_ins.gru_net_initial(hidden_units=self.num_units,
                                                                           initial_state=self.user_embedding,
                                                                           input_data=self.behavior_list_embedding_dense,
                                                                           input_length=tf.add(self.seq_length, -1))
            self.short_term_intent = gather_indexes(batch_size=self.now_bacth_data_size,
                                                    seq_length=self.max_len,
                                                    width=self.num_units,
                                                    sequence_tensor=self.short_term_intent_temp,
                                                    positions=self.mask_index - 1)
            self.predict_behavior_emb = layer_norm(self.short_term_intent)
        self.output()
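
Each example also normalizes the final user representation with a
layer_norm helper before scoring. A minimal sketch, assuming the common
BERT-style TF 1.x wrapper; the project's own helper may differ:

def layer_norm(input_tensor, name=None):
    """Layer-normalize the last dimension of `input_tensor`."""
    return tf.contrib.layers.layer_norm(
        inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)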
Example #3
    def build_model(self):
        num_units = self.FLAGS.num_units
        num_heads = self.FLAGS.num_heads
        num_blocks = self.FLAGS.num_blocks
        dropout_rate = self.FLAGS.dropout

        attention_net = Attention()
        gru_net_ins = GRU()

        self.sequence_embedding, self.positive_embedding, self.negative_embedding, \
        self.behavior_embedding_result_dense, self.positive_embedding_result_dense, \
        self.negative_embedding_result_dense, self.mask_index, self.label_ids, \
        self.seq_length, user_embedding, time = self.embedding.get_embedding(num_units)

        with tf.variable_scope("LongTermIntentEncoder"):
            long_term_intent_temp = attention_net.self_attention(
                enc=self.behavior_embedding_result_dense,
                num_units=num_units,  # must equal FLAGS.num_units so the gather_indexes width below matches
                num_heads=num_heads,
                num_blocks=num_blocks,
                dropout_rate=dropout_rate,
                is_training=True,
                reuse=False,
                key_length=self.seq_length,
                query_length=self.seq_length)
            self.long_term_intent = gather_indexes(
                batch_size=self.now_bacth_data_size,
                seq_length=self.FLAGS.max_len,
                width=self.FLAGS.num_units,
                sequence_tensor=long_term_intent_temp,
                positions=self.mask_index)

            # Average-pool the attention outputs over time to get a global long-term preference.
            self.long_term_preference = tf.reduce_mean(long_term_intent_temp,
                                                       axis=1)

        with tf.variable_scope('ShortTermIntentEncoder'):
            short_term_intent_temp = gru_net_ins.gru_net(
                hidden_units=num_units,
                input_data=self.behavior_embedding_result_dense,
                input_length=self.mask_index)
            self.short_term_intent = gather_indexes(
                batch_size=self.now_bacth_data_size,
                seq_length=self.FLAGS.max_len,
                width=self.FLAGS.num_units,
                sequence_tensor=short_term_intent_temp,
                positions=tf.add(self.mask_index, -1))

        with tf.variable_scope("EnhancePreferenceIntentEncoder"):
            user_enhance_preference_temp = gru_net_ins.gru_net_initial(
                hidden_units=num_units,
                input_length=self.mask_index,
                input_data=long_term_intent_temp,
                initial_state=self.long_term_preference)
            self.user_enhance_preference = gather_indexes(
                batch_size=self.now_bacth_data_size,
                seq_length=self.FLAGS.max_len,
                width=self.FLAGS.num_units,
                sequence_tensor=user_enhance_preference_temp,
                positions=tf.add(self.mask_index, -1))
        with tf.variable_scope("EnhanceUserPreferenceIntentEncoder"):
            user_enhance_preference_temp_user = gru_net_ins.gru_net_initial(
                hidden_units=num_units,
                input_length=self.mask_index,
                input_data=long_term_intent_temp,
                initial_state=user_embedding)
            self.user_enhance_preference_user = gather_indexes(
                batch_size=self.now_bacth_data_size,
                seq_length=self.FLAGS.max_len,
                width=self.FLAGS.num_units,
                sequence_tensor=user_enhance_preference_temp_user,
                positions=tf.add(self.mask_index, -1))

        with tf.variable_scope("PreferenceEncoder"):
            user_preference_temp = gru_net_ins.gru_net(
                hidden_units=num_units,
                input_data=long_term_intent_temp,
                input_length=self.mask_index)
            self.user_preference = gather_indexes(
                batch_size=self.now_bacth_data_size,
                seq_length=self.FLAGS.max_len,
                width=self.FLAGS.num_units,
                sequence_tensor=user_preference_temp,
                positions=tf.add(self.mask_index, -1))

        with tf.variable_scope("OutputLayer"):
            # The user-initialized enhanced preference is the final representation.
            self.predict_behavior_emb = layer_norm(self.user_enhance_preference_user)

            # Pairwise AUC: fraction of positive/negative pairs where the
            # positive item scores higher against the predicted embedding.
            self.mf_auc = tf.reduce_mean(
                tf.to_float((tf.reduce_sum(
                    tf.multiply(
                        tf.expand_dims(self.predict_behavior_emb, 1),
                        tf.expand_dims(self.positive_embedding_result_dense, 1)
                        - self.negative_embedding_result_dense), 2)) > 0))

            l2_norm = tf.add_n([
                tf.nn.l2_loss(self.sequence_embedding),
                tf.nn.l2_loss(self.positive_embedding),
                tf.nn.l2_loss(self.negative_embedding)
            ])
            regulation_rate = self.FLAGS.regulation_rate

            item_lookup_table_T = tf.transpose(
                self.embedding.item_emb_lookup_table)
            logits = tf.matmul(self.predict_behavior_emb, item_lookup_table_T)
            log_probs = tf.nn.log_softmax(logits)
            label_ids = tf.reshape(self.label_ids, [-1])
            # one-hot depth must equal the number of rows in item_emb_lookup_table
            # (item_count plus reserved ids, as in Example #1), not a hard-coded constant
            one_hot_labels = tf.one_hot(label_ids,
                                        depth=self.embedding.item_count + 3,
                                        dtype=tf.float32)
            self.loss_origin = -tf.reduce_sum(log_probs * one_hot_labels,
                                              axis=[-1])
            lstur_loss = regulation_rate * l2_norm + tf.reduce_mean(
                self.loss_origin)

        with tf.name_scope("LearningtoRankLoss"):
            self.loss = lstur_loss
            tf.summary.scalar("l2_norm", l2_norm)
            tf.summary.scalar("Training Loss", self.loss)
            tf.summary.scalar("Learning_rate", self.learning_rate)

        trainable_params = tf.trainable_variables()
        gradients = tf.gradients(self.loss, trainable_params)

        clip_gradients, _ = tf.clip_by_global_norm(
            gradients, self.FLAGS.max_gradient_norm)

        self.train_op = self.opt.apply_gradients(
            zip(clip_gradients, trainable_params))

        self.summery()
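
The GRU helper class is likewise external to these snippets. Below is a
plausible minimal sketch of gru_net and gru_net_initial under TF 1.x,
assuming they wrap tf.nn.dynamic_rnn with a GRUCell; the scope names and
the rank-1 reshape of input_length are assumptions, not the project's
actual code.

class GRU:
    def gru_net_initial(self, hidden_units, input_data, input_length,
                        initial_state, scope='gru_initial'):
        """Run a GRU over [batch, seq_len, dim] inputs, seeding the cell
        state with `initial_state` (e.g. a user embedding)."""
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            cell = tf.nn.rnn_cell.GRUCell(hidden_units)
            outputs, _ = tf.nn.dynamic_rnn(
                cell,
                inputs=input_data,
                # sequence_length must be rank-1; mask_index may arrive as [batch, 1]
                sequence_length=tf.reshape(tf.cast(input_length, tf.int32), [-1]),
                initial_state=initial_state,
                dtype=tf.float32)
        return outputs  # [batch, seq_len, hidden_units]

    def gru_net(self, hidden_units, input_data, input_length):
        """Same as gru_net_initial but starting from a zero state."""
        return self.gru_net_initial(hidden_units, input_data, input_length,
                                    initial_state=None, scope='gru')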