Example 1: Model_NARM
    def __init__(self,
                 n_uid,
                 n_mid,
                 n_cat,
                 EMBEDDING_DIM,
                 HIDDEN_SIZE,
                 ATTENTION_SIZE,
                 use_negsampling=False):
        super(Model_NARM,
              self).__init__(n_uid, n_mid, n_cat, EMBEDDING_DIM, HIDDEN_SIZE,
                             ATTENTION_SIZE, use_negsampling)
        # RNN layer 1
        with tf.name_scope('rnn_1'):
            rnn_outputs1, final_state1 = dynamic_rnn(
                LSTMCell(HIDDEN_SIZE),
                inputs=self.item_his_eb,
                sequence_length=self.seq_len_ph,
                dtype=tf.float32,
                scope="lstm1")
            tf.summary.histogram('LSTM_outputs1', rnn_outputs1)

        # RNN layer 2
        with tf.name_scope('rnn_2'):
            rnn_outputs2, final_state2 = dynamic_rnn(
                LSTMCell(HIDDEN_SIZE),
                inputs=self.item_his_eb,
                sequence_length=self.seq_len_ph,
                dtype=tf.float32,
                scope="lstm2")
            tf.summary.histogram('LSTM_outputs2', rnn_outputs2)

        # Attention layer
        with tf.name_scope('Attention_layer'):
            att_outputs, alphas = din_fcn_attention(final_state1[1],
                                                    rnn_outputs2,
                                                    ATTENTION_SIZE,
                                                    self.mask,
                                                    softmax_stag=1,
                                                    stag='1_1',
                                                    mode='LIST',
                                                    return_alphas=True)
            tf.summary.histogram('alpha_outputs', alphas)
            att_fea = tf.reduce_sum(att_outputs, 1)

        inp = tf.concat([final_state1[1], att_fea, self.item_eb], 1)
        self.build_fcn_net(inp, use_dice=True)
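The repository's `din_fcn_attention` helper is not shown in these snippets. As orientation, the sketch below illustrates what a DIN-style attention with `mode='LIST'` typically computes: the query (here the first LSTM's final hidden state) is scored against every position of the second LSTM's outputs, padded positions are masked out, and the per-position weighted outputs are returned so the caller can `reduce_sum` them, as Example 1 does. The layer widths (80/40/1) and the exact feature concatenation are assumptions modeled on common DIEN-style implementations, not the verbatim helper.

import tensorflow as tf

def din_attention_sketch(query, facts, mask, stag='sketch'):
    # Illustrative DIN-style attention, mode='LIST' (an assumption about
    # din_fcn_attention's behavior, not the repository's exact code).
    # query: [B, H] final hidden state; facts: [B, T, H] RNN outputs;
    # mask:  [B, T] with 1 at real positions and 0 at padding.
    T = tf.shape(facts)[1]
    queries = tf.tile(tf.expand_dims(query, 1), [1, T, 1])       # [B, T, H]
    din_all = tf.concat(
        [queries, facts, queries - facts, queries * facts], axis=-1)
    d1 = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid,
                         name='f1_' + stag)
    d2 = tf.layers.dense(d1, 40, activation=tf.nn.sigmoid,
                         name='f2_' + stag)
    scores = tf.squeeze(
        tf.layers.dense(d2, 1, activation=None, name='f3_' + stag), -1)
    paddings = tf.ones_like(scores) * (-2 ** 32 + 1)             # -inf proxy
    scores = tf.where(tf.equal(mask, tf.ones_like(mask)), scores, paddings)
    alphas = tf.nn.softmax(scores)                               # [B, T]
    # mode='LIST': per-position weighted facts; the caller sums over time.
    return facts * tf.expand_dims(alphas, -1), alphas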
Example 2: Model_NARM
    def __init__(self, user_number, item_number, cate_number, EMBEDDING_DIM,
                 HIDDEN_SIZE, ATTENTION_SIZE):
        super(Model_NARM,
              self).__init__(user_number, item_number, cate_number,
                             EMBEDDING_DIM, HIDDEN_SIZE, ATTENTION_SIZE)

        # RNN layer(1)
        with tf.name_scope('rnn_1'):
            rnn_outputs1, final_state1 = dynamic_rnn(
                LSTMCell(HIDDEN_SIZE),
                inputs=self.item_history_embedding,
                sequence_length=self.sequence_length,
                dtype=tf.float32,
                scope="lstm_1")
            tf.summary.histogram('LSTM_outputs1', rnn_outputs1)

        # RNN layer(2)
        with tf.name_scope('rnn_2'):
            rnn_outputs2, final_state2 = dynamic_rnn(
                LSTMCell(HIDDEN_SIZE),
                inputs=self.item_history_embedding,
                sequence_length=self.sequence_length,
                dtype=tf.float32,
                scope="lstm_2")
            tf.summary.histogram('LSTM_outputs2', rnn_outputs2)

        # Attention layer
        with tf.name_scope('Attention_layer'):
            att_outputs, alphas = attention_FCN(final_state1[1],
                                                rnn_outputs2,
                                                ATTENTION_SIZE,
                                                self.mask,
                                                softmax_stag=1,
                                                stag='1_1',
                                                mode='LIST',
                                                return_alphas=True)
            tf.summary.histogram('alpha_outputs', alphas)
            att_fea = tf.reduce_sum(att_outputs, 1)

        last_inps = tf.concat(
            [final_state1[1], att_fea, self.target_item_embedding], 1)
        self.fcn_net(last_inps, use_dice=True)
Example 3: Model_LSTMPP
    def __init__(self,
                 n_uid,
                 n_mid,
                 n_cat,
                 EMBEDDING_DIM,
                 HIDDEN_SIZE,
                 ATTENTION_SIZE,
                 use_negsampling=False):
        super(Model_LSTMPP,
              self).__init__(n_uid, n_mid, n_cat, EMBEDDING_DIM, HIDDEN_SIZE,
                             ATTENTION_SIZE, use_negsampling)
        # Attention layer
        with tf.name_scope('Attention_layer_1'):
            att_outputs1, alphas1 = attention_HAN(
                self.item_his_eb,
                attention_size=ATTENTION_SIZE,
                return_alphas=True)
            att_fea1 = tf.reduce_sum(att_outputs1, 1)
            tf.summary.histogram('att_fea1', att_fea1)

        # RNN layer
        with tf.name_scope('rnn_1'):
            rnn_outputs, final_state1 = dynamic_rnn(
                LSTMCell(HIDDEN_SIZE),
                inputs=self.item_his_eb,
                sequence_length=self.seq_len_ph,
                dtype=tf.float32,
                scope="lstm1")
            tf.summary.histogram('LSTM_outputs', rnn_outputs)

        # Learned gate: blend the attention summary with the LSTM final state
        with tf.name_scope('User_alpha'):
            concat_all = tf.concat([
                self.item_eb, att_fea1, final_state1[1],
                tf.expand_dims(self.time_now_his_batch_ph[:, -1], -1)
            ], 1)
            concat_att1 = tf.layers.dense(concat_all,
                                          80,
                                          activation=tf.nn.sigmoid,
                                          name='concat_att1')
            concat_att2 = tf.layers.dense(concat_att1,
                                          40,
                                          activation=tf.nn.sigmoid,
                                          name='concat_att2')
            user_alpha = tf.layers.dense(concat_att2,
                                         1,
                                         activation=tf.nn.sigmoid,
                                         name='concat_att3')
            user_embed = (att_fea1 * user_alpha +
                          final_state1[1] * (1.0 - user_alpha))

        inp = tf.concat([self.item_eb, user_embed], 1)
        self.build_fcn_net(inp, use_dice=True)
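`attention_HAN` is also defined elsewhere in the repository. Its name and call signature suggest the standard additive self-attention from Hierarchical Attention Networks (Yang et al., 2016); a minimal sketch under that assumption (the variable names and projection are mine, not the repository's):

import tensorflow as tf

def attention_han_sketch(inputs, attention_size, return_alphas=False):
    # Illustrative HAN-style additive self-attention over inputs [B, T, H]:
    # score each timestep against a learned context vector u, softmax the
    # scores, and return the per-position weighted inputs (summed by caller).
    hidden_size = inputs.shape[-1].value
    w = tf.get_variable('w_att', [hidden_size, attention_size])
    b = tf.get_variable('b_att', [attention_size])
    u = tf.get_variable('u_att', [attention_size])
    v = tf.tanh(tf.tensordot(inputs, w, axes=1) + b)   # [B, T, A]
    vu = tf.tensordot(v, u, axes=1)                    # [B, T]
    alphas = tf.nn.softmax(vu)
    output = inputs * tf.expand_dims(alphas, -1)       # [B, T, H]
    return (output, alphas) if return_alphas else output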
Example 4: Model_LSTMPP
    def __init__(self, user_number, item_number, cate_number, EMBEDDING_DIM,
                 HIDDEN_SIZE, ATTENTION_SIZE):
        super(Model_LSTMPP,
              self).__init__(user_number, item_number, cate_number,
                             EMBEDDING_DIM, HIDDEN_SIZE, ATTENTION_SIZE)

        # Attention layer
        with tf.name_scope('Attention_layer'):
            att_outputs, alphas = attention_HAN(self.item_history_embedding,
                                                attention_size=ATTENTION_SIZE,
                                                return_alphas=True)
            att_fea = tf.reduce_sum(att_outputs, 1)
            tf.summary.histogram('att_fea', att_fea)

        # RNN layer
        with tf.name_scope('rnn'):
            rnn_outputs, final_state = dynamic_rnn(
                LSTMCell(HIDDEN_SIZE),
                inputs=self.item_history_embedding,
                sequence_length=self.sequence_length,
                dtype=tf.float32,
                scope="lstm")
            tf.summary.histogram('LSTM_outputs', rnn_outputs)

        # Learned gate: blend the attention summary with the LSTM final state
        with tf.name_scope('User_alpha'):
            concat_all = tf.concat([
                self.target_item_embedding, att_fea, final_state[1],
                tf.expand_dims(self.timenow_history[:, -1], -1)
            ], 1)
            concat_att1 = tf.layers.dense(concat_all,
                                          80,
                                          activation=tf.nn.sigmoid,
                                          name='concat_att1')
            concat_att2 = tf.layers.dense(concat_att1,
                                          40,
                                          activation=tf.nn.sigmoid,
                                          name='concat_att2')
            user_alpha = tf.layers.dense(concat_att2,
                                         1,
                                         activation=tf.nn.sigmoid,
                                         name='concat_att3')
            user_embed = (att_fea * user_alpha +
                          final_state[1] * (1.0 - user_alpha))

        last_inps = tf.concat([self.target_item_embedding, user_embed], 1)
        self.fcn_net(last_inps, use_dice=True)
Example 5: Model_LSTM
    def __init__(self, user_number, item_number, cate_number, EMBEDDING_DIM,
                 HIDDEN_SIZE, ATTENTION_SIZE):
        super(Model_LSTM,
              self).__init__(user_number, item_number, cate_number,
                             EMBEDDING_DIM, HIDDEN_SIZE, ATTENTION_SIZE)

        # RNN layer
        with tf.name_scope('rnn'):
            rnn_outputs, final_state = dynamic_rnn(
                LSTMCell(HIDDEN_SIZE),
                inputs=self.item_history_embedding,
                sequence_length=self.sequence_length,
                dtype=tf.float32,
                scope="lstm")
            tf.summary.histogram('LSTM_outputs', rnn_outputs)

        last_inps = tf.concat([self.target_item_embedding, final_state[1]], 1)
        self.fcn_net(last_inps, use_dice=False)
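A detail worth flagging for all of these snippets: `LSTMCell` returns its final state as an `LSTMStateTuple(c, h)`, so `final_state[1]` is the final hidden state `h` (index 0 would be the cell state `c`). A self-contained check with the stock TF1 ops, which the snippets' `dynamic_rnn`/`LSTMCell` are assumed to wrap or mirror:

import tensorflow as tf

cell = tf.nn.rnn_cell.LSTMCell(16)
inputs = tf.zeros([4, 10, 8])       # [batch, time, features]
_, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# LSTMStateTuple is a namedtuple, so indexing and attribute access agree:
# final_state[1] is the hidden state fed into the concat layers above.
assert final_state[1] is final_state.h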
Example 6: Model_LSTM
    def __init__(self,
                 n_uid,
                 n_mid,
                 n_cat,
                 EMBEDDING_DIM,
                 HIDDEN_SIZE,
                 ATTENTION_SIZE,
                 use_negsampling=False):
        super(Model_LSTM,
              self).__init__(n_uid, n_mid, n_cat, EMBEDDING_DIM, HIDDEN_SIZE,
                             ATTENTION_SIZE, use_negsampling)

        # RNN layer
        with tf.name_scope('rnn_1'):
            rnn_outputs, final_state1 = dynamic_rnn(
                LSTMCell(HIDDEN_SIZE),
                inputs=self.item_his_eb,
                sequence_length=self.seq_len_ph,
                dtype=tf.float32,
                scope="lstm1")
            tf.summary.histogram('LSTM_outputs', rnn_outputs)

        inp = tf.concat([self.item_eb, final_state1[1]], 1)
        self.build_fcn_net(inp, use_dice=True)
    def lstm_cell(self, size):
        # Helper: LSTM cell with orthogonal weight initialization.
        return LSTMCell(size, initializer=tf.orthogonal_initializer())
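The trailing `lstm_cell` helper builds an LSTM cell with orthogonal weight initialization, which tends to stabilize recurrent training. A standalone sketch of wiring such a cell into a dynamic RNN over a padded batch, using the stock `tf.nn` ops (an assumption, since the snippets' imports are not shown; the sizes are placeholders):

import tensorflow as tf

# Orthogonally-initialized LSTM over variable-length sequences; padded
# steps beyond each row's true length are skipped via sequence_length.
cell = tf.nn.rnn_cell.LSTMCell(36, initializer=tf.orthogonal_initializer())
inputs = tf.placeholder(tf.float32, [None, None, 18])   # [B, T, EMBEDDING_DIM]
seq_len = tf.placeholder(tf.int32, [None])              # true length per row
outputs, final_state = tf.nn.dynamic_rnn(
    cell, inputs, sequence_length=seq_len, dtype=tf.float32, scope="lstm")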