def RNN(x, weights, biases):
    with variable_scope.variable_scope(
            "other", initializer=init_ops.constant_initializer(0.1)) as vs:
        cell = rnn.MultiRNNCell([
            rnn_cell.LayerNormBasicLSTMCell(n_hidden, layer_norm=False),
            rnn_cell.LayerNormBasicLSTMCell(n_hidden, layer_norm=False)
        ])
        outputs, states = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
        # Project the last batch row's outputs and keep the final time step.
        last_out = outputs[-1]
        logits = tf.expand_dims(
            tf.matmul(last_out, weights['out'])[-1], 0) + biases['out']
        return logits, last_out, states, weights['out'], biases['out']
Example 2
def RNN(x, weights, biases):
    with variable_scope.variable_scope(
            "other", initializer=init_ops.constant_initializer(0.1)) as vs:
        fw_cell = rnn_cell.LayerNormBasicLSTMCell(n_hidden, layer_norm=False)
        bw_cell = rnn_cell.LayerNormBasicLSTMCell(n_hidden, layer_norm=False)
        bi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(
            fw_cell, bw_cell, x, dtype=tf.float32, swap_memory=True)

        # Concatenate the forward/backward outputs, take the final step of
        # the last batch row, and project it once.
        last_step = tf.expand_dims(tf.concat(bi_outputs, -1)[-1][-1], 0)
        logits = tf.matmul(last_step, weights['out']) + biases['out']
        return bi_outputs, bi_state, last_step, logits
Example 3
def get_rnn_cell(rnn_cell_size, dropout_prob, n_layers):
    rnn_c = None
    if n_layers == 1:
        with tf.variable_scope('cells_0'):
            rnn_c = rnn_cell.LayerNormBasicLSTMCell(rnn_cell_size,
                                                    layer_norm=False)
    else:
        cell_list = []
        for i in range(n_layers):
            with tf.variable_scope('cells_{}'.format(i)):
                cell_list.append(
                    rnn_cell.LayerNormBasicLSTMCell(rnn_cell_size,
                                                    layer_norm=False))
        rnn_c = rnn.MultiRNNCell(cell_list)
    return rnn_c
Example 4
  def testBasicLSTMCellWithDropout(self):

    def _is_close(x, y, digits=4):
      # Compare on absolute difference so the ordering of x and y
      # does not matter.
      return abs(x - y) < 10**(-digits)

    def _is_close_in(x, items, digits=4):
      for i in items:
        if _is_close(x, i, digits):
          return True
      return False

    keep_prob = 0.5
    c_high = 2.9998924946
    c_low = 0.999983298578
    h_low = 0.761552567265
    h_high = 0.995008519604
    num_units = 5
    allowed_low = [2, 3]

    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "other", initializer=init_ops.constant_initializer(1)):
        x = array_ops.zeros([1, 5])
        c = array_ops.zeros([1, 5])
        h = array_ops.zeros([1, 5])
        state = core_rnn_cell_impl.LSTMStateTuple(c, h)
        cell = rnn_cell.LayerNormBasicLSTMCell(
            num_units, layer_norm=False, dropout_keep_prob=keep_prob)

        g, s = cell(x, state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([g, s], {
            x.name: np.ones([1, 5]),
            c.name: np.ones([1, 5]),
            h.name: np.ones([1, 5]),
        })

        # The returned tensors have shape [1, n]; take the first
        # (and only) row.
        actual_h = res[0][0]
        actual_state_c = res[1].c[0]
        actual_state_h = res[1].h[0]

        # For each item in `c` (the cell inner state) check that
        # it is equal to one of the allowed values `c_high` (not
        # dropped out) or `c_low` (dropped out) and verify that the
        # corresponding item in `h` (the cell activation) is coherent.
        # Count the dropped activations and check that their number is
        # coherent with the dropout probability.
        dropped_count = 0
        self.assertTrue((actual_h == actual_state_h).all())
        for citem, hitem in zip(actual_state_c, actual_state_h):
          self.assertTrue(_is_close_in(citem, [c_low, c_high]))
          if _is_close(citem, c_low):
            self.assertTrue(_is_close(hitem, h_low))
            dropped_count += 1
          elif _is_close(citem, c_high):
            self.assertTrue(_is_close(hitem, h_high))
        self.assertIn(dropped_count, allowed_low)
Example 5
def encoding_layer(rnn_cell_size, sequence_len, n_layers, rnn_inputs,
                   dropout_prob):
    # Stack n_layers bidirectional layers, feeding each layer's output
    # into the next; return after the whole stack is built.
    layer_input = rnn_inputs
    for l in range(n_layers):
        with tf.variable_scope('encodings_l_{}'.format(l)):
            with variable_scope.variable_scope(
                    "other",
                    initializer=init_ops.constant_initializer(0.1)) as vs:

                rnn_fw = rnn_cell.LayerNormBasicLSTMCell(rnn_cell_size,
                                                         layer_norm=False)
                rnn_bw = rnn_cell.LayerNormBasicLSTMCell(rnn_cell_size,
                                                         layer_norm=False)
                encoding_output, encoding_state = tf.nn.bidirectional_dynamic_rnn(
                    rnn_fw, rnn_bw, layer_input, sequence_len, dtype=tf.float32)
                layer_input = tf.concat(encoding_output, 2)
    return layer_input, encoding_state, rnn_inputs
Example 6
  def testBasicLSTMCellWithStateTuple(self):
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([1, 2])
        c0 = array_ops.zeros([1, 2])
        h0 = array_ops.zeros([1, 2])
        state0 = core_rnn_cell_impl.LSTMStateTuple(c0, h0)
        c1 = array_ops.zeros([1, 2])
        h1 = array_ops.zeros([1, 2])
        state1 = core_rnn_cell_impl.LSTMStateTuple(c1, h1)
        cell = rnn_cell.LayerNormBasicLSTMCell(2)
        cell = core_rnn_cell_impl.MultiRNNCell([cell] * 2)
        h, (s0, s1) = cell(x, (state0, state1))
        sess.run([variables.global_variables_initializer()])
        res = sess.run([h, s0, s1], {
            x.name: np.array([[1., 1.]]),
            c0.name: 0.1 * np.asarray([[0, 1]]),
            h0.name: 0.1 * np.asarray([[2, 3]]),
            c1.name: 0.1 * np.asarray([[4, 5]]),
            h1.name: 0.1 * np.asarray([[6, 7]]),
        })

        expected_h = np.array([[-0.38079708, 0.38079708]])
        expected_h0 = np.array([[-0.38079708, 0.38079708]])
        expected_c0 = np.array([[-1.0, 1.0]])
        expected_h1 = np.array([[-0.38079708, 0.38079708]])
        expected_c1 = np.array([[-1.0, 1.0]])

        self.assertEqual(len(res), 3)
        self.assertAllClose(res[0], expected_h, 1e-5)
        self.assertAllClose(res[1].c, expected_c0, 1e-5)
        self.assertAllClose(res[1].h, expected_h0, 1e-5)
        self.assertAllClose(res[2].c, expected_c1, 1e-5)
        self.assertAllClose(res[2].h, expected_h1, 1e-5)
Example 7
def single_cell():
  return rnn_cell.LayerNormBasicLSTMCell(2)
Example 8
  def testBasicLSTMCell(self):
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([1, 2])
        c0 = array_ops.zeros([1, 2])
        h0 = array_ops.zeros([1, 2])
        state0 = core_rnn_cell_impl.LSTMStateTuple(c0, h0)
        c1 = array_ops.zeros([1, 2])
        h1 = array_ops.zeros([1, 2])
        state1 = core_rnn_cell_impl.LSTMStateTuple(c1, h1)
        state = (state0, state1)
        single_cell = lambda: rnn_cell.LayerNormBasicLSTMCell(2)
        cell = core_rnn_cell_impl.MultiRNNCell([single_cell() for _ in range(2)])
        g, out_m = cell(x, state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([g, out_m], {
            x.name: np.array([[1., 1.]]),
            c0.name: 0.1 * np.asarray([[0, 1]]),
            h0.name: 0.1 * np.asarray([[2, 3]]),
            c1.name: 0.1 * np.asarray([[4, 5]]),
            h1.name: 0.1 * np.asarray([[6, 7]]),
        })

        expected_h = np.array([[-0.38079708, 0.38079708]])
        expected_state0_c = np.array([[-1.0, 1.0]])
        expected_state0_h = np.array([[-0.38079708, 0.38079708]])
        expected_state1_c = np.array([[-1.0, 1.0]])
        expected_state1_h = np.array([[-0.38079708, 0.38079708]])

        actual_h = res[0]
        actual_state0_c = res[1][0].c
        actual_state0_h = res[1][0].h
        actual_state1_c = res[1][1].c
        actual_state1_h = res[1][1].h

        self.assertAllClose(actual_h, expected_h, 1e-5)
        self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5)
        self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5)
        self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5)
        self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5)

      with variable_scope.variable_scope(
          "other", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros(
            [1, 3])  # Test BasicLSTMCell with input_size != num_units.
        c = array_ops.zeros([1, 2])
        h = array_ops.zeros([1, 2])
        state = core_rnn_cell_impl.LSTMStateTuple(c, h)
        cell = rnn_cell.LayerNormBasicLSTMCell(2)
        g, out_m = cell(x, state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([g, out_m], {
            x.name: np.array([[1., 1., 1.]]),
            c.name: 0.1 * np.asarray([[0, 1]]),
            h.name: 0.1 * np.asarray([[2, 3]]),
        })

        expected_h = np.array([[-0.38079708, 0.38079708]])
        expected_c = np.array([[-1.0, 1.0]])
        self.assertEqual(len(res), 2)
        self.assertAllClose(res[0], expected_h, 1e-5)
        self.assertAllClose(res[1].c, expected_c, 1e-5)
        self.assertAllClose(res[1].h, expected_h, 1e-5)
Example 9
def get_rnn_cell(rnn_cell_size, dropout_prob):
    rnn_c = rnn_cell.LayerNormBasicLSTMCell(rnn_cell_size, layer_norm=False)
    #rnn_c = GRUCell(rnn_cell_size)
    #rnn_c = DropoutWrapper(rnn_c, input_keep_prob = dropout_prob)
    return rnn_c
Example 10
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.contrib.rnn.python.ops import rnn_cell

res = []

with tf.Session() as sess:
    with variable_scope.variable_scope(
            "other", initializer=init_ops.constant_initializer(0.5)) as vs:
        x = array_ops.zeros([1, 3])
        c = array_ops.zeros([1, 2])
        h = array_ops.zeros([1, 2])
        state = (c, h)
        cell = rnn_cell.LayerNormBasicLSTMCell(2, layer_norm=False)
        g, out_m = cell(x, state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(
            [g, out_m], {
                x.name: np.array([[1., 1., 1.]]),
                c.name: 0.1 * np.asarray([[0, 1]]),
                h.name: 0.1 * np.asarray([[2, 3]])
            })

        print(res[1].c)
        print(res[1].h)
Example 11
File: NJM.py Project: xy1234552/NJM
    def _init_graph(self):
        time_step = self.train_T

        # Input data.
        self.train_user_id = tf.placeholder(tf.int32,
                                            shape=[None])  # batch_size * 1

        self.train_item_id = tf.placeholder(tf.int32, shape=[None])
        self.train_item_id_list = tf.placeholder(tf.int32,
                                                 shape=[None, time_step])

        self.train_rating_indicator = tf.placeholder(tf.float32,
                                                     shape=[None, time_step])

        self.train_item_attr = tf.placeholder(
            tf.float32, shape=[None, time_step, self.user_node_N])
        self.train_rating_label = tf.placeholder(tf.float32,
                                                 shape=[None, time_step])

        # Test_rating
        self.test_user_id = tf.placeholder(tf.int32, shape=[None])

        self.test_friend_record = tf.placeholder(
            tf.float32, shape=[None, self.user_node_N])
        self.test_item_id = tf.placeholder(tf.int32, shape=[None])
        self.test_item_id_list = tf.placeholder(tf.int32,
                                                shape=[None, time_step + 1])

        self.test_item_attr = tf.placeholder(
            tf.float32, shape=[None, time_step + 1, self.user_node_N])

        self.test_rating_label = tf.placeholder(tf.float32, shape=[None])

        # link prediction
        self.train_predict_link_label = tf.placeholder(
            tf.float32, shape=[None, time_step, self.user_node_N])

        self.train_predict_weight = tf.placeholder(
            tf.float32, shape=[None, time_step, self.user_node_N])

        # test link prediction
        self.link_test_user_id = tf.placeholder(tf.int32, shape=[None])

        #   Variables

        network_weights = self._initialize_weights()
        self.weights, self.biases = network_weights

        # get initial state
        self.ini_social_vector = tf.constant(
            0.0, shape=[self.batch_size, self.embedding_size])
        self.ini_homophily_vector = tf.constant(
            0.0, shape=[self.batch_size, self.embedding_size])

        self.ini_social_matrix = tf.constant(
            0.0, shape=[self.user_node_N, self.embedding_size])
        self.ini_homophily_matrix = tf.constant(
            0.0, shape=[self.user_node_N, self.embedding_size])

        self.one_nodeN = tf.constant(1.0, shape=[self.user_node_N])

        train_item_id_list = tf.reshape(self.train_item_id_list, [-1])
        #   Model

        train_item_attr = tf.reshape(self.train_item_attr,
                                     [-1, self.item_attr_M])
        item_attr_embed = tf.matmul(train_item_attr,
                                    self.weights['item_attr_embeddings'])

        item_rnn_input = tf.reshape(item_attr_embed,
                                    [-1, self.train_T, self.embedding_size])

        item_cell = rnn_cell.LayerNormBasicLSTMCell(self.embedding_size)

        self.item_init_state = item_cell.zero_state(self.batch_size,
                                                    dtype=tf.float32)

        with tf.variable_scope("item_lstm"):
            item_output, item_final_states = tf.nn.dynamic_rnn(
                item_cell,
                item_rnn_input,
                initial_state=self.item_init_state,
                dtype=tf.float32)
            item_output = tf.reshape(item_output, [-1, self.embedding_size])
            item_affine_rating = tf.matmul(
                item_output,
                self.weights['item_rnn_out_rating']) + tf.nn.embedding_lookup(
                    self.biases['item_out_rating'], train_item_id_list)
            item_affine_rating = tf.reshape(
                item_affine_rating, [-1, time_step, self.embedding_size])

        user_latent_vector = tf.nn.embedding_lookup(
            self.weights['user_latent'], self.train_user_id)

        user_latent_promixity = tf.nn.embedding_lookup(
            self.weights['node_proximity'], self.train_user_id)

        consumption_weigh = tf.nn.embedding_lookup(
            self.weights['consumption_balance'], self.train_user_id)

        link_weigh = tf.nn.embedding_lookup(self.weights['link_balance'],
                                            self.train_user_id)

        zero_op = tf.constant(0.0)
        one_op = tf.constant(1.0)

        for t in range(self.train_T):
            if t == 0:

                # -------------------------
                # add hidden layers here

                user_latent_self = user_latent_vector[:, 0, :]

                embed_layer = tf.concat(
                    [user_latent_self, self.ini_social_vector], -1)

                user_output = tf.layers.dense(
                    inputs=embed_layer,
                    units=self.embedding_size,
                    activation=tf.nn.relu,
                    kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),
                    name='rating_mlp')
                # -------------------------
                # rating prediction

                b_user = tf.nn.embedding_lookup(self.biases['user_static'],
                                                self.train_user_id)
                b_item = tf.nn.embedding_lookup(self.biases['item_static'],
                                                self.train_item_id)
                rating_prediction = tf.multiply(
                    user_output, item_affine_rating[:, t, :]) + tf.multiply(
                        b_user, b_item)
                rating_prediction = tf.reduce_sum(rating_prediction, axis=-1)

                rating_prediction = tf.sigmoid(rating_prediction)
                tf.add_to_collection(
                    "predict_loss",
                    tf.reduce_sum(
                        tf.square(rating_prediction -
                                  self.train_rating_label[:, t]) *
                        self.train_rating_indicator[:, t]))

                # ----------------------------------
                # link prediction

                user_embedding_matrix = tf.concat([
                    self.weights['node_proximity'][:, t, :],
                    self.ini_homophily_matrix
                ], -1)

                link_embed_layer = tf.concat([
                    user_latent_promixity[:, t, :], self.ini_homophily_vector
                ], -1)
                link_embed_layer = tf.layers.dense(
                    inputs=link_embed_layer,
                    units=self.embedding_size,
                    activation=tf.nn.relu,
                    kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),
                    name='link_mlp')

                link_embedding_matrix = tf.layers.dense(
                    inputs=user_embedding_matrix,
                    units=self.embedding_size,
                    activation=tf.nn.relu,
                    kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),
                    name='link_mlp',
                    reuse=True)

                link_prediction = tf.matmul(
                    link_embed_layer, link_embedding_matrix,
                    transpose_b=True) + self.biases['link_mlp_embeddings']

                tf.add_to_collection(
                    "predict_loss",
                    self.alphaS * tf.reduce_sum(
                        self.train_predict_weight[:, t] *
                        tf.nn.sigmoid_cross_entropy_with_logits(
                            labels=self.train_predict_link_label[:, t],
                            logits=link_prediction)))

                self.friend_record = self.train_predict_link_label[:, 0, :]
            else:
                user_friend_latent_matrix = tf.multiply(
                    self.weights['transformation'],
                    self.weights['user_latent'][:, t - 1, :])

                node_proximity = tf.matmul(
                    user_latent_promixity[:, t - 1, :],
                    self.weights['node_proximity'][:, t - 1, :],
                    transpose_b=True)

                trust_score = tf.sigmoid(node_proximity)
                trust_score = tf.multiply(self.friend_record, trust_score)
                all = tf.reduce_sum(trust_score, keep_dims=True, axis=-1)
                all_p = all + one_op
                all = tf.where(tf.equal(all, zero_op), all_p, all)
                trust_score = tf.div(trust_score, all)

                user_friend_latent_vector = tf.matmul(
                    trust_score, user_friend_latent_matrix)

                # -------------------------
                # add hidden layers here

                user_latent_self = tf.multiply(
                    consumption_weigh,
                    tf.transpose(user_latent_vector[:, t - 1, :]))
                user_latent_self = tf.transpose(user_latent_self)

                user_friend_latent_vector = tf.multiply(
                    (1 - consumption_weigh),
                    tf.transpose(user_friend_latent_vector))
                user_friend_latent_vector = tf.transpose(
                    user_friend_latent_vector)

                embed_layer = tf.concat(
                    [user_latent_self, user_friend_latent_vector], -1)

                user_output = tf.layers.dense(
                    inputs=embed_layer,
                    units=self.embedding_size,
                    activation=tf.nn.relu,
                    kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),
                    name='rating_mlp',
                    reuse=True)
                # -------------------------
                # rating prediction

                b_user = tf.nn.embedding_lookup(self.biases['user_static'],
                                                self.train_user_id)
                b_item = tf.nn.embedding_lookup(self.biases['item_static'],
                                                self.train_item_id)
                rating_prediction = tf.multiply(
                    user_output, item_affine_rating[:, t, :]) + tf.multiply(
                        b_user, b_item)
                rating_prediction = tf.reduce_sum(rating_prediction, axis=-1)

                rating_prediction = tf.sigmoid(rating_prediction)
                tf.add_to_collection(
                    "predict_loss",
                    tf.reduce_sum(
                        tf.square(rating_prediction -
                                  self.train_rating_label[:, t]) *
                        self.train_rating_indicator[:, t]))

                # ----------------------------------
                # link prediction

                homo_effect = user_latent_vector[:, t - 1, :]

                node_proximity_by_weight = tf.multiply(
                    link_weigh, tf.transpose(user_latent_promixity[:,
                                                                   t - 1, :]))
                node_proximity_by_weight = tf.transpose(
                    node_proximity_by_weight)

                homo_effect_by_weight = tf.multiply((1 - link_weigh),
                                                    tf.transpose(homo_effect))
                homo_effect_by_weight = tf.transpose(homo_effect_by_weight)

                user_node_matrix = tf.multiply(
                    self.weights['link_balance'],
                    tf.transpose(self.weights['node_proximity'][:, t - 1, :]))
                user_node_matrix = tf.transpose(user_node_matrix)

                user_latent_matrix = tf.multiply(
                    (self.one_nodeN - self.weights['link_balance']),
                    tf.transpose(self.weights['user_latent'][:, t - 1, :]))
                user_latent_matrix = tf.transpose(user_latent_matrix)

                user_embedding_matrix = tf.concat(
                    [user_node_matrix, user_latent_matrix], -1)
                link_embed_layer = tf.concat(
                    [node_proximity_by_weight, homo_effect_by_weight], -1)

                user_embedding_matrix = tf.layers.dense(
                    inputs=user_embedding_matrix,
                    units=self.embedding_size,
                    activation=tf.nn.relu,
                    kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),
                    name='link_mlp',
                    reuse=True)

                link_embed_layer = tf.layers.dense(
                    inputs=link_embed_layer,
                    units=self.embedding_size,
                    activation=tf.nn.relu,
                    kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),
                    name='link_mlp',
                    reuse=True)

                link_prediction = tf.matmul(
                    link_embed_layer, user_embedding_matrix,
                    transpose_b=True) + self.biases['link_mlp_embeddings']

                tf.add_to_collection(
                    "predict_loss",
                    self.alphaS * tf.reduce_sum(
                        self.train_predict_weight[:, t] *
                        tf.nn.sigmoid_cross_entropy_with_logits(
                            labels=self.train_predict_link_label[:, t],
                            logits=link_prediction)))

                tf.add_to_collection(
                    "predict_loss",
                    self.alphaU *
                    (tf.reduce_sum(
                        tf.reduce_sum(
                            tf.square(user_output -
                                      user_latent_vector[:, t, :]))) +
                     tf.reduce_sum(
                         tf.reduce_sum(
                             tf.square(user_latent_promixity[:, t - 1, :] -
                                       user_latent_promixity[:, t, :])))))

                self.friend_record = (
                    self.friend_record + self.train_predict_link_label[:, t, :])

        tf.add_to_collection(
            "predict_loss",
            tf.contrib.layers.l2_regularizer(0.1)(self.weights['user_latent']))
        tf.add_to_collection(
            "predict_loss",
            tf.contrib.layers.l2_regularizer(0.01)(
                self.weights['transformation']))
        tf.add_to_collection(
            "predict_loss",
            tf.contrib.layers.l2_regularizer(0.1)(
                self.weights['item_attr_embeddings']))
        tf.add_to_collection(
            "predict_loss",
            tf.contrib.layers.l2_regularizer(0.1)(
                self.weights['item_rnn_out_rating']))
        tf.add_to_collection(
            "predict_loss",
            tf.contrib.layers.l2_regularizer(0.001)(
                self.weights['consumption_balance']))
        tf.add_to_collection(
            "predict_loss",
            tf.contrib.layers.l2_regularizer(0.1)(
                self.weights['node_proximity']))
        tf.add_to_collection(
            "predict_loss",
            tf.contrib.layers.l2_regularizer(0.001)(
                self.weights['link_balance']))

        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

        tf.add_to_collection("predict_loss", tf.reduce_sum(reg_losses))
        self.predict_rating_loss = tf.add_n(tf.get_collection("predict_loss"))

        test_item_attr = tf.reshape(self.test_item_attr,
                                    [-1, self.item_attr_M])
        test_item_attr_embed = tf.matmul(test_item_attr,
                                         self.weights['item_attr_embeddings'])

        test_item_rnn_input = tf.reshape(
            test_item_attr_embed, [-1, self.train_T + 1, self.embedding_size])
        with tf.variable_scope("test_item_lstm"):
            test_item_output, test_item_final_states = tf.nn.dynamic_rnn(
                item_cell,
                test_item_rnn_input,
                initial_state=self.item_init_state,
                dtype=tf.float32)
            test_item_output = test_item_output[:, -1, :]

            test_item_affine_rating = tf.matmul(
                test_item_output,
                self.weights['item_rnn_out_rating']) + tf.nn.embedding_lookup(
                    self.biases['item_out_rating'], self.test_item_id)

        test_user_friend_latent_matrix = tf.multiply(
            self.weights['user_latent'][:, -1, :],
            self.weights['transformation'])

        test_user_latent_vector = tf.nn.embedding_lookup(
            self.weights['user_latent'], self.test_user_id)
        test_user_latent_promixity = tf.nn.embedding_lookup(
            self.weights['node_proximity'], self.test_user_id)

        test_user_latent_vector = test_user_latent_vector[:, -1, :]

        test_node_proximity = tf.matmul(test_user_latent_promixity[:, -1, :],
                                        self.weights['node_proximity'][:,
                                                                       -1, :],
                                        transpose_b=True)
        test_trust_score = tf.sigmoid(test_node_proximity)

        test_trust_score = tf.multiply(test_trust_score,
                                       self.test_friend_record)

        test_all = tf.reduce_sum(test_trust_score, keep_dims=True, axis=-1)
        test_all_p = test_all + one_op
        test_all = tf.where(tf.equal(test_all, zero_op), test_all_p, test_all)
        test_trust_score = tf.div(test_trust_score, test_all)

        test_user_friend_latent_vector = tf.matmul(
            test_trust_score, test_user_friend_latent_matrix)

        test_consumption_weigh = tf.nn.embedding_lookup(
            self.weights['consumption_balance'], self.test_user_id)

        test_social_factor = tf.multiply(
            test_consumption_weigh,
            tf.transpose(test_user_friend_latent_vector))
        test_social_factor = tf.transpose(test_social_factor)
        test_embed_layer = tf.concat(
            [test_user_latent_vector, test_social_factor], -1)

        test_user_output = tf.layers.dense(
            inputs=test_embed_layer,
            units=self.embedding_size,
            activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),
            name='rating_mlp',
            reuse=True)

        test_b_user = tf.nn.embedding_lookup(self.biases['user_static'],
                                             self.test_user_id)
        test_b_item = tf.nn.embedding_lookup(self.biases['item_static'],
                                             self.test_item_id)

        test_rating_prediction = tf.multiply(
            test_user_output, test_item_affine_rating) + tf.multiply(
                test_b_user, test_b_item)
        test_rating_prediction = tf.reduce_sum(test_rating_prediction, axis=-1)

        self.test_rating_prediction = tf.sigmoid(test_rating_prediction)

        link_test_link_weigh = tf.nn.embedding_lookup(
            self.weights['link_balance'], self.link_test_user_id)

        link_test_user_latent_vector = tf.nn.embedding_lookup(
            self.weights['user_latent'], self.link_test_user_id)
        link_test_user_latent_promixity = tf.nn.embedding_lookup(
            self.weights['node_proximity'], self.link_test_user_id)

        link_test_node_proximity = link_test_user_latent_promixity[:, -1, :]

        link_test_homo_effect = link_test_user_latent_vector[:, -1, :]

        link_test_node_proximity_by_weight = tf.multiply(
            link_test_link_weigh, tf.transpose(link_test_node_proximity))
        link_test_node_proximity_by_weight = tf.transpose(
            link_test_node_proximity_by_weight)

        link_test_homo_effect_by_weight = tf.multiply(
            (1 - link_test_link_weigh), tf.transpose(link_test_homo_effect))
        link_test_homo_effect_by_weight = tf.transpose(
            link_test_homo_effect_by_weight)

        # link_test_link_prediction = link_test_node_proximity_by_weight + link_test_homo_effect_by_weight

        link_test_embed_layer = tf.concat([
            link_test_node_proximity_by_weight, link_test_homo_effect_by_weight
        ], -1)
        link_test_embed_layer = tf.layers.dense(
            inputs=link_test_embed_layer,
            units=self.embedding_size,
            activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),
            name='link_mlp',
            reuse=True)

        test_user_node_matrix = tf.multiply(
            self.weights['link_balance'],
            tf.transpose(self.weights['node_proximity'][:, -1, :]))
        test_user_node_matrix = tf.transpose(test_user_node_matrix)

        test_user_latent_matrix = tf.multiply(
            (self.one_nodeN - self.weights['link_balance']),
            tf.transpose(self.weights['user_latent'][:, -1, :]))
        test_user_latent_matrix = tf.transpose(test_user_latent_matrix)

        test_user_embedding_matrix = tf.concat(
            [test_user_node_matrix, test_user_latent_matrix], -1)

        test_user_embedding_matrix = tf.layers.dense(
            inputs=test_user_embedding_matrix,
            units=self.embedding_size,
            activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),
            name='link_mlp',
            reuse=True)

        link_test_link_prediction = tf.matmul(link_test_embed_layer, test_user_embedding_matrix, transpose_b=True) + \
                                    self.biases['link_mlp_embeddings']

        self.link_test_link_prediction = tf.sigmoid(link_test_link_prediction)

        with tf.variable_scope("train"):

            self.predict_rating_optimizer = tf.train.AdamOptimizer(
                learning_rate=0.001, beta1=0.9, beta2=0.999,
                epsilon=1e-8).minimize(self.predict_rating_loss)