Example #1
    def discriminator(self,
                      input,
                      input_step,
                      input_size,
                      hidden_size,
                      output_size,
                      batch_size,
                      reuse=False):
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            # lstm cell and wrap with dropout
            d_lstm_cell = tf.contrib.rnn.BasicLSTMCell(hidden_size,
                                                       forget_bias=0.0,
                                                       state_is_tuple=True)
            d_lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(hidden_size // 2,
                                                         forget_bias=0.0,
                                                         state_is_tuple=True)

            d_lstm_cell_attention = tf.contrib.rnn.AttentionCellWrapper(
                d_lstm_cell, attn_length=10)
            d_lstm_cell_attention_1 = tf.contrib.rnn.AttentionCellWrapper(
                d_lstm_cell_1, attn_length=10)

            if self.attention == 1:
                d_lstm_cell_drop = tf.contrib.rnn.DropoutWrapper(
                    d_lstm_cell_attention, output_keep_prob=0.9)
                d_lstm_cell_drop_1 = tf.contrib.rnn.DropoutWrapper(
                    d_lstm_cell_attention_1, output_keep_prob=0.9)
            else:
                d_lstm_cell_drop = tf.contrib.rnn.DropoutWrapper(
                    d_lstm_cell, output_keep_prob=0.9)
                d_lstm_cell_drop_1 = tf.contrib.rnn.DropoutWrapper(
                    d_lstm_cell_1, output_keep_prob=0.9)

            d_cell = tf.contrib.rnn.MultiRNNCell(
                [d_lstm_cell_drop, d_lstm_cell_drop_1], state_is_tuple=True)
            d_state_ = d_cell.zero_state(batch_size, tf.float32)

            d_W_o = utils.glorot([input_step * hidden_size // 2, output_size])
            d_b_o = tf.Variable(tf.random_normal([output_size]))

            # neural network
            d_outputs = []
            d_state = d_state_
            for i in range(input_step):
                if i > 0: tf.get_variable_scope().reuse_variables()
                (d_cell_output, d_state) = d_cell(
                    input[:, i, :],
                    d_state)  # cell_out: [batch_size, hidden_size // 2]
                d_outputs.append(
                    d_cell_output
                )  # output: shape[input_step][batch_size, hidden_size/2]

            # concatenate the per-step outputs into [batch_size, input_step * hidden_size // 2]
            d_output = tf.reshape(tf.concat(d_outputs, axis=1),
                                  [batch_size, input_step * hidden_size // 2])
            d_y = tf.matmul(d_output, d_W_o) + d_b_o  # d_y: [batch_size, output_size]
            return d_y
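
Several of these examples call a glorot helper (utils.glorot in #1 and #3, a bare glorot in #2, #4, and #7) that is defined elsewhere in their repositories. Below is a minimal sketch of what the call sites assume, following the Glorot & Bengio (2010) uniform initializer common in GCN-style codebases; the exact implementation in each source repository may differ:

import numpy as np
import tensorflow as tf

def glorot(shape, name=None):
    # Uniform init in [-limit, limit] with limit = sqrt(6 / (fan_in + fan_out)).
    # The last two dims are used as the fans, so 3-D shapes such as
    # [num_support, num_feature, num_feature] (Example #3) also work.
    init_range = np.sqrt(6.0 / (shape[-2] + shape[-1]))
    initial = tf.random_uniform(shape, minval=-init_range,
                                maxval=init_range, dtype=tf.float32)
    return tf.Variable(initial, name=name)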
Example #2
    def __init__(self,
                 name='gat_agg',
                 verbose=False,
                 input_dim=None,
                 output_dim=None,
                 act=tf.nn.relu,
                 bias=True,
                 weight=True,
                 dropout=0.,
                 atn_type=1,
                 atn_drop=False):
        super(GATAgg, self).__init__(name=name, verbose=verbose)

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.act = act
        self.bias = bias
        self.weight = weight
        self.dropout = dropout
        self.atn_type = atn_type
        self.atn_drop = dropout if atn_drop else 0.

        with tf.variable_scope(self.name):
            if self.weight:
                self.vars['weights'] = glorot(shape=[input_dim, output_dim],
                                              name='weights')
            else:
                assert input_dim == output_dim

            self.vars['atn_weights_1'] = glorot([output_dim, 1],
                                                name='atn_weights_1')
            self.vars['atn_weights_2'] = glorot([output_dim, 1],
                                                name='atn_weights_2')
            self.vars['atn_bias_1'] = zeros([1], name='atn_bias_1')
            self.vars['atn_bias_2'] = zeros([1], name='atn_bias_2')

            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        self._log_vars()
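
Examples #2, #4, and #7 also rely on a zeros helper for bias variables. A plausible minimal definition under the same assumption (not necessarily the repository's own code):

def zeros(shape, name=None):
    # All-zeros trainable bias variable matching the call sites above.
    return tf.Variable(tf.zeros(shape, dtype=tf.float32), name=name)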
Example #3
    def graph_conv_network(self, input, lap, input_step, input_size, batch_size, num_feature):
        # W_conv1 = tf.Variable(tf.truncated_normal([self.num_support, num_feature, num_feature], stddev=0.1))
        W_conv1 = utils.glorot([self.num_support, num_feature, num_feature])
        b_conv1 = tf.Variable(tf.constant(0.1, shape=[self.num_support]))

        input_t = tf.transpose(input, perm=[0, 2, 1])
        input_m = tf.reshape(input_t, [batch_size * input_step, input_size])
        # convolve
        supports = []
        for i in range(self.num_support):
            pre = tf.matmul(self.fea, W_conv1[i])
            support_m = tf.matmul(lap[i], pre) + b_conv1[i]
            supports.append(support_m)
        support_sum = tf.add_n(supports)
        h_conv1_temp = tf.matmul(input_m, support_sum)
        h_conv1 = tf.reshape(h_conv1_temp, [batch_size, input_step, num_feature])
        # h_conv1 = tf.nn.relu(output)
        return h_conv1
Example #4
    def __init__(self,
                 name='gcn_agg',
                 verbose=False,
                 input_dim=None,
                 output_dim=None,
                 act=tf.nn.relu,
                 weight=True,
                 dropout=0.):
        super(GCNAgg, self).__init__(name=name, verbose=verbose)

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.act = act
        self.weight = weight
        self.dropout = dropout

        with tf.variable_scope(self.name):
            if self.weight:
                self.vars['weights'] = glorot([input_dim, output_dim],
                                              name='weights')
            self.vars['bias'] = zeros([output_dim], name='bias')

        self._log_vars()
Example #5
    def reset_parameters(self):
        glorot(self.weight)
        glorot(self.att)
Example #6
    def reset_parameters(self):
        glorot(self.lin.weight)
        zeros(self.lin.bias)
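
Unlike the TensorFlow snippets, Examples #5 and #6 are PyTorch: there glorot and zeros mutate existing parameter tensors in place instead of creating variables. A sketch in the style of torch_geometric.nn.inits, assuming weight tensors with at least two dimensions:

import math
import torch

def glorot(tensor):
    # In-place Glorot/Xavier uniform fill; a no-op when the parameter is absent.
    if tensor is not None:
        bound = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
        tensor.data.uniform_(-bound, bound)

def zeros(tensor):
    # In-place zero fill, e.g. for an optional bias parameter.
    if tensor is not None:
        tensor.data.fill_(0)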
Example #7
    def build(self):
        zero_embed = tf.Variable(tf.zeros([1, FLAGS.hidden]), dtype=tf.float32, trainable=False, name='dummy_node')
        embed = glorot([self.n_entity, FLAGS.hidden], name='embed')
        self.embed = tf.concat((zero_embed, embed), axis=0)

        support_size = 1
        self.user_neighbors = [self.users]
        self.item_neighbors = [self.items]
        self.support_sizes = [support_size]
        for i in range(1, len(self.hop_n_sample)):
            n_sample = self.hop_n_sample[i]
            user_hop_i = self.sampler((self.user_neighbors[-1], n_sample))
            item_hop_i = self.sampler((self.item_neighbors[-1], n_sample))
            support_size *= n_sample
            self.user_neighbors.append(tf.reshape(user_hop_i, [self.batch_size * support_size]))
            self.item_neighbors.append(tf.reshape(item_hop_i, [self.batch_size * support_size]))
            self.support_sizes.append(support_size)

        user_hidden = [tf.nn.embedding_lookup(self.embed, hop_i) for hop_i in self.user_neighbors]
        item_hidden = [tf.nn.embedding_lookup(self.embed, hop_i) for hop_i in self.item_neighbors]

        for n_hop in range(len(self.hop_n_sample) - 2, -1, -1):
            agg_param = {
                'input_dim': FLAGS.hidden,
                'output_dim': FLAGS.hidden,
                'act': get_act_func() if n_hop else lambda x: x,
                'weight': n_hop != (len(self.hop_n_sample) - 2),
                'dropout': self.dropout,
            }
            agg = self.agg(**agg_param)

            next_user_hidden = []
            next_item_hidden = []
            last_support_size = 1
            for hop in range(n_hop + 1):
                _shape = [self.batch_size * last_support_size, self.hop_n_sample[hop + 1], FLAGS.hidden]
                user_neigh_hidden = tf.reshape(user_hidden[hop + 1], _shape)
                item_neigh_hidden = tf.reshape(item_hidden[hop + 1], _shape)

                user_h = agg((user_hidden[hop], user_neigh_hidden, self.hop_n_sample[hop + 1]))
                item_h = agg((item_hidden[hop], item_neigh_hidden, self.hop_n_sample[hop + 1]))
                last_support_size *= self.hop_n_sample[hop + 1]
                next_user_hidden.append(user_h)
                next_item_hidden.append(item_h)

            if n_hop == 0:
                neighbor_size = self.hop_n_sample[1] + 1
                hidden_size = FLAGS.hidden
                Nu = tf.concat([tf.expand_dims(user_hidden[0], 1), user_neigh_hidden], axis=1)
                Nv = tf.concat([tf.expand_dims(item_hidden[0], 1), item_neigh_hidden], axis=1)

            user_hidden = next_user_hidden
            item_hidden = next_item_hidden

        Nu = tf.nn.dropout(Nu, 1 - self.dropout)
        Nv = tf.nn.dropout(Nv, 1 - self.dropout)
        logits = tf.reduce_sum(tf.expand_dims(Nu, 2) * tf.expand_dims(Nv, 1), axis=3)
        logits = tf.reshape(logits, [-1, neighbor_size * neighbor_size])
        if self.int_type == 1:
            coefs = tf.nn.softmax(logits / FLAGS.temp)
        elif self.int_type == 2:
            with tf.variable_scope('ni'):
                w1 = glorot([hidden_size, 1], name='atn_weights_1')
                w2 = glorot([hidden_size, 1], name='atn_weights_2')
                b1 = zeros([1], name='atn_bias_1')
                b2 = zeros([1], name='atn_bias_2')
            f1 = tf.reshape(tf.matmul(tf.reshape(Nu, [-1, hidden_size]), w1) + b1, [-1, neighbor_size, 1])
            f2 = tf.reshape(tf.matmul(tf.reshape(Nv, [-1, hidden_size]), w2) + b2, [-1, 1, neighbor_size])
            coefs = tf.nn.softmax(tf.nn.tanh(tf.reshape(f1 + f2, [-1, neighbor_size * neighbor_size])) / FLAGS.temp)

        self.outputs = tf.reduce_sum(logits * coefs, axis=1)
        self.scores = tf.nn.sigmoid(self.outputs)
        self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.outputs, labels=self.labels))

        self.vars = {var.name: var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)}

        if FLAGS.l2_reg > 0:
            for k, v in self.vars.items():
                if ('embed' in k) or ('weight' in k):
                    self.loss += FLAGS.l2_reg * tf.nn.l2_loss(v)

        self.optimizer = get_optimizer(self.opt_param, self.learning_rate)
        self.global_step = tf.train.get_or_create_global_step()
        self.train_op = self.optimizer.minimize(self.loss, global_step=self.global_step)
        self.summary_op = tf.summary.merge_all()