Example #1
    def __init__(self,
                 input_dim,
                 output_dim,
                 placeholders,
                 dropout=0.,
                 act=tf.nn.relu,
                 bias=False,
                 **kwargs):
        super(NTN, self).__init__(**kwargs)

        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        self.act = act
        self.support = placeholders['support']
        self.bias = bias
        self.input_dim = input_dim
        self.output_dim = output_dim

        # helper variable for sparse dropout
        #self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):

            self.vars['W'] = glorot([output_dim, input_dim, input_dim],
                                    name='W')
            self.vars['V'] = glorot([output_dim, 2 * input_dim, 1], name='V')
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        if self.logging:
            self._log_vars()
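
Every example on this page calls glorot (and usually zeros) without showing their definitions. As a point of reference, here is a minimal sketch of what these initializer helpers typically look like in the TF1.x GCN/GraphSAGE codebases these snippets are drawn from; this is an assumption, not the verbatim source of any one repository.

import numpy as np
import tensorflow as tf  # TensorFlow 1.x assumed throughout these examples


def glorot(shape, name=None):
    # Glorot & Bengio (2010) uniform initializer. The canonical helper uses the
    # first two dimensions as fan-in/fan-out, even when a higher-rank shape is passed.
    init_range = np.sqrt(6.0 / (shape[0] + shape[1]))
    initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range,
                                dtype=tf.float32)
    return tf.Variable(initial, name=name)


def zeros(shape, name=None):
    # All-zeros initializer, typically used for bias terms.
    initial = tf.zeros(shape, dtype=tf.float32)
    return tf.Variable(initial, name=name)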
Example #2
    def __init__(self, input_dim, output_dim, neigh_input_dim=None,
            dropout=0., bias=False, act=tf.nn.relu, 
            name=None, concat=False, **kwargs):
        super(MeanAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat

        if neigh_input_dim is None:
            neigh_input_dim = input_dim

        if name is not None:
            name = '/' + name
        else:
            name = ''

        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([neigh_input_dim, output_dim],
                                                        name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim],
                                                        name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')

        if self.logging:
            self._log_vars()

        self.input_dim = input_dim
        self.output_dim = output_dim
Example #3
    def __init__(self, input_dim, output_dim, neigh_input_dim=None,
            dropout=0, bias=True, act=tf.nn.relu,
            name=None, concat=False, mode="train", **kwargs):
        super(MeanAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        self.mode = mode

        if name is not None:
            name = '/' + name
        else:
            name = ''

        if neigh_input_dim is None:
            neigh_input_dim = input_dim

        if concat:
            self.output_dim = 2 * output_dim

        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([neigh_input_dim, output_dim],
                                                name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim],
                                               name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')

        self.input_dim = input_dim
        self.output_dim = output_dim
Example #4
    def __init__(self,
                 input_dim,
                 output_dim,
                 model_size="small",
                 neigh_input_dim=None,
                 dropout=0.,
                 bias=True,
                 act=tf.nn.relu,
                 name=None,
                 concat=False,
                 **kwargs):
        super(MaxPoolingAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat

        if name is not None:
            name = '/' + name
        else:
            name = ''

        if neigh_input_dim is None:
            neigh_input_dim = input_dim

        if concat:
            self.output_dim = 2 * output_dim

        if model_size == "small":
            hidden_dim = self.hidden_dim = 50
        elif model_size == "big":
            hidden_dim = self.hidden_dim = 50

        self.mlp_layers = []
        self.mlp_layers.append(
            Dense(input_dim=neigh_input_dim,
                  output_dim=hidden_dim,
                  act=tf.nn.relu,
                  dropout=dropout,
                  sparse_inputs=False,
                  logging=self.logging))

        with tf.variable_scope(self.name + name + '_vars'):

            self.vars['neigh_weights'] = glorot([hidden_dim, output_dim],
                                                name='neigh_weights')

            self.vars['self_weights'] = glorot([input_dim, output_dim],
                                               name='self_weights')

            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.neigh_input_dim = neigh_input_dim
Example #5
File: layers.py  Project: longyahui/GCATSL
def sp_attn_head(seq,
                 out_sz,
                 adj_mat_local,
                 adj_mat_global,
                 activation,
                 in_drop=0.0,
                 coef_drop=0.0,
                 residual=False):
    with tf.name_scope('my_attn'):
        if in_drop != 0.0:
            seq = tf.nn.dropout(seq, 1.0 - in_drop)
        seq_fts = seq

        latent_factor_size = 8
        nb_nodes = seq_fts.shape[1].value

        w_1 = glorot([seq_fts.shape[2].value, latent_factor_size])
        w_2 = glorot([3 * seq_fts.shape[2].value, latent_factor_size])

        f_1 = tf.layers.conv1d(seq_fts, 1, 1)
        f_2 = tf.layers.conv1d(seq_fts, 1, 1)

        #local neighbours
        logits = tf.add(f_1[0], tf.transpose(f_2[0]))
        logits_first = adj_mat_local * logits
        lrelu = tf.SparseTensor(indices=logits_first.indices,
                                values=tf.nn.leaky_relu(logits_first.values),
                                dense_shape=logits_first.dense_shape)
        coefs = tf.sparse_softmax(lrelu)

        coefs = tf.sparse_reshape(coefs, [nb_nodes, nb_nodes])
        seq_fts = tf.squeeze(seq_fts)
        neigh_embs = tf.sparse.sparse_dense_matmul(coefs, seq_fts)

        #non-local neighbours
        logits_global = adj_mat_global * logits
        lrelu_global = tf.SparseTensor(indices=logits_global.indices,
                                       values=tf.nn.leaky_relu(
                                           logits_global.values),
                                       dense_shape=logits_global.dense_shape)
        coefs_global = tf.sparse_softmax(lrelu_global)

        coefs_global = tf.sparse_reshape(coefs_global, [nb_nodes, nb_nodes])
        neigh_embs_global = tf.sparse.sparse_dense_matmul(
            coefs_global, seq_fts)

        neigh_embs_sum_1 = tf.matmul(
            tf.add(tf.add(seq_fts, neigh_embs), neigh_embs_global), w_1)
        neigh_embs_sum_2 = tf.matmul(
            tf.concat(
                [tf.concat([seq_fts, neigh_embs], axis=-1), neigh_embs_global],
                axis=-1), w_2)

        final_embs = activation(neigh_embs_sum_1) + activation(
            neigh_embs_sum_2)

        return final_embs
Example #6
def decoder_revised(embed):
    num_nodes = embed.shape[0].value
    embed_size = embed.shape[1].value
    with tf.compat.v1.variable_scope("deco_revised"):
        weight1 = glorot([embed_size, embed_size])
        weight2 = glorot([embed_size, embed_size])
        bias = glorot([num_nodes, embed_size])
    embedding = tf.add(tf.matmul(embed, weight1), bias)
    logits = tf.matmul(tf.matmul(embedding, weight2),
                       tf.transpose(embedding))
    logits = tf.reshape(logits, [-1, 1])
    return tf.nn.sigmoid(logits)
Example #7
def gcn_layer(embed, inter_mat):

    node_size = embed.shape[1].value
    embed_size = embed.shape[2].value
    latent_factor_size = 64
    with tf.compat.v1.variable_scope("enco_second"):
        weight4 = glorot([embed_size, latent_factor_size])
        weight5 = glorot([node_size, latent_factor_size])

    con = tf.matmul(inter_mat[0], embed[0])
    hidden = tf.add(tf.matmul(con, weight4), weight5)
    hidden = hidden[tf.newaxis]

    return tf.nn.relu(hidden)
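
A hypothetical driver for gcn_layer, assuming TensorFlow 1.x (the .value shape accessors above require it) and the glorot helper sketched after Example #1. The [0] indexing implies that both inputs carry a leading batch dimension of 1, i.e. embed is [1, N, d] and inter_mat is [1, N, N].

import numpy as np
import tensorflow as tf

N, d = 100, 128
embed = tf.constant(np.random.rand(1, N, d), dtype=tf.float32)      # node embeddings
inter_mat = tf.constant(np.random.rand(1, N, N), dtype=tf.float32)  # interaction/adjacency matrix
hidden = gcn_layer(embed, inter_mat)  # -> shape [1, N, 64]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(hidden).shape)  # (1, 100, 64)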
Example #8
    def __init__(self,
                 input_dim,
                 output_dim,
                 model_size="small",
                 neigh_input_dim=None,
                 dropout=0.,
                 bias=True,
                 act=tf.nn.relu,
                 name=None,
                 concat=False,
                 mode="train",
                 **kwargs):
        super(SeqAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        self.mode = mode
        self.output_dim = output_dim

        if neigh_input_dim is None:
            neigh_input_dim = input_dim

        if name is not None:
            name = '/' + name
        else:
            name = ''

        if model_size == "small":
            hidden_dim = self.hidden_dim = 128
        elif model_size == "big":
            hidden_dim = self.hidden_dim = 256

        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([hidden_dim, output_dim],
                                                name='neigh_weights')

            self.vars['self_weights'] = glorot([input_dim, output_dim],
                                               name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')

        if self.logging:
            self._log_vars()

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.neigh_input_dim = neigh_input_dim
        self.cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_dim)
Example #9
    def __init__(self,
                 placeholders,
                 features,
                 dict_size,
                 degree_permuted,
                 rpr_matrix,
                 rpr_arg,
                 dropout=0.,
                 nodevec_dim=200,
                 lr=0.001,
                 only_f=False,
                 **kwargs):
        """
		Aggregate feature informations of the neighbors of the current node,
		weighted by Rooted PageRank vector of the current node.
		"""

        super(AggregateModel, self).__init__(**kwargs)

        self.placeholders = placeholders
        self.degrees = degree_permuted
        self.only_f = only_f
        self.rpr_arg = tf.Variable(tf.constant(rpr_arg, dtype=tf.int64),
                                   trainable=False)
        self.rpr_matrix = tf.Variable(tf.constant(rpr_matrix,
                                                  dtype=tf.float32),
                                      trainable=False)
        self.dropout = dropout
        self.feature_dim = features.shape[1]
        self.features = tf.Variable(tf.constant(features, dtype=tf.float32),
                                    trainable=False)
        self.train_inputs = placeholders["train_inputs"]
        self.train_labels = placeholders["train_labels"]
        self.batchsize = placeholders["batchsize"]
        self.dim = dict_size
        self.nodevec_dim = nodevec_dim

        self.embeddings = inits.glorot([dict_size, nodevec_dim],
                                       name="embeddings")
        self.nce_weights = inits.glorot([dict_size, nodevec_dim],
                                        name="nce_weights")

        self.aggregator_t = WeightedAggregator(self.feature_dim,
                                               self.nodevec_dim,
                                               dropout=self.dropout,
                                               name='true_agg')
        self.optimizer = tf.train.AdamOptimizer(learning_rate=lr)

        self.build()
Example #10
    def __init__(self,
                 input_dim,
                 output_dim,
                 placeholders,
                 dropout=0.,
                 sparse_inputs=False,
                 act=tf.nn.relu,
                 bias=True):

        self.vars = {}
        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.
        self.act = act

        self.sparse_inputs = sparse_inputs
        self.bias = bias

        # helper variable for sparse dropout

        # len(self.support) is asserted to be 1 since there is only one weight matrix per layer
        # initialize the weight matrices
        # the variables are found under the collection tf.GraphKeys.GLOBAL_VARIABLES
        self.vars['weights_' + str(0)] = inits.glorot([input_dim, output_dim],
                                                      name='weights_' + str(0))

        # initialize the biases as 0 matrices of correct shapes
        if self.bias:
            self.vars['bias'] = inits.zeros([output_dim], name='bias')
Example #11
    def __init__(self,
                 input_dim,
                 output_dim,
                 dropout=0.,
                 bias=False,
                 hidden_dim=512,
                 act=tf.nn.relu,
                 name=None,
                 **kwargs):
        super(WeightedAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.hidden_dim = hidden_dim

        if name is not None:
            name = '/' + name
        else:
            name = ''

        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['mlp_weights'] = glorot([input_dim, output_dim],
                                              name='mlp_weights')
            tf.summary.histogram("mlp_weights", self.vars['mlp_weights'])
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
                tf.summary.histogram("bias", self.vars['bias'])
        if self.logging:
            self._log_vars()

        self.input_dim = input_dim
        self.output_dim = output_dim
Example #12
    def __init__(self,
                 input_dim,
                 placeholders,
                 dropout=0.,
                 act=tf.nn.relu,
                 bias=False,
                 **kwargs):
        super(SplitAndAttentionPooling, self).__init__(**kwargs)

        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        self.act = act
        self.support = placeholders['support']
        self.bias = bias
        # the output dimension is the same as the input dimension
        self.output_dim = input_dim
        # helper variable for sparse dropout
        #self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):

            self.vars['weights'] = glorot([input_dim, input_dim],
                                          name='weights')
            if self.bias:
                self.vars['bias'] = zeros([input_dim], name='bias')

        if self.logging:
            self._log_vars()
Example #13
    def __init__(self,
                 input_dim,
                 output_dim,
                 dropout=0.0,
                 sparse_inputs=False,
                 act=tf.nn.relu,
                 bias=False,
                 **kwargs):
        super(Dense, self).__init__(**kwargs)

        self.dropout = dropout
        self.act = act
        self.sparse_inputs = sparse_inputs
        self.bias = bias
        self.input_dim = input_dim

        #helper variable for sparse dropout
        self.num_features_nonzero = input_dim

        with tf.variable_scope("{}_vars".format(self.name)):
            self.vars['weights'] = glorot([input_dim, output_dim],
                                          name='weights')
            if self.bias:
                self.vars["bias"] = zeros([output_dim], name="bias")

        if self.logging:
            self._log_vars()
Example #14
    def __init__(self,
                 in_channels,
                 out_channels,
                 heads=1,
                 concat=True,
                 negative_slope=0.2,
                 dropout=0,
                 bias=True,
                 agg="add",
                 **kwargs):
        super(GATConv_Modified, self).__init__(aggr=agg, **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.heads = heads
        self.concat = concat
        self.negative_slope = negative_slope
        self.dropout = dropout

        self.weight = Parameter(torch.Tensor(in_channels,
                                             heads * out_channels))
        self.att = Parameter(torch.Tensor(1, heads, 2 * out_channels))

        if bias and concat:
            self.bias = Parameter(torch.Tensor(heads * out_channels))
        elif bias and not concat:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()
        # glorot() initializes its argument in place (as in the reset_parameters
        # examples below), so create the parameter first and then initialize it.
        self.alphadash = Parameter(torch.Tensor(in_channels, heads))
        glorot(self.alphadash)
Example #15
    def gcn_layer_gen(self,
                      _input,
                      input_dim,
                      output_dim,
                      layer_name,
                      act_fun,
                      sparse_input=False,
                      record_vars=False):
        """
        GCN层计算图
        """
        with tf.variable_scope(layer_name):
            THWs = []

            if sparse_input:  # if the input is a sparse matrix, drop values from its nonzero entries (sparse dropout)
                H = utils.sparse_dropout(
                    _input, 1 - self.dropout,
                    self.num_features_nonzero)  # won't the dimensions be off after dropping values?
            else:
                H = tf.nn.dropout(_input, 1 - self.dropout)

            for i, support in enumerate(self.supports):
                W = inits.glorot([input_dim, output_dim],
                                 name=f'{layer_name}_weight_{i}')
                # first pass: layer1 with W of shape (1433, 16); second pass: layer2 with W of shape (16, 7)
                if record_vars:
                    self.vars.append(W)
                HW = utils.dot(H, W, sparse=sparse_input)
                THW = utils.dot(support, HW, sparse=True)
                THWs.append(THW)
            output = tf.add_n(THWs)  # sum the THW terms in the list
            # bias = inits.zeros([output_dim], name='bias')
            # output += bias
            output = act_fun(output)
        return output
Example #16
    def __init__(self,
                 input_dim,
                 output_dim,
                 placeholders,
                 dropout=0.,
                 sparse_inputs=False,
                 act=tf.nn.relu,
                 bias=False,
                 featureless=False,
                 **kwargs):
        super(GraphConvolution_GCN, self).__init__(**kwargs)

        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        self.act = act
        self.support = placeholders['support']
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias

        # helper variable for sparse dropout
        #self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):

            self.vars['weights'] = glorot([input_dim, output_dim],
                                          name='weights')
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        if self.logging:
            self._log_vars()
Example #17
    def __init__(self, input_dim, num_units, length, batch_size,
                 return_sequece, bias, **kwargs):
        """
		initialize method
		params:
			input_dim: integer
				the dimension of inputs
			num_units: integer
				the number of hiddens
			bias: Boolean
				the boolean number
		returns:
			none
		"""
        super(LSTM, self).__init__(**kwargs)

        self.input_dim = input_dim
        self.num_units = num_units
        self.return_sequece = return_sequece
        self.length = length
        self.batch_size = batch_size
        self.bias = bias

        with tf.variable_scope("{}_vars".format(self.name)):
            self.vars["fg"] = glorot(
                [self.input_dim + self.num_units, self.num_units],
                name="forget_weight")
            self.vars["il"] = glorot(
                [self.input_dim + self.num_units, self.num_units],
                name="input_weight")
            self.vars["ol"] = glorot(
                [self.input_dim + self.num_units, self.num_units],
                name="output_weight")
            self.vars["Cl"] = glorot(
                [self.input_dim + self.num_units, self.num_units],
                name="cell_weight")

            if self.bias:
                self.vars["fgb"] = zeros([self.num_units], name="forget_bias")
                self.vars["ilb"] = zeros([self.num_units], name="input_bias")
                self.vars["olb"] = zeros([self.num_units], name="output_bias")
                self.vars["Clb"] = zeros([self.num_units], name="cell_bias")

        self.hidden_state, self.cell_state = self.init_hidden(self.batch_size)

        if self.logging:
            self._log_vars()
Example #18
def decoder(embed):
    embed_size = embed.shape[1].value
    with tf.compat.v1.variable_scope("deco"):
        weight3 = glorot([embed_size, embed_size])
    U = embed
    V = embed
    logits = tf.matmul(tf.matmul(U, weight3), tf.transpose(V))
    logits = tf.reshape(logits, [-1, 1])
    return tf.nn.sigmoid(logits)
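
A hypothetical usage sketch for decoder, again assuming TensorFlow 1.x and the glorot helper from above: the flattened (N*N, 1) scores can be reshaped back into a dense reconstructed adjacency matrix.

import numpy as np
import tensorflow as tf

N, d = 5, 8
embed = tf.constant(np.random.rand(N, d), dtype=tf.float32)
scores = decoder(embed)               # shape (N*N, 1): one probability per ordered node pair
adj_rec = tf.reshape(scores, [N, N])  # reconstructed adjacency probabilities

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(adj_rec))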
Example #19
    def _create_transductive_gembs_placeholders(self, data, num1, num2):
        # Create the dataset-level graph-level embeddings table for later look up.
        self.all_gembs = glorot([data.num_graphs(), FLAGS.gemb_dim],
                                name='all_graph_embeddings')
        self.gemb_lookup_ids_1 = tf.placeholder(tf.int32, shape=(num1))
        self.gemb_lookup_ids_2 = tf.placeholder(tf.int32, shape=(num2))
        self.val_test_gemb_lookup_ids_1 = tf.placeholder(
            tf.int32, shape=(1))  # 1 graph pair per val/test
        self.val_test_gemb_lookup_ids_2 = tf.placeholder(tf.int32, shape=(1))
        self.dropout = tf.placeholder_with_default(0., shape=())
Example #20
def attn_head(seq,
              out_sz,
              bias_mat,
              activation,
              in_drop=0.0,
              coef_drop=0.0,
              residual=False):
    with tf.name_scope('my_attn'):
        if in_drop != 0.0:
            seq = tf.nn.dropout(seq, 1.0 - in_drop)
        seq_fts = seq
        latent_factor_size = 8

        w_1 = glorot([seq_fts.shape[2].value, latent_factor_size])
        w_2 = glorot([2 * seq_fts.shape[2].value, latent_factor_size])

        f_1 = tf.layers.conv1d(seq_fts, 1, 1)
        f_2 = tf.layers.conv1d(seq_fts, 1, 1)
        logits = f_1 + tf.transpose(f_2, [0, 2, 1])
        coefs = tf.nn.softmax(tf.nn.leaky_relu(logits[0]) + bias_mat[0])

        if coef_drop != 0.0:
            coefs = tf.nn.dropout(coefs, 1.0 - coef_drop)
        if in_drop != 0.0:
            seq_fts = tf.nn.dropout(seq_fts, 1.0 - in_drop)

        neigh_embs = tf.matmul(coefs, seq_fts[0])

        neigh_embs_aggre_1 = tf.matmul(tf.add(seq_fts[0], neigh_embs), w_1)
        neigh_embs_aggre_2 = tf.matmul(
            tf.concat([seq_fts[0], neigh_embs], axis=-1), w_2)

        final_embs = activation(neigh_embs_aggre_1) + activation(
            neigh_embs_aggre_2)

        return final_embs, coefs
Example #21
    def __init__(self,
                 input_dim,
                 output_dim,
                 placeholders,
                 dropout=0.,
                 sparse_inputs=False,
                 act=tf.nn.relu,
                 bias=False,
                 featureless=False,
                 norm=False,
                 precalc=False,
                 residual=False,
                 **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)

        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        self.act = act
        self.support = placeholders['support']
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias
        self.norm = norm
        self.precalc = precalc
        self.residual = residual

        # helper variable for sparse dropout
        self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = inits.glorot([input_dim, output_dim],
                                                name='weights')
            if self.bias:
                self.vars['bias'] = inits.zeros([output_dim], name='bias')

            if self.norm:
                self.vars['offset'] = inits.zeros([1, output_dim],
                                                  name='offset')
                self.vars['scale'] = inits.ones([1, output_dim], name='scale')

        if self.logging:
            self._log_vars()
Example #22
    def _build(self):

        weights1 = glorot([self.input_dim, self.layer_sizes[1]],
                          name='weights')

        self.layers.append(
            Dense(input_dim=self.input_dim,
                  output_dim=self.layer_sizes[1],
                  placeholders=self.placeholders,
                  act=tf.nn.relu,
                  dropout=False,
                  logging=self.logging))

        weights1_decode = tf.transpose(weights1)
        self.layers.append(
            Dense(input_dim=self.layer_sizes[1],
                  output_dim=self.output_dim,
                  placeholders=self.placeholders,
                  act=lambda x: x,
                  dropout=False,
                  shared_weights=weights1_decode,
                  logging=self.logging))
Example #23
    def __init__(self, num_in_channels, num_out_channels, filter_size, strides,
                 padding, dropout, bias, act, **kwargs):

        super(Conv1D, self).__init__(**kwargs)

        self.num_in_channels = num_in_channels
        self.num_out_channels = num_out_channels
        self.filter_size = filter_size
        self.strides = strides
        self.padding = padding
        self.dropout = dropout

        self.num_features_nonzero = num_in_channels
        self.bias = bias
        self.act = act

        with tf.variable_scope("{}_vars".format(self.name)):
            self.vars["weights"] = glorot(
                [filter_size, self.num_in_channels, self.num_out_channels],
                name="weights")
            if self.bias:
                self.vars["bias"] = zeros([self.num_out_channels], name="bias")
Example #24
    def __init__(self, num_classes,
            placeholders, features, adj, degrees,
            layer_infos, concat=True, aggregator_type="mean", 
            model_size="small", sigmoid_loss=False, identity_dim=0, num_re=3,
                **kwargs):
        '''
        Args:
            - placeholders: Stanford TensorFlow placeholder object.
            - features: Numpy array with node features.
            - adj: Numpy array with adjacency lists (padded with random re-samples)
            - degrees: Numpy array with node degrees. 
            - layer_infos: List of SAGEInfo namedtuples that describe the parameters of all
                   the recursive layers. See SAGEInfo definition above. It contains *num_re* lists of layer_info
            - concat: whether to concatenate during recursive iterations
            - aggregator_type: how to aggregate neighbor information
            - model_size: one of "small" and "big"
            - sigmoid_loss: Set to true if nodes can belong to multiple classes
            - identity_dim: context embedding
        '''

        models.GeneralizedModel.__init__(self, **kwargs)

        if aggregator_type == "mean":
            self.aggregator_cls = MeanAggregator
        elif aggregator_type == "seq":
            self.aggregator_cls = SeqAggregator
        elif aggregator_type == "meanpool":
            self.aggregator_cls = MeanPoolingAggregator
        elif aggregator_type == "maxpool":
            self.aggregator_cls = MaxPoolingAggregator
        elif aggregator_type == "gcn":
            self.aggregator_cls = GCNAggregator
        else:
            raise Exception("Unknown aggregator: ", self.aggregator_cls)

        # get info from placeholders...
        self.inputs1 = placeholders["batch"]
        self.model_size = model_size
        self.adj_info = adj
        if identity_dim > 0:
           self.embeds_context = tf.get_variable("node_embeddings", [features.shape[0], identity_dim])
        else:
           self.embeds_context = None
        if features is None: 
            if identity_dim == 0:
                raise Exception("Must have a positive value for identity feature dimension if no input features given.")
            self.features = self.embeds_context
        else:
            self.features = tf.Variable(tf.constant(features, dtype=tf.float32), trainable=False)
            if not self.embeds_context is None:
                self.features = tf.concat([self.embeds_context, self.features], axis=1)
        self.degrees = degrees
        self.concat = concat
        self.num_classes = num_classes
        self.sigmoid_loss = sigmoid_loss
        self.dims = [(0 if features is None else features.shape[1]) + identity_dim]
        self.dims.extend([layer_infos[0][i].output_dim for i in range(len(layer_infos[0]))])
        self.batch_size = placeholders["batch_size"]
        self.placeholders = placeholders
        self.layer_infos = layer_infos
        self.num_relations = num_re
        dim_mult = 2 if self.concat else 1
        self.relation_vectors = tf.Variable(glorot([num_re, self.dims[-1] * dim_mult]), trainable=True, name='relation_vectors')
        self.attention_vec = tf.Variable(glorot([self.dims[-1] * dim_mult * 2, 1]))

        self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
        self.build()
Example #25
    def __init__(self,
                 input_dim,
                 output_dim,
                 weight_decay,
                 model_size="small",
                 neigh_input_dim=None,
                 dropout=0.,
                 bias=False,
                 act=tf.nn.relu,
                 name=None,
                 concat=False,
                 **kwargs):
        super(TwoMaxLayerPoolingAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat

        if neigh_input_dim is None:
            neigh_input_dim = input_dim

        if name is not None:
            name = '/' + name
        else:
            name = ''

        if model_size == "small":
            hidden_dim_1 = self.hidden_dim_1 = 512
            hidden_dim_2 = self.hidden_dim_2 = 256
        elif model_size == "big":
            hidden_dim_1 = self.hidden_dim_1 = 1024
            hidden_dim_2 = self.hidden_dim_2 = 512

        self.mlp_layers = []
        self.mlp_layers.append(
            Dense(input_dim=neigh_input_dim,
                  output_dim=hidden_dim_1,
                  weight_decay=weight_decay,
                  act=tf.nn.relu,
                  dropout=dropout,
                  sparse_inputs=False,
                  logging=self.logging))
        self.mlp_layers.append(
            Dense(input_dim=hidden_dim_1,
                  output_dim=hidden_dim_2,
                  weight_decay=weight_decay,
                  act=tf.nn.relu,
                  dropout=dropout,
                  sparse_inputs=False,
                  logging=self.logging))

        with tf.compat.v1.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([hidden_dim_2, output_dim],
                                                name='neigh_weights')

            self.vars['self_weights'] = glorot([input_dim, output_dim],
                                               name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')

        if self.logging:
            self._log_vars()

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.neigh_input_dim = neigh_input_dim
Example #26
    def reset_parameters(self):
        glorot(self.weight)
        glorot(self.att)
        zeros(self.bias)
Example #27
    def reset_parameters(self):
        glorot(self.weight)
        zeros(self.bias)
        self.cached_result = None
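
Examples #14, #26 and #27 follow the PyTorch convention instead, where glorot and zeros initialize an existing tensor in place (returning nothing) rather than creating a tf.Variable. A minimal sketch of those helpers, modeled on torch_geometric.nn.inits and offered here as an assumption:

import math

import torch


def glorot(tensor):
    # Xavier/Glorot uniform initialization, applied in place.
    if tensor is not None:
        stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
        tensor.data.uniform_(-stdv, stdv)


def zeros(tensor):
    # Zero initialization, applied in place; tolerates a missing (None) bias.
    if tensor is not None:
        tensor.data.fill_(0)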