Example #1
    def __init__(self,
                 input_dim,
                 output_dim,
                 placeholders,
                 dropout=0.,
                 sparse_inputs=False,
                 act=tf.nn.relu,
                 bias=False,
                 featureless=False,
                 **kwargs):
        super(GraphConvolution_GCN, self).__init__(**kwargs)

        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        self.act = act
        self.support = placeholders['support']
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias

        # helper variable for sparse dropout
        #self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):

            self.vars['weights'] = glorot([input_dim, output_dim],
                                          name='weights')
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        if self.logging:
            self._log_vars()
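These snippets all rely on glorot and zeros initializer helpers that are not shown here, and whose exact signatures vary between the source code bases (some pass shape=, name=, or trainable= keywords). A minimal TF1-style sketch of what such helpers commonly look like, following the widely used Glorot-uniform scheme, is given below as an assumption rather than the original implementation; the later PyTorch-Geometric-style examples (#20 and #21) instead use in-place glorot/zeros functions that take an existing tensor.

import numpy as np
import tensorflow as tf


def glorot(shape, name=None):
    """Glorot & Bengio (AISTATS 2010) uniform initialization for a new variable."""
    init_range = np.sqrt(6.0 / (shape[0] + shape[1]))
    initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range,
                                dtype=tf.float32)
    return tf.Variable(initial, name=name)


def zeros(shape, name=None):
    """All-zeros variable, typically used for bias terms."""
    initial = tf.zeros(shape, dtype=tf.float32)
    return tf.Variable(initial, name=name)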
Example #2
    def __init__(self,
                 input_dim,
                 placeholders,
                 dropout=0.,
                 act=tf.nn.relu,
                 bias=False,
                 **kwargs):
        super(SplitAndAttentionPooling, self).__init__(**kwargs)

        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        self.act = act
        self.support = placeholders['support']
        self.bias = bias
        # the output dimension is the same as the input dimension
        self.output_dim = input_dim
        # helper variable for sparse dropout
        #self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):

            self.vars['weights'] = glorot([input_dim, input_dim],
                                          name='weights')
            if self.bias:
                self.vars['bias'] = zeros([input_dim], name='bias')

        if self.logging:
            self._log_vars()
Example #3
    def __init__(self,
                 input_dim,
                 output_dim,
                 dropout=0.0,
                 sparse_inputs=False,
                 act=tf.nn.relu,
                 bias=False,
                 **kwargs):
        super(Dense, self).__init__(**kwargs)

        self.dropout = dropout
        self.act = act
        self.sparse_inputs = sparse_inputs
        self.bias = bias
        self.input_dim = input_dim

        # helper variable for sparse dropout
        self.num_features_nonzero = input_dim

        with tf.variable_scope("{}_vars".format(self.name)):
            self.vars['weights'] = glorot([input_dim, output_dim],
                                          name='weights')
            if self.bias:
                self.vars["bias"] = zeros([output_dim], name="bias")

        if self.logging:
            self._log_vars()
Example #4
    def __init__(self,
                 input_dim,
                 output_dim,
                 dropout=0.,
                 bias=False,
                 hidden_dim=512,
                 act=tf.nn.relu,
                 name=None,
                 **kwargs):
        super(WeightedAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.hidden_dim = hidden_dim

        if name is not None:
            name = '/' + name
        else:
            name = ''

        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['mlp_weights'] = glorot([input_dim, output_dim],
                                              name='mlp_weights')
            tf.summary.histogram("mlp_weights", self.vars['mlp_weights'])
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
                tf.summary.histogram("bias", self.vars['bias'])
        if self.logging:
            self._log_vars()

        self.input_dim = input_dim
        self.output_dim = output_dim
Example #5
    def __init__(self,
                 input_dim,
                 output_dim,
                 placeholders,
                 dropout=0.,
                 act=tf.nn.relu,
                 bias=False,
                 **kwargs):
        super(NTN, self).__init__(**kwargs)

        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        self.act = act
        self.support = placeholders['support']
        self.bias = bias
        self.input_dim = input_dim
        self.output_dim = output_dim

        # helper variable for sparse dropout
        #self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):

            self.vars['W'] = glorot([output_dim, input_dim, input_dim],
                                    name='W')
            self.vars['V'] = glorot([output_dim, 2 * input_dim, 1], name='V')
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        if self.logging:
            self._log_vars()
Example #6
    def __init__(self,
                 input_dim,
                 output_dim,
                 dropout=0.,
                 act=tf.nn.relu,
                 placeholders=None,
                 bias=True,
                 featureless=False,
                 sparse_inputs=False,
                 **kwargs):
        super(Dense, self).__init__(**kwargs)

        self.dropout = dropout

        self.act = act
        self.featureless = featureless
        self.bias = bias
        self.input_dim = input_dim
        self.output_dim = output_dim

        # helper variable for sparse dropout
        self.sparse_inputs = sparse_inputs
        if sparse_inputs:
            self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = tf.get_variable(
                'weights',
                shape=(input_dim, output_dim),
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer(),
                regularizer=tf.contrib.layers.l2_regularizer(
                    conf.weight_decay))
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
Example #7
    def __init__(self, input_dim, output_dim, neigh_input_dim=None,
            dropout=0., bias=False, act=tf.nn.relu, 
            name=None, concat=False, **kwargs):
        super(MeanAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat

        if neigh_input_dim is None:
            neigh_input_dim = input_dim

        if name is not None:
            name = '/' + name
        else:
            name = ''

        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([neigh_input_dim, output_dim],
                                                        name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim],
                                                        name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        if self.logging:
            self._log_vars()

        self.input_dim = input_dim
        self.output_dim = output_dim
Example #8
    def __init__(self, input_dim, output_dim, neigh_input_dim=None,
            dropout=0, bias=True, act=tf.nn.relu,
            name=None, concat=False, mode="train", **kwargs):
        super(MeanAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        self.mode = mode

        if name is not None:
            name = '/' + name
        else:
            name = ''

        if neigh_input_dim is None:
            neigh_input_dim = input_dim

        if concat:
            self.output_dim = 2 * output_dim
        else:
            self.output_dim = output_dim

        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([neigh_input_dim, output_dim],
                                                name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim],
                                               name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')

        self.input_dim = input_dim
        self.output_dim = output_dim
Example #9
    def __init__(self, input_dim, output_dim, weight_decay, dropout=0.,
                 act=tf.nn.relu, placeholders=None, bias=True,
                 featureless=False, sparse_inputs=False, **kwargs):
        super(Dense, self).__init__(**kwargs)

        self.dropout = dropout

        self.act = act
        self.featureless = featureless
        self.bias = bias
        self.input_dim = input_dim
        self.output_dim = output_dim

        # helper variable for sparse dropout
        self.sparse_inputs = sparse_inputs
        if sparse_inputs:
            self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.compat.v1.variable_scope(self.name + '_vars'):
            self.vars['weights'] = tf.compat.v1.get_variable(
                     'weights',
                     shape=(input_dim, output_dim),
                     dtype=tf.float32,
                     initializer=tf.compat.v1.keras.initializers.VarianceScaling(
                             scale=1.0, mode="fan_avg",
                             distribution="uniform"),
                     regularizer=tf.keras.regularizers.l2(
                             0.5 * (weight_decay)))
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        if self.logging:
            self._log_vars()
Example #10
    def __init__(self,
                 input_dim,
                 output_dim,
                 placeholders,
                 dropout=0.,
                 sparse_inputs=False,
                 act=tf.nn.relu,
                 bias=True):

        self.vars = {}
        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.
        self.act = act

        self.sparse_inputs = sparse_inputs
        self.bias = bias

        # helper variable for sparse dropout

        # len(self.support) is asserted to be 1 since there is only one weight matrix per layer
        # initialize the weight matrices
        # the variables are found under the key tf.GraphKeys.GLOBAL_VARIABLES
        self.vars['weights_' + str(0)] = inits.glorot([input_dim, output_dim],
                                                      name='weights_' + str(0))

        # initialize the biases as 0 matrices of correct shapes
        if self.bias:
            self.vars['bias'] = inits.zeros([output_dim], name='bias')
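As the comments in the example above note, every variable created this way is registered in the GLOBAL_VARIABLES collection. A minimal TF1-style sketch of how those variables can then be listed and initialized (illustrative only, not part of the example):

import tensorflow as tf

# all variables created via tf.Variable / tf.get_variable are registered here
layer_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

with tf.Session() as sess:
    # initializes everything registered in GLOBAL_VARIABLES
    sess.run(tf.global_variables_initializer())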
Example #11
    def __init__(self, input_dim, num_units, length, batch_size,
                 return_sequece, bias, **kwargs):
        """
		initialize method
		params:
			input_dim: integer
				the dimension of inputs
			num_units: integer
				the number of hiddens
			bias: Boolean
				the boolean number
		returns:
			none
		"""
        super(LSTM, self).__init__(**kwargs)

        self.input_dim = input_dim
        self.num_units = num_units
        self.return_sequece = return_sequece
        self.length = length
        self.batch_size = batch_size
        self.bias = bias

        with tf.variable_scope("{}_vars".format(self.name)):
            self.vars["fg"] = glorot(
                [self.input_dim + self.num_units, self.num_units],
                name="forget_weight")
            self.vars["il"] = glorot(
                [self.input_dim + self.num_units, self.num_units],
                name="input_weight")
            self.vars["ol"] = glorot(
                [self.input_dim + self.num_units, self.num_units],
                name="output_weight")
            self.vars["Cl"] = glorot(
                [self.input_dim + self.num_units, self.num_units],
                name="cell_weight")

            if self.bias:
                self.vars["fgb"] = zeros([self.num_units], name="forget_bias")
                self.vars["ilb"] = zeros([self.num_units], name="input_bias")
                self.vars["olb"] = zeros([self.num_units], name="output_bias")
                self.vars["Clb"] = zeros([self.num_units], name="cell_bias")

        self.hidden_state, self.cell_state = self.init_hidden(self.batch_size)

        if self.logging:
            self._log_vars()
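The per-gate weights and biases created above (fg/il/ol/Cl) map onto the standard LSTM equations. A minimal sketch of how one recurrent step could apply them, assuming bias=True so the *b variables exist; the method name step and its signature are illustrative assumptions, not part of the original class:

    def step(self, x_t, hidden_state, cell_state):
        # concatenate the current input with the previous hidden state:
        # shape [batch_size, input_dim + num_units]
        concat = tf.concat([x_t, hidden_state], axis=1)

        f = tf.sigmoid(tf.matmul(concat, self.vars["fg"]) + self.vars["fgb"])    # forget gate
        i = tf.sigmoid(tf.matmul(concat, self.vars["il"]) + self.vars["ilb"])    # input gate
        o = tf.sigmoid(tf.matmul(concat, self.vars["ol"]) + self.vars["olb"])    # output gate
        c_hat = tf.tanh(tf.matmul(concat, self.vars["Cl"]) + self.vars["Clb"])   # candidate cell

        cell_state = f * cell_state + i * c_hat
        hidden_state = o * tf.tanh(cell_state)
        return hidden_state, cell_state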
Example #12
    def __init__(self,
                 input_dim,
                 output_dim,
                 placeholders,
                 dropout=0.,
                 sparse_inputs=False,
                 act=tf.nn.relu,
                 bias=False,
                 featureless=False,
                 norm=False,
                 precalc=False,
                 residual=False,
                 **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)

        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        self.act = act
        self.support = placeholders['support']
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias
        self.norm = norm
        self.precalc = precalc
        self.residual = residual

        # helper variable for sparse dropout
        self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = inits.glorot([input_dim, output_dim],
                                                name='weights')
            if self.bias:
                self.vars['bias'] = inits.zeros([output_dim], name='bias')

            if self.norm:
                self.vars['offset'] = inits.zeros([1, output_dim],
                                                  name='offset')
                self.vars['scale'] = inits.ones([1, output_dim], name='scale')

        if self.logging:
            self._log_vars()
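Several of these layers keep num_features_nonzero around as a "helper variable for sparse dropout". The sparse-dropout helper itself is not shown in any of the snippets; a minimal TF1-style sketch of the commonly used implementation (an assumption, not necessarily what these code bases define) is:

def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors; noise_shape is the number of nonzero entries."""
    random_tensor = keep_prob
    random_tensor += tf.random_uniform(noise_shape)
    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
    pre_out = tf.sparse_retain(x, dropout_mask)
    return pre_out * (1. / keep_prob)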
Example #13
    def __init__(self,
                 input_dim,
                 output_dim,
                 model_size="small",
                 neigh_input_dim=None,
                 dropout=0.,
                 bias=True,
                 act=tf.nn.relu,
                 name=None,
                 concat=False,
                 **kwargs):
        super(MaxPoolingAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat

        if name is not None:
            name = '/' + name
        else:
            name = ''

        if neigh_input_dim is None:
            neigh_input_dim = input_dim

        if concat:
            self.output_dim = 2 * output_dim
        else:
            self.output_dim = output_dim

        if model_size == "small":
            hidden_dim = self.hidden_dim = 50
        elif model_size == "big":
            hidden_dim = self.hidden_dim = 50

        self.mlp_layers = []
        self.mlp_layers.append(
            Dense(input_dim=neigh_input_dim,
                  output_dim=hidden_dim,
                  act=tf.nn.relu,
                  dropout=dropout,
                  sparse_inputs=False,
                  logging=self.logging))

        with tf.variable_scope(self.name + name + '_vars'):

            self.vars['neigh_weights'] = glorot([hidden_dim, output_dim],
                                                name='neigh_weights')

            self.vars['self_weights'] = glorot([input_dim, output_dim],
                                               name='self_weights')

            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.neigh_input_dim = neigh_input_dim
Example #14
    def __init__(self, input_dim1, input_dim2, placeholders, dropout=False,
                 act=tf.nn.sigmoid, loss_fn='xent', neg_sample_weights=1.0,
                 bias=False, bilinear_weights=False, **kwargs):
        """
        Basic class that applies a skip-gram-like loss
        (i.e., dot products between a node and its target, and between the
        node and its negative samples).
        Args:
            bilinear_weights: use a bilinear weight for affinity calculation:
                                u^T A v. If set to false, it is assumed that
                                input dimensions are the same and the affinity
                                will be based on dot product.
        """
        super(BipartiteEdgePredLayer, self).__init__(**kwargs)
        self.input_dim1 = input_dim1
        self.input_dim2 = input_dim2
        self.act = act
        self.bias = bias
        self.eps = 1e-7

        # Margin for hinge loss
        self.margin = 0.1
        self.neg_sample_weights = neg_sample_weights

        self.bilinear_weights = bilinear_weights

        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        # output a likelihood term
        self.output_dim = 1
        with tf.compat.v1.variable_scope(self.name + '_vars'):
            # bilinear form
            if bilinear_weights:
                # self.vars['weights'] = glorot([input_dim1, input_dim2],
                #                              name='pred_weights')
                self.vars['weights'] = tf.compat.v1.get_variable(
                        'pred_weights',
                        shape=(input_dim1, input_dim2),
                        dtype=tf.float32,
                        initializer=tf.compat.v1.keras.initializers.VarianceScaling(
                                scale=1.0, mode="fan_avg",
                                distribution="uniform"))

            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')

        if loss_fn == 'xent':
            self.loss_fn = self._xent_loss
        elif loss_fn == 'skipgram':
            self.loss_fn = self._skipgram_loss
        elif loss_fn == 'hinge':
            self.loss_fn = self._hinge_loss

        if self.logging:
            self._log_vars()
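The docstring above describes two affinity variants: a bilinear form u^T A v when bilinear_weights is set, and a plain dot product (which requires input_dim1 == input_dim2) otherwise. A minimal sketch of such an affinity method, written against the variables created in this __init__ but offered as an assumption rather than the original implementation:

    def affinity(self, inputs1, inputs2):
        if self.bilinear_weights:
            # u^T A v, computed row-wise: (v @ A^T), then an elementwise product with u
            prod = tf.matmul(inputs2, tf.transpose(self.vars['weights']))
            return tf.reduce_sum(inputs1 * prod, axis=1)
        # plain dot product between corresponding rows
        return tf.reduce_sum(inputs1 * inputs2, axis=1)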
Example #15
    def __init__(self,
                 input_dim,
                 output_dim,
                 neigh_input_dim=None,
                 dropout=0,
                 bias=True,
                 act=tf.nn.relu,
                 name=None,
                 concat=False,
                 mode="train",
                 **kwargs):
        super(AttentionAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        self.mode = mode

        if name is not None:
            name = '/' + name
        else:
            name = ''

        if neigh_input_dim is None:
            neigh_input_dim = input_dim

        self.input_dim = input_dim
        self.output_dim = output_dim

        with tf.variable_scope(self.name + name + '_vars'):
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')

            self.q_dense_layer = Dense(input_dim=input_dim,
                                       output_dim=input_dim,
                                       bias=False,
                                       sparse_inputs=False,
                                       name="q")
            self.k_dense_layer = Dense(input_dim=input_dim,
                                       output_dim=input_dim,
                                       bias=False,
                                       sparse_inputs=False,
                                       name="k")
            self.v_dense_layer = Dense(input_dim=input_dim,
                                       output_dim=input_dim,
                                       bias=False,
                                       sparse_inputs=False,
                                       name="v")

            self.output_dense_layer = Dense(input_dim=input_dim,
                                            output_dim=output_dim,
                                            bias=False,
                                            sparse_inputs=False,
                                            name="output_transform")
Example #16
    def __init__(self, num_classes, num_feas, learning_rate, **kwargs):
        super(CenterLoss, self).__init__(**kwargs)

        self.num_classes = num_classes
        self.num_feas = num_feas
        self.learning_rate = learning_rate

        # Declare variables
        with tf.variable_scope("{}_vars".format(self.name)):
            self.vars["center"] = zeros(
                shape=[self.num_classes, self.num_feas], trainable=False)
Example #17
    def __init__(self,
                 input_dim,
                 output_dim,
                 model_size="small",
                 neigh_input_dim=None,
                 dropout=0.,
                 bias=True,
                 act=tf.nn.relu,
                 name=None,
                 concat=False,
                 mode="train",
                 **kwargs):
        super(SeqAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        self.mode = mode
        self.output_dim = output_dim

        if neigh_input_dim is None:
            neigh_input_dim = input_dim

        if name is not None:
            name = '/' + name
        else:
            name = ''

        if model_size == "small":
            hidden_dim = self.hidden_dim = 128
        elif model_size == "big":
            hidden_dim = self.hidden_dim = 256

        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([hidden_dim, output_dim],
                                                name='neigh_weights')

            self.vars['self_weights'] = glorot([input_dim, output_dim],
                                               name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')

        if self.logging:
            self._log_vars()

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.neigh_input_dim = neigh_input_dim
        self.cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_dim)
Example #18
    def __init__(self, num_in_channels, num_out_channels, filter_size, strides,
                 padding, dropout, bias, act, **kwargs):

        super(Conv1D, self).__init__(**kwargs)

        self.num_in_channels = num_in_channels
        self.num_out_channels = num_out_channels
        self.filter_size = filter_size
        self.strides = strides
        self.padding = padding
        self.dropout = dropout

        self.num_features_nonzero = num_in_channels
        self.bias = bias
        self.act = act

        with tf.variable_scope("{}_vars".format(self.name)):
            self.vars["weights"] = glorot(
                [filter_size, self.num_in_channels, self.num_out_channels],
                name="weights")
            if self.bias:
                self.vars["bias"] = zeros([self.num_out_channels], name="bias")
Example #19
    def __init__(self,
                 input_dim,
                 output_dim,
                 weight_decay,
                 model_size="small",
                 neigh_input_dim=None,
                 dropout=0.,
                 bias=False,
                 act=tf.nn.relu,
                 name=None,
                 concat=False,
                 **kwargs):
        super(TwoMaxLayerPoolingAggregator, self).__init__(**kwargs)

        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat

        if neigh_input_dim is None:
            neigh_input_dim = input_dim

        if name is not None:
            name = '/' + name
        else:
            name = ''

        if model_size == "small":
            hidden_dim_1 = self.hidden_dim_1 = 512
            hidden_dim_2 = self.hidden_dim_2 = 256
        elif model_size == "big":
            hidden_dim_1 = self.hidden_dim_1 = 1024
            hidden_dim_2 = self.hidden_dim_2 = 512

        self.mlp_layers = []
        self.mlp_layers.append(
            Dense(input_dim=neigh_input_dim,
                  output_dim=hidden_dim_1,
                  weight_decay=weight_decay,
                  act=tf.nn.relu,
                  dropout=dropout,
                  sparse_inputs=False,
                  logging=self.logging))
        self.mlp_layers.append(
            Dense(input_dim=hidden_dim_1,
                  output_dim=hidden_dim_2,
                  weight_decay=weight_decay,
                  act=tf.nn.relu,
                  dropout=dropout,
                  sparse_inputs=False,
                  logging=self.logging))

        with tf.compat.v1.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([hidden_dim_2, output_dim],
                                                name='neigh_weights')

            self.vars['self_weights'] = glorot([input_dim, output_dim],
                                               name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        if self.logging:
            self._log_vars()

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.neigh_input_dim = neigh_input_dim
Example #20
    def reset_parameters(self):
        glorot(self.weight)
        zeros(self.bias)
        self.cached_result = None
Example #21
    def reset_parameters(self):
        glorot(self.weight)
        glorot(self.att)
        zeros(self.bias)
Example #22
    def init_hidden(self, batch_size=-1):
        hidden_state = zeros([batch_size, self.num_units], name="hidden_state")
        cell_state = zeros([batch_size, self.num_units], name="cell_state")
        return hidden_state, cell_state