def __init__(self, input_dim1, input_dim2, placeholders, dropout=False,
             act=tf.nn.sigmoid, loss_fn='hinge', neg_sample_weights=1.0,
             bias=False, bilinear_weights=False, **kwargs):
    """Basic layer that applies a skip-gram-like loss, i.e. the dot product
    between node and target embeddings and between node and negative-sample
    embeddings.

    Args:
        bilinear_weights: use a bilinear form for the affinity calculation,
            u^T A v. If set to False, the input dimensions are assumed to be
            equal and the affinity is a plain dot product.
    """
    super(BipartiteEdgePredLayer, self).__init__(**kwargs)
    self.input_dim1 = input_dim1
    self.input_dim2 = input_dim2
    self.act = act
    self.bias = bias
    self.eps = 1e-7

    # Margin for hinge loss
    self.margin = 0.1
    self.neg_sample_weights = neg_sample_weights
    self.bilinear_weights = bilinear_weights

    if dropout:
        self.dropout = placeholders['dropout']
    else:
        self.dropout = 0.

    # Output a single likelihood term per edge.
    self.output_dim = 1
    with tf.variable_scope(self.name + '_vars'):
        # Bilinear form: a full weight matrix A for u^T A v.
        if bilinear_weights:
            self.vars['weights'] = tf.get_variable(
                'pred_weights',
                shape=(input_dim1, input_dim2),
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer())

        if self.bias:
            self.vars['bias'] = zeros([self.output_dim], name='bias')

    if loss_fn == 'hinge':
        self.loss_fn = self._hinge_loss

    if self.logging:
        self._log_vars()
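# A minimal sketch (assumed, not from the original file) of the affinity and
# hinge-loss methods that this constructor wires up via
# `self.loss_fn = self._hinge_loss`. The method name `affinity`, the exact
# reductions, and the tensor shapes are assumptions; only `_hinge_loss`,
# `self.margin`, and the bilinear/dot-product switch come from the code above.
def affinity(self, inputs1, inputs2):
    # Bilinear score u^T A v when bilinear_weights is set; plain dot product otherwise.
    if self.bilinear_weights:
        prod = tf.matmul(inputs2, tf.transpose(self.vars['weights']))
        return tf.reduce_sum(inputs1 * prod, axis=1)
    return tf.reduce_sum(inputs1 * inputs2, axis=1)

def _hinge_loss(self, inputs1, inputs2, neg_samples):
    # Positive affinities: one score per (node, target) pair.
    aff = self.affinity(inputs1, inputs2)
    # Negative affinities: each node scored against every negative sample.
    neg_aff = tf.matmul(inputs1, tf.transpose(neg_samples))
    # Hinge: penalize negatives that score within `margin` of the positive.
    diff = tf.nn.relu(tf.subtract(neg_aff, tf.expand_dims(aff, 1) - self.margin))
    return tf.reduce_sum(diff)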
def __init__(self, input_dim, output_dim, model_size="small",
             neigh_input_dim=None, dropout=0., bias=False, act=tf.nn.relu,
             name=None, concat=False, **kwargs):
    super(MeanPoolingAggregator, self).__init__(**kwargs)

    self.dropout = dropout
    self.bias = bias
    self.act = act
    self.concat = concat

    if neigh_input_dim is None:
        neigh_input_dim = input_dim

    if name is not None:
        name = '/' + name
    else:
        name = ''

    if model_size == "small":
        hidden_dim = self.hidden_dim = 512
    elif model_size == "big":
        hidden_dim = self.hidden_dim = 1024

    # MLP applied to each neighbor vector before mean-pooling.
    self.mlp_layers = []
    self.mlp_layers.append(Dense(input_dim=neigh_input_dim,
                                 output_dim=hidden_dim,
                                 act=tf.nn.relu,
                                 dropout=dropout,
                                 sparse_inputs=False,
                                 logging=self.logging))

    with tf.variable_scope(self.name + name + '_vars'):
        self.vars['neigh_weights'] = glorot([hidden_dim, output_dim],
                                            name='neigh_weights')
        self.vars['self_weights'] = glorot([input_dim, output_dim],
                                           name='self_weights')
        if self.bias:
            self.vars['bias'] = zeros([output_dim], name='bias')

    if self.logging:
        self._log_vars()

    self.input_dim = input_dim
    self.output_dim = output_dim
    self.neigh_input_dim = neigh_input_dim
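# A minimal sketch (assumed, not from the original file) of how this
# aggregator's forward pass typically uses the variables defined above: each
# neighbor vector is passed through the shared MLP, mean-pooled, and combined
# with the transformed self vector. The method name `_call` and the exact
# tensor shapes are assumptions.
def _call(self, inputs):
    self_vecs, neigh_vecs = inputs

    dims = tf.shape(neigh_vecs)
    batch_size = dims[0]
    num_neighbors = dims[1]

    # Apply the shared MLP to every neighbor vector, then mean-pool per node.
    h_reshaped = tf.reshape(neigh_vecs,
                            (batch_size * num_neighbors, self.neigh_input_dim))
    for layer in self.mlp_layers:
        h_reshaped = layer(h_reshaped)
    neigh_h = tf.reshape(h_reshaped,
                         (batch_size, num_neighbors, self.hidden_dim))
    neigh_h = tf.reduce_mean(neigh_h, axis=1)

    from_neighs = tf.matmul(neigh_h, self.vars['neigh_weights'])
    from_self = tf.matmul(self_vecs, self.vars['self_weights'])

    # Either sum or concatenate the self and neighbor streams.
    if not self.concat:
        output = tf.add_n([from_self, from_neighs])
    else:
        output = tf.concat([from_self, from_neighs], axis=1)

    if self.bias:
        output += self.vars['bias']

    return self.act(output)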
def __init__(self, input_dim, output_dim, model_size="small",
             neigh_input_dim=None, dropout=0., bias=False, act=tf.nn.relu,
             name=None, concat=False, **kwargs):
    super(SeqAggregator, self).__init__(**kwargs)

    self.dropout = dropout
    self.bias = bias
    self.act = act
    self.concat = concat

    if neigh_input_dim is None:
        neigh_input_dim = input_dim

    if name is not None:
        name = '/' + name
    else:
        name = ''

    if model_size == "small":
        hidden_dim = self.hidden_dim = 128
    elif model_size == "big":
        hidden_dim = self.hidden_dim = 256

    with tf.variable_scope(self.name + name + '_vars'):
        self.vars['neigh_weights'] = glorot([hidden_dim, output_dim],
                                            name='neigh_weights')
        self.vars['self_weights'] = glorot([input_dim, output_dim],
                                           name='self_weights')
        if self.bias:
            self.vars['bias'] = zeros([output_dim], name='bias')

    if self.logging:
        self._log_vars()

    self.input_dim = input_dim
    self.output_dim = output_dim
    self.neigh_input_dim = neigh_input_dim
    # LSTM cell used to aggregate the neighbor sequence.
    self.cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_dim)
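# A minimal sketch (assumed, not from the original file) of how the LSTM cell
# created above is typically used: the neighbor vectors are treated as a
# sequence, run through the cell, and a pooled hidden state is combined with
# the transformed self vector. The method name `_call`, the use of
# `tf.nn.dynamic_rnn`, and taking the last output are assumptions.
def _call(self, inputs):
    self_vecs, neigh_vecs = inputs

    # Run the neighbor sequence through the LSTM; use the last output as the
    # pooled neighborhood representation.
    rnn_outputs, _ = tf.nn.dynamic_rnn(self.cell, neigh_vecs,
                                       dtype=tf.float32,
                                       scope=self.name + '_rnn')
    neigh_h = rnn_outputs[:, -1, :]

    from_neighs = tf.matmul(neigh_h, self.vars['neigh_weights'])
    from_self = tf.matmul(self_vecs, self.vars['self_weights'])

    if not self.concat:
        output = tf.add_n([from_self, from_neighs])
    else:
        output = tf.concat([from_self, from_neighs], axis=1)

    if self.bias:
        output += self.vars['bias']

    return self.act(output)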
def __init__(self, input_dim, output_dim, dropout=0., act=tf.nn.relu,
             placeholders=None, bias=True, featureless=False,
             sparse_inputs=False, params=None, **kwargs):
    super(Dense, self).__init__(**kwargs)

    self.dropout = dropout
    self.act = act
    self.featureless = featureless
    self.bias = bias
    self.input_dim = input_dim
    self.output_dim = output_dim

    # helper variable for sparse dropout
    self.sparse_inputs = sparse_inputs
    if sparse_inputs:
        self.num_features_nonzero = placeholders['num_features_nonzero']

    with tf.variable_scope(self.name + '_vars'):
        # `args.weight_decay` is the module-level L2 regularization strength.
        self.vars['weights'] = tf.get_variable(
            'weights',
            shape=(input_dim, output_dim),
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer(),
            regularizer=tf.contrib.layers.l2_regularizer(args.weight_decay))
        if self.bias:
            self.vars['bias'] = zeros([output_dim], name='bias')

    # Optionally expose this layer's parameters to the caller.
    if params is not None:
        params.append(self.vars['weights'])
        if self.bias:
            params.append(self.vars['bias'])

    if self.logging:
        self._log_vars()
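# A minimal sketch (assumed, not from the original file) of the dense layer's
# forward pass using the variables above; sparse inputs would additionally
# need tf.sparse_tensor_dense_matmul and sparse dropout. The method name
# `_call` is an assumption.
def _call(self, inputs):
    x = inputs
    # Standard dropout on dense inputs (self.dropout is a drop probability).
    x = tf.nn.dropout(x, 1 - self.dropout)
    # Affine transform, with optional bias.
    output = tf.matmul(x, self.vars['weights'])
    if self.bias:
        output += self.vars['bias']
    return self.act(output)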
def __init__(self, input_dim, output_dim, neigh_input_dim=None, dropout=0.,
             bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
    super(MeanAggregator, self).__init__(**kwargs)

    self.dropout = dropout
    self.bias = bias
    self.act = act
    self.concat = concat

    if neigh_input_dim is None:
        neigh_input_dim = input_dim

    if name is not None:
        name = '/' + name
    else:
        name = ''

    with tf.variable_scope(self.name + name + '_vars'):
        self.vars['neigh_weights'] = glorot([neigh_input_dim, output_dim],
                                            name='neigh_weights')
        self.vars['self_weights'] = glorot([input_dim, output_dim],
                                           name='self_weights')
        if self.bias:
            self.vars['bias'] = zeros([output_dim], name='bias')

    if self.logging:
        self._log_vars()

    self.input_dim = input_dim
    self.output_dim = output_dim
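# A minimal sketch (assumed, not from the original file) of the mean
# aggregator's forward pass: neighbor vectors are averaged, both streams are
# linearly transformed, and the results are summed or concatenated. The method
# name `_call` and the dropout placement are assumptions.
def _call(self, inputs):
    self_vecs, neigh_vecs = inputs

    neigh_vecs = tf.nn.dropout(neigh_vecs, 1 - self.dropout)
    self_vecs = tf.nn.dropout(self_vecs, 1 - self.dropout)

    # Mean over the neighbor dimension, then transform each stream.
    neigh_means = tf.reduce_mean(neigh_vecs, axis=1)
    from_neighs = tf.matmul(neigh_means, self.vars['neigh_weights'])
    from_self = tf.matmul(self_vecs, self.vars['self_weights'])

    if not self.concat:
        output = tf.add_n([from_self, from_neighs])
    else:
        output = tf.concat([from_self, from_neighs], axis=1)

    if self.bias:
        output += self.vars['bias']

    return self.act(output)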