Example #1
 def get_layers(self, i, is_homo, use_bias):
     # The attention layer scores the concatenation of two out_dim embeddings,
     # hence its input size of out_dim * 2 and its scalar output.
     attn_layer = LinearLayer("attn_" + str(i), self.out_dim * 2, 1,
                              use_bias)
     # Transform node features; in the homogeneous case the same layer is
     # reused for neighbors, otherwise a separate neighbor layer is built.
     x_layer = LinearLayer("x_" + str(i), self.in_dim[0], self.out_dim,
                           use_bias)
     n_layer = x_layer if is_homo else LinearLayer(
         "n_" + str(i), self.in_dim[1], self.out_dim, use_bias)
     return attn_layer, x_layer, n_layer
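These layers look like the building blocks of a GAT-style attention: the attention layer takes out_dim * 2 inputs because it scores the concatenation of a transformed node embedding and a transformed neighbor embedding. A minimal sketch of how the three layers might be wired together; the helper name, argument order, leaky-ReLU, and softmax normalization are assumptions, not part of the snippet above.

import tensorflow as tf

def attention_scores(attn_layer, x_layer, n_layer, x, neighbor, expand, out_dim):
  # Hypothetical helper: transform nodes and neighbors, pair them up,
  # and score each (node, neighbor) pair with the attention layer.
  h_x = x_layer.forward(x)                       # [batch_size, out_dim]
  h_n = n_layer.forward(neighbor)                # [batch_size * expand, out_dim]
  h_n = tf.reshape(h_n, [-1, expand, out_dim])   # [batch_size, expand, out_dim]
  # Each pair has 2 * out_dim features, matching attn_layer's input size.
  h_x_tiled = tf.tile(tf.expand_dims(h_x, 1), [1, expand, 1])
  pairs = tf.concat([h_x_tiled, h_n], axis=-1)   # [batch_size, expand, 2*out_dim]
  logits = attn_layer.forward(tf.reshape(pairs, [-1, 2 * out_dim]))
  logits = tf.reshape(logits, [-1, expand])
  return tf.nn.softmax(tf.nn.leaky_relu(logits), axis=-1)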
Example #2
 def __init__(self,
              name,
              input_dim,
              output_dim,
              eps=0.0,
              use_bias=False,
              **kwargs):
   self.eps = eps
   self.linear = LinearLayer(name, input_dim, output_dim, use_bias)
Example #3
    def __init__(self,
                 name,
                 in_dim,
                 out_dim,
                 num_relations,
                 num_bases=None,
                 num_blocks=None,
                 agg_type="mean",
                 use_bias=False,
                 **kwargs):
        super(EgoRGCNConv, self).__init__()
        assert agg_type in {"mean", "sum", "max"}
        if num_bases is not None and num_blocks is not None:
            raise ValueError(
                'Can not apply both basis- and block-diagonal-decomposition '
                'regularization at the same time.')

        self._agg_type = agg_type
        self._out_dim = out_dim
        self._num_relations = num_relations
        self._num_bases = num_bases
        self._num_blocks = num_blocks

        if isinstance(in_dim, list) or isinstance(in_dim, tuple):
            self._in_dim = in_dim
            assert len(self._in_dim) == 2
        else:
            self._in_dim = [in_dim, in_dim]

        with tf.variable_scope("ego_rgcn_layer_" + name, reuse=tf.AUTO_REUSE):
            self.root_weight = LinearLayer("root_weight", self._in_dim[0],
                                           self._out_dim)
            # Neighbors' weights: basis-decomposition, block-diagonal
            # decomposition, or one full weight matrix per relation.
            if num_bases is not None:
                self.weight = tf.get_variable(
                    name="weight", shape=[num_bases, self._in_dim[1], out_dim])
                self.coefficient = tf.get_variable(
                    name="coefficient", shape=[num_relations, num_bases])
            elif num_blocks is not None:
                assert (self._in_dim[1] % num_blocks == 0
                        and out_dim % num_blocks == 0)
                self.weight = tf.get_variable(
                    name='weight',
                    shape=[
                        num_relations, num_blocks,
                        self._in_dim[1] // num_blocks, out_dim // num_blocks
                    ])
            else:
                self.weight = tf.get_variable(
                    name="weight",
                    shape=[num_relations, self._in_dim[1], self._out_dim])

            if use_bias:
                self.bias = tf.get_variable(name="bias", shape=[out_dim])
            else:
                self.bias = None
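With basis-decomposition, the per-relation weight matrices are never stored directly; each one is a linear combination of the num_bases shared bases, weighted by the coefficient variable. A minimal sketch of how the two variables above could be materialized into one matrix per relation; the helper name and the use of tf.einsum are assumptions, not part of the snippet.

import tensorflow as tf

def relation_weights(coefficient, basis_weight):
  # W_r = sum_b coefficient[r, b] * basis_weight[b]
  # coefficient:  [num_relations, num_bases]
  # basis_weight: [num_bases, in_dim, out_dim]
  # returns:      [num_relations, in_dim, out_dim]
  return tf.einsum('rb,bio->rio', coefficient, basis_weight)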
Example #4
 def add_transform_layers(self, parameter_share, use_bias):
     layers = []
     if self.com_type == "concat":
         # Node and neighbor features are concatenated first, so one layer
         # over the summed input dimension is enough.
         dim = self.in_dim[0] + self.in_dim[1]
         layers.append(
             LinearLayer("trans_nodes", dim, self.out_dim, use_bias))
     elif parameter_share and self.in_dim[0] == self.in_dim[1]:
         # Share a single layer between nodes and neighbors when their
         # input dimensions match.
         layer = LinearLayer("trans_nodes", self.in_dim[0], self.out_dim,
                             use_bias)
         layers.append(layer)
         layers.append(layer)
     else:
         # Otherwise transform nodes and neighbors with separate layers.
         layers.append(
             LinearLayer("trans_nodes", self.in_dim[0], self.out_dim,
                         use_bias))
         layers.append(
             LinearLayer("trans_nbrs", self.in_dim[1], self.out_dim,
                         use_bias))
     return layers
Example #5
    def __init__(self, name, input_dim, num_layers, dropout=0.0):
        self.dropout = dropout
        self.layers = []

        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            for i in range(num_layers):
                output_dim = 1 if i == num_layers - 1 else input_dim
                layer = LinearLayer("link_predictor_" + str(i),
                                    input_dim=input_dim,
                                    output_dim=output_dim,
                                    use_bias=True)
                self.layers.append(layer)
Example #6
  def __init__(self,
               name,
               in_dim,
               out_dim,
               eps=0.0,
               use_bias=False,
               **kwargs):
    super(EgoGINConv, self).__init__()

    self._eps = eps
    self._out_dim = out_dim

    if isinstance(in_dim, list) or isinstance(in_dim, tuple):
      self._in_dim = in_dim
      assert len(self._in_dim) == 2
    else:
      self._in_dim = [in_dim, in_dim]

    self.trans = []
    with tf.variable_scope("ego_gin_layer_" + name, reuse=tf.AUTO_REUSE):
      if self._in_dim[0] == self._in_dim[1]:
        self.output = LinearLayer(
            "output", self._in_dim[0], self._out_dim, use_bias)
      else:
        self.output = LinearLayer(
            "output", self._out_dim, self._out_dim, use_bias)
        self.trans.append(
            LinearLayer("trans_x", self._in_dim[0], self._out_dim, use_bias))
        self.trans.append(
            LinearLayer("trans_nbrs", self._in_dim[1], self._out_dim, use_bias))
Example #7
    def __init__(self,
                 name,
                 feature_spec,
                 output_dim=None,
                 use_bias=False,
                 **kwargs):
        super(InputLayer, self).__init__()

        with tf.variable_scope("input_layer", reuse=tf.AUTO_REUSE):
            self.handler = FeatureHandler(name, feature_spec)
            if output_dim is not None:
                self.linear = LinearLayer(
                    name, feature_spec.dimension, output_dim, use_bias)
            else:
                self.linear = None
Example #8
    def __init__(self, dims, class_num=2, active_fn=tf.nn.relu, dropout=None):
        self.class_num = class_num
        self.active_func = active_fn

        dims.append(class_num)
        self.layers = []
        for i in range(len(dims) - 1):
            layer = LinearLayer("node_classifier_" + str(i),
                                input_dim=dims[i],
                                output_dim=dims[i + 1],
                                use_bias=True)
            self.layers.append(layer)

        if dropout is not None:
            self.dropout_func = lambda x: tf.nn.dropout(
                x, keep_prob=1 - dropout)
        else:
            self.dropout_func = None
Example #9
    def __init__(self, name, dims, active_fn=tf.nn.relu, dropout=None):
        self.active_func = active_fn
        dims.append(1)
        self.layers = []

        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            for i in range(len(dims) - 1):
                layer = LinearLayer("link_predictor_" + str(i),
                                    input_dim=dims[i],
                                    output_dim=dims[i + 1],
                                    use_bias=True)
                self.layers.append(layer)

        if dropout is not None:
                self.dropout_func = lambda x: tf.nn.dropout(
                    x, keep_prob=1 - dropout)
        else:
            self.dropout_func = None
Example #10
  def __init__(self,
               name,
               in_dim,
               out_dim,
               agg_type="mean",
               use_bias=False,
               **kwargs):
    super(EgoSAGEConv, self).__init__()
    assert agg_type in {"mean", "sum", "max", "gcn"}
    self._agg_type = agg_type
    self._out_dim = out_dim
    if isinstance(in_dim, list) or isinstance(in_dim, tuple):
      self._in_dim = in_dim
      assert len(self._in_dim) == 2
      assert self._agg_type != 'gcn'
    else:
      self._in_dim = [in_dim, in_dim]

    with tf.variable_scope("ego_sage_layer_" + name, reuse=tf.AUTO_REUSE):
      # Input size of the transform: the node dimension for "gcn",
      # otherwise the node dimension plus the neighbor dimension.
      dim = (self._in_dim[0] if self._agg_type == 'gcn'
             else self._in_dim[0] + self._in_dim[1])
      self.linear = LinearLayer("trans_nodes", dim, self._out_dim, use_bias)
Example #11
class EgoGINConv(EgoConv):
  """ GIN. https://arxiv.org/abs/1810.00826.

  Args:
    name: A string, layer name.
    in_dim: An integer or a two elements tuple. Dimension of input features.
      If an integer, nodes and neighbors share the same dimension.
      If an tuple, the two elements represent the dimensions of node features
      and neighbor features.
      Usually, different dimensions happen in the heterogeneous graph.
    out_dim: An integer, dimension of the output embeddings. Both the node
      features and neighbor features will be encoded into the same dimension,
      and then do some combination.
    use_bias: A boolean, whether add bias after computation.
  """

  def __init__(self,
               name,
               in_dim,
               out_dim,
               eps=0.0,
               use_bias=False,
               **kwargs):
    super(EgoGINConv, self).__init__()

    self._eps = eps
    self._out_dim = out_dim

    if isinstance(in_dim, list) or isinstance(in_dim, tuple):
      self._in_dim = in_dim
      assert len(self._in_dim) == 2
    else:
      self._in_dim = [in_dim, in_dim]

    self.trans = []
    with tf.variable_scope("ego_gin_layer_" + name, reuse=tf.AUTO_REUSE):
      if self._in_dim[0] == self._in_dim[1]:
        self.output = LinearLayer(
            "output", self._in_dim[0], self._out_dim, use_bias)
      else:
        self.output = LinearLayer(
            "output", self._out_dim, self._out_dim, use_bias)
        self.trans.append(
            LinearLayer("trans_x", self._in_dim[0], self._out_dim, use_bias))
        self.trans.append(
            LinearLayer("trans_nbrs", self._in_dim[1], self._out_dim, use_bias))

  def forward(self, x, neighbor, expand):
    """ Compute node embeddings based on GIN.
    ```x_i = W * [(1 + eps) * x_i + sum(x_j) for x_j in N_i]```,
    where ```N_i``` is the neighbor set of ```x_i```.

    Args:
      x: A float tensor with shape = [batch_size, in_dim].
      neighbor: A float tensor with shape = [batch_size * expand, in_dim].
      expand: An integer, the neighbor count.

    Return:
      A float tensor with shape=[batch_size, out_dim].
    """
    # Group each node's neighbors and sum them: [batch_size, in_dim[1]].
    nbr = tf.reshape(neighbor, [-1, expand, self._in_dim[1]])
    agg = tf.math.reduce_sum(nbr, axis=1)

    # Weight the central node by (1 + eps), following the formula above.
    x = (1.0 + self._eps) * x

    if self.trans:
      # Heterogeneous case: project nodes and neighbors to out_dim first.
      x = self.trans[0].forward(x)
      agg = self.trans[1].forward(agg)

    return self.output.forward(x + agg)
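A minimal usage sketch for the heterogeneous case. The dimensions, batch size, and neighbor count below are illustrative only; it assumes LinearLayer and the EgoConv base class from the same codebase are importable, and that a TF1 session is used to evaluate the resulting tensor.

import numpy as np
import tensorflow as tf

# 4 central nodes with 16-dim features, 5 neighbors each with 8-dim features.
conv = EgoGINConv("example", in_dim=(16, 8), out_dim=32, eps=0.1)
x = tf.constant(np.random.rand(4, 16), dtype=tf.float32)
neighbor = tf.constant(np.random.rand(4 * 5, 8), dtype=tf.float32)
out = conv.forward(x, neighbor, expand=5)  # tensor with shape [4, 32]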