Example #1
def call(self, inputs):
    # Sample multi-hop neighbors along the metapath; slots that cannot be
    # sampled fall back to a padding node one past the max id.
    samples = euler_ops.sample_fanout(
        inputs, self.metapath, self.fanouts,
        default_node=self._max_id + 1)[0]
    # Aggregate the sampled neighborhood twice; the boolean flag appears
    # to toggle the negative (corrupted) branch for the unsupervised loss.
    h = self.agg(inputs, samples, False)
    h_neg = self.agg(inputs, samples, True)
    return [h, h_neg]
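
For reference, a minimal sketch of what the sample_fanout call above returns, assuming the tf_euler euler_ops API; the node ids, edge types, and fan-outs are made up for illustration:

import tensorflow as tf
from tf_euler.python import euler_ops

nodes = tf.constant([3, 7, 9], dtype=tf.int64)    # batch of root node ids
# Two-hop sampling along edge type 0: 10 neighbors per root, then 5 each.
neighbors, weights, types = euler_ops.sample_fanout(
    nodes, [[0], [0]], [10, 5], default_node=-1)
# neighbors is a list of flat id tensors, one per hop, starting with the
# roots themselves: shapes [3], [3 * 10], [3 * 10 * 5]. The models in
# these examples keep only this list via the trailing [0].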
Example #2
def call(self, inputs):
    # Sample multi-hop neighbors; here unsampled slots default to node 0.
    samples = euler_ops.sample_fanout(
        inputs, self.metapath, self.fanouts, default_node=0)[0]
    # Encode each hop's ids, embed them layer-wise, and combine them with
    # what looks like a factorization-machine head (self.fm).
    hidden = [self.node_encoder(sample) for sample in samples]
    hidden = self.layerwise_embed(hidden)
    output = self.fm(hidden)

    # Rebuild the static input shape with the output dimension appended,
    # mapping unknown dimensions to -1 for tf.reshape.
    output_shape = inputs.shape.concatenate(self.dims[-1])
    output_shape = [d if d is not None else -1
                    for d in output_shape.as_list()]
    return tf.reshape(output, output_shape)
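
The closing reshape pattern, shared by all four examples, rebuilds the input's static shape with the embedding dimension appended; a small standalone illustration with assumed shapes (TF1-style, matching the code above):

import tensorflow as tf

dim = 32
inputs = tf.placeholder(tf.int64, shape=[None, 5])       # [batch, 5] ids
embedding_table = tf.get_variable('emb', [1000, dim])
flat = tf.reshape(inputs, [-1])                          # [batch * 5]
flat_output = tf.nn.embedding_lookup(embedding_table, flat)

output_shape = inputs.shape.concatenate(dim)             # [None, 5, 32]
output_shape = [d if d is not None else -1
                for d in output_shape.as_list()]         # [-1, 5, 32]
output = tf.reshape(flat_output, output_shape)           # [batch, 5, 32]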
Example #3
def call(self, inputs):
    samples = euler_ops.sample_fanout(
        inputs, self.metapath, self.fanouts,
        default_node=self._max_id + 1)[0]
    hidden = [self.node_encoder(sample) for sample in samples]
    # GraphSAGE-style aggregation: each layer folds hop k + 1 into hop k,
    # so the list of per-hop embeddings shrinks by one every layer.
    for layer in range(self.num_layers):
        aggregator = self.aggregators[layer]
        next_hidden = []
        for hop in range(self.num_layers - layer):
            neigh_shape = [-1, self.fanouts[hop], self.dims[layer]]
            h = aggregator((hidden[hop],
                            tf.reshape(hidden[hop + 1], neigh_shape)))
            next_hidden.append(h)
        hidden = next_hidden
    # hidden[0] now holds the final embedding of the input nodes.
    output_shape = inputs.shape.concatenate(self.dims[-1])
    output_shape = [d if d is not None else -1
                    for d in output_shape.as_list()]
    return tf.reshape(hidden[0], output_shape)
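
The aggregators themselves are not shown in the snippet; as a rough sketch of what a GraphSAGE-style aggregator compatible with the call above might look like (an illustrative mean aggregator, not necessarily Euler's implementation):

import tensorflow as tf

class MeanAggregator(tf.layers.Layer):
    """Combines a node's embedding with the mean of its neighbors'."""

    def __init__(self, dim, **kwargs):
        super(MeanAggregator, self).__init__(**kwargs)
        self.self_layer = tf.layers.Dense(dim, use_bias=False)
        self.neigh_layer = tf.layers.Dense(dim, use_bias=False)

    def call(self, inputs):
        self_embedding, neigh_embedding = inputs   # [n, d], [n, fanout, d]
        neigh_mean = tf.reduce_mean(neigh_embedding, axis=1)
        return tf.nn.relu(
            self.self_layer(self_embedding) + self.neigh_layer(neigh_mean))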
Example #4
def call(self, inputs, training=None):
    if training is None:
        training = utils_context.training
    # At inference time fall back to the plain (non-scalable) encoder.
    if not training:
        return super(ScalableSageEncoder, self).call(inputs)

    # Sample a single hop of neighbors for the batch.
    node, neighbor = samples = euler_ops.sample_fanout(
        inputs, [self.edge_type], [self.fanout],
        default_node=self.max_id + 1)[0]
    node_embedding, neigh_embedding = [
        self.node_encoder(sample) for sample in samples
    ]

    node_embeddings = []
    neigh_embeddings = []
    for layer in range(self.num_layers):
        aggregator = self.aggregators[layer]

        neigh_shape = [-1, self.fanout, self.dims[layer]]
        neigh_embedding = tf.reshape(neigh_embedding, neigh_shape)
        node_embedding = aggregator((node_embedding, neigh_embedding))
        node_embeddings.append(node_embedding)

        # For deeper layers, read the neighbors' embeddings from the
        # per-layer store instead of recomputing them from scratch.
        if layer < self.num_layers - 1:
            neigh_embedding = tf.nn.embedding_lookup(
                self.stores[layer], neighbor)
            neigh_embeddings.append(neigh_embedding)

    # Ops that write fresh batch embeddings back into the stores and
    # route gradients to the cached neighbor embeddings.
    self.update_store_op = self._update_store(node, node_embeddings)
    store_loss, self.optimize_store_op = \
        self._optimize_store(node, node_embeddings)
    self.get_update_gradient_op = lambda loss: \
        self._update_gradient(loss + store_loss, neighbor, neigh_embeddings)

    output_shape = inputs.shape.concatenate(node_embedding.shape[-1])
    output_shape = [
        d if d is not None else -1 for d in output_shape.as_list()
    ]
    return tf.reshape(node_embedding, output_shape)
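
A rough sketch of the embedding-store mechanics this scalable encoder relies on, assuming self.stores holds non-trainable per-layer variables indexed by node id; all names and sizes here are illustrative:

import tensorflow as tf

num_nodes, dim = 10000, 32
store = tf.get_variable(
    'layer0_store', [num_nodes + 2, dim],   # room for the padding node
    initializer=tf.zeros_initializer(), trainable=False)

# Read cached embeddings for sampled neighbors instead of recomputing them.
neighbor = tf.constant([5, 6, 7], dtype=tf.int64)
neigh_embedding = tf.nn.embedding_lookup(store, neighbor)

# After the aggregator produces fresh embeddings for the batch nodes,
# write them back so later batches see up-to-date values.
node = tf.constant([1, 2], dtype=tf.int64)
fresh_embedding = tf.zeros([2, dim])
update_store_op = tf.scatter_update(store, node, fresh_embedding)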