Example #1
    def _build(self,
                sequence_graph,  # GraphsTuple with a single graph for the sequence
                pattern_graph,  # GraphsTuple with a single graph for each pattern
                num_processing_steps):

        latent_seq_graph = self.sequence_encoder(sequence_graph)
        latent_seq_graph0 = latent_seq_graph

        latent_pattern_graph = self.pattern_encoder(pattern_graph)
        latent_pattern_graph0 = latent_pattern_graph

        for _ in range(num_processing_steps):

            # Core input is the concatenation of the encoded input graph and
            # the latent graph from the previous iteration.
            seq_core_input = utils_tf.concat([latent_seq_graph0, latent_seq_graph], axis=1)

            # The latent representation of the sequence graph in the current iteration.
            latent_seq_graph = self.sequence_graph_core(seq_core_input)

            # Copy node tensors from the latent sequence graph to the pattern graph.
            latent_pattern_graph = latent_pattern_graph.replace(nodes=latent_seq_graph.nodes)
            pattern_core_input = utils_tf.concat([latent_pattern_graph0, latent_pattern_graph], axis=1)
            latent_pattern_graph = self.pattern_graph_core(pattern_core_input)

            latent_seq_graph = latent_seq_graph.replace(nodes=latent_pattern_graph.nodes)


        decoded_op = self.decoder(latent_pattern_graph)
        return self.output_transform(decoded_op)
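
A note on the `axis=1` calls above: for any axis other than 0, `utils_tf.concat` concatenates the node, edge, and global feature tensors along that axis and takes the connectivity (`senders`, `receivers`, `n_node`, `n_edge`) from the first graph, which is exactly what the skip connection here needs. A minimal, self-contained sketch (made-up feature sizes, not from the example above):

import tensorflow as tf
from graph_nets import graphs, utils_tf

def _tiny_graph():
    # 3 nodes, 2 edges, all features 4 wide.
    return graphs.GraphsTuple(
        nodes=tf.ones((3, 4)), edges=tf.ones((2, 4)), globals=tf.ones((1, 4)),
        senders=tf.constant([0, 1]), receivers=tf.constant([1, 2]),
        n_node=tf.constant([3]), n_edge=tf.constant([2]))

wide = utils_tf.concat([_tiny_graph(), _tiny_graph()], axis=1)
print(wide.nodes.shape)  # (3, 8): feature widths add, structure is unchanged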
Example #2
    def create_graph(self, num_graphs, is_training=True):
        if not self.idx_mgr:
            raise ValueError("No doublet graph has been created")

        inputs = []
        targets = []
        for _ in range(num_graphs):
            idx = self.idx_mgr.next(is_training)
            input_dd, target_dd = self.graphs[idx]
            inputs.append(input_dd)
            targets.append(target_dd)

        return utils_tf.concat(inputs, axis=0), utils_tf.concat(targets, axis=0)
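
With `axis=0`, as used here, `utils_tf.concat` instead merges a list of `GraphsTuple`s into one batched tuple: feature rows are stacked, `n_node`/`n_edge` are concatenated, and the sender/receiver indices of later graphs are offset by the node counts of the graphs before them. A small sketch of that behavior (hypothetical shapes, `graph_nets` assumed available):

import tensorflow as tf
from graph_nets import utils_tf

def _one_graph():
    return utils_tf.data_dicts_to_graphs_tuple([{
        "nodes": tf.ones((2, 3)),
        "edges": tf.ones((1, 4)),
        "senders": tf.constant([0]),
        "receivers": tf.constant([1]),
        "globals": tf.ones((5,)),
    }])

batch = utils_tf.concat([_one_graph(), _one_graph()], axis=0)
print(batch.n_node.numpy())   # [2 2]
print(batch.senders.numpy())  # [0 2] -- the second graph's index is offset by 2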
Example #3
def get_input_signature(dataset, batch_size):
    with_batch_dim = False
    input_list = []
    target_list = []
    for dd in dataset.take(batch_size).as_numpy_iterator():
        input_list.append(dd[0])
        target_list.append(dd[1])

    inputs = utils_tf.concat(input_list, axis=0)
    targets = utils_tf.concat(target_list, axis=0)
    input_signature = (graph.specs_from_graphs_tuple(inputs, with_batch_dim),
                       graph.specs_from_graphs_tuple(targets, with_batch_dim),
                       tf.TensorSpec(shape=[], dtype=tf.bool))
    return input_signature
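
The spec tuple built here is typically handed to `tf.function` so a single concrete function can serve graphs of any size. A hedged usage sketch; `train_step`, `model`, and `loss_fn` are hypothetical names, not part of the example:

input_signature = get_input_signature(dataset, batch_size)

@tf.function(input_signature=input_signature)
def train_step(inputs, targets, is_training):
    # Hypothetical body: run a graph network and score it against the targets.
    outputs = model(inputs, is_training)
    return loss_fn(outputs, targets)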
Example #4
def loop_dataset(datasets, batch_size):
    if batch_size > 0:
        in_list = []
        target_list = []
        for dataset in datasets:
            inputs_tr, targets_tr = dataset
            in_list.append(inputs_tr)
            target_list.append(targets_tr)
            if len(in_list) == batch_size:
                inputs_tr = utils_tf.concat(in_list, axis=0)
                targets_tr = utils_tf.concat(target_list, axis=0)
                yield (inputs_tr, targets_tr)
                # Reset the accumulators; without this the equality check
                # above would never match again after the first batch.
                in_list = []
                target_list = []
    else:
        for dataset in datasets:
            yield dataset
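
A sketch of how the generator might drive a training loop; `model`, `loss_fn`, and `optimizer` are hypothetical names, not part of the example:

for inputs_tr, targets_tr in loop_dataset(datasets, batch_size=4):
    # Each yielded pair holds four graphs concatenated along the batch axis.
    with tf.GradientTape() as tape:
        outputs = model(inputs_tr)
        loss = loss_fn(outputs, targets_tr)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))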
Example #5
 def test_raise_all_or_no_nones(self, none_field):
     graph_0 = utils_np.networkxs_to_graphs_tuple(
         [_generate_graph(0, 3),
          _generate_graph(1, 2)])
     graph_1 = utils_np.networkxs_to_graphs_tuple([_generate_graph(2, 2)])
     graph_2 = utils_np.networkxs_to_graphs_tuple([_generate_graph(3, 3)])
     graphs_ = [
         gr.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
         for gr in [graph_0, graph_1, graph_2]
     ]
     graphs_[1] = graphs_[1].replace(**{none_field: None})
     with self.assertRaisesRegex(
             ValueError,
             "Different set of keys found when iterating over data dictionaries."
     ):
         utils_tf.concat(graphs_, axis=0)
Example #6
    def input_signature(self):
        with_batch_dim = False
        input_list = []
        target_list = []
        for dd in self.data_train.take(
                self.train_batch_size).as_numpy_iterator():
            input_list.append(dd[0])
            target_list.append(dd[1])

        inputs = utils_tf.concat(input_list, axis=0)
        targets = utils_tf.concat(target_list, axis=0)
        input_signature = (
            graph.specs_from_graphs_tuple(inputs, with_batch_dim),
            graph.specs_from_graphs_tuple(targets, with_batch_dim),
        )
        return input_signature
Example #7
 def _build(self, input_op, num_processing_steps):
   latent = self._encoder(input_op)
   latent0 = latent
   for _ in range(num_processing_steps):
     core_input = utils_tf.concat([latent0, latent], axis=1)
     latent = self._core(core_input)
   return latent
Example #8
 def _build(self, input_op, num_processing_steps):
     latent = self._encoder(input_op)
     latent0 = latent
     for _ in range(num_processing_steps):
         core_input = utils_tf.concat([latent0, latent], axis=1)
         latent = self._core(core_input)
     return latent
Example #9
    def __call__(self, input_op, num_processing_steps):
        # Embed the raw node, global and edge features.
        fea1 = input_op.nodes[:, 0]
        fea2 = input_op.globals[:, 0]
        fea3 = input_op.edges[:, 0]
        embed1 = self._embed_mod_symptomNode(fea1)  # node
        embed2 = self._embed_mod_global(fea2)  # global
        embed3 = self._embed_mod_edge(fea3)  # edge

        embed1 = tf.cast(embed1, tf.float64)
        embed2 = tf.cast(embed2, tf.float64)
        embed3 = tf.cast(embed3, tf.float64)
        input_op = input_op.replace(nodes=embed1, globals=embed2,
                                    edges=embed3)

        latent = self._encoder(input_op)
        latent0 = latent
        output_ops = []
        for _ in range(num_processing_steps):
            core_input = utils_tf.concat([latent0, latent], axis=1)
            latent = self._core(core_input)
            decoded_op = self._decoder(latent)
            output_ops.append(self._output_transform(decoded_op))
        return output_ops
Example #10
def create_linked_list_target(batch_size, input_graphs):
    """Creates linked list targets.

  Returns a graph with the same number of nodes as `input_graph`. Each node
  contains a 2d vector with targets for a 1-class classification where only one
  node is `True`, the smallest value in the array. The vector contains two
  values: [prob_true, prob_false].
  It also contains edges connecting all nodes. These are again 2d vectors with
  softmax targets [prob_true, prob_false]. An edge is True
  if n+1 is the element immediately after n in the sorted list.

  Args:
    batch_size: batch size for the `input_graphs`.
    input_graphs: a `graphs.GraphsTuple` which contains a batch of inputs.

  Returns:
    A `graphs.GraphsTuple` with the targets, which encode the linked list.
  """
    target_graphs = []
    for i in range(batch_size):
        input_graph = utils_tf.get_graph(input_graphs, i)
        num_elements = tf.shape(input_graph.nodes)[0]
        si = tf.cast(tf.squeeze(input_graph.nodes), tf.int32)
        nodes = tf.reshape(tf.one_hot(si[:1], num_elements), (-1, 1))
        x = tf.stack((si[:-1], si[1:]))[None]
        y = tf.stack((input_graph.senders, input_graph.receivers),
                     axis=1)[:, :, None]
        edges = tf.reshape(
            tf.cast(
                tf.reduce_any(tf.reduce_all(tf.equal(x, y), axis=1), axis=1),
                tf.float32), (-1, 1))
        target_graphs.append(input_graph._replace(nodes=nodes, edges=edges))
    return utils_tf.concat(target_graphs, axis=0)
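
To make the encoding concrete, a hedged, self-contained sketch on a single 3-node graph. It assumes, as the use of `si` above implies, that the input node features hold sort indices: `si = [2, 0, 1]` means node 2 holds the smallest value, then node 0, then node 1 (the graph and its two edges are made up):

import tensorflow as tf
from graph_nets import utils_tf

g = utils_tf.data_dicts_to_graphs_tuple([{
    "nodes": tf.constant([[2.], [0.], [1.]]),  # sort indices as node features
    "senders": tf.constant([2, 0]),
    "receivers": tf.constant([0, 1]),
    "edges": tf.zeros((2, 1)),
    "globals": tf.zeros((1,)),
}])
target = create_linked_list_target(1, g)
print(target.nodes.numpy())  # [[0.], [0.], [1.]]: node 2 is the head of the list
print(target.edges.numpy())  # [[1.], [1.]]: both edges lie on the sorted chain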
Example #11
def test_batch_reshape():
    data = dict(nodes=tf.reshape(tf.range(3 * 2), (3, 2)),
                edges=tf.reshape(tf.range(40 * 5), (40, 5)),
                senders=tf.random.uniform((40, ),
                                          minval=0,
                                          maxval=3,
                                          dtype=tf.int32),
                receivers=tf.random.uniform((40, ),
                                            minval=0,
                                            maxval=3,
                                            dtype=tf.int32),
                n_node=tf.constant([3]),
                n_edge=tf.constant([40]),
                globals=None)
    graph = GraphsTuple(**data)
    graphs = utils_tf.concat([graph] * 4, axis=0)
    batched_graphs = graph_batch_reshape(graphs)
    assert tf.reduce_all(
        batched_graphs.nodes[0] == batched_graphs.nodes[1]).numpy()
    assert tf.reduce_all(
        batched_graphs.edges[0] == batched_graphs.edges[1]).numpy()
    assert tf.reduce_all(
        batched_graphs.senders[0] +
        graphs.n_node[0] == batched_graphs.senders[1]).numpy()
    assert tf.reduce_all(
        batched_graphs.receivers[0] +
        graphs.n_node[0] == batched_graphs.receivers[1]).numpy()

    unbatched_graphs = graph_unbatch_reshape(batched_graphs)
    for (t1, t2) in zip(graphs, unbatched_graphs):
        if t1 is not None:
            assert tf.reduce_all(t1 == t2).numpy()
Example #12
    def _build(self, input_op, num_processing_steps):
        fea1 = input_op.nodes[:, 0]
        fea2 = input_op.globals[:, 0]
        fea3 = input_op.edges[:, 0]
        embed1 = self._embed_mod_symptomNode(fea1)  # node
        embed2 = self._embed_mod_global(fea2)  # global
        embed3 = self._embed_mod_edge(fea3)  # edge

        embed1 = tf.cast(embed1, tf.float64)
        embed2 = tf.cast(embed2, tf.float64)
        embed3 = tf.cast(embed3, tf.float64)
        input_op = input_op.replace(nodes=embed1, globals=embed2,
                                    edges=embed3)

        latent = self._encoder(input_op)  # graph_out = GN(graph_in)
        latent0 = latent
        output_ops = []
        for _ in range(num_processing_steps):  # number of core GN blocks
            core_input = utils_tf.concat(
                [latent0, latent], axis=1)  # feature widths double here
            latent = self._core(core_input)
            decoded_op = self._decoder(latent)
            output_ops.append(self._output_transform(decoded_op))
        return output_ops
Example #13
 def _build(self, input_op):
     # Initial step gets encoder output as input.
     latents = [self._encoder(input_op)]
     # Each successive step gets all previous steps' outputs as input.
     for core in self._cores:
         latents.append(core(utils_tf.concat(latents, axis=1)))
     # Return a single list of one output: decode of final core's output.
     return [self._output_transform(self._decoder(latents[-1]))]
Example #14
 def _build(self, input_op, num_processing_steps):
     latent = self._encoder(input_op)
     latent0 = latent
     output_ops = []
     for _ in range(num_processing_steps):
         core_input = utils_tf.concat([latent0, latent], axis=1)
         latent = self._core(core_input)
         if self._skip_encoder_decoder:
             decoder_input = utils_tf.concat([latent0, latent], axis=1)
         else:
             decoder_input = latent
         if self._edge_type_concat:
             decoder_input = decoder_input._replace(edges=tf.concat(
                 [input_op.edges[:, :8], decoder_input.edges], axis=1))
         decoded_op = self._decoder(decoder_input)
         output_ops.append(self._output_transform(decoded_op))
     return output_ops
Example #15
 def _build(self, input_op):
     latent = self._encoder(input_op)
     output_ops = [self._decoder(latent)]  # K = 0
     for _ in range(self._num_processing_steps):
         latent = self._core(latent)
         decoded_op = self._decoder(latent)
         output_ops.append(decoded_op)  # K = 1, 2, 3, ...
     return self._output_transform(utils_tf.concat(output_ops, axis=1))
Example #16
    def test_nested_features(self):
        graph_0 = utils_np.networkxs_to_graphs_tuple(
            [_generate_graph(0, 3),
             _generate_graph(1, 2)])
        graph_1 = utils_np.networkxs_to_graphs_tuple([_generate_graph(2, 2)])
        graph_2 = utils_np.networkxs_to_graphs_tuple([_generate_graph(3, 3)])
        graphs_ = [
            gr.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
            for gr in [graph_0, graph_1, graph_2]
        ]

        def _create_nested_fields(graphs_tuple):
            new_nodes = ({
                "a": graphs_tuple.nodes,
                "b": [graphs_tuple.nodes + 1, graphs_tuple.nodes + 2]
            }, )

            new_edges = [{
                "c": graphs_tuple.edges + 5,
                "d": (graphs_tuple.edges + 1, graphs_tuple.edges + 3),
            }]
            new_globals = []

            return graphs_tuple.replace(nodes=new_nodes,
                                        edges=new_edges,
                                        globals=new_globals)

        graphs_ = [_create_nested_fields(gr) for gr in graphs_]
        concat_graph = utils_tf.concat(graphs_, axis=0)

        actual_nodes = concat_graph.nodes
        actual_edges = concat_graph.edges
        actual_globals = concat_graph.globals

        expected_nodes = tree.map_structure(lambda *x: tf.concat(x, axis=0),
                                            *[gr.nodes for gr in graphs_])
        expected_edges = tree.map_structure(lambda *x: tf.concat(x, axis=0),
                                            *[gr.edges for gr in graphs_])
        expected_globals = tree.map_structure(lambda *x: tf.concat(x, axis=0),
                                              *[gr.globals for gr in graphs_])

        tree.assert_same_structure(expected_nodes, actual_nodes)
        tree.assert_same_structure(expected_edges, actual_edges)
        tree.assert_same_structure(expected_globals, actual_globals)

        tree.map_structure(self.assertAllEqual, expected_nodes, actual_nodes)
        tree.map_structure(self.assertAllEqual, expected_edges, actual_edges)
        tree.map_structure(self.assertAllEqual, expected_globals,
                           actual_globals)

        # Borrowed from `test_concat_first_axis`:
        self.assertAllEqual(np.array([3, 2, 2, 3]), concat_graph.n_node)
        self.assertAllEqual(np.array([2, 1, 1, 2]), concat_graph.n_edge)
        self.assertAllEqual(np.array([1, 2, 4, 6, 8, 9]), concat_graph.senders)
        self.assertAllEqual(np.array([0, 0, 3, 5, 7, 7]),
                            concat_graph.receivers)
Example #17
 def _build(self, input_op):
     latent = self._encoder(input_op)  # latent size = 16
     output_ops = [self._decoder(latent)]  # K = 0 data
     for _ in range(self._num_processing_steps):
         for c in self._cores:
             latent = c(latent)
         decoded_op = self._decoder(latent)
         output_ops.append(decoded_op)  # K = 1, 2, 3... data
     return self._output_transform(utils_tf.concat(
         output_ops, axis=1))  # K * 16 for every node
Example #18
    def __call__(self, input_op, num_processing_steps):
        latent = self._edge_block(self._node_encoder_block(input_op))
        latent0 = latent

        output_ops = []
        for _ in range(num_processing_steps):
            core_input = utils_tf.concat([latent0, latent], axis=1)
            latent = self._core(core_input)
            output_ops.append(self._output_transform(latent))
        return output_ops
Example #19
 def _build(self, input_op, num_processing_steps):
     latent = self._encoder(input_op)
     latent0 = latent
     output_ops = []
     for _ in range(num_processing_steps):
         core_input = utils_tf.concat([latent0, latent], axis=1)
         latent = self._core(core_input)
         decoded_op = self._decoder(latent)
         output_ops.append(self._output_transform(decoded_op))
     return output_ops
Example #20
def data_dicts_to_graphs_tuple(input_dd, target_dd, with_batch_dim=True):
    # Despite the name, `input_dd` and `target_dd` are lists of `GraphsTuple`s;
    # `utils_tf.concat` merges each list along the batch axis.
    input_graphs = utils_tf.concat(input_dd, axis=0)
    target_graphs = utils_tf.concat(target_dd, axis=0)

    # Optionally add a leading batch dimension.
    if with_batch_dim:
        input_graphs = add_batch_dim(input_graphs)
        target_graphs = add_batch_dim(target_graphs)
    return input_graphs, target_graphs
Example #21
    def _build(self, input_op, num_processing_steps, is_training, sess):
        print("EncodeProcessDecode is running in mode: full global data")
        latent = self._encoder(input_op)

        latent0 = latent
        output_ops = []
        for _ in range(num_processing_steps):
            core_input = utils_tf.concat([latent0, latent], axis=1)
            latent = self._core(core_input)
            decoded_op = self._decoder(latent)
            output_ops.append(decoded_op)

        return output_ops
Example #22
    def __call__(self, input_op, num_processing_steps, is_training=True):
        latent = self._global_block(
            self._edge_block(self._node_encoder_block(input_op)))
        latent0 = latent

        output_ops = []
        for _ in range(num_processing_steps):
            core_input = utils_tf.concat([latent0, latent], axis=1)
            latent = self._core(core_input)

            output = latent.replace(
                globals=self._global_nn(latent.globals, is_training))
            output_ops.append(output)

        return output_ops
Example #23
 def __call__(self, input_op, num_processing_steps):
     latent = self._encoder(input_op)
     latent0 = latent
     output_ops = []
     for _ in range(num_processing_steps):
         core_input = utils_tf.concat([latent0, latent], axis=1)
         latent = self._core(core_input)
         decoded_op = self._decoder(latent)
         output_ops.append(self._output_transform(decoded_op))
     return output_ops
Example #24
 def _build(self, input_op, num_processing_steps):
     """
     Feed the input through the GraphAttention network
     :param input_op: The graph_nets graph input of the network
     :param num_processing_steps: The number of feeding forward on the data
     :return: The output graphs calculated
     """
     latent = self._encoder(input_op)
     latent0 = latent
     output_ops = []
     for _ in range(num_processing_steps):
         core_input = utils_tf.concat([latent0, latent], axis=1)
         latent = self._network(core_input)
         decoded_op = self._output_transform(latent)
         output_ops.append(decoded_op)
     return output_ops
Example #25
    def __call__(self, input_op, num_processing_steps, is_training=True):
        node_kwargs = edge_kwargs = dict(is_training=is_training)

        latent = self._edge_block(
            self._node_encoder_block(input_op, node_kwargs), edge_kwargs)
        latent0 = latent

        output_ops = []
        for _ in range(num_processing_steps):
            core_input = utils_tf.concat([latent0, latent], axis=1)
            latent = self._core(core_input,
                                edge_model_kwargs=edge_kwargs,
                                node_model_kwargs=node_kwargs)

            output_ops.append(self._output_transform(latent))

        return output_ops
Example #26
 def test_concat_last_axis(self):
   graph0 = utils_np.networkxs_to_graphs_tuple(
       [_generate_graph(0, 3), _generate_graph(1, 2)])
   graph1 = utils_np.networkxs_to_graphs_tuple(
       [_generate_graph(2, 3), _generate_graph(3, 2)])
   graph0 = graph0.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
   graph1 = graph1.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
   concat_graph = utils_tf.concat([graph0, graph1], axis=-1)
   self.assertAllEqual(
       np.array([[0, 0, 0, 2], [1, 0, 1, 2], [2, 0, 2, 2], [0, 1, 0, 3],
                 [1, 1, 1, 3]]), concat_graph.nodes)
   self.assertAllEqual(
       np.array([[0, 1, 0, 0, 1, 2], [1, 2, 0, 1, 2, 2], [0, 1, 1, 0, 1, 3]]),
       concat_graph.edges)
   self.assertAllEqual(np.array([3, 2]), concat_graph.n_node)
   self.assertAllEqual(np.array([2, 1]), concat_graph.n_edge)
   self.assertAllEqual(np.array([1, 2, 4]), concat_graph.senders)
   self.assertAllEqual(np.array([0, 0, 3]), concat_graph.receivers)
   self.assertAllEqual(np.array([[0, 2], [1, 3]]), concat_graph.globals)
Example #27
 def test_concat_first_axis(self, none_fields):
     graph_0 = utils_np.networkxs_to_graphs_tuple(
         [_generate_graph(0, 3),
          _generate_graph(1, 2)])
     graph_1 = utils_np.networkxs_to_graphs_tuple([_generate_graph(2, 2)])
     graph_2 = utils_np.networkxs_to_graphs_tuple([_generate_graph(3, 3)])
     graphs_ = [
         gr.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
         for gr in [graph_0, graph_1, graph_2]
     ]
     graphs_ = [gr.map(lambda _: None, none_fields) for gr in graphs_]
     concat_graph = utils_tf.concat(graphs_, axis=0)
     for none_field in none_fields:
         self.assertEqual(None, getattr(concat_graph, none_field))
     concat_graph = concat_graph.map(tf.no_op, none_fields)
     with self.test_session() as sess:
         concat_graph = sess.run(concat_graph)
     if "nodes" not in none_fields:
         self.assertAllEqual(np.array([0, 1, 2, 0, 1, 0, 1, 0, 1, 2]),
                             [x[0] for x in concat_graph.nodes])
         self.assertAllEqual(np.array([0, 0, 0, 1, 1, 2, 2, 3, 3, 3]),
                             [x[1] for x in concat_graph.nodes])
     if "edges" not in none_fields:
         self.assertAllEqual(np.array([0, 1, 0, 0, 0, 1]),
                             [x[0] for x in concat_graph.edges])
         self.assertAllEqual(np.array([0, 0, 1, 2, 3, 3]),
                             [x[2] for x in concat_graph.edges])
     self.assertAllEqual(np.array([3, 2, 2, 3]), concat_graph.n_node)
     self.assertAllEqual(np.array([2, 1, 1, 2]), concat_graph.n_edge)
     if "senders" not in none_fields:
         # [1, 2], [1], [1], [1, 2] and 3, 2, 2, 3 nodes
         # So we are summing [1, 2, 1, 1, 2] with [0, 0, 3, 5, 7, 7]
         self.assertAllEqual(np.array([1, 2, 4, 6, 8, 9]),
                             concat_graph.senders)
     if "receivers" not in none_fields:
         # [0, 0], [0], [0], [0, 0] and 3, 2, 2, 3 nodes
         # So we are summing [0, 0, 0, 0, 0, 0] with [0, 0, 3, 5, 7, 7]
         self.assertAllEqual(np.array([0, 0, 3, 5, 7, 7]),
                             concat_graph.receivers)
     if "globals" not in none_fields:
         self.assertAllEqual(np.array([[0], [1], [2], [3]]),
                             concat_graph.globals)
Example #28
def _concat_batch_dim(G):
    """
    G is a GraphsTuple whose tensors carry an additional leading batch
    dimension. Slice each graph out of that dimension and concatenate
    them along the batch axis.
    """
    input_graphs = []
    for ibatch in [0, 1]:
        data_dict = {
            "nodes": G.nodes[ibatch],
            "edges": G.edges[ibatch],
            "receivers": G.receivers[ibatch],
            "senders": G.senders[ibatch],
            "globals": G.globals[ibatch],
            "n_node": G.n_node[ibatch],
            "n_edge": G.n_edge[ibatch],
        }
        input_graphs.append(graphs.GraphsTuple(**data_dict))
    print("{} graphs".format(len(input_graphs)))
    return utils_tf.concat(input_graphs, axis=0)
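
For reference, a sketch of an input the helper accepts: a `GraphsTuple` whose fields were stacked along a new leading batch axis of size two (the counterpart of `add_batch_dim` in Example #20). All sizes are made up:

import tensorflow as tf
from graph_nets import graphs

# Two graphs of 3 nodes / 2 edges each, stacked along a leading batch axis.
G = graphs.GraphsTuple(
    nodes=tf.ones((2, 3, 4)), edges=tf.ones((2, 2, 4)),
    senders=tf.zeros((2, 2), tf.int32), receivers=tf.ones((2, 2), tf.int32),
    globals=tf.ones((2, 1, 4)),
    n_node=tf.fill([2, 1], 3), n_edge=tf.fill([2, 1], 2))

merged = _concat_batch_dim(G)  # one GraphsTuple with n_node == [3, 3]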
Example #29
  def _build(self, graphs_tuple):
    """Connects the model into the tensorflow graph.

    Args:
      graphs_tuple: input graph tensor as defined in `graphs_tuple.graphs`.

    Returns:
      tensor with shape [n_particles] containing the predicted particle
      mobilities.
    """
    encoded = self._encoder(graphs_tuple)
    outputs = encoded

    for _ in range(self._n_recurrences):
      # Adds skip connections.
      inputs = utils_tf.concat([outputs, encoded], axis=-1)
      outputs = self._propagation_network(inputs)

    decoded = self._decoder(outputs)
    return tf.squeeze(decoded.globals, axis=-1)
Example #30
    def __call__(self, input_op, num_processing_steps):
        # Embed the raw node, edge and global features.
        node = input_op.nodes[:, 0]  # shape [n]
        edge = input_op.edges[:, 0]  # shape [n]
        globals_ = input_op.globals  # renamed to avoid shadowing the builtin
        node_f = self._embed(node)
        edge_f = self._embed_edge(edge)
        g_f = self._embed_global(globals_)
        input_op = input_op.replace(nodes=node_f, edges=edge_f, globals=g_f)

        latent = self._encoder(input_op)

        latent0 = latent
        output_ops = []
        for _ in range(num_processing_steps):
            core_input = utils_tf.concat([latent0, latent], axis=1)
            latent = self._core(core_input)
            decoded_op = self._decoder(latent)
            output_ops.append(self._output_transform(decoded_op))
        return output_ops
Example #31
def padding(g, max_nodes, max_edges, do_concat=True):
    f_dtype = np.float32
    n_nodes = np.sum(g.n_node)
    n_edges = np.sum(g.n_edge)
    n_nodes_pad = max_nodes - n_nodes
    n_edges_pad = max_edges - n_edges

    if n_nodes_pad < 0:
        raise ValueError("Max Nodes: {}, but {} nodes in graph".format(max_nodes, n_nodes))

    if n_edges_pad < 0:
        raise ValueError("Max Edges: {}, but {} edges in graph".format(max_edges, n_edges))

    # padding edges all point to node 0
    # TODO: make the graphs more general <xju>
    edges_idx = tf.constant([0] * n_edges_pad, dtype=np.int32)
    zeros = np.array([0.0], dtype=f_dtype)
    n_node_features = g.nodes.shape[-1]
    n_edge_features = g.edges.shape[-1]

    padding_datadict = {
        "n_node": n_nodes_pad,
        "n_edge": n_edges_pad,
        "nodes": np.zeros((n_nodes_pad, n_node_features), dtype=f_dtype),
        "edges": np.zeros((n_edges_pad, n_edge_features), dtype=f_dtype),
        "receivers": edges_idx,
        "senders": edges_idx,
        "globals": zeros,
    }
    padding_graph = utils_tf.data_dicts_to_graphs_tuple([padding_datadict])
    if do_concat:
        return utils_tf.concat([g, padding_graph], axis=0)
    else:
        return padding_graph
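
Padding to a fixed node/edge budget like this is usually done so that a `tf.function`-compiled training step sees static shapes and is traced only once. A short usage sketch with made-up budgets (`g` is a `GraphsTuple` of tensors, as above):

MAX_NODES, MAX_EDGES = 128, 512

padded = padding(g, MAX_NODES, MAX_EDGES)  # the input graph plus one padding graph
assert int(tf.reduce_sum(padded.n_node)) == MAX_NODES
assert int(tf.reduce_sum(padded.n_edge)) == MAX_EDGES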