Example 1
def create_data(batch_size, num_elements_min_max):
    """Returns graphs containing the inputs and targets for classification.

  Refer to create_data_dicts_tf and create_linked_list_target for more details.

  Args:
    batch_size: batch size for the `input_graphs`.
    num_elements_min_max: a 2-`tuple` of `int`s which define the [lower, upper)
      range of the number of elements per list.

  Returns:
    inputs: a `graphs.GraphsTuple` which contains the input list as a graph.
    targets: a `graphs.GraphsTuple` which contains the target as a graph.
    sort_indices: a `graphs.GraphsTuple` which contains the sort indices of
      the list elements a graph.
    ranks: a `graphs.GraphsTuple` which contains the ranks of the list
      elements as a graph.
  """
    inputs, sort_indices, ranks = create_graph_dicts_tf(
        batch_size, num_elements_min_max)
    inputs = utils_tf.data_dicts_to_graphs_tuple(inputs)
    sort_indices = utils_tf.data_dicts_to_graphs_tuple(sort_indices)
    ranks = utils_tf.data_dicts_to_graphs_tuple(ranks)

    inputs = utils_tf.fully_connect_graph_dynamic(inputs)
    sort_indices = utils_tf.fully_connect_graph_dynamic(sort_indices)
    ranks = utils_tf.fully_connect_graph_dynamic(ranks)

    targets = create_linked_list_target(batch_size, sort_indices)
    nodes = tf.concat((targets.nodes, 1.0 - targets.nodes), axis=1)
    edges = tf.concat((targets.edges, 1.0 - targets.edges), axis=1)
    targets = targets._replace(nodes=nodes, edges=edges)

    return inputs, targets, sort_indices, ranks  # e.g. input nodes: [7, 1]; target nodes: [7, 2], target edges: [49, 2]
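A minimal, self-contained sketch of the repeated fully_connect_graph_dynamic step (the node counts and feature sizes below are made up; assumes graph_nets running under TF2 eager execution):

import numpy as np
from graph_nets import utils_tf

data_dicts = [
    {"nodes": np.random.randn(3, 4).astype(np.float32)},  # graph with 3 nodes
    {"nodes": np.random.randn(5, 4).astype(np.float32)},  # graph with 5 nodes
]
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(data_dicts)
fc = utils_tf.fully_connect_graph_dynamic(graphs_tuple, exclude_self_edges=True)
# Each graph is now fully connected within itself:
# fc.n_edge == [6, 20] (n * (n - 1) per graph); senders/receivers index into
# the concatenated node tensor, so the second graph's indices start at 3.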
Example 2
 def object_encoder_graph(self, encodings_per_object):
     """Function to make a graph object from the object oriented encoding
     Inputs:
         encodings_per_object: tensor containing the embeddings per object slot (size : [batch, n_objects, embedding dim])
     Outputs:
         graph_representation: fully conected graph with as node attributes the object encodings (size : [batch, graph])
     """
      # Specify the number of nodes and edges implied by the passed n_objects and the encodings for each object
     n_nodes = tf.tile(tf.constant([self.n_objects]),
                       tf.shape(encodings_per_object)[0:1],
                       name='n_nodes')
     n_edges = tf.tile(tf.constant([0]),
                       tf.shape(encodings_per_object)[0:1],
                       name='n_edges')
     # put the node_attributes in the correct shape to make graph object:
     node_attributes = tf.reshape(encodings_per_object, [
         tf.shape(encodings_per_object)[0] *
         tf.shape(encodings_per_object)[1], self.state_dim_embedding
     ])
     # make graph object with specified node attributes:
     graph = graphs.GraphsTuple(nodes=node_attributes,
                                edges=None,
                                globals=None,
                                receivers=None,
                                senders=None,
                                n_node=n_nodes,
                                n_edge=n_edges)
      # Connect every node to all other nodes (i.e. make the graph fully connected):
     fully_connected_graph = utils_tf.fully_connect_graph_dynamic(
         graph, exclude_self_edges=False)
      # Make it runnable in TF because None fields are used:
     runnable_fc_graph = utils_tf.make_runnable_in_session(
         fully_connected_graph)
     return runnable_fc_graph
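The same embeddings-to-graph pattern outside the class, as a hedged standalone sketch (batch, n_objects and embedding_dim are illustrative values, not the class's real hyperparameters):

import tensorflow as tf
from graph_nets import graphs, utils_tf

batch, n_objects, embedding_dim = 2, 4, 8
encodings = tf.random.normal([batch, n_objects, embedding_dim])

nodes = tf.reshape(encodings, [batch * n_objects, embedding_dim])
graph = graphs.GraphsTuple(
    nodes=nodes,
    edges=None, globals=None, receivers=None, senders=None,
    n_node=tf.fill([batch], n_objects),        # one graph per batch element
    n_edge=tf.zeros([batch], dtype=tf.int32))
fc_graph = utils_tf.fully_connect_graph_dynamic(graph, exclude_self_edges=False)
# fc_graph.n_edge == [16, 16]: every node is connected to every node
# (including itself) within its own graph, never across graphs.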
Example 3
    def _build(self, batch, *args, **kwargs):
        (graph, img, c) = batch
        del c
        # The encoded cluster graph has globals which can be compared against the encoded image graph
        encoded_graph = self.epd_graph(graph, self._core_steps)

        # Add an extra dimension to the image (tf.summary expects a Tensor of rank 4)
        img = img[None, ...]
        im_before_cnn = (img - tf.reduce_min(img)) / (tf.reduce_max(img) - tf.reduce_min(img))
        tf.summary.image(f'img_before_cnn', im_before_cnn, step=self.step)

        img = self.auto_encoder.encoder(img)

        # Prevent the autoencoder from learning
        try:
            for variable in self.auto_encoder.encoder.trainable_variables:
                variable._trainable = False
            for variable in self.auto_encoder.decoder.trainable_variables:
                variable._trainable = False
        except:
            pass

        img_after_autoencoder = (img - tf.reduce_min(img)) / (tf.reduce_max(img) - tf.reduce_min(img))
        tf.summary.image(f'img_after_autoencoder', tf.transpose(img_after_autoencoder, [3, 1, 2, 0]), step=self.step)

        decoded_img = self.auto_encoder.decoder(img)
        decoded_img = (decoded_img - tf.reduce_min(decoded_img)) / (tf.reduce_max(decoded_img) - tf.reduce_min(decoded_img))
        tf.summary.image(f'decoded_img', decoded_img, step=self.step)

        # Reshape the encoded image so it can be used for the nodes
        # [1, w, h, c] -> [w*h, c]
        nodes = tf.reshape(img, (-1, self.image_feature_size))

        # Create a graph that has a node for every encoded pixel. The features of each node
        # are the channels of the corresponding pixel. Then connect each node with every other
        # node.
        img_graph = GraphsTuple(nodes=nodes,
                            edges=None,
                            globals=None,
                            receivers=None,
                            senders=None,
                            n_node=tf.shape(nodes)[0:1],
                            n_edge=tf.constant([0]))
        connected_graph = fully_connect_graph_dynamic(img_graph)

        # The encoded image graph has globals which can be compared against the encoded cluster graph
        encoded_img = self.epd_image(connected_graph, 1)

        # Compare the globals from the encoded cluster graph and encoded image graph
        # to estimate the similarity between the input graph and input image
        distance = self.compare(tf.concat([encoded_graph.globals, encoded_img.globals], axis=1)) + self.compare(
            tf.concat([encoded_img.globals, encoded_graph.globals], axis=1))

        return distance
Example 4
def get_batched_graphs(train_set):
    """
    Description: converts the inputs in each batch to complete (fully connected) graphs.
    :param train_set: training set containing tuples (batch_input, batch_target)
    :return: yields (input_graphs_tuple, batch_target) pairs
    """
    for batch_input, batch_target in train_set:
        input_dict = create_graph_dicts(batch_input)
        targets = batch_target

        input_dict = utils_tf.data_dicts_to_graphs_tuple(input_dict)
        input_dict = utils_tf.fully_connect_graph_dynamic(input_dict)

        yield input_dict, targets
Example 5
 def sample_decoder(self, positions, logits, temperature):
     token_distribution = tfp.distributions.RelaxedOneHotCategorical(temperature, logits=logits)
     token_samples_onehot = token_distribution.sample((1,),
                                                      name='token_samples')
     token_sample_onehot = token_samples_onehot[0]  # [n_node, num_embedding]
     token_sample = tf.matmul(token_sample_onehot, self.embeddings)  # [n_node, embedding_dim]
     n_node = tf.shape(token_sample)[0:1]  # shape [1], as the n_node field expects
     latent_graph = GraphsTuple(nodes=token_sample,
                                edges=None,
                                globals=tf.constant([0.], dtype=tf.float32),
                                senders=None,
                                receivers=None,
                                n_node=n_node,
                                n_edge=tf.constant([0], dtype=tf.int32))  # nodes: [n_node, embedding_dim]
     latent_graph = fully_connect_graph_dynamic(latent_graph)
     gaussian_tokens = self.decoder(latent_graph)  # nodes=[num_gaussian_components, component_dim]
     reconstructed_fields = reconstruct_fields_from_gaussians(gaussian_tokens, positions)
     return reconstructed_fields
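A standalone sketch of the Gumbel-softmax sampling step above (the logits, temperature and embedding table here are made-up stand-ins for the model's real tensors):

import tensorflow as tf
import tensorflow_probability as tfp

n_node, num_embedding, embedding_dim = 5, 16, 8
logits = tf.random.normal([n_node, num_embedding])
embeddings = tf.random.normal([num_embedding, embedding_dim])

# Relaxed (differentiable) one-hot samples over the embedding indices.
dist = tfp.distributions.RelaxedOneHotCategorical(0.7, logits=logits)
token_samples_onehot = dist.sample((1,), name='token_samples')
token_sample_onehot = token_samples_onehot[0]              # [n_node, num_embedding]
token_sample = tf.matmul(token_sample_onehot, embeddings)  # [n_node, embedding_dim]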
Example 6
        def _single_decode(token_sample_onehot):
            """Decodes a single one-hot token sample into a log-likelihood and KL term.

            Args:
                token_sample_onehot: [n_node, num_embeddings]

            Returns:
                log_likelihood: scalar
                kl_term: scalar
            """
            token_sample = tf.matmul(
                token_sample_onehot,
                self.embeddings)  # [n_node, embedding_dim]  # = z ~ q(z|x)
            latent_graph = GraphsTuple(
                nodes=token_sample,
                edges=None,
                globals=tf.constant([0.], dtype=tf.float32),
                senders=None,
                receivers=None,
                n_node=encoded_graph.n_node,
                n_edge=tf.constant([0],
                                   dtype=tf.int32))  # [n_node, embedding_dim]
            latent_graph = fully_connect_graph_dynamic(latent_graph)
            gaussian_tokens = self.decoder(
                latent_graph)  # nodes=[num_gaussian_components, component_dim]
            _, log_likelihood = gaussian_loss_function(gaussian_tokens.nodes,
                                                       graph)
            # [n_node, num_embeddings].[n_node, num_embeddings]
            sum_selected_logits = tf.math.reduce_sum(token_sample_onehot *
                                                     logits,
                                                     axis=1)  # [n_node]
            kl_term = sum_selected_logits - self.num_embedding * log_norm + \
                      self.num_embedding * tf.math.log(tf.cast(self.num_embedding, tf.float32))
            kl_term = self.beta * tf.reduce_mean(kl_term)
            return log_likelihood, kl_term
Example 7
    def test_fully_connect_graph_dynamic_with_dynamic_sizes(
            self, exclude_self_edges):
        for g in self.graphs_dicts_in:
            g.pop("edges")
            g.pop("receivers")
            g.pop("senders")
        n_relation = 0
        for g in self.graphs_dicts_in:
            n_node = g["nodes"].shape[0]
            if exclude_self_edges:
                n_relation += n_node * (n_node - 1)
            else:
                n_relation += n_node * n_node

        graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(
            self.graphs_dicts_in)
        graphs_tuple = graphs_tuple.map(
            test_utils.mask_leading_dimension,
            ["nodes", "globals", "n_node", "n_edge"])
        graphs_tuple = utils_tf.fully_connect_graph_dynamic(
            graphs_tuple, exclude_self_edges)
        with self.test_session() as sess:
            actual_receivers, actual_senders, actual_n_edge = sess.run([
                graphs_tuple.receivers, graphs_tuple.senders,
                graphs_tuple.n_edge
            ])
        self.assertAllEqual((n_relation, ), actual_receivers.shape)
        self.assertAllEqual((n_relation, ), actual_senders.shape)
        self.assertAllEqual((len(self.graphs_dicts_in), ), actual_n_edge.shape)
        expected_edges = []
        offset = 0
        for graph in self.graphs_dicts_in:
            n_node = graph["nodes"].shape[0]
            for e1 in range(n_node):
                for e2 in range(n_node):
                    if not exclude_self_edges or e1 != e2:
                        expected_edges.append((e1 + offset, e2 + offset))
            offset += n_node
        actual_edges = zip(actual_receivers, actual_senders)
        self.assertSetEqual(set(actual_edges), set(expected_edges))
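The n_relation bookkeeping in these tests is plain per-graph edge counting; a quick standalone check with made-up node counts:

node_counts = [3, 5]
with_self_edges = sum(n * n for n in node_counts)           # 9 + 25 = 34
without_self_edges = sum(n * (n - 1) for n in node_counts)  # 6 + 20 = 26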
Example 8
  def test_fully_connect_graph_dynamic(self, exclude_self_edges):
    for g in self.graphs_dicts_in:
      g.pop("edges")
      g.pop("receivers")
      g.pop("senders")
    n_relation = 0
    for g in self.graphs_dicts_in:
      n_node = g["nodes"].shape[0]
      if exclude_self_edges:
        n_relation += n_node * (n_node - 1)
      else:
        n_relation += n_node * n_node

    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    graphs_tuple = utils_tf.fully_connect_graph_dynamic(graphs_tuple,
                                                        exclude_self_edges)
    with self.test_session() as sess:
      actual_receivers, actual_senders = sess.run(
          [graphs_tuple.receivers, graphs_tuple.senders])
    self.assertAllEqual((n_relation,), actual_receivers.shape)
    self.assertAllEqual((n_relation,), actual_senders.shape)
    self.assertAllEqual((len(self.graphs_dicts_in),),
                        graphs_tuple.n_edge.get_shape().as_list())
Example 9
 def _build(self, batch, *args, **kwargs):
     (graph, img, c) = batch
     del c
     encoded_graph = self.encoder_graph(graph)
     tf.summary.image(f'img_before_cnn', img[None, ...], step=self.step)
     img = self.image_cnn(img[None, ...])
     for channel in range(img.shape[-1]):
         tf.summary.image(f'img_after_cnn[{channel}]',
                          img[..., channel:channel + 1],
                          step=self.step)
     # [1, w, h, c] -> [w*h, c]
     nodes = tf.reshape(img, (-1, self.image_feature_size))
     img_graph = GraphsTuple(nodes=nodes,
                             edges=None,
                             globals=None,
                             receivers=None,
                             senders=None,
                             n_node=tf.shape(nodes)[0:1],
                             n_edge=tf.constant([0]))
     connected_graph = fully_connect_graph_dynamic(img_graph)
     encoded_img = self.encoder_image(connected_graph)
     return self.compare(
         tf.concat([encoded_graph.globals, encoded_img.globals],
                   axis=1))  #[1]
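The CNN-feature-map-to-graph step shared by Examples 3 and 9, reduced to a standalone sketch (the feature map shape is an assumption):

import tensorflow as tf
from graph_nets.graphs import GraphsTuple
from graph_nets.utils_tf import fully_connect_graph_dynamic

feature_map = tf.random.normal([1, 8, 8, 16])       # [1, w, h, c] from a CNN
nodes = tf.reshape(feature_map, (-1, 16))           # [w*h, c]: one node per pixel
img_graph = GraphsTuple(nodes=nodes,
                        edges=None, globals=None,
                        receivers=None, senders=None,
                        n_node=tf.shape(nodes)[0:1],
                        n_edge=tf.constant([0]))
connected = fully_connect_graph_dynamic(img_graph)  # 64 * 64 = 4096 edges (self edges included)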
Example 10
    def _build(self, graph):
        # Give the graph edge features and project the nodes to a new dimension (linear transformation)
        graph = graph.replace(edges=tf.tile(self.intra_graph_edge_variable[None, :], [graph.n_edge[0], 1]))
        graph = self.projection_node_block(graph)  # [n_nodes, node_size]
        # print('graph 1', graph)
        n_node = tf.shape(graph.nodes)[0]
        graph.replace(n_node=n_node)
        # create fully connected output token nodes
        token_start_nodes = tf.tile(self.empty_node_variable[None, :], [self.num_output, 1])
        token_graph = GraphsTuple(nodes=token_start_nodes,
                                  edges=None,
                                  globals=tf.constant([0.], dtype=tf.float32),
                                  senders=None,
                                  receivers=None,
                                  n_node=tf.constant([self.num_output], dtype=tf.int32),
                                  n_edge=tf.constant([0], dtype=tf.int32))
        token_graph = fully_connect_graph_dynamic(token_graph)
        # print('\n token graph', token_graph, '\n')
        token_graph = token_graph.replace(
            edges=tf.tile(self.intra_token_graph_edge_variable[None, :], [token_graph.n_edge[0], 1]))
        concat_graph = concat([graph, token_graph], axis=0)  # n_node = [n_nodes, n_tokens]
        concat_graph = concat_graph.replace(n_node=tf.reduce_sum(concat_graph.n_node, keepdims=True),
                                            n_edge=tf.reduce_sum(concat_graph.n_edge,
                                                                 keepdims=True))  # n_node=[n_nodes+n_tokens]

        # add random edges between
        # choose random unique set of nodes in graph, choose random set of nodes in token_graph
        gumbel = -tf.math.log(-tf.math.log(tf.random.uniform((n_node,))))
        n_connect_edges = tf.cast(
            tf.multiply(tf.constant([self.inter_graph_connect_prob]), tf.cast(n_node, tf.float32)), tf.int32)
        _, graph_senders = tf.nn.top_k(gumbel, n_connect_edges[0])
        # print('graph_senders', graph_senders)
        token_graph_receivers = n_node + tf.random.uniform(shape=n_connect_edges, minval=0, maxval=self.num_output,
                                                           dtype=tf.int32)
        # print('token_graph_receivers', token_graph_receivers)
        senders = tf.concat([concat_graph.senders, graph_senders, token_graph_receivers],
                            axis=0)  # add bi-directional senders + receivers
        receivers = tf.concat([concat_graph.receivers, token_graph_receivers, graph_senders], axis=0)
        inter_edges = tf.tile(self.inter_graph_edge_variable[None, :],
                              tf.concat([2 * n_connect_edges, tf.constant([1], dtype=tf.int32)],
                                        axis=0))  # 200 = 10000(n_nodes) * 0.01 * 2
        edges = tf.concat([concat_graph.edges, inter_edges], axis=0)
        concat_graph = concat_graph.replace(senders=senders, receivers=receivers, edges=edges,
                                            n_edge=concat_graph.n_edge[0] + 2 * n_connect_edges[0],
                                            # concat_graph.n_edge[0] + 2 * n_connect_edges
                                            globals=self.starting_global_variable[None, :])
        # print('starting global', self.starting_global_variable[None, :])
        latent_graph = concat_graph

        print('concat_graph_nodes', self.name, concat_graph.nodes)
        for step in range(self.crossing_steps):  # roughly the theoretical crossing time for information through the graph
            input_nodes = latent_graph.nodes
            latent_graph = self.edge_block(latent_graph)
            latent_graph = self.node_block(latent_graph)
            latent_graph = self.global_block(latent_graph)
            latent_graph = latent_graph.replace(nodes=latent_graph.nodes + input_nodes)  # residual connections

        print('latent_graph_nodes', self.name, latent_graph.nodes)
        print('latent_graph_edges', self.name, latent_graph.edges)
        print('latent_graph_globals', self.name, latent_graph.globals)
        latent_graph = latent_graph.replace(nodes=latent_graph.nodes[n_node:],
                                            edges=None,
                                            receivers=None,
                                            senders=None,
                                            globals=None,
                                            n_node=tf.constant([self.num_output], dtype=tf.int32),
                                            n_edge=tf.constant(0, dtype=tf.int32))
        output_graph = self.output_projection_node_block(latent_graph)
        print('output_graph_nodes', self.name, output_graph.nodes)

        return output_graph
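The random inter-graph wiring above uses the Gumbel-top-k trick to draw distinct node indices without replacement; in isolation (n_node and the connection probability are made-up values):

import tensorflow as tf

n_node = 100
inter_graph_connect_prob = 0.1
k = int(n_node * inter_graph_connect_prob)

# Adding Gumbel noise and taking the top k yields k distinct indices,
# uniformly sampled without replacement from [0, n_node).
gumbel = -tf.math.log(-tf.math.log(tf.random.uniform((n_node,))))
_, random_senders = tf.nn.top_k(gumbel, k)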
Example 11
def make_all_runnable_in_session(*args):
    """Lets an iterable of TF graphs be output from a session as NP graphs."""
    return [utils_tf.make_runnable_in_session(a) for a in args]


x = tf.placeholder(shape=[None, features.shape[1]], dtype=tf.float32)
y = tf.placeholder(shape=[None, y_train.shape[1]], dtype=tf.float32)
train_mask_holder = tf.placeholder(shape=[
    None,
], dtype=tf.int32)

# Define a Graph object
input_graphs = []
input_graphs.append({"nodes": x})
input_graphs = utils_tf.data_dicts_to_graphs_tuple(input_graphs)
input_graphs = utils_tf.fully_connect_graph_dynamic(input_graphs)
adj = tf.constant(adj.toarray(), dtype=tf.float32)
model = GCN()
input_graph = utils_tf.get_graph(input_graphs, 0)

# the output_graph = make_linear_model()(input_graph)
# the output_graph.nodes = make_linear_model()(input_graph.nodes)

output_graph = model(input_graph)

# Make the graphs runnable in a session
output_graph, input_graph = make_all_runnable_in_session(
    output_graph, input_graph)

# Define the loss function
loss = tf.nn.softmax_cross_entropy_with_logits(
Example 12
    def _build(self, batch, *args, **kwargs):
        (graph, img, c) = batch
        del c

        print(graph.nodes.shape)

        # The encoded cluster graph has globals which can be compared against the encoded image graph
        encoded_graph = self.epd_graph(graph, self._core_steps)

        # # Add an extra dimension to the image (tf.summary expects a Tensor of rank 4)
        # img = img[None, ...]

        print("IMG SHAPE:", img.shape)
        print("IMG MIN MAX:", tf.math.reduce_min(img), tf.math.reduce_max(img))

        img_before_cnn = (img - tf.reduce_min(img)) / \
                         (tf.reduce_max(img) - tf.reduce_min(img))
        tf.summary.image(f'img_before_cnn', img_before_cnn, step=self.step)

        # Smooth the image and use the encoder from the autoencoder to reduce the dimensionality of the image
        # The autoencoder was trained on images that were smoothed in the same way
        img = gaussian_filter2d(img, filter_shape=[6, 6])
        img = self.auto_encoder.encoder(img)

        # Prevent the autoencoder from learning
        try:
            for variable in self.auto_encoder.encoder.trainable_variables:
                variable._trainable = False
            for variable in self.auto_encoder.decoder.trainable_variables:
                variable._trainable = False
        except:
            pass

        print("IMG SHAPE AFTER CNN:", img.shape)
        print("IMG MIN MAX AFTER CNN:", tf.math.reduce_min(img),
              tf.math.reduce_max(img))

        img_after_cnn = (img - tf.reduce_min(img)) / \
                        (tf.reduce_max(img) - tf.reduce_min(img))
        tf.summary.image(
            f'quantized_img',
            img_after_cnn[:, :, :, np.random.randint(low=0, high=64)][:, :, :, None],
            step=self.step)

        decoded_img = self.auto_encoder.decoder(img)
        decoded_img = (decoded_img - tf.reduce_min(decoded_img)) / \
                      (tf.reduce_max(decoded_img) - tf.reduce_min(decoded_img))
        tf.summary.image(f'decoded_img', decoded_img, step=self.step)

        # Reshape the encoded image so it can be used for the nodes
        img_nodes = tf.reshape(img, (-1, self.image_feature_size))
        print(img_nodes.shape)

        # Create a graph that has a node for every encoded pixel. The features of each node
        # are the channels of the corresponding pixel. Then connect each node with every other
        # node.
        img_graph = GraphsTuple(nodes=img_nodes,
                                edges=None,
                                globals=None,
                                receivers=None,
                                senders=None,
                                n_node=tf.shape(img_nodes)[0:1],
                                n_edge=tf.constant([0]))
        connected_graph = fully_connect_graph_dynamic(img_graph)

        # The encoded image graph has globals which can be compared against the encoded cluster graph
        encoded_img = self.epd_image(connected_graph, 1)

        # Compare the globals from the encoded cluster graph and encoded image graph
        # to estimate the similarity between the input graph and input image
        print(encoded_img.globals.shape)
        print(encoded_graph.globals.shape)
        distance = self.compare(
            tf.concat([encoded_graph.globals, encoded_img.globals], axis=1)
        ) + self.compare(
            tf.concat([encoded_img.globals, encoded_graph.globals], axis=1))
        return distance