Example #1
    def _setup_ak(self, post, nn, n2):
        # This is the equivalent of CalculateAk in Fabber
        #
        # Some of this could probably be done more cleanly with linalg
        # operations, but bear in mind this handles a single parameter only

        self.sigmaK = self.log_tf(tf.matrix_diag_part(post.cov)[:, self.idx], name="sigmak") # [W]
        self.wK = self.log_tf(post.mean[:, self.idx], name="wk") # [W]
        self.num_nn = self.log_tf(tf.sparse_reduce_sum(self.nn, axis=1), name="num_nn") # [W]

        # Sum over vertices of parameter variance multiplied by number of 
        # nearest neighbours for each vertex
        trace_term = self.log_tf(tf.reduce_sum(self.sigmaK * self.num_nn), name="trace") # [1]

        # Sum of nearest and next-nearest neighbour mean values
        self.sum_means_nn = self.log_tf(tf.reshape(tf.sparse_tensor_dense_matmul(self.nn, tf.reshape(self.wK, (-1, 1))), (-1,)), name="wksum") # [W]
        self.sum_means_n2 = self.log_tf(tf.reshape(tf.sparse_tensor_dense_matmul(self.n2, tf.reshape(self.wK, (-1, 1))), (-1,)), name="contrib8") # [W]
        
        # Vertex parameter mean multiplied by number of nearest neighbours
        wknn = self.log_tf(self.wK * self.num_nn, name="wknn") # [W]

        swk = self.log_tf(wknn - self.sum_means_nn, name="swk") # [W]

        term2 = self.log_tf(tf.reduce_sum(swk * self.wK), name="term2") # [1]

        gk = 1 / (0.5 * trace_term + 0.5 * term2 + 0.1)
        hk = tf.multiply(tf.to_float(self.nvertices), 0.5) + 1.0
        self.ak = self.log_tf(tf.identity(gk * hk, name="ak"))
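
For intuition, here is a minimal standalone sketch of the same update on a toy 3-vertex chain graph, using plain NumPy in place of the class attributes (post, self.nn, self.idx and log_tf are stripped out; reading gk and hk as the scale and shape of a Gamma distribution over ak, with ak set to its mean, is my gloss based on Fabber's CalculateAk, not something stated in the snippet):

import numpy as np

# Toy 3-vertex chain: vertex 1 neighbours vertices 0 and 2
nn = np.array([[0, 1, 0],
               [1, 0, 1],
               [0, 1, 0]], dtype=np.float64)
wk = np.array([1.0, 2.0, 4.0])      # posterior means wK, one per vertex
sigmak = np.array([0.1, 0.2, 0.1])  # posterior variances sigmaK
nvertices = 3

num_nn = nn.sum(axis=1)              # neighbours per vertex
trace_term = np.sum(sigmak * num_nn)
sum_means_nn = nn @ wk               # sum of neighbour means per vertex
swk = wk * num_nn - sum_means_nn
term2 = np.sum(swk * wk)

gk = 1 / (0.5 * trace_term + 0.5 * term2 + 0.1)   # scale
hk = nvertices * 0.5 + 1.0                        # shape
ak = gk * hk
print(ak)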
Example #2
    def mean_log_pdf(self, samples):
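        """
        Mean log PDF for the MRF spatial prior, computed from the squared
        differences between samples at neighbouring vertices.
        """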
        samples = tf.reshape(samples, (self.nvertices, -1)) # [W, N]
        self.num_nn = self.log_tf(tf.sparse_reduce_sum(self.nn, axis=1), name="num_nn") # [W]

        expanded_nn = tf.sparse_concat(2, [tf.sparse.reshape(self.nn, (self.nvertices, self.nvertices, 1))] * self.sample_size)
        xj = expanded_nn * tf.reshape(samples, (self.nvertices, 1, -1))
        #xi = tf.reshape(tf.sparse.to_dense(tf.sparse.reorder(self.nn)), (self.nvertices, self.nvertices, 1)) * tf.reshape(samples, (1, self.nvertices, -1))
        xi = expanded_nn * tf.reshape(samples, (1, self.nvertices, -1))
        #xi = tf.sparse.transpose(xj, perm=(1, 0, 2)) 
        neg_xi = tf.SparseTensor(xi.indices, -xi.values, dense_shape=xi.dense_shape)
        dx2 = tf.square(tf.sparse.add(xj, neg_xi), name="dx2")
        sdx = tf.sparse.reduce_sum(dx2, axis=0) # [W, N]
        term1 = tf.identity(0.5*self.logak, name="term1")
        term2 = tf.identity(-self.ak * sdx / 4, name="term2")
        log_pdf = term1 + term2  # [W, N]
        mean_log_pdf = tf.reshape(tf.reduce_mean(log_pdf, axis=-1), [self.nvertices]) # [W]
        return mean_log_pdf
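
Both this method and _setup_ak above assume self.nn is a sparse [W, W] neighbour adjacency matrix. A minimal sketch of how such a tensor can be built and reduced (the construction here is an illustrative assumption, not taken from the source class):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# 3-vertex chain graph: symmetric edges (0,1), (1,0), (1,2), (2,1)
indices = [[0, 1], [1, 0], [1, 2], [2, 1]]
nn = tf.SparseTensor(indices=indices,
                     values=tf.ones(len(indices)),
                     dense_shape=[3, 3])
nn = tf.sparse.reorder(nn)  # ensure canonical row-major index ordering

num_nn = tf.sparse_reduce_sum(nn, axis=1)  # neighbours per vertex
with tf.Session() as sess:
    print(sess.run(num_nn))  # [1. 2. 1.]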
Example #3
    def _post_setup(self):
        self.E_predict = _tf.reduce_sum(
            [_tf.sparse_tensor_dense_matmul(self.atom_maps[t],
                                            self.ANNs[t].output)
             for t in self.atom_types],
            axis=[0, 2], name="E_prediction")

        # Tensorflow operation to initialize the variables of the atomic
        # networks
        # self.init_vars = [a.init_vars for a in self.ANNs.itervalues()]

        self.num_atoms = _tf.reduce_sum(
            [_tf.sparse_reduce_sum(self.atom_maps[t], axis=1)
             for t in self.atom_types],
            axis=0, name="NumberOfAtoms")
        # TensorFlow operation that calculates the RMSE per atom. Note that
        # the whole error per atom is squared; the per-atom normalisation is
        # supplied through the rmse_weights placeholder.
        with _tf.name_scope("RMSE"):
            self.rmse_weights = _tf.placeholder(shape=(None, ),
                                                dtype=precision,
                                                name="weights")
            self.rmse = self.error_scaling * _tf.sqrt(
                _tf.reduce_mean(
                    (self.target - self.E_predict)**2 * self.rmse_weights))
            # self.rmse = self.error_scaling*_tf.sqrt(
            #    _tf.losses.mean_squared_error(self.target,
            #    self.E_predict, weights = 1.0/self.num_atoms**2))
            self.rmse_summ = _tf.summary.scalar("RMSE",
                                                self.rmse,
                                                family="performance")

        self.variables = _tf.get_collection(
            _tf.GraphKeys.MODEL_VARIABLES,
            scope=_tf.get_default_graph().get_name_scope())
        self.saver = _tf.train.Saver(self.variables,
                                     max_to_keep=None,
                                     save_relative_paths=True)
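
For context, here is a toy sketch of the sparse atom-map bookkeeping these operations rely on. The shapes and values are illustrative assumptions, not taken from the class: each row of an atom map corresponds to a structure, each column to an atom of that element type.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Toy atom map for one element type: 2 structures x 3 atoms.
# Row s has a 1 in column a if atom a belongs to structure s.
atom_map = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 2]],
                           values=[1.0, 1.0, 1.0],
                           dense_shape=[2, 3])

# Hypothetical per-atom network outputs (one energy contribution per atom)
atomic_energies = tf.constant([[0.5], [0.25], [1.0]])

energy_per_structure = tf.sparse_tensor_dense_matmul(atom_map, atomic_energies)
atoms_per_structure = tf.sparse_reduce_sum(atom_map, axis=1)

with tf.Session() as sess:
    print(sess.run([energy_per_structure, atoms_per_structure]))
    # [[0.75], [1.0]] and [2., 1.]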
Example #4
    def mean_log_pdf(self, samples):
        r"""
        mean log PDF for the MRF spatial prior.

        This is calculating:

        :math:`\log P = \frac{1}{2} \log \phi - \frac{\phi}{2}\underline{x^T} D \underline{x}`
        """
        samples = tf.reshape(samples, (self.nvertices, -1)) # [W, N]
        self.num_nn = self.log_tf(tf.sparse_reduce_sum(self.nn, axis=1), name="num_nn") # [W]
        dx_diag = self.log_tf(tf.reshape(self.num_nn, (self.nvertices, 1)) * samples, name="dx_diag") # [W, N]
        dx_offdiag = self.log_tf(tf.sparse_tensor_dense_matmul(self.nn, samples), name="dx_offdiag") # [W, N]
        self.dx = self.log_tf(dx_diag - dx_offdiag, name="dx") # [W, N]
        self.xdx = self.log_tf(samples * self.dx, name="xdx") # [W, N]
        term1 = tf.identity(0.5*self.logak, name="term1")
        term2 = tf.identity(-0.5*self.ak*self.xdx, name="term2")
        log_pdf = term1 + term2  # [W, N]
        mean_log_pdf = tf.reshape(tf.reduce_mean(log_pdf, axis=-1), [self.nvertices]) # [W]

        # Gamma prior if we care
        #q1, q2 = 1, 100
        #mean_log_pdf += (q1-1) * self.logak - self.ak / q2

        return mean_log_pdf
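
The dx_diag - dx_offdiag step is an implicit multiplication by the graph Laplacian D = diag(num_nn) - nn. A small NumPy check of that equivalence, assuming nn is a symmetric 0/1 adjacency matrix:

import numpy as np

# 3-vertex chain; D = diag(num_nn) - nn is the graph Laplacian
nn = np.array([[0, 1, 0],
               [1, 0, 1],
               [0, 1, 0]], dtype=np.float64)
num_nn = nn.sum(axis=1)
D = np.diag(num_nn) - nn

x = np.array([1.0, 2.0, 4.0])     # one sample per vertex
dx = num_nn * x - nn @ x          # dx_diag - dx_offdiag, as in the snippet
assert np.allclose(dx, D @ x)

xdx = x * dx                      # per-vertex contribution to x^T D x
print(xdx, xdx.sum())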
Example #5
def sparse_message_pass(node_states,
                        adjacency_matrices,
                        num_edge_types,
                        hidden_size,
                        use_bias=True,
                        average_aggregation=False,
                        name="sparse_ggnn"):
    """One message-passing step for a GNN with a sparse adjacency matrix.

  Implements equation 2 (the message passing step) in
  [Li et al. 2015](https://arxiv.org/abs/1511.05493).

  N = The number of nodes in each batch.
  H = The size of the hidden states.
  T = The number of edge types.

  Args:
    node_states: Initial states of each node in the graph. Shape is [N, H].
    adjacency_matrices: Adjacency matrix of directed edges for each edge
      type. Shape is [N, N, T] (sparse tensor).
    num_edge_types: The number of edge types. T.
    hidden_size: The size of the hidden state. H.
    use_bias: Whether to use bias in the hidden layer.
    average_aggregation: How to aggregate the incoming node messages. If
      average_aggregation is true, the messages are averaged. If it is false,
      they are summed.
    name: (optional) The scope within which tf variables should be created.

  Returns:
    The result of one step of Gated Graph Neural Network (GGNN) message passing.
    Shape: [N, H]
  """
    n = tf.shape(node_states)[0]
    t = num_edge_types
    incoming_edges_per_type = tf.sparse_reduce_sum(adjacency_matrices, axis=1)

    # Convert the adjacency matrix into shape [T, N, N] - one [N, N] adjacency
    # matrix for each edge type. Since sparse tensor multiplication only supports
    # two-dimensional tensors, we actually convert the adjacency matrix into a
    # [T * N, N] tensor.
    adjacency_matrices = tf.sparse_transpose(adjacency_matrices, [2, 0, 1])
    adjacency_matrices = tf.sparse_reshape(adjacency_matrices, [t * n, n])

    # Multiply the adjacency matrix by the node states, producing a [T * N, H]
    # tensor. For each (edge type, node) pair, this tensor stores the sum of
    # the hidden states of the node's neighbors over incoming edges of that type.
    messages = tf.sparse_tensor_dense_matmul(adjacency_matrices, node_states)

    # Rearrange this tensor to have shape [N, T * H]. The incoming states of
    # each node's neighbors are summed by edge type and then concatenated
    # together into a single T * H vector.
    messages = tf.reshape(messages, [t, n, hidden_size])
    messages = tf.transpose(messages, [1, 0, 2])
    messages = tf.reshape(messages, [n, t * hidden_size])

    # Run each of those T * H vectors through a linear layer that produces
    # a vector of size H. This process is equivalent to running each H-sized
    # vector through a separate linear layer for each edge type and then adding
    # the results together.
    #
    # Note that, earlier on, we added together all of the states of neighbors
    # that were connected by edges of the same edge type. Since a linear layer
    # distributes over addition, this process was equivalent to running each
    # incoming edge through a linear layer separately and then adding
    # everything at the end.
    with tf.variable_scope(name, default_name="sparse_ggnn"):
        final_node_states = common_layers.dense(messages,
                                                hidden_size,
                                                use_bias=False)

        # Multiply the bias for each edge type by the number of incoming
        # edges of that edge type.
        if use_bias:
            bias = tf.get_variable("bias",
                                   initializer=tf.zeros([t, hidden_size]))
            final_node_states += tf.matmul(incoming_edges_per_type, bias)

        if average_aggregation:
            incoming_edges = tf.reduce_sum(incoming_edges_per_type,
                                           -1,
                                           keepdims=True)
            incoming_edges = tf.tile(incoming_edges, [1, hidden_size])
            final_node_states /= incoming_edges + 1e-7

    return tf.reshape(final_node_states, [n, hidden_size])
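
A minimal usage sketch follows. It assumes common_layers from tensor2tensor is importable (the function depends on it), and the index order [receiver, sender, edge_type] is my reading of the matmul above, in which row i of each per-type adjacency matrix selects the senders that node i aggregates:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Toy graph: N=3 nodes, T=2 edge types
adjacency = tf.SparseTensor(
    indices=[[0, 2, 0],   # node 0 receives from node 2 via edge type 0
             [1, 0, 0],   # node 1 receives from node 0 via edge type 0
             [2, 1, 1]],  # node 2 receives from node 1 via edge type 1
    values=[1.0, 1.0, 1.0],
    dense_shape=[3, 3, 2])

node_states = tf.random_normal([3, 4])  # N=3, H=4

new_states = sparse_message_pass(node_states, adjacency,
                                 num_edge_types=2, hidden_size=4)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(new_states).shape)  # (3, 4)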
Example #6
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

sp = tf.SparseTensor(indices=[[0, 0], [0, 2], [1, 1]],
                     values=[1, 1, 1],
                     dense_shape=[2, 3])
reduce_sum_sp = [tf.sparse_reduce_sum(sp),
                 tf.sparse_reduce_sum(sp, axis=1),
                 tf.sparse_reduce_sum(sp, axis=1, keepdims=True),
                 tf.sparse_reduce_sum(sp, axis=0),
                 tf.sparse_reduce_sum(sp, axis=0, keepdims=True)]
with tf.Session() as sess:
    print('***************************************')
    print(sess.run(reduce_sum_sp))
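
For reference, the dense form of sp is [[1, 0, 1], [0, 1, 0]], so the five reductions evaluate to 3, [2 1], [[2] [1]], [1 1 1] and [[1 1 1]] respectively.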