Example #1
 def CheckApproximation(self, a, u, s, v, full_matrices):
     if dtype_ in (np.float32, np.complex64):
         tol = 1e-5
     else:
         tol = 1e-14
     # Tests that a ~= u*diag(s)*transpose(v).
     batch_shape = a.shape[:-2]
     m = a.shape[-2]
     n = a.shape[-1]
     diag_s = tf.cast(tf.batch_matrix_diag(s), dtype=dtype_)
     if full_matrices:
         if m > n:
             zeros = tf.zeros(batch_shape + (m - n, n), dtype=dtype_)
             diag_s = tf.concat(a.ndim - 2, [diag_s, zeros])
         elif n > m:
             zeros = tf.zeros(batch_shape + (m, n - m), dtype=dtype_)
             diag_s = tf.concat(a.ndim - 1, [diag_s, zeros])
     a_recon = tf.batch_matmul(tf.cast(u, dtype=dtype_),
                               tf.cast(diag_s, dtype=dtype_))
     a_recon = tf.batch_matmul(a_recon,
                               tf.cast(v, dtype=dtype_),
                               adj_y=True)
     self.assertAllClose(np.real(a_recon.eval()),
                         np.real(a),
                         rtol=tol,
                         atol=tol)
     self.assertAllClose(np.imag(a_recon.eval()),
                         np.imag(a),
                         rtol=tol,
                         atol=tol)
Example #2
  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    a = np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
    a += a.T
    a = np.tile(a, batch_shape + (1, 1))
    if dtype_ == np.float32:
      atol = 1e-4
    else:
      atol = 1e-12
    for compute_v in False, True:
      np_e, np_v = np.linalg.eig(a)
      with self.test_session():
        if compute_v:
          tf_e, tf_v = tf.self_adjoint_eig(tf.constant(a))

          # Check that V*diag(E)*V^T is close to A.
          a_ev = tf.batch_matmul(
              tf.batch_matmul(tf_v, tf.batch_matrix_diag(tf_e)),
              tf_v,
              adj_y=True)
          self.assertAllClose(a_ev.eval(), a, atol=atol)

          # Compare to numpy.linalg.eig.
          CompareEigenDecompositions(self, np_e, np_v, tf_e.eval(), tf_v.eval(),
                                     atol)
        else:
          tf_e = tf.self_adjoint_eigvals(tf.constant(a))
          self.assertAllClose(
              np.sort(np_e, -1), np.sort(tf_e.eval(), -1), atol=atol)
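The reconstruction check above is easy to reproduce outside TensorFlow. A minimal NumPy sketch of the same V*diag(E)*V^T test (the matrix here is an arbitrary small symmetric example, not taken from the test above):

import numpy as np

np.random.seed(1)
n = 4
a = np.random.uniform(-1.0, 1.0, size=(n, n))
a += a.T  # symmetrize so that eigh applies

e, v = np.linalg.eigh(a)              # eigenvalues e, eigenvectors as columns of v
a_recon = v.dot(np.diag(e)).dot(v.T)  # V * diag(E) * V^T
assert np.allclose(a_recon, a, atol=1e-12)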
Example #3
    def build_predict(self, Xnew, full_cov=False):
        """
        The posterior variance of F is given by

            q(f) = N(f | K alpha + mean, [K^-1 + diag(lambda**2)]^-1)

        Here we project this to F*, the values of the GP at Xnew which is given
        by

           q(F*) = N ( F* | K_{*F} alpha + mean, K_{**} - K_{*f}[K_{ff} +
                                           diag(lambda**-2)]^-1 K_{f*} )

        """

        # compute kernel matrices
        Kx = self.kern.K(self.X, Xnew)
        K = self.kern.K(self.X)

        # predictive mean
        f_mean = tf.matmul(tf.transpose(Kx), self.q_alpha) + self.mean_function(Xnew)

        # predictive var
        A = K + tf.batch_matrix_diag(tf.transpose(1./tf.square(self.q_lambda)))
        L = tf.batch_cholesky(A)
        Kx_tiled = tf.tile(tf.expand_dims(Kx, 0), [self.num_latent, 1, 1])
        LiKx = tf.batch_matrix_triangular_solve(L, Kx_tiled)
        if full_cov:
            f_var = self.kern.K(Xnew) - tf.batch_matmul(LiKx, LiKx, adj_x=True)
        else:
            f_var = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(LiKx), 1)
        return f_mean, tf.transpose(f_var)
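For a single latent function, the projection in the docstring above can be written directly in NumPy. The sketch below is illustrative only and assumes the kernel quantities K, Kx and Kdiag_new are supplied by the caller; it is not part of the original model class:

import numpy as np

def predict_sketch(K, Kx, Kdiag_new, q_alpha, q_lambda, mean_new):
    # Predictive mean: K_{*f} alpha + mean.
    f_mean = Kx.T.dot(q_alpha) + mean_new
    # A = K_{ff} + diag(lambda**-2); factor it and solve against K_{f*}.
    A = K + np.diag(1.0 / np.square(q_lambda))
    L = np.linalg.cholesky(A)
    LiKx = np.linalg.solve(L, Kx)  # a triangular solve would be used in practice
    # Marginal predictive variance: K_{**,diag} - column sums of (L^{-1} K_{f*})**2.
    f_var = Kdiag_new - np.sum(np.square(LiKx), axis=0)
    return f_mean, f_var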
Example #4
 def testVector(self):
     with self.test_session(use_gpu=self._use_gpu):
         v = np.array([1.0, 2.0, 3.0])
         mat = np.diag(v)
         v_diag = tf.batch_matrix_diag(v)
         self.assertEqual((3, 3), v_diag.get_shape())
         self.assertAllEqual(v_diag.eval(), mat)
Example #5
 def testVector(self):
   with self.test_session(use_gpu=self._use_gpu):
     v = np.array([1.0, 2.0, 3.0])
     mat = np.diag(v)
     v_diag = tf.batch_matrix_diag(v)
     self.assertEqual((3, 3), v_diag.get_shape())
     self.assertAllEqual(v_diag.eval(), mat)
Example #6
    def Test(self):
        np.random.seed(1)
        n = shape_[-1]
        batch_shape = shape_[:-2]
        a = np.random.uniform(low=-1.0, high=1.0,
                              size=n * n).reshape([n, n]).astype(dtype_)
        a += a.T
        a = np.tile(a, batch_shape + (1, 1))
        if dtype_ == np.float32:
            atol = 1e-4
        else:
            atol = 1e-12
        for compute_v in False, True:
            np_e, np_v = np.linalg.eig(a)
            with self.test_session():
                if compute_v:
                    tf_e, tf_v = tf.self_adjoint_eig(tf.constant(a))

                    # Check that V*diag(E)*V^T is close to A.
                    a_ev = tf.batch_matmul(tf.batch_matmul(
                        tf_v, tf.batch_matrix_diag(tf_e)),
                                           tf_v,
                                           adj_y=True)
                    self.assertAllClose(a_ev.eval(), a, atol=atol)

                    # Compare to numpy.linalg.eig.
                    CompareEigenDecompositions(self, np_e, np_v, tf_e.eval(),
                                               tf_v.eval(), atol)
                else:
                    tf_e = tf.self_adjoint_eigvals(tf.constant(a))
                    self.assertAllClose(np.sort(np_e, -1),
                                        np.sort(tf_e.eval(), -1),
                                        atol=atol)
Example #7
 def testGrad(self):
   shapes = ((3,), (7, 4))
   with self.test_session(use_gpu=self._use_gpu):
     for shape in shapes:
       x = tf.constant(np.random.rand(*shape), np.float32)
       y = tf.batch_matrix_diag(x)
       error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                              y, y.get_shape().as_list())
       self.assertLess(error, 1e-4)
Example #8
 def testGrad(self):
   shapes = ((3,), (7, 4))
   with self.test_session(use_gpu=self._use_gpu):
     for shape in shapes:
       x = tf.constant(np.random.rand(*shape), np.float32)
       y = tf.batch_matrix_diag(x)
       error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                              y, y.get_shape().as_list())
       self.assertLess(error, 1e-4)
Example #9
 def _build_operator_and_mat(self, batch_shape, k, dtype=np.float64):
     # Build an identity matrix with right shape and dtype.
     # Build an operator that should act the same way.
     batch_shape = list(batch_shape)
     diag_shape = batch_shape + [k]
     matrix_shape = batch_shape + [k, k]
     diag = tf.ones(diag_shape, dtype=dtype)
     identity_matrix = tf.batch_matrix_diag(diag)
     operator = operator_pd_identity.OperatorPDIdentity(matrix_shape, dtype)
     return operator, identity_matrix.eval()
Example #10
    def testSample(self):
        mu = [-1.0, 1.0]
        diag = [1.0, 2.0]
        with self.test_session():
            dist = distributions.MultivariateNormalDiag(mu, diag)
            samps = dist.sample_n(1000, seed=0).eval()
            cov_mat = tf.batch_matrix_diag(diag).eval()**2

            self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)
            self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)
Example #11
 def _build_operator_and_mat(self, batch_shape, k, dtype=np.float64):
     # Build an identity matrix with right shape and dtype.
     # Build an operator that should act the same way.
     batch_shape = list(batch_shape)
     diag_shape = batch_shape + [k]
     matrix_shape = batch_shape + [k, k]
     diag = tf.ones(diag_shape, dtype=dtype)
     identity_matrix = tf.batch_matrix_diag(diag)
     operator = operator_pd_identity.OperatorPDIdentity(matrix_shape, dtype)
     return operator, identity_matrix.eval()
Example #12
  def testSample(self):
    mu = [-1.0, 1.0]
    diag = [1.0, 2.0]
    with self.test_session():
      dist = distributions.MultivariateNormalDiag(mu, diag)
      samps = dist.sample_n(1000, seed=0).eval()
      cov_mat = tf.batch_matrix_diag(diag).eval() ** 2

      self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)
      self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)
Example #13
 def testBatchVector(self):
     with self.test_session(use_gpu=self._use_gpu):
         v_batch = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
         mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0],
                                [0.0, 0.0, 3.0]],
                               [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
                                [0.0, 0.0, 6.0]]])
         v_batch_diag = tf.batch_matrix_diag(v_batch)
         self.assertEqual((2, 3, 3), v_batch_diag.get_shape())
         self.assertAllEqual(v_batch_diag.eval(), mat_batch)
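tf.batch_matrix_diag places the last axis of its input on the diagonal of a new trailing matrix dimension; later TensorFlow releases expose the same operation as tf.matrix_diag (and tf.linalg.diag). A small NumPy equivalent, offered as a sketch rather than a reference implementation:

import numpy as np

def batch_matrix_diag(v):
    # Place the last axis of `v` on the diagonal of a trailing [k, k] matrix.
    out = np.zeros(v.shape + (v.shape[-1],), dtype=v.dtype)
    idx = np.arange(v.shape[-1])
    out[..., idx, idx] = v
    return out

v_batch = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
assert batch_matrix_diag(v_batch).shape == (2, 3, 3)
assert np.allclose(batch_matrix_diag(v_batch)[0], np.diag(v_batch[0]))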
Example #14
def vec2lower_triangle(vec, dim):
    """
    Convert a vector M of size (n * m) into a matrix of shape (n, m)
    [[e^M[0],    0,           0,             ...,    0]
     [M[n-1],    e^M[n],      0,      0,     ...,    0]
     [M[2n-1],   M[2n],       e^M[2n+1], 0,  ...,    0]
     ...
     [M[m(n-1)], M[m(n-1)+1], ...,       M[mn-2], e^M[mn-1]]
    """
    L = tf.reshape(vec, [-1, dim, dim])
    if int(tf.__version__.split('.')[1]) >= 10:
        L = tf.matrix_band_part(L, -1, 0) - tf.matrix_diag(
            tf.matrix_diag_part(L)) + tf.matrix_diag(
                tf.exp(tf.matrix_diag_part(L)))
    else:
        L = tf.batch_matrix_band_part(L, -1, 0) - tf.batch_matrix_diag(
            tf.batch_matrix_diag_part(L)) + tf.batch_matrix_diag(
                tf.exp(tf.batch_matrix_diag_part(L)))
    return L
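A short usage sketch for vec2lower_triangle, with arbitrary input values and assuming the same pre-1.0 TensorFlow release used elsewhere in these examples; the result is a batch of lower-triangular matrices with a positive (exponentiated) diagonal:

import numpy as np
import tensorflow as tf

dim = 3
vec = tf.constant(np.arange(dim * dim, dtype=np.float32))  # one dim*dim vector
L = vec2lower_triangle(vec, dim)                           # shape [1, dim, dim]
with tf.Session() as sess:
    print(sess.run(L))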
Example #15
 def testBatchVector(self):
   with self.test_session(use_gpu=self._use_gpu):
     v_batch = np.array([[1.0, 2.0, 3.0],
                         [4.0, 5.0, 6.0]])
     mat_batch = np.array(
         [[[1.0, 0.0, 0.0],
           [0.0, 2.0, 0.0],
           [0.0, 0.0, 3.0]],
          [[4.0, 0.0, 0.0],
           [0.0, 5.0, 0.0],
           [0.0, 0.0, 6.0]]])
     v_batch_diag = tf.batch_matrix_diag(v_batch)
     self.assertEqual((2, 3, 3), v_batch_diag.get_shape())
     self.assertAllEqual(v_batch_diag.eval(), mat_batch)
Example #16
  def _updated_mat(self, mat, v, diag):
    # Get dense matrix defined by its square root, which is an update of `mat`:
    # A = (mat + v D v^T) (mat + v D v^T)^T
    # D is the diagonal matrix with `diag` on the diagonal.

    # If diag is None, then it defaults to the identity matrix, so DV^T = V^T
    if diag is None:
      diag_vt = tf.batch_matrix_transpose(v)
    else:
      diag_mat = tf.batch_matrix_diag(diag)
      diag_vt = tf.batch_matmul(diag_mat, v, adj_y=True)

    v_diag_vt = tf.batch_matmul(v, diag_vt)
    sqrt = mat + v_diag_vt
    a = tf.batch_matmul(sqrt, sqrt, adj_y=True)
    return a.eval()
Example #17
    def _updated_mat(self, mat, v, diag):
        # Get dense matrix defined by its square root, which is an update of `mat`:
        # A = (mat + v D v^T) (mat + v D v^T)^T
        # D is the diagonal matrix with `diag` on the diagonal.

        # If diag is None, then it defaults to the identity matrix, so DV^T = V^T
        if diag is None:
            diag_vt = tf.batch_matrix_transpose(v)
        else:
            diag_mat = tf.batch_matrix_diag(diag)
            diag_vt = tf.batch_matmul(diag_mat, v, adj_y=True)

        v_diag_vt = tf.batch_matmul(v, diag_vt)
        sqrt = mat + v_diag_vt
        a = tf.batch_matmul(sqrt, sqrt, adj_y=True)
        return a.eval()
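The square-root update A = (mat + v D v^T)(mat + v D v^T)^T computed above can also be written as a dense NumPy reference; a minimal non-batched sketch:

import numpy as np

def updated_mat_dense(mat, v, diag=None):
    # D defaults to the identity when `diag` is None, so D V^T = V^T.
    d_vt = v.T if diag is None else np.diag(diag).dot(v.T)
    sqrt = mat + v.dot(d_vt)
    return sqrt.dot(sqrt.T)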
Example #18
 def CheckApproximation(self, a, u, s, v, full_matrices, atol):
   # Tests that a ~= u*diag(s)*transpose(v).
   batch_shape = a.shape[:-2]
   m = a.shape[-2]
   n = a.shape[-1]
   diag_s = tf.batch_matrix_diag(s)
   if full_matrices:
     if m > n:
       zeros = tf.zeros(batch_shape + (m - n, n), dtype=dtype_)
       diag_s = tf.concat(a.ndim - 2, [diag_s, zeros])
     elif n > m:
       zeros = tf.zeros(batch_shape + (m, n - m), dtype=dtype_)
       diag_s = tf.concat(a.ndim - 1, [diag_s, zeros])
   a_recon = tf.batch_matmul(u, diag_s)
   a_recon = tf.batch_matmul(a_recon, v, adj_y=True)
   self.assertAllClose(a_recon.eval(), a, atol=atol)
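The same a ~= u*diag(s)*transpose(v) check in plain NumPy; this sketch uses an economy-size SVD, so the zero-padding branch for full_matrices is not needed:

import numpy as np

a = np.random.rand(5, 3)
u, s, vt = np.linalg.svd(a, full_matrices=False)  # vt is already v transposed
a_recon = u.dot(np.diag(s)).dot(vt)
assert np.allclose(a_recon, a, atol=1e-12)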
Example #19
 def _sample(self, N):
     """
     :param integer N: number of samples
     :Returns
      samples picked from the variational posterior.
      The Kullback-Leibler divergence is stored as self._KL
     """
     n = self.num_data
     R = self.num_latent
     # Match dimension of the posterior variance to the data.
     if self.q_diag:
         sqrt = tf.batch_matrix_diag(tf.transpose(self.q_sqrt)) # [R,n,n]
     else:
         sqrt = tf.batch_matrix_band_part(
                         tf.transpose(self.q_sqrt,[2,0,1]), -1, 0) # [R,n,n]
     # Log determinant of matrix S = q_sqrt * q_sqrt^T
     logdet_S = tf.cast(N, float_type)*tf.reduce_sum(
             tf.log(tf.square(tf.batch_matrix_diag_part(sqrt))))
     sqrt = tf.tile(tf.expand_dims(sqrt, 1), [1,N,1,1]) # [R,N,n,n]
     # normal random samples, [R,N,n,1]
     v_samples = tf.random_normal([R,N,n,1], dtype=float_type)
     # Match dimension of the posterior mean, [R,N,n,1]
     mu = tf.tile(tf.expand_dims(tf.expand_dims(
                             tf.transpose(self.q_mu), 1), -1), [1,N,1,1])
     u_samples = mu + tf.batch_matmul(sqrt, v_samples)
     # Stochastic approximation of the Kullback-Leibler divergence KL[q(f)||p(f)]
     self._KL = - 0.5 * logdet_S\
          - 0.5 * tf.reduce_sum(tf.square(v_samples)) \
          + 0.5 * tf.reduce_sum(tf.square(u_samples))
     # Cholesky factor of kernel [R,N,n,n]
     L = tf.tile(tf.expand_dims(
             tf.transpose(self.kern.Cholesky(self.X), [2,0,1]),1), [1,N,1,1])
     # mean, sized [N,n,R]
     mean = tf.tile(tf.expand_dims(
                 self.mean_function(self.X),
             0), [N,1,1])
     # sample from posterior, [N,n,R]
     f_samples = tf.transpose(
             tf.squeeze(tf.batch_matmul(L, u_samples),[-1]), # [R,N,n]
             [1,2,0]) + mean
     # return the posterior samples, shaped [N, n, R]
     return f_samples
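The reparameterised draw u = mu + sqrt * v at the heart of _sample can be sketched in NumPy for a single latent function with a diagonal posterior (q_diag=True); the shapes and values below are illustrative, not taken from the model:

import numpy as np

n, N = 5, 1000              # data points, number of samples
q_mu = np.zeros(n)          # posterior mean
q_sqrt = 0.3 * np.ones(n)   # per-point posterior standard deviation
sqrt = np.diag(q_sqrt)      # analogue of tf.batch_matrix_diag(q_sqrt)
v = np.random.randn(N, n)   # standard normal samples
u = q_mu + v.dot(sqrt.T)    # u = mu + sqrt * v, one row per sample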
Example #20
 def CheckApproximation(self, a, u, s, v, full_matrices):
     if is_single:
         tol = 1e-5
     else:
         tol = 1e-14
     # Tests that a ~= u*diag(s)*transpose(v).
     batch_shape = a.shape[:-2]
     m = a.shape[-2]
     n = a.shape[-1]
     diag_s = tf.cast(tf.batch_matrix_diag(s), dtype=dtype_)
     if full_matrices:
         if m > n:
             zeros = tf.zeros(batch_shape + (m - n, n), dtype=dtype_)
             diag_s = tf.concat(a.ndim - 2, [diag_s, zeros])
         elif n > m:
             zeros = tf.zeros(batch_shape + (m, n - m), dtype=dtype_)
             diag_s = tf.concat(a.ndim - 1, [diag_s, zeros])
     a_recon = tf.batch_matmul(u, diag_s)
     a_recon = tf.batch_matmul(a_recon, v, adj_y=True)
     self.assertAllClose(a_recon.eval(), a, rtol=tol, atol=tol)
Example #21
 def CheckApproximation(self, a, u, s, v, full_matrices):
     if dtype_ in (np.float32, np.complex64):
         tol = 1e-5
     else:
         tol = 1e-14
     # Tests that a ~= u*diag(s)*transpose(v).
     batch_shape = a.shape[:-2]
     m = a.shape[-2]
     n = a.shape[-1]
     diag_s = tf.cast(tf.batch_matrix_diag(s), dtype=dtype_)
     if full_matrices:
         if m > n:
             zeros = tf.zeros(batch_shape + (m - n, n), dtype=dtype_)
             diag_s = tf.concat(a.ndim - 2, [diag_s, zeros])
         elif n > m:
             zeros = tf.zeros(batch_shape + (m, n - m), dtype=dtype_)
             diag_s = tf.concat(a.ndim - 1, [diag_s, zeros])
     a_recon = tf.batch_matmul(tf.cast(u, dtype=dtype_), tf.cast(diag_s, dtype=dtype_))
     a_recon = tf.batch_matmul(a_recon, tf.cast(v, dtype=dtype_), adj_y=True)
     self.assertAllClose(np.real(a_recon.eval()), np.real(a), rtol=tol, atol=tol)
     self.assertAllClose(np.imag(a_recon.eval()), np.imag(a), rtol=tol, atol=tol)
Example #22
 def _diag_to_matrix(self, diag):
   return tf.batch_matrix_diag(diag**2).eval()
Example #23
def vec2trimat(vec, dim):
    L = tf.reshape(vec, [-1, dim, dim])
    L = tf.batch_matrix_band_part(L, -1, 0) - tf.batch_matrix_diag(tf.batch_matrix_diag_part(L)) + \
        tf.batch_matrix_diag(tf.exp(tf.batch_matrix_diag_part(L)))
    return L
Example #24
def GNN(signal_dim = 10, batch_size = 2, SD=1, communities=2, group_size=10, 
    p_min = 0.5, p_max = 0.5, Mean = 1, Mean_signal=1, l_rate = 0.0000001, Size=10):


    """ First implement of GNN"""
    dim = communities*group_size

    DATA = [np.asarray(balanced_stochastic_blockmodel(communities, group_size, p, 0.1*p)).astype(np.double) for p in np.linspace(p_min, p_max, Size)]
    np.random.shuffle(DATA)
    Signal = SD*np.random.randn(signal_dim, dim) + Mean_signal
    TRUE_A = np.append(np.zeros([batch_size, group_size], dtype=float),np.ones([batch_size, group_size], dtype=float), axis = 1)
    TRUE_B = 1-TRUE_A

    Adj = tf.placeholder(dtype=tf.float32, shape=[None, communities*group_size, communities*group_size])
    Adj_mod = tf.reshape(tf.transpose(Adj, perm = [1,0,2]), [dim, batch_size*dim])#preparing it to be multiplied by F to broadcast

    F = tf.placeholder(dtype=tf.float32, shape = [signal_dim, dim])

    #first diffusion step without cascading (unnormed version)
    Diff_1 = tf.reshape(tf.transpose(tf.matmul(F, Adj_mod)), shape=[batch_size, dim, signal_dim]) #shape=[batch_size, signal_dim, dim])

    diag_inv = tf.div(1.0, tf.reduce_sum(Adj, 2))
    diag_inv_batch = tf.batch_matrix_diag(diag_inv) #to use in subsequent layers
    Diag_1 = tf.mul(tf.expand_dims(diag_inv, 1), F)


    C_a = tf.Variable(tf.random_normal([signal_dim, signal_dim], stddev=1.0, mean=Mean))
    C_b = tf.Variable(tf.random_normal([signal_dim, signal_dim], stddev=1.0, mean=Mean))

    #treat this as the new Adj_mod
    A1 = tf.matmul(C_a, tf.reshape(tf.transpose(Diag_1, perm=[1, 0,2]), [signal_dim, batch_size*dim]))
    B1 = tf.matmul(C_b, tf.reshape(tf.transpose(Diff_1, perm=[2, 0,1]), [signal_dim, batch_size*dim]))

    #transform it back into the 3-D tensor it is
    #Psi_1 = tf.transpose(tf.reshape(A1 + B1, shape = [signal_dim, batch_size, dim]), perm=[1,0,2])
    #relu also added
    Psi_1 = tf.transpose(tf.reshape(tf.nn.relu(A1 + B1), shape = [signal_dim, batch_size, dim]), perm=[1,2,0])

    ###################
    ###################
    Diff_2 = tf.batch_matmul(Adj, Psi_1)
    Diag_2 = tf.batch_matmul(diag_inv_batch, Psi_1)
    #we change the constants for now but let's keep these the same for another model
    C_a_1 = tf.Variable(tf.random_normal([signal_dim, signal_dim], stddev=1.0, mean=Mean))
    C_b_1 = tf.Variable(tf.random_normal([signal_dim, signal_dim], stddev=1.0, mean=Mean))

    A2 = tf.matmul(C_a_1, tf.reshape(tf.transpose(Diag_2, perm=[2, 0,1]), [signal_dim, batch_size*dim]))
    B2 = tf.matmul(C_b_1, tf.reshape(tf.transpose(Diff_2, perm=[2, 0,1]), [signal_dim, batch_size*dim]))

    Psi_2 = tf.transpose(tf.reshape(tf.nn.relu(A2 + B2), shape = [signal_dim, batch_size, dim]), perm=[1,2,0])

    ##################
    ##################
    Diff_3 = tf.batch_matmul(Adj, Psi_2)
    Diag_3 = tf.batch_matmul(diag_inv_batch, Psi_2)

    C_a_2 = tf.Variable(tf.random_normal([signal_dim, signal_dim], stddev=1.0, mean=Mean))
    C_b_2 = tf.Variable(tf.random_normal([signal_dim, signal_dim], stddev=1.0, mean=Mean))

    A3 = tf.matmul(C_a_2, tf.reshape(tf.transpose(Diag_3, perm=[2, 0,1]), [signal_dim, batch_size*dim]))
    B3 = tf.matmul(C_b_2, tf.reshape(tf.transpose(Diff_3, perm=[2, 0,1]), [signal_dim, batch_size*dim]))

    Psi_3 = tf.transpose(tf.reshape(tf.nn.relu(A3 + B3), shape = [signal_dim, batch_size, dim]), perm=[1,2,0])

    ##################
    ##################
    Diff_4 = tf.batch_matmul(Adj, Psi_3)
    Diag_4 = tf.batch_matmul(diag_inv_batch, Psi_3)

    C_a_3 = tf.Variable(tf.random_normal([signal_dim, signal_dim], stddev=1.0, mean=Mean))
    C_b_3 = tf.Variable(tf.random_normal([signal_dim, signal_dim], stddev=1.0, mean=Mean))

    A4 = tf.matmul(C_a_3, tf.reshape(tf.transpose(Diag_4, perm=[2, 0,1]), [signal_dim, batch_size*dim]))
    B4 = tf.matmul(C_b_3, tf.reshape(tf.transpose(Diff_4, perm=[2, 0,1]), [signal_dim, batch_size*dim]))

    Psi_4 = tf.transpose(tf.reshape(tf.nn.relu(A4 + B4), shape = [signal_dim, batch_size, dim]), perm=[1,2,0])


    ##################
    ##################
    Diff_5 = tf.batch_matmul(Adj, Psi_4)
    Diag_5 = tf.batch_matmul(diag_inv_batch, Psi_4)

    C_a_4 = tf.Variable(tf.random_normal([signal_dim, signal_dim], stddev=1.0, mean=Mean))
    C_b_4 = tf.Variable(tf.random_normal([signal_dim, signal_dim], stddev=1.0, mean=Mean))

    A5 = tf.matmul(C_a_4, tf.reshape(tf.transpose(Diag_5, perm=[2, 0,1]), [signal_dim, batch_size*dim]))
    B5 = tf.matmul(C_b_4, tf.reshape(tf.transpose(Diff_5, perm=[2, 0,1]), [signal_dim, batch_size*dim]))

    Psi_5 = tf.transpose(tf.reshape(tf.nn.relu(A5 + B5), shape = [signal_dim, batch_size, dim]), perm=[1,2,0])


    ##################
    ##################
    Diff_6 = tf.batch_matmul(Adj, Psi_5)
    Diag_6 = tf.batch_matmul(diag_inv_batch, Psi_5)

    C_a_5 = tf.Variable(tf.random_normal([signal_dim, signal_dim], stddev=1.0, mean=Mean))
    C_b_5 = tf.Variable(tf.random_normal([signal_dim, signal_dim], stddev=1.0, mean=Mean))

    A6 = tf.matmul(C_a_5, tf.reshape(tf.transpose(Diag_6, perm=[2, 0,1]), [signal_dim, batch_size*dim]))
    B6 = tf.matmul(C_b_5, tf.reshape(tf.transpose(Diff_6, perm=[2, 0,1]), [signal_dim, batch_size*dim]))

    Psi_6 = tf.transpose(tf.reshape(tf.nn.relu(A6 + B6), shape = [signal_dim, batch_size, dim]), perm=[1,2,0])

    ##################
    ##################

    #choose some way to combine the Psi_6 to get an estimate for the labelling (average?)
    #softmax it!
    #only reduce across the signals, we keep the batch size and n-dim
    #to get cross entropy and get rid of nans

    #initialize vector to reduce the 10-dim signal to a 1-dim signal
    B_reduce = tf.Variable(tf.random_normal([signal_dim, 1], stddev=1.0, mean=0.0))

    Y_hat = tf.nn.relu(batch_vm2(Psi_6, B_reduce))[:,:, 0]

    ##################
    ################## TRUE ASSIGNMENT

    #true_assignment_a = tf.expand_dims(tf.concat(0, [tf.zeros([group_size], dtype=tf.float32),
     #                                     tf.ones([group_size], dtype=tf.float32)]), 1)
    #true_assignment_b = tf.expand_dims(tf.concat(0, [tf.ones([group_size], dtype=tf.float32),
     #                                     tf.zeros([group_size], dtype=tf.float32)]), 1)

    true_assignment_a= tf.placeholder(dtype=tf.float32, shape = [batch_size, dim])
    true_assignment_b= tf.placeholder(dtype=tf.float32, shape = [batch_size, dim])
    
    a = tf.nn.softmax_cross_entropy_with_logits(Y_hat, true_assignment_a)
    b = tf.nn.softmax_cross_entropy_with_logits(Y_hat, true_assignment_b)

    loss_a = tf.reduce_sum(a)
    loss_b = tf.reduce_sum(b)
    loss = tf.minimum(loss_a, loss_b)

    # at this point, 2 flips is not enough; there may be a need to try all batch! (factorial) flips.

    
    optimizer = tf.train.AdamOptimizer(l_rate)
    train = optimizer.minimize(loss, var_list=[C_a, C_a_1, C_a_2, C_a_3, C_a_4, C_a_5, 
                                               C_b, C_b_1, C_b_2, C_b_3, C_b_4, C_b_5, B_reduce])


    init = tf.initialize_all_variables()

    with tf.Session() as sess:
        iterations = Size//batch_size
        sess.run(init)
        loss_lst = [None]*iterations
        lossA_lst = [None]*iterations
        lossB_lst = [None]*iterations
        #Psi_6_lst = []
        variable_lst = []
        #Cb_list = []

        for i in xrange(iterations):
            #sess.run(init_F)
            sess.run(train, feed_dict={Adj: DATA[i:i+batch_size], F: Signal, true_assignment_a: TRUE_A, true_assignment_b: TRUE_B})
            loss_printed, lossA, lossB = sess.run([loss, loss_a, loss_b],
             feed_dict={Adj: DATA[i:i+batch_size], F: Signal, true_assignment_a: TRUE_A, true_assignment_b: TRUE_B})
            print i, "loss", loss_printed
            
            loss_lst[i]= loss_printed
            lossA_lst[i]= lossA 
            lossB_lst[i]= lossB


            d = {"loss": loss_lst, "lossA": lossA_lst, "lossB": lossB_lst}

            d = pd.DataFrame(d)
            d.to_csv("/accounts/grad/janetlishali/clusternet/GNN_data/Size{}Mean{}l_rate{}Mean_signal{}group_size{}batch_size{}p_min{}p_max{}.csv".format(Size, Mean, l_rate, Mean_signal, group_size, batch_size, p_min, p_max))
            if i==iterations-1:
                print "these are the variables after training:"
                a, b, a1, a2, a3, a4, a5, b1, b2, b3, b4, b5, b_reduce = sess.run([C_a, C_b, C_a_1, C_a_2, C_a_3, C_a_4, C_a_5, C_b_1, C_b_2, C_b_3, C_b_4, C_b_5, 
                    B_reduce], feed_dict={Adj: DATA[i:i+batch_size], F: Signal, true_assignment_a: TRUE_A, true_assignment_b: TRUE_B})

                variable_lst = variable_lst+[a, b, a1, a2, a3, a4, a5, b1, b2, b3, b4, b5, b_reduce]
                d_var = {"vars_a_b_ai_bi_b_reduce": variable_lst, "header": ["Ca", "Cb", "Ca1", "Ca2", "Ca3", "Ca4", "Ca5", 
                "Cb1", "Cb2", "Cb3", "Cb4", "Cb5", "B_reduce"]}

                d_var = pd.DataFrame(d_var)
                d_var.to_csv("/accounts/grad/janetlishali/clusternet/GNN_data/ModelPARAMS_Size{}Mean{}l_rate{}Mean_signal{}group_size{}batch_size{}p_min{}p_max{}.csv".format(Size, Mean, l_rate, Mean_signal, group_size, batch_size, p_min, p_max))
                print d_var
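Each of the six numbered blocks above applies the same diffusion layer with fresh weights. A compact, non-batched NumPy sketch of one such layer (names and shapes are illustrative) may make the repeated pattern easier to follow:

import numpy as np

def gnn_layer(adj, psi, c_a, c_b):
    # adj: [dim, dim] adjacency, psi: [dim, signal_dim] node signals,
    # c_a, c_b: [signal_dim, signal_dim] learned mixing matrices.
    deg_inv = 1.0 / adj.sum(axis=1, keepdims=True)
    diag = deg_inv * psi   # degree-normalised signals, D^-1 Psi
    diff = adj.dot(psi)    # diffused signals, A Psi
    return np.maximum(diag.dot(c_a.T) + diff.dot(c_b.T), 0.0)  # ReLU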
Example #25
 def testInvalidShapeAtEval(self):
   with self.test_session(use_gpu=self._use_gpu):
     v = tf.placeholder(dtype=tf.float32)
     with self.assertRaisesOpError("input must be at least 1-dim"):
       tf.batch_matrix_diag(v).eval(feed_dict={v: 0.0})
Example #26
 def testInvalidShape(self):
     with self.assertRaisesRegexp(ValueError, "must have rank at least 1"):
         tf.batch_matrix_diag(0)
Example #27
 def testInvalidShapeAtEval(self):
     with self.test_session(use_gpu=self._use_gpu):
         v = tf.placeholder(dtype=tf.float32)
         with self.assertRaisesOpError("input must be at least 1-dim"):
             tf.batch_matrix_diag(v).eval(feed_dict={v: 0.0})
Example #28
 def testInvalidShape(self):
   with self.assertRaisesRegexp(ValueError, "must have rank at least 1"):
     tf.batch_matrix_diag(0)