Example #1
def get_function(points, mu, sigma):  # f_ik [n,k]
    # n (points), k (clusters), p (dimensions) and coef = (2*pi)**(-p/2)
    # are assumed to be defined in the enclosing scope.
    div = coef * tf.rsqrt(tf.batch_matrix_determinant(sigma))  # ((2pi)^p*|S_k|)^-1/2  [k]
    div = tf.tile(tf.reshape(div, [1, k]), [n, 1])  # [n,k]
    # Repeat each point k times consecutively so that row i*k+j pairs point i
    # with cluster j; tf.tile(points, [k, 1, 1]) would interleave the points
    # and mis-pair them with the tiled mu below.
    points = tf.reshape(tf.tile(points, [1, k, 1]), [n * k, p, 1])
    diff = tf.sub(points, tf.tile(mu, [n, 1, 1]))  # x_i-u_k [n*k, p, 1]
    sigma = tf.tile(sigma, [n, 1, 1])  # [n*k,p,p]
    exp = tf.exp(
        -0.5
        * tf.batch_matmul(tf.transpose(diff, perm=[0, 2, 1]), tf.batch_matmul(tf.batch_matrix_inverse(sigma), diff))
    )  # e^(-0.5*d'*S^-1*d)_ik [n*k, 1, 1]
    exp = tf.reshape(exp, [n, k])
    return tf.mul(div, exp)  # Multivariate normal density of each point under each cluster's parameters, hence the [n,k] shape.
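This snippet relies on n, k, p and coef being defined in the enclosing scope. A minimal sketch of that scope, with illustrative placeholder names and sizes (these are assumptions, not part of the original example):

import numpy as np
import tensorflow as tf

n, k, p = 100, 3, 2                  # points, clusters, dimensions (illustrative)
coef = (2 * np.pi) ** (-p / 2.0)     # constant factor of the Gaussian density

points = tf.placeholder(tf.float32, [n, p, 1])  # column vectors x_i
mu = tf.placeholder(tf.float32, [k, p, 1])      # cluster means u_k
sigma = tf.placeholder(tf.float32, [k, p, p])   # cluster covariances S_k

f = get_function(points, mu, sigma)             # [n, k] densities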
Example #3
 def _verifyInverse(self, x):
   for np_type in [np.float32, np.float64]:
     y = x.astype(np_type)
     with self.test_session():
       # Verify that x^{-1} * x == Identity matrix.
       if x.ndim == 2:
         inv = tf.matrix_inverse(y)
         tf_ans = tf.matmul(inv, y)
         np_ans = np.identity(y.shape[-1])
       else:
         inv = tf.batch_matrix_inverse(y)
         tf_ans = tf.batch_matmul(inv, y)
         tiling = list(y.shape)
         tiling[-2:] = [1, 1]
         np_ans = np.tile(np.identity(y.shape[-1]), tiling)
       out = tf_ans.eval()
     self.assertAllClose(np_ans, out)
     self.assertShapeEqual(y, tf_ans)
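The tiling trick above builds one identity matrix per batch element to compare against inv * y; a standalone illustration:

import numpy as np

y = np.zeros((4, 3, 3))           # a batch of four 3x3 matrices
tiling = list(y.shape)            # [4, 3, 3]
tiling[-2:] = [1, 1]              # [4, 1, 1]: repeat only along the batch axis
np_ans = np.tile(np.identity(3), tiling)
assert np_ans.shape == (4, 3, 3)  # one 3x3 identity per batch element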
  def Test(self):
    # shape, dtype and gc (assumed to be TensorFlow's internal gradient_checker
    # helper) are supplied by an enclosing test generator.
    with self.test_session():
      np.random.seed(1)
      m = np.random.uniform(low=1.0, high=100.0, size=np.prod(shape)).reshape(
          shape).astype(dtype)
      a = tf.constant(m)
      epsilon = np.finfo(dtype).eps
      # Optimal stepsize for central difference is O(epsilon^{1/3}).
      delta = epsilon ** (1.0 / 3.0)
      tol = 1e-3

      if len(shape) == 2:
        ainv = tf.matrix_inverse(a)
      else:
        ainv = tf.batch_matrix_inverse(a)

      theoretical, numerical = gc.ComputeGradient(a, shape, ainv, shape,
                                                  delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
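For a central difference, truncation error scales like delta^2 while floating-point round-off contributes roughly eps/delta, so the total error is minimized at delta ~ eps^(1/3). Concretely:

import numpy as np

for dt in (np.float32, np.float64):
    eps = np.finfo(dt).eps
    print(dt.__name__, eps, eps ** (1.0 / 3.0))

# float32: eps ~ 1.19e-07 -> delta ~ 4.9e-03
# float64: eps ~ 2.22e-16 -> delta ~ 6.1e-06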
Example #6
    def Test(self):
        # shape_ and dtype_ are bound by an enclosing test generator
        # (see the sketch after this example).
        with self.test_session():
            np.random.seed(1)
            m = np.random.uniform(
                low=1.0, high=100.0,
                size=np.prod(shape_)).reshape(shape_).astype(dtype_)
            a = tf.constant(m)
            epsilon = np.finfo(dtype_).eps
            # Optimal stepsize for central difference is O(epsilon^{1/3}).
            delta = epsilon**(1.0 / 3.0)
            tol = 1e-3

            if len(shape_) == 2:
                ainv = tf.matrix_inverse(a)
            else:
                ainv = tf.batch_matrix_inverse(a)

            theoretical, numerical = tf.test.compute_gradient(a,
                                                              shape_,
                                                              ainv,
                                                              shape_,
                                                              delta=delta)
            self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
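Here shape_ and dtype_ are closed over by a test generator; a minimal sketch of that pattern (the generator and class names are illustrative, not from the original source):

import numpy as np
import tensorflow as tf

def _MakeInverseGradientTest(dtype_, shape_):
    def Test(self):
        # ... body as in the example above; dtype_ and shape_ are
        # captured from this enclosing function.
        pass
    return Test

class MatrixInverseGradientTest(tf.test.TestCase):
    pass

for dtype_ in (np.float32, np.float64):
    for shape_ in ((3, 3), (2, 4, 4)):
        name = 'testInverseGradient_%s_%s' % (
            dtype_.__name__, 'x'.join(str(d) for d in shape_))
        setattr(MatrixInverseGradientTest, name,
                _MakeInverseGradientTest(dtype_, shape_))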
Example #7
def calc_snf_loss_tf(point, hyperplanes, variances, weights):
    # k (number of hyperplanes) and m (dimensionality) are assumed to be
    # defined in the enclosing scope.
    #variances = tf.maximum(variances,1e-6) # Avoid NaN errors
    # Calculate the distance of the point from each hyperplane
    hyperplanes = tf.reshape(hyperplanes, [k, m, m])
    hp_inv = tf.batch_matrix_inverse(hyperplanes)  # [k,m,m]
    x = tf.ones((k, m, 1))
    a = tf.batch_matmul(hp_inv, x)  # [k,m,1]
    point = tf.reshape(point, [m, 1])
    a = tf.reshape(a, [k, m])
    D = tf.matmul(a, point) - 1
    D = tf.reshape(D, [k])
    norm = tf.sqrt(tf.reduce_sum(tf.square(a), reduction_indices=[1]))  # [k]
    D /= norm  #tf.maximum(norm,1e-6) # [k]

    losses = tf.square(D)  # [k]
    losses /= -2 * variances  # [k]
    losses = -tf.exp(losses)  # [k]
    var_coeffs = 1 / tf.sqrt(2 * np.pi * variances)  # Gaussian normalization, [k]
    losses *= var_coeffs  # [k]
    losses *= weights  # element-wise [k]

    loss = tf.reduce_mean(losses)  # Average over the hyperplanes
    return loss
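A hypothetical call site for the function above, with illustrative values for the free variables k and m:

import tensorflow as tf

k, m = 4, 3  # illustrative

point = tf.placeholder(tf.float32, [m])
hyperplanes = tf.placeholder(tf.float32, [k, m, m])
variances = tf.placeholder(tf.float32, [k])
weights = tf.placeholder(tf.float32, [k])

loss = calc_snf_loss_tf(point, hyperplanes, variances, weights)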
Example #8
# tf.matrix_determinant
x = np.random.rand(5, 5)
z_matrix_determinant = tf.matrix_determinant(x)

# tf.batch_matrix_determinant
batch_x = np.random.rand(10, 5, 5)
z_batch_matrix_determinant = tf.batch_matrix_determinant(batch_x)

# tf.matrix_inverse
x = np.random.rand(10, 10)
z_matrix_inverse = tf.matrix_inverse(x)

# tf.batch_matrix_inverse
batch_x = np.random.rand(10, 5, 5)
z_batch_matrix_inverse = tf.batch_matrix_inverse(batch_x)

# tf.cholesky (input must be symmetric positive definite)
x = np.random.rand(10, 10)
x = np.dot(x, x.T) + 10 * np.eye(10)  # make x symmetric positive definite
z_cholesky = tf.cholesky(x)

# tf.batch_cholesky
batch_x = np.random.rand(10, 5, 5)
batch_x = np.matmul(batch_x, batch_x.transpose(0, 2, 1)) + 5 * np.eye(5)  # SPD batch
z_batch_cholesky = tf.batch_cholesky(batch_x)

# tf.self_adjoint_eig (input must be square and self-adjoint)
x = np.random.rand(10, 10)
x = (x + x.T) / 2  # symmetrize
z_self_adjoint_eig = tf.self_adjoint_eig(x)

# tf.batch_self_adjoint_eig
batch_x = np.random.rand(10, 5, 5)
batch_x = (batch_x + batch_x.transpose(0, 2, 1)) / 2  # symmetrize each matrix
z_batch_self_adjoint_eig = tf.batch_self_adjoint_eig(batch_x)
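Note that these batch_* ops were removed in TensorFlow 1.0: the base ops (tf.matmul, tf.matrix_determinant, tf.matrix_inverse, tf.cholesky) accept inputs with arbitrary leading batch dimensions, so under TF >= 1.0 the batched calls above become simply:

z_batch_matrix_determinant = tf.matrix_determinant(batch_x)
z_batch_matrix_inverse = tf.matrix_inverse(batch_x)
z_batch_cholesky = tf.cholesky(batch_x)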
Example #9
def test_BatchMatrixInverse(self):
    # self.random and self.check are helpers from the surrounding test harness.
    t = tf.batch_matrix_inverse(self.random(2, 3, 4, 3, 3))
    self.check(t)