Example #1
  def benchmarkQROp(self):
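    # Benchmark the QR op for each shape in self.shapes on the CPU, and on the GPU when one is available.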
    for shape_ in self.shapes:
      with ops.Graph().as_default(), \
          session.Session(config=benchmark.benchmark_config()) as sess, \
          ops.device("/cpu:0"):
        matrix_value = np.random.uniform(
            low=-1.0, high=1.0, size=shape_).astype(np.float32)
        matrix = variables.Variable(matrix_value)
        q, r = linalg_ops.qr(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(q, r),
            min_iters=25,
            name="QR_cpu_{shape}".format(shape=shape_))

      if test.is_gpu_available(True):
        with ops.Graph().as_default(), \
            session.Session(config=benchmark.benchmark_config()) as sess, \
            ops.device("/device:GPU:0"):
          matrix_value = np.random.uniform(
              low=-1.0, high=1.0, size=shape_).astype(np.float32)
          matrix = variables.Variable(matrix_value)
          q, r = linalg_ops.qr(matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(q, r),
              min_iters=25,
              name="QR_gpu_{shape}".format(shape=shape_))
Example #2
  def benchmarkQROp(self):
    for shape_ in self.shapes:
      with ops.Graph().as_default(), \
          session.Session(config=benchmark.benchmark_config()) as sess, \
          ops.device("/cpu:0"):
        matrix_value = np.random.uniform(
            low=-1.0, high=1.0, size=shape_).astype(np.float32)
        matrix = variables.Variable(matrix_value)
        q, r = linalg_ops.qr(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(q, r),
            min_iters=25,
            name="QR_cpu_{shape}".format(shape=shape_))

      if test.is_gpu_available(True):
        with ops.Graph().as_default(), \
            session.Session(config=benchmark.benchmark_config()) as sess, \
            ops.device("/device:GPU:0"):
          matrix_value = np.random.uniform(
              low=-1.0, high=1.0, size=shape_).astype(np.float32)
          matrix = variables.Variable(matrix_value)
          q, r = linalg_ops.qr(matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(q, r),
              min_iters=25,
              name="QR_gpu_{shape}".format(shape=shape_))
Example #3
 def testWrongDimensions(self):
   # The input to qr should be a tensor of at least rank 2.
   scalar = constant_op.constant(1.)
   with self.assertRaisesRegexp(ValueError,
                                "Shape must be at least rank 2 but is rank 0"):
     linalg_ops.qr(scalar)
   vector = constant_op.constant([1., 2.])
   with self.assertRaisesRegexp(ValueError,
                                "Shape must be at least rank 2 but is rank 1"):
     linalg_ops.qr(vector)
Example #4
 def testWrongDimensions(self):
     # The input to qr should be a tensor of at least rank 2.
     scalar = constant_op.constant(1.)
     with self.assertRaisesRegexp(
             ValueError, "Shape must be at least rank 2 but is rank 0"):
         linalg_ops.qr(scalar)
     vector = constant_op.constant([1., 2.])
     with self.assertRaisesRegexp(
             ValueError, "Shape must be at least rank 2 but is rank 1"):
         linalg_ops.qr(vector)
Example #5
 def testWrongDimensions(self):
     # The input to qr should be a tensor of at least rank 2.
     scalar = constant_op.constant(1.)
     with self.assertRaisesRegex(
         (ValueError, errors_impl.InvalidArgumentError), "rank.* 2.*0"):
         linalg_ops.qr(scalar)
     vector = constant_op.constant([1., 2.])
     with self.assertRaisesRegex(
         (ValueError, errors_impl.InvalidArgumentError), "rank.* 2.*1"):
         linalg_ops.qr(vector)
Example #6
  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # Check the shape
    if len(shape) < 2:
      raise ValueError("The tensor to initialize must be "
                       "at least two-dimensional")
    # Flatten the input shape with the last dimension remaining
    # its original shape so it works for conv2d
    num_rows = 1
    for dim in shape[:-1]:
      num_rows *= dim
    num_cols = shape[-1]
    flat_shape = (num_rows, num_cols)

    # Generate a random matrix
    a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
    # Compute the qr factorization
    q, r = linalg_ops.qr(a, full_matrices=False)
    # Make Q uniform
    square_len = math_ops.minimum(num_rows, num_cols)
    d = array_ops.diag_part(r[:square_len, :square_len])
    ph = d / math_ops.abs(d)
    q *= ph
    # Pad zeros to Q (if rows smaller than cols)
    if num_rows < num_cols:
      padding = array_ops.zeros([num_rows, num_cols - num_rows], dtype=dtype)
      q = array_ops.concat([q, padding], 1)
    return self.gain * array_ops.reshape(q, shape)
Example #7
    def __call__(self, shape, dtype=None, partition_info=None):
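        # Delta-orthogonal-style conv initializer: an orthogonal in_filters x out_filters
        # block is scattered into the spatial center of the kernel, with zeros elsewhere.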
        if dtype is None:
            dtype = self.dtype
        # Check the shape
        if len(shape) < 3 or len(shape) > 5:
            raise ValueError("The tensor to initialize must be at least "
                             "three-dimensional and at most five-dimensional")

        if shape[-2] > shape[-1]:
            raise ValueError("In_filters cannot be greater than out_filters.")

        # Generate a random matrix
        a = random_ops.random_normal([shape[-1], shape[-1]],
                                     dtype=dtype,
                                     seed=self.seed)
        # Compute the qr factorization
        q, r = linalg_ops.qr(a, full_matrices=False)
        # Make Q uniform
        d = array_ops.diag_part(r)
        q *= math_ops.sign(d)
        q = q[:shape[-2], :]
        q *= math_ops.sqrt(math_ops.cast(self.gain, dtype=dtype))
        if len(shape) == 3:
            weight = array_ops.scatter_nd([[(shape[0] - 1) // 2]],
                                          array_ops.expand_dims(q, 0), shape)
        elif len(shape) == 4:
            weight = array_ops.scatter_nd([[(shape[0] - 1) // 2,
                                            (shape[1] - 1) // 2]],
                                          array_ops.expand_dims(q, 0), shape)
        else:
            weight = array_ops.scatter_nd([[(shape[0] - 1) // 2,
                                            (shape[1] - 1) // 2,
                                            (shape[2] - 1) // 2]],
                                          array_ops.expand_dims(q, 0), shape)
        return weight
Example #8
 def Test(self):
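   # Compare the registered QR gradient (theoretical) against a numerical
   # central-difference estimate for both the Q and R outputs.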
   np.random.seed(42)
   a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
   if dtype_ in [np.complex64, np.complex128]:
     a += 1j * np.random.uniform(
         low=-1.0, high=1.0, size=shape_).astype(dtype_)
   # Optimal stepsize for central difference is O(epsilon^{1/3}).
   epsilon = np.finfo(dtype_).eps
   delta = 0.1 * epsilon**(1.0 / 3.0)
   if dtype_ in [np.float32, np.complex64]:
     tol = 3e-2
   else:
     tol = 1e-6
   with self.session(use_gpu=True):
     tf_a = constant_op.constant(a)
     tf_b = linalg_ops.qr(tf_a, full_matrices=full_matrices_)
     for b in tf_b:
       x_init = np.random.uniform(
           low=-1.0, high=1.0, size=shape_).astype(dtype_)
       if dtype_ in [np.complex64, np.complex128]:
         x_init += 1j * np.random.uniform(
             low=-1.0, high=1.0, size=shape_).astype(dtype_)
       theoretical, numerical = gradient_checker.compute_gradient(
           tf_a,
           tf_a.get_shape().as_list(),
           b,
           b.get_shape().as_list(),
           x_init_value=x_init,
           delta=delta)
       self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Example #9
    def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype
        # Check the shape
        if len(shape) < 2:
            raise ValueError("The tensor to initialize must be "
                             "at least two-dimensional")
        # Flatten the input shape with the last dimension remaining
        # its original shape so it works for conv2d
        num_rows = 1
        for dim in shape[:-1]:
            num_rows *= dim
        num_cols = shape[-1]
        flat_shape = (num_cols,
                      num_rows) if num_rows < num_cols else (num_rows,
                                                             num_cols)

        # Generate a random matrix
        a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
        # Compute the qr factorization
        q, r = linalg_ops.qr(a, full_matrices=False)
        # Make Q uniform
        d = array_ops.diag_part(r)
        ph = d / math_ops.abs(d)
        q *= ph
        if num_rows < num_cols:
            q = array_ops.matrix_transpose(q)
        return self.gain * array_ops.reshape(q, shape)
Example #10
  def _test(self, dtype, shape, full_matrices):
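    # Factor x via linalg_ops.qr, compare Q against NumPy's np.linalg.qr per batch
    # element, and check that Q * R reconstructs the input and that Q is unitary.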
    np.random.seed(1)
    x_np = np.random.uniform(
        low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype)

    with self.test_session() as sess:
      x_tf = array_ops.placeholder(dtype)
      with self.test_scope():
        q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices)
      q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})

      q_dims = q_tf_val.shape
      np_q = np.ndarray(q_dims, dtype)
      np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
      new_first_dim = np_q_reshape.shape[0]

      x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
      for i in range(new_first_dim):
        if full_matrices:
          np_q_reshape[i, :, :], _ = np.linalg.qr(
              x_reshape[i, :, :], mode="complete")
        else:
          np_q_reshape[i, :, :], _ = np.linalg.qr(
              x_reshape[i, :, :], mode="reduced")
      np_q = np.reshape(np_q_reshape, q_dims)
      self.CompareOrthogonal(np_q, q_tf_val, min(shape[-2:]))
      self.CheckApproximation(x_np, q_tf_val, r_tf_val)
      self.CheckUnitary(q_tf_val)
Example #11
 def Test(self):
     np.random.seed(42)
     a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
     if dtype_ in [np.complex64, np.complex128]:
         a += 1j * np.random.uniform(low=-1.0, high=1.0,
                                     size=shape_).astype(dtype_)
     # Optimal stepsize for central difference is O(epsilon^{1/3}).
     epsilon = np.finfo(dtype_).eps
     delta = 0.1 * epsilon**(1.0 / 3.0)
     if dtype_ in [np.float32, np.complex64]:
         tol = 3e-2
     else:
         tol = 1e-6
     with self.session(use_gpu=True):
         tf_a = constant_op.constant(a)
         tf_b = linalg_ops.qr(tf_a, full_matrices=full_matrices_)
         for b in tf_b:
             x_init = np.random.uniform(low=-1.0, high=1.0,
                                        size=shape_).astype(dtype_)
             if dtype_ in [np.complex64, np.complex128]:
                 x_init += 1j * np.random.uniform(
                     low=-1.0, high=1.0, size=shape_).astype(dtype_)
             theoretical, numerical = gradient_checker.compute_gradient(
                 tf_a,
                 tf_a.get_shape().as_list(),
                 b,
                 b.get_shape().as_list(),
                 x_init_value=x_init,
                 delta=delta)
             self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Example #12
  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # Check the shape
    if len(shape) < 2:
      raise ValueError("The tensor to initialize must be "
                       "at least two-dimensional")
    # Flatten the input shape with the last dimension remaining
    # its original shape so it works for conv2d
    num_rows = 1
    for dim in shape[:-1]:
      num_rows *= dim
    num_cols = shape[-1]
    flat_shape = (num_cols, num_rows) if num_rows < num_cols else (num_rows,
                                                                   num_cols)

    # Generate a random matrix
    a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
    # Compute the qr factorization
    q, r = linalg_ops.qr(a, full_matrices=False)
    # Make Q uniform
    d = array_ops.diag_part(r)
    q *= math_ops.sign(d)
    if num_rows < num_cols:
      q = array_ops.matrix_transpose(q)
    return self.gain * array_ops.reshape(q, shape)
Example #13
  def _test(self, x_np, full_matrices, full_rank=True):
    dtype = x_np.dtype
    shape = x_np.shape
    with self.session() as sess:
      x_tf = array_ops.placeholder(dtype)
      with self.device_scope():
        q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices)
      q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})

      q_dims = q_tf_val.shape
      np_q = np.ndarray(q_dims, dtype)
      np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
      new_first_dim = np_q_reshape.shape[0]

      x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
      for i in range(new_first_dim):
        if full_matrices:
          np_q_reshape[i, :, :], _ = np.linalg.qr(
              x_reshape[i, :, :], mode="complete")
        else:
          np_q_reshape[i, :, :], _ = np.linalg.qr(
              x_reshape[i, :, :], mode="reduced")
      np_q = np.reshape(np_q_reshape, q_dims)
      if full_rank:
        # Q is unique up to sign/phase if the matrix is full-rank.
        self.CompareOrthogonal(np_q, q_tf_val, min(shape[-2:]))
      self.CheckApproximation(x_np, q_tf_val, r_tf_val)
      self.CheckUnitary(q_tf_val)
Example #14
 def testConcurrentExecutesWithoutError(self):
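   # Two identically seeded random matrices are factored and all ops are evaluated
   # together; the paired Q and R results should match.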
   with self.session(use_gpu=True) as sess:
     all_ops = []
     for full_matrices_ in True, False:
       for rows_ in 4, 5:
         for cols_ in 4, 5:
           matrix1 = random_ops.random_normal([rows_, cols_], seed=42)
           matrix2 = random_ops.random_normal([rows_, cols_], seed=42)
           q1, r1 = linalg_ops.qr(matrix1, full_matrices=full_matrices_)
           q2, r2 = linalg_ops.qr(matrix2, full_matrices=full_matrices_)
           all_ops += [q1, r1, q2, r2]
     val = self.evaluate(all_ops)
     for i in range(8):
       q = 4 * i
       self.assertAllEqual(val[q], val[q + 2])  # q1 == q2
       self.assertAllEqual(val[q + 1], val[q + 3])  # r1 == r2
Example #15
 def testConcurrentExecutesWithoutError(self):
   with self.session(use_gpu=True) as sess:
     all_ops = []
     for full_matrices_ in True, False:
       for rows_ in 4, 5:
         for cols_ in 4, 5:
           matrix1 = random_ops.random_normal([rows_, cols_], seed=42)
           matrix2 = random_ops.random_normal([rows_, cols_], seed=42)
           q1, r1 = linalg_ops.qr(matrix1, full_matrices=full_matrices_)
           q2, r2 = linalg_ops.qr(matrix2, full_matrices=full_matrices_)
           all_ops += [q1, r1, q2, r2]
     val = self.evaluate(all_ops)
     for i in range(8):
       q = 4 * i
       self.assertAllClose(val[q], val[q + 2])  # q1 == q2
       self.assertAllClose(val[q + 1], val[q + 3])  # r1 == r2
Example #16
  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # Check the shape
    if len(shape) < 3 or len(shape) > 5:
      raise ValueError("The tensor to initialize must be at least "
                       "three-dimensional and at most five-dimensional")

    if shape[-2] > shape[-1]:
      raise ValueError("In_filters cannot be greater than out_filters.")

    # Generate a random matrix
    a = random_ops.random_normal([shape[-1], shape[-1]],
                                 dtype=dtype, seed=self.seed)
    # Compute the qr factorization
    q, r = linalg_ops.qr(a, full_matrices=False)
    # Make Q uniform
    d = array_ops.diag_part(r)
    q *= math_ops.sign(d)
    q = q[:shape[-2], :]
    q *= math_ops.sqrt(math_ops.cast(self.gain, dtype=dtype))
    if len(shape) == 3:
      weight = array_ops.scatter_nd([[(shape[0]-1)//2]],
                                    array_ops.expand_dims(q, 0), shape)
    elif len(shape) == 4:
      weight = array_ops.scatter_nd([[(shape[0]-1)//2, (shape[1]-1)//2]],
                                    array_ops.expand_dims(q, 0), shape)
    else:
      weight = array_ops.scatter_nd([[(shape[0]-1)//2, (shape[1]-1)//2,
                                      (shape[2]-1)//2]],
                                    array_ops.expand_dims(q, 0), shape)
    return weight
Example #17
 def testConcurrentExecutesWithoutError(self):
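   # stateless_random_normal with a fixed seed yields bit-identical inputs, so each
   # pair of QR outputs should agree.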
   seed = [42, 24]
   all_ops = []
   for full_matrices_ in True, False:
     for rows_ in 4, 5:
       for cols_ in 4, 5:
         matrix_shape = [rows_, cols_]
         matrix1 = stateless_random_ops.stateless_random_normal(
             matrix_shape, seed)
         matrix2 = stateless_random_ops.stateless_random_normal(
             matrix_shape, seed)
         self.assertAllEqual(matrix1, matrix2)
         q1, r1 = linalg_ops.qr(matrix1, full_matrices=full_matrices_)
         q2, r2 = linalg_ops.qr(matrix2, full_matrices=full_matrices_)
         all_ops += [q1, q2, r1, r2]
   val = self.evaluate(all_ops)
   for i in range(0, len(val), 2):
     self.assertAllClose(val[i], val[i + 1])
Example #18
 def Test(self):
     np.random.seed(42)
     # Optimal stepsize for central difference is O(epsilon^{1/3}).
     epsilon = np.finfo(dtype_).eps
     delta = 0.1 * epsilon**(1.0 / 3.0)
     if dtype_ in [np.float32, np.complex64]:
         tol = 3e-2
     else:
         tol = 1e-6
     # TODO(b/157171666): Sadly we have to double the computation because
     # gradient_checker_v2.compute_gradient expects a list of functions.
     funcs = [
         lambda a: linalg_ops.qr(a, full_matrices=full_matrices_)[0],
         lambda a: linalg_ops.qr(a, full_matrices=full_matrices_)[1]
     ]
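      # RandomInput is presumably a helper defined by the enclosing test that returns
      # a random input matrix.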
     for f in funcs:
         theoretical, numerical = gradient_checker_v2.compute_gradient(
             f, [RandomInput()], delta=delta)
         self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Example #19
  def _orthogonal_matrix(self, n):
    """Construct an n x n orthogonal matrix.

    Args:
      n: dimension.
    Returns:
      an n x n orthogonal matrix.
    """
    a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)
    if self.seed:
      self.seed += 1
    q, r = linalg_ops.qr(a)
    d = array_ops.diag_part(r)
    # make q uniform
    q *= math_ops.sign(d)
    return q
Example #20
    def _orthogonal_matrix(self, n):
        """Construct an n x n orthogonal matrix.

    Args:
      n: dimension.
    Returns:
      an n x n orthogonal matrix.
    """
        a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)
        if self.seed:
            self.seed += 1
        q, r = linalg_ops.qr(a)
        d = array_ops.diag_part(r)
        # make q uniform
        q *= math_ops.sign(d)
        return q
Example #21
    def Test(self):
        np.random.seed(1)
        x_np = np.random.uniform(
            low=-1.0, high=1.0,
            size=np.prod(shape_)).reshape(shape_).astype(dtype_)
        if is_complex:
            x_np += 1j * np.random.uniform(
                low=-1.0, high=1.0,
                size=np.prod(shape_)).reshape(shape_).astype(dtype_)

        # TODO(rmlarsen): Debug failure due to invalid parameter to ORMQR.
        rows_ = shape_[-2]
        cols_ = shape_[-1]
        use_gpu = False if rows_ < cols_ or (full_matrices_
                                             and rows_ != cols_) else True

        with self.test_session(use_gpu=use_gpu) as sess:
            if use_static_shape_:
                x_tf = constant_op.constant(x_np)
            else:
                x_tf = array_ops.placeholder(dtype_)
            q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices_)

            if use_static_shape_:
                q_tf_val, r_tf_val = sess.run([q_tf, r_tf])
            else:
                q_tf_val, r_tf_val = sess.run([q_tf, r_tf],
                                              feed_dict={x_tf: x_np})

            q_dims = q_tf_val.shape
            np_q = np.ndarray(q_dims, dtype_)
            np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
            new_first_dim = np_q_reshape.shape[0]

            x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
            for i in range(new_first_dim):
                if full_matrices_:
                    np_q_reshape[i,:,:], _ = \
                          np.linalg.qr(x_reshape[i,:,:], mode="complete")
                else:
                    np_q_reshape[i,:,:], _ = \
                          np.linalg.qr(x_reshape[i,:,:], mode="reduced")
            np_q = np.reshape(np_q_reshape, q_dims)
            CompareOrthogonal(self, np_q, q_tf_val, min(shape_[-2:]))
            CheckApproximation(self, x_np, q_tf_val, r_tf_val)
            CheckUnitary(self, q_tf_val)
Example #22
 def Test(self):
   np.random.seed(42)
   a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
   if dtype_ in [np.complex64, np.complex128]:
     a += 1j * np.random.uniform(
         low=-1.0, high=1.0, size=shape_).astype(dtype_)
   # Optimal stepsize for central difference is O(epsilon^{1/3}).
   epsilon = np.finfo(dtype_).eps
   delta = 0.1 * epsilon**(1.0 / 3.0)
   if dtype_ in [np.float32, np.complex64]:
     tol = 3e-2
   else:
     tol = 1e-6
   with self.session(use_gpu=True):
     tf_a = constant_op.constant(a)
     tf_b = linalg_ops.qr(tf_a, full_matrices=full_matrices_)
     for b in tf_b:
       x_init = np.random.uniform(
           low=-1.0, high=1.0, size=shape_).astype(dtype_)
       if dtype_ in [np.complex64, np.complex128]:
         x_init += 1j * np.random.uniform(
             low=-1.0, high=1.0, size=shape_).astype(dtype_)
        # compute_gradient on the qr gradient will call a BLAS kernel on the GPU,
        # which might produce results that deviate from the reference when TF32 is enabled.
       if dtype_ == np.float32:
         with ops.device("/cpu:0"):
           theoretical, numerical = gradient_checker.compute_gradient(
               tf_a,
               tf_a.get_shape().as_list(),
               b,
               b.get_shape().as_list(),
               x_init_value=x_init,
               delta=delta)
       else:
         theoretical, numerical = gradient_checker.compute_gradient(
             tf_a,
             tf_a.get_shape().as_list(),
             b,
             b.get_shape().as_list(),
             x_init_value=x_init,
             delta=delta)
       self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Example #23
    def Test(self):
        np.random.seed(1)
        x_np = np.random.uniform(
            low=-1.0, high=1.0,
            size=np.prod(shape_)).reshape(shape_).astype(dtype_)
        if is_complex:
            x_np += 1j * np.random.uniform(
                low=-1.0, high=1.0,
                size=np.prod(shape_)).reshape(shape_).astype(dtype_)

        # rocBLAS on ROCm stack does not support complex<double> GEMM yet
        with self.session(
                use_gpu=True and not test.is_built_with_rocm()) as sess:
            if use_static_shape_:
                x_tf = constant_op.constant(x_np)
            else:
                x_tf = array_ops.placeholder(dtype_)
            q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices_)

            if use_static_shape_:
                q_tf_val, r_tf_val = self.evaluate([q_tf, r_tf])
            else:
                q_tf_val, r_tf_val = sess.run([q_tf, r_tf],
                                              feed_dict={x_tf: x_np})

            q_dims = q_tf_val.shape
            np_q = np.ndarray(q_dims, dtype_)
            np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
            new_first_dim = np_q_reshape.shape[0]

            x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
            for i in range(new_first_dim):
                if full_matrices_:
                    np_q_reshape[i, :, :], _ = np.linalg.qr(x_reshape[i, :, :],
                                                            mode="complete")
                else:
                    np_q_reshape[i, :, :], _ = np.linalg.qr(x_reshape[i, :, :],
                                                            mode="reduced")
            np_q = np.reshape(np_q_reshape, q_dims)
            CompareOrthogonal(self, np_q, q_tf_val, min(shape_[-2:]))
            CheckApproximation(self, x_np, q_tf_val, r_tf_val)
            CheckUnitary(self, q_tf_val)
Example #24
    def Test(self):
        if not use_static_shape_ and context.executing_eagerly():
            return
        np.random.seed(1)
        x_np = np.random.uniform(
            low=-1.0, high=1.0,
            size=np.prod(shape_)).reshape(shape_).astype(dtype_)
        if is_complex:
            x_np += 1j * np.random.uniform(
                low=-1.0, high=1.0,
                size=np.prod(shape_)).reshape(shape_).astype(dtype_)

        if use_static_shape_:
            x_tf = constant_op.constant(x_np)
        else:
            x_tf = array_ops.placeholder(dtype_)
        q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices_)

        if use_static_shape_:
            q_tf_val, r_tf_val = self.evaluate([q_tf, r_tf])
        else:
            with self.session(use_gpu=True) as sess:
                q_tf_val, r_tf_val = sess.run([q_tf, r_tf],
                                              feed_dict={x_tf: x_np})

        q_dims = q_tf_val.shape
        np_q = np.ndarray(q_dims, dtype_)
        np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
        new_first_dim = np_q_reshape.shape[0]

        x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
        for i in range(new_first_dim):
            if full_matrices_:
                np_q_reshape[i, :, :], _ = np.linalg.qr(x_reshape[i, :, :],
                                                        mode="complete")
            else:
                np_q_reshape[i, :, :], _ = np.linalg.qr(x_reshape[i, :, :],
                                                        mode="reduced")
        np_q = np.reshape(np_q_reshape, q_dims)
        CompareOrthogonal(self, np_q, q_tf_val, min(shape_[-2:]))
        CheckApproximation(self, x_np, q_tf_val, r_tf_val)
        CheckUnitary(self, q_tf_val)
Example #25
  def Test(self):
    np.random.seed(1)
    x_np = np.random.uniform(
        low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    if is_complex:
      x_np += 1j * np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)

    for full_matrices in False, True:
      with self.test_session() as sess:
        if use_static_shape_:
          x_tf = constant_op.constant(x_np)
        else:
          x_tf = array_ops.placeholder(dtype_)
        q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices)

        if use_static_shape_:
          q_tf_val, r_tf_val = sess.run([q_tf, r_tf])
        else:
          q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})

        q_dims = q_tf_val.shape
        np_q = np.ndarray(q_dims, dtype_)
        np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
        new_first_dim = np_q_reshape.shape[0]

        x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
        for i in range(new_first_dim):
          if full_matrices:
            np_q_reshape[i,:,:], _ = \
                np.linalg.qr(x_reshape[i,:,:], mode="complete")
          else:
            np_q_reshape[i,:,:], _ = \
                np.linalg.qr(x_reshape[i,:,:], mode="reduced")
        np_q = np.reshape(np_q_reshape, q_dims)
        CompareOrthogonal(self, np_q, q_tf_val, min(shape_[-2:]))
        CheckApproximation(self, x_np, q_tf_val, r_tf_val)
        CheckUnitary(self, q_tf_val)
Example #26
    def _initializer(shape, dtype=None, partition_info=None):
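        # Orthogonal-style initializer: flatten the shape to 2-D, orthogonalize a random
        # matrix via QR, and rescale. Here random() and stddev presumably come from the
        # enclosing scope.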

        if len(shape) < 2:
            raise ValueError(
                "The tensor to initialize must be at least two-dimensional")

        num_rows = 1
        for dim in [shape[i] for i in flatten_axis]:
            num_rows *= dim
        num_cols = shape[list(set(range(len(shape))) - set(flatten_axis))[0]]

        flat_shape = (num_cols,
                      num_rows) if num_rows < num_cols else (num_rows,
                                                             num_cols)

        a = random(flat_shape, type='uniform', stddev=1.0)
        q, r = linalg_ops.qr(a, full_matrices=False)

        q *= np.sqrt(flat_shape[0])

        if num_rows < num_cols:
            q = tf.matrix_transpose(q)

        return stddev * tf.reshape(q, shape)
Example #27
 def _NoGrad(x):
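     # Differentiate the full_matrices=True QR factorization with respect to its input
     # via GradientTape.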
     with backprop.GradientTape() as tape:
         tape.watch(x)
         ret = linalg_ops.qr(x, full_matrices=True)
     return tape.gradient(ret, x)