Code Example #1
def test_BlockSparseTensor_init():
    np.random.seed(10)
    D = 10
    rank = 4
    flows = np.random.choice([True, False], size=rank, replace=True)
    charges = [
        U1Charge.random(dimension=D, minval=-5, maxval=5) for _ in range(rank)
    ]
    fused = fuse_charges(charges, flows)
    data = np.random.uniform(0,
                             1,
                             size=len(
                                 np.nonzero(fused == np.zeros((1, 1)))[0]))
    order = [[n] for n in range(rank)]
    arr = BlockSparseTensor(data, charges, flows, order=order)
    np.testing.assert_allclose(data, arr.data)
    for c1, c2 in zip(charges, arr.charges):
        assert charge_equal(c1, c2[0])
    for c1, c2 in zip(charges, arr._charges):
        assert charge_equal(c1, c2)
    data = np.random.uniform(
        0, 1, size=len(np.nonzero(fused == np.zeros((1, 1)))[0]) + 1)
    with pytest.raises(ValueError):
        arr = BlockSparseTensor(data,
                                charges,
                                flows,
                                order=order,
                                check_consistency=True)
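
The test above exercises the raw constructor; the more common construction path goes through `Index` objects, as `BlockSparseTensor.random` does in Code Example #7. A minimal sketch, assuming `U1Charge`, `Index` and `BlockSparseTensor` are importable from `tensornetwork.block_sparse` (the import path is an assumption based on the project named in the headers, and may differ by version):

# Minimal construction sketch (import path assumed, may differ by version).
import numpy as np
from tensornetwork.block_sparse import U1Charge, Index, BlockSparseTensor

charges = [U1Charge.random(dimension=10, minval=-5, maxval=5) for _ in range(2)]
indices = [Index(c, flow) for c, flow in zip(charges, [True, False])]
t = BlockSparseTensor.random(indices, dtype=np.float64)
# `shape` is the dense shape; `data` holds only the symmetry-allowed elements.
print(t.shape, t.data.size)
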
Code Example #2
File: linalg.py Project: mikechen66/TensorNetwork
def inv(matrix: BlockSparseTensor) -> BlockSparseTensor:
  """
  Compute the matrix inverse of `matrix`.
  Returns:
    BlockSparseTensor: The inverse of `matrix`.
  """
  if matrix.ndim != 2:
    raise ValueError("`inv` can only be taken for matrices, "
                     "found tensor.ndim={}".format(matrix.ndim))
  flat_charges = matrix._charges
  flat_flows = matrix._flows
  flat_order = matrix.flat_order
  tr_partition = len(matrix._order[0])
  blocks, _, shapes = _find_transposed_diagonal_sparse_blocks(
      flat_charges, flat_flows, tr_partition, flat_order)

  data = np.empty(np.sum(np.prod(shapes, axis=0)), dtype=matrix.dtype)
  for n, block in enumerate(blocks):
    data[block] = np.ravel(
        np.linalg.inv(np.reshape(matrix.data[block], shapes[:, n])).T)

  return BlockSparseTensor(
      data=data,
      charges=matrix._charges,
      flows=np.logical_not(matrix._flows),
      order=matrix._order,
      check_consistency=False).transpose((1, 0))#pytype: disable=bad-return-type
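
A hedged usage sketch for `inv`: on a square matrix whose row and column charges match (with opposite flows), the dense matrix is block diagonal up to a charge permutation, so the block-wise inverse agrees with the dense inverse. The import paths and `todense` are assumptions; only `inv` itself is defined above.

# Usage sketch (assumed import paths).
import numpy as np
from tensornetwork.block_sparse import U1Charge, Index, BlockSparseTensor
from tensornetwork.block_sparse.linalg import inv

c = U1Charge.random(dimension=20, minval=-2, maxval=2)
m = BlockSparseTensor.random([Index(c, False), Index(c, True)],
                             dtype=np.float64)
# Block-wise inversion matches the dense inverse of the full matrix.
np.testing.assert_allclose(inv(m).todense(), np.linalg.inv(m.todense()),
                           atol=1e-10)
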
Code Example #3
File: linalg.py Project: zyzhang1992/TensorNetwork
def eig(matrix: BlockSparseTensor) -> Tuple[ChargeArray, BlockSparseTensor]:
    """
  Compute the eigen decomposition of an `M` by `M` matrix `matrix`.
  Args:
    matrix: A matrix (i.e. a rank-2 tensor) of type  `BlockSparseTensor`

  Returns:
    (ChargeArray,BlockSparseTensor): The eigenvalues and eigenvectors

  """
    if matrix.ndim != 2:
        raise NotImplementedError(
            "eig currently supports only rank-2 tensors.")

    flat_charges = matrix._charges
    flat_flows = matrix._flows
    flat_order = matrix.flat_order
    tr_partition = len(matrix._order[0])
    blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
        flat_charges, flat_flows, tr_partition, flat_order)

    eigvals = []
    v_blocks = []
    for n, block in enumerate(blocks):
        e, v = np.linalg.eig(np.reshape(matrix.data[block], shapes[:, n]))
        eigvals.append(e)
        v_blocks.append(v)
    tmp_labels = [
        np.full(len(eigvals[n]), fill_value=n, dtype=np.int16)
        for n in range(len(eigvals))
    ]
    if len(tmp_labels) > 0:
        eigvalscharge_labels = np.concatenate(tmp_labels)
    else:
        eigvalscharge_labels = np.empty(0, dtype=np.int16)

    eigvalscharge = charges[eigvalscharge_labels]

    if len(eigvals) > 0:
        all_eigvals = np.concatenate(eigvals)
    else:
        all_eigvals = np.empty(0, dtype=get_real_dtype(matrix.dtype))

    E = ChargeArray(all_eigvals, [eigvalscharge], [False])
    charges_v = [eigvalscharge
                 ] + [matrix._charges[o] for o in matrix._order[0]]
    order_v = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]
    flows_v = [True] + [matrix._flows[o] for o in matrix._order[0]]
    if len(v_blocks) > 0:
        all_v_blocks = np.concatenate([np.ravel(v.T) for v in v_blocks])
    else:
        all_v_blocks = np.empty(0, dtype=matrix.dtype)

    V = BlockSparseTensor(all_v_blocks,
                          charges=charges_v,
                          flows=flows_v,
                          order=order_v,
                          check_consistency=False).transpose()

    return E, V  #pytype: disable=bad-return-type
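
A sketch of calling `eig` (imports assumed as in the previous sketch): the eigenvalues come back as a `ChargeArray` labeled by the block charges, the eigenvectors as a `BlockSparseTensor`.

# Usage sketch (assumed import paths).
import numpy as np
from tensornetwork.block_sparse import U1Charge, Index, BlockSparseTensor
from tensornetwork.block_sparse.linalg import eig

c = U1Charge.random(dimension=12, minval=-1, maxval=1)
m = BlockSparseTensor.random([Index(c, False), Index(c, True)],
                             dtype=np.float64)
E, V = eig(m)  # one eigenpair per row of each charge block
print(E.shape, V.shape)  # (12,), (12, 12)
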
Code Example #4
File: linalg.py Project: mikechen66/TensorNetwork
def pinv(matrix: BlockSparseTensor,
         rcond: Optional[float] = 1E-15,
         hermitian: Optional[bool] = False) -> BlockSparseTensor:
  """
  Compute the Moore-Penrose pseudo inverse of `matrix`.
  Args:
    rcond: Pseudo inverse cutoff.
  Returns:
    BlockSparseTensor: The pseudo inverse of `matrix`.
  """
  if matrix.ndim != 2:
    raise ValueError("`pinv` can only be taken for matrices, "
                     "found tensor.ndim={}".format(matrix.ndim))

  flat_charges = matrix._charges
  flat_flows = matrix._flows
  flat_order = matrix.flat_order
  tr_partition = len(matrix._order[0])
  blocks, _, shapes = _find_transposed_diagonal_sparse_blocks(
      flat_charges, flat_flows, tr_partition, flat_order)

  data = np.empty(np.sum(np.prod(shapes, axis=0)), dtype=matrix.dtype)
  for n, block in enumerate(blocks):
    data[block] = np.ravel(
        np.linalg.pinv(
            np.reshape(matrix.data[block], shapes[:, n]),
            rcond=rcond,
            hermitian=hermitian).T)

  return BlockSparseTensor(
      data=data,
      charges=matrix._charges,
      flows=np.logical_not(matrix._flows),
      order=matrix._order,
      check_consistency=False).transpose((1, 0))#pytype: disable=bad-return-type
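
Like `inv` above, `pinv` applies `np.linalg.pinv` block by block; since the dense matrix is block diagonal up to a permutation, this should agree with the dense pseudo-inverse. A sketch under the same assumed imports:

# Usage sketch (assumed import paths).
import numpy as np
from tensornetwork.block_sparse import U1Charge, Index, BlockSparseTensor
from tensornetwork.block_sparse.linalg import pinv

c = U1Charge.random(dimension=20, minval=-2, maxval=2)
m = BlockSparseTensor.random([Index(c, False), Index(c, True)],
                             dtype=np.float64)
np.testing.assert_allclose(pinv(m).todense(), np.linalg.pinv(m.todense()),
                           atol=1e-10)
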
Code Example #5
  def convert_to_tensor(self, tensor: Tensor) -> Tensor:
    if numpy.isscalar(tensor):
      tensor = BlockSparseTensor(
          data=tensor, charges=[], flows=[], order=[], check_consistency=False)

    if not isinstance(tensor, BlockSparseTensor):
      raise TypeError(
          "cannot convert tensor of type `{}` to `BlockSparseTensor`".format(
              type(tensor)))
    return tensor
Code Example #6
def ones_like(tensor: BlockSparseTensor) -> BlockSparseTensor:
    """
  Initialize a symmetric tensor with ones.
  The resulting tensor has the same shape and dtype as `tensor`.
  Args:
    tensor: A BlockSparseTensor.
  Returns:
    BlockSparseTensor
  """
    return BlockSparseTensor(np.ones(tensor.data.size, dtype=tensor.dtype),
                             charges=tensor._charges,
                             flows=tensor._flows,
                             order=tensor._order,
                             check_consistency=False)
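
A short usage sketch, assuming `ones_like` above and the block-sparse primitives are in scope: the result shares the charge data of the input, so only the stored (symmetry-allowed) elements are set to one.

# Usage sketch (assumed import paths).
import numpy as np
from tensornetwork.block_sparse import U1Charge, Index, BlockSparseTensor

c = U1Charge.random(dimension=10, minval=-1, maxval=1)
t = BlockSparseTensor.random([Index(c, False), Index(c, True)],
                             dtype=np.float64)
o = ones_like(t)
assert o.shape == t.shape and o.dtype == t.dtype
np.testing.assert_allclose(o.data, 1.0)  # only symmetry-allowed entries stored
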
Code Example #7
def test_item():
    t1 = BlockSparseTensor(data=np.array(1.0),
                           charges=[],
                           flows=[],
                           order=[],
                           check_consistency=False)
    assert t1.item() == 1
    Ds = [10, 11, 12, 13]
    charges = [U1Charge.random(Ds[n], -5, 5) for n in range(4)]
    flows = [True, False, True, False]
    inds = [Index(c, flows[n]) for n, c in enumerate(charges)]
    t2 = BlockSparseTensor.random(inds, dtype=np.float64)
    with pytest.raises(ValueError,
                       match="can only convert an array of"
                       " size 1 to a Python scalar"):
        t2.item()
Code Example #8
def random_like(
    tensor: BlockSparseTensor,
    boundaries: Tuple = (0, 1)) -> BlockSparseTensor:
    """
  Initialize a symmetric tensor with random uniform numbers.
  The resulting tensor has the same shape and dtype as `tensor`.
  Args:
    tensor: A BlockSparseTensor.
  Returns:
    BlockSparseTensor
  """
    return BlockSparseTensor(_random(tensor.data.size,
                                     dtype=tensor.dtype,
                                     boundaries=boundaries),
                             charges=tensor._charges,
                             flows=tensor._flows,
                             order=tensor._order,
                             check_consistency=False)
Code Example #9
File: linalg.py Project: mikechen66/TensorNetwork
def eye(column_index: Index,
        row_index: Optional[Index] = None,
        dtype: Optional[Type[np.number]] = None) -> BlockSparseTensor:
  """
  Return an identity matrix.
  Args:
    column_index: The column index of the matrix.
    row_index: The row index of the matrix.
    dtype: The dtype of the matrix.
  Returns:
    BlockSparseTensor
  """
  if row_index is None:
    row_index = column_index.copy().flip_flow()
  if dtype is None:
    dtype = np.float64

  blocks, _, shapes = _find_diagonal_sparse_blocks(
      column_index.flat_charges + row_index.flat_charges,
      column_index.flat_flows + row_index.flat_flows,
      len(column_index.flat_charges))
  data = np.empty(np.int64(np.sum(np.prod(shapes, axis=0))), dtype=dtype)
  for n, block in enumerate(blocks):
    data[block] = np.ravel(np.eye(shapes[0, n], shapes[1, n], dtype=dtype))
  order = [list(np.arange(0, len(column_index.flat_charges)))] + [
      list(
          np.arange(
              len(column_index.flat_charges),
              len(column_index.flat_charges) + len(row_index.flat_charges)))
  ]
  return BlockSparseTensor(
      data=data,
      charges=column_index.flat_charges + row_index.flat_charges,
      flows=column_index.flat_flows + row_index.flat_flows,
      order=order,
      check_consistency=False)
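
With a single `Index`, `eye` builds an identity whose dense form is the ordinary identity, since each diagonal entry pairs an index value with itself (net charge zero). A sketch with assumed imports:

# Usage sketch (assumed import paths).
import numpy as np
from tensornetwork.block_sparse import U1Charge, Index
from tensornetwork.block_sparse.linalg import eye

c = U1Charge.random(dimension=10, minval=-3, maxval=3)
ident = eye(Index(c, False), dtype=np.float64)
np.testing.assert_allclose(ident.todense(), np.eye(10))
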
Code Example #10
    def gmres(
            self,  #pylint: disable=arguments-differ
            A_mv: Callable,
            b: BlockSparseTensor,
            A_args: Optional[List] = None,
            A_kwargs: Optional[dict] = None,
            x0: Optional[BlockSparseTensor] = None,
            tol: float = 1E-05,
            atol: Optional[float] = None,
            num_krylov_vectors: Optional[int] = None,
            maxiter: Optional[int] = 1,
            M: Optional[Callable] = None,
            enable_caching: bool = True) -> Tuple[BlockSparseTensor, int]:
        """ GMRES solves the linear system A @ x = b for x given a vector `b` and
    a general (not necessarily symmetric/Hermitian) linear operator `A`.

    As a Krylov method, GMRES does not require a concrete matrix representation
    of the n by n `A`, but only a function
    `vector1 = A_mv(vector0, *A_args, **A_kwargs)`
    prescribing a one-to-one linear map from vector0 to vector1 (that is,
    A must be square, and thus vector0 and vector1 the same size). If `A` is a
    dense matrix, or if it is a symmetric/Hermitian operator, a different
    linear solver will usually be preferable.

    GMRES works by first constructing the Krylov basis
    K = (x0, A_mv@x0, A_mv@A_mv@x0, ..., (A_mv^num_krylov_vectors)@x0) and then
    solving a certain dense linear system K @ q0 = q1 from whose solution x can
    be approximated. For `num_krylov_vectors = n` the solution is provably exact
    in infinite precision, but the expense is cubic in `num_krylov_vectors` so
    one is typically interested in the `num_krylov_vectors << n` case.
    The solution can in this case be repeatedly
    improved, to a point, by restarting the Arnoldi iterations each time
    `num_krylov_vectors` is reached. Unfortunately the optimal parameter choices
    balancing expense and accuracy are difficult to predict in advance, so
    applying this function requires a degree of experimentation.

    In a tensor network code one is typically interested in A_mv implementing
    some tensor contraction. This implementation thus allows `b` and `x0` to be
    of whatever arbitrary, though identical, shape `b = A_mv(x0, ...)` expects.
    Reshaping to and from a matrix problem is handled internally.

    The numpy backend version of GMRES is simply an interface to
    `scipy.sparse.linalg.gmres`, itself an interface to ARPACK.
    SciPy 1.1.0 or newer (May 05 2018) is required.

    Args:
      A_mv: A function `v0 = A_mv(v, *A_args, **A_kwargs)` where `v0` and
        `v` have the same shape.
      b: The `b` in `A @ x = b`; it should be of the shape `A_mv`
        operates on.
      A_args: Positional arguments to `A_mv`, supplied to this interface
        as a list. Default: None.
      A_kwargs: Keyword arguments to `A_mv`, supplied to this interface
        as a dictionary. Default: None.
      x0: An optional initial guess for the solution; if `None`, a random
        vector is generated. If `x0` is supplied, its shape and dtype must
        match those of `b`, or an error will be thrown. Default: None.
      tol, atol: Solution tolerance to achieve,
        norm(residual) <= max(tol*norm(b), atol).
        Default: tol=1E-05, atol=tol.
      num_krylov_vectors: Size of the Krylov space to build at each restart.
        Expense is cubic in this parameter. If supplied, it must be
        an integer in 0 < num_krylov_vectors <= b.size. 
        Default: min(100, b.size).
      maxiter: The Krylov space will be repeatedly rebuilt up to this many
        times. Large values of this argument
        should be used only with caution, since especially for nearly
        symmetric matrices and small `num_krylov_vectors` convergence
        might well freeze at a value significantly larger than `tol`.
        Default: 1
      M: Inverse of the preconditioner of A; see the docstring for
        `scipy.sparse.linalg.gmres`. This is only supported in the
        numpy backend. Supplying this argument to other backends will
        trigger NotImplementedError. Default: None.
      enable_caching: If `True`, block-data during calls to `matvec` is cached
        for later reuse. Note: usually it is safe to enable caching, unless
        `matvec` uses matrix decompositions like SVD, QR, eigh, eig or similar.
        In this case, if one does a large number of Krylov steps, this can lead
        to memory clutter and/or OOM errors.
    Raises:
      ValueError: -if `x0` is supplied but its shape differs from that of `b`.
                  -if the ARPACK solver reports a breakdown (which usually
                   indicates some kind of floating point issue).
                  -if `num_krylov_vectors` is 0 or exceeds `b.size`.
                  -if `tol` is negative.
      TypeError:  -if the dtypes of `x0` and `b` do not match.

    Returns:
      x: The converged solution. It has the same shape as `b`.
      info: 0 if convergence was achieved, the number of restarts otherwise.
    """

        if x0 is None:
            x0 = self.bs.randn_like(b)

        if not self.bs.compare_shapes(x0, b):
            errstring = (
                f"x0.sparse_shape = \n{x0.sparse_shape} \ndoes not match "
                f"b.sparse_shape = \n{b.sparse_shape}.")
            raise ValueError(errstring)

        if x0.dtype != b.dtype:
            raise TypeError(f"x0.dtype = {x0.dtype} does not"
                            f" match b.dtype = {b.dtype}")

        if num_krylov_vectors is None:
            num_krylov_vectors = min(b.size, 100)

        if num_krylov_vectors <= 0 or num_krylov_vectors > b.size:
            errstring = (f"num_krylov_vectors must be in "
                         f"0 < {num_krylov_vectors} <= {b.size}.")
            raise ValueError(errstring)
        if tol < 0:
            raise ValueError(f"tol = {tol} must be non-negative.")

        if atol is None:
            atol = tol
        elif atol < 0:
            raise ValueError(f"atol = {atol} must be non-negative.")

        if A_args is None:
            A_args = []
        if A_kwargs is None:
            A_kwargs = {}

        x0.contiguous(inplace=True)
        b.contiguous(inplace=True)
        tmp = BlockSparseTensor(numpy.empty(0, dtype=x0.dtype),
                                x0._charges,
                                x0._flows,
                                check_consistency=False)

        def matvec(vector):
            tmp.data = vector
            res = A_mv(tmp, *A_args, **A_kwargs)
            res.contiguous(inplace=True)
            return res.data

        dim = len(x0.data)
        A_op = sp.sparse.linalg.LinearOperator(dtype=x0.dtype,
                                               shape=(dim, dim),
                                               matvec=matvec)

        former_caching_status = self.bs.get_caching_status()
        self.bs.set_caching_status(enable_caching)
        if enable_caching:
            cache_was_empty = self.bs.get_cacher().is_empty
        try:
            x, info = sp.sparse.linalg.gmres(A_op,
                                             b.data,
                                             x0.data,
                                             tol=tol,
                                             atol=atol,
                                             restart=num_krylov_vectors,
                                             maxiter=maxiter,
                                             M=M)
        finally:
            #set caching status back to what it was
            self.bs.set_caching_status(former_caching_status)
            if enable_caching and cache_was_empty:
                self.bs.clear_cache()

        if info < 0:
            raise ValueError(
                "ARPACK gmres received illegal input or broke down.")
        if info > 0:
            warnings.warn("gmres did not converge.")
        tmp.data = x
        return tmp, info
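
A hedged end-to-end sketch: `A_mv` is a tensor contraction, as the docstring anticipates. The backend name "symmetric" and the `tensordot` import are assumptions about the surrounding TensorNetwork code, not confirmed by this page.

# Usage sketch (backend name and import paths are assumptions).
import numpy as np
from tensornetwork.backends.backend_factory import get_backend
from tensornetwork.block_sparse import (U1Charge, Index, BlockSparseTensor,
                                        tensordot)

backend = get_backend("symmetric")
c = U1Charge.random(dimension=60, minval=-1, maxval=1)
A = BlockSparseTensor.random([Index(c, False), Index(c, True)],
                             dtype=np.float64)
b = BlockSparseTensor.random([Index(c, False)], dtype=np.float64)

def A_mv(x, mat):
    # the one-to-one linear map from the docstring: a single contraction
    return tensordot(mat, x, ([1], [0]))

x, info = backend.gmres(A_mv, b, A_args=[A], num_krylov_vectors=10, maxiter=50)
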
Code Example #11
File: linalg.py Project: mikechen66/TensorNetwork
def diag(tensor: ChargeArray) -> Any:
  """
  Return a diagonal `BlockSparseTensor` from a `ChargeArray`, or 
  return the diagonal of a `BlockSparseTensor` as a `ChargeArray`.
  For input of type `BlockSparseTensor`:
    The full diagonal is obtained from finding the diagonal blocks of the 
    `BlockSparseTensor`, taking the diagonal elements of those and packing
    the result into a ChargeArray. Note that the computed diagonal elements 
    are usually different from the  diagonal elements obtained from 
    converting the `BlockSparseTensor` to dense storage and taking the diagonal.
    Note that the flow of the resulting 1d `ChargeArray` object is `False`.
  Args:
    tensor: A `ChargeArray`.
  Returns:
    ChargeArray: A 1d `CharggeArray` containing the diagonal of `tensor`, 
      or a diagonal matrix of type `BlockSparseTensor` containing `tensor` 
      on its diagonal.

  """
  if tensor.ndim > 2:
    raise ValueError("`diag` currently only implemented for matrices, "
                     "found `ndim={}`".format(tensor.ndim))
  if not isinstance(tensor, BlockSparseTensor):
    if tensor.ndim > 1:
      raise ValueError(
          "`diag` currently only implemented for `ChargeArray` with ndim=1, "
          "found `ndim={}`".format(tensor.ndim))
    flat_charges = tensor._charges + tensor._charges
    flat_flows = list(tensor._flows) + list(np.logical_not(tensor._flows))
    flat_order = list(tensor.flat_order) + list(
        np.asarray(tensor.flat_order) + len(tensor._charges))
    tr_partition = len(tensor._order[0])
    blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
        flat_charges, flat_flows, tr_partition, flat_order)
    data = np.zeros(
        np.int64(np.sum(np.prod(shapes, axis=0))), dtype=tensor.dtype)
    lookup, unique, labels = compute_sparse_lookup(tensor._charges,
                                                   tensor._flows, charges)
    for n, block in enumerate(blocks):
      label = labels[np.nonzero(unique == charges[n])[0][0]]
      data[block] = np.ravel(
          np.diag(tensor.data[np.nonzero(lookup == label)[0]]))

    order = [
        tensor._order[0],
        list(np.asarray(tensor._order[0]) + len(tensor._charges))
    ]
    new_charges = [tensor._charges[0].copy(), tensor._charges[0].copy()]
    return BlockSparseTensor(
        data,
        charges=new_charges,
        flows=list(tensor._flows) + list(np.logical_not(tensor._flows)),
        order=order,
        check_consistency=False)

  flat_charges = tensor._charges
  flat_flows = tensor._flows
  flat_order = tensor.flat_order
  tr_partition = len(tensor._order[0])
  sparse_blocks, charges, block_shapes = _find_transposed_diagonal_sparse_blocks(#pylint: disable=line-too-long
      flat_charges, flat_flows, tr_partition, flat_order)

  shapes = np.min(block_shapes, axis=0)
  if len(sparse_blocks) > 0:
    data = np.concatenate([
        np.diag(np.reshape(tensor.data[sparse_blocks[n]], block_shapes[:, n]))
        for n in range(len(sparse_blocks))
    ])
    charge_labels = np.concatenate([
        np.full(shapes[n], fill_value=n, dtype=np.int16)
        for n in range(len(sparse_blocks))
    ])

  else:
    data = np.empty(0, dtype=tensor.dtype)
    charge_labels = np.empty(0, dtype=np.int16)
  newcharges = [charges[charge_labels]]
  flows = [False]
  return ChargeArray(data, newcharges, flows)
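
A round-trip sketch: extracting the block diagonal of a matrix, promoting it back to a diagonal `BlockSparseTensor`, and extracting again should reproduce the same `ChargeArray` data (imports assumed as before; the preserved block ordering is also an assumption).

# Usage sketch (assumed import paths).
import numpy as np
from tensornetwork.block_sparse import U1Charge, Index, BlockSparseTensor
from tensornetwork.block_sparse.linalg import diag

c = U1Charge.random(dimension=10, minval=-2, maxval=2)
m = BlockSparseTensor.random([Index(c, False), Index(c, True)],
                             dtype=np.float64)
v = diag(m)   # ChargeArray: diagonals of the charge blocks
d = diag(v)   # BlockSparseTensor: diagonal matrix built from `v`
np.testing.assert_allclose(diag(d).data, v.data)
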
Code Example #12
File: eigh.py Project: gevenbly/test_codes
def _eigh_fixed(
        matrix: BlockSparseTensor,
        link_charges: ChargeArray,
        which: Optional[Text] = 'LM',
        UPLO: Optional[Text] = 'L') -> Tuple[ChargeArray, BlockSparseTensor]:
    """
  Compute eigenvectors and eigenvalues of a hermitian matrix where the output
  charge order is fixed to match that of `link_charges`.
  """

    # reshape into matrix if needed
    pivot = matrix.ndim // 2
    m_shape = matrix.shape
    matrix = matrix.reshape(
        [np.prod(m_shape[:pivot]),
         np.prod(m_shape[pivot:])])

    # compute info about each block
    flat_charges = matrix._charges
    flat_flows = matrix._flows
    flat_order = matrix.flat_order
    tr_partition = len(matrix._order[0])
    blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
        flat_charges, flat_flows, tr_partition, flat_order)

    # intersect between link charges and block charges
    link_uni, link_pos, link_counts = link_charges.unique(return_inverse=True,
                                                          return_counts=True)
    _, blk_common, link_common = charges.intersect(link_uni,
                                                   return_indices=True)

    # diagonalize each block
    eigvals = []
    v_blocks = []
    for n, m in enumerate(blk_common):
        e, v = np.linalg.eigh(np.reshape(matrix.data[blocks[m]], shapes[:, m]),
                              UPLO)

        # sort within each block
        if which == 'SA':
            blk_sort = np.argsort(e)
        elif which == 'LA':
            blk_sort = np.flip(np.argsort(e))
        elif which == 'SM':
            blk_sort = np.argsort(np.abs(e))
        elif which == 'LM':
            blk_sort = np.flip(np.argsort(np.abs(e)))

        eigvals.append(e[blk_sort])
        v_blocks.append(v[:, blk_sort].T)

    link_degens = np.zeros(np.size(link_pos), dtype=np.int64)
    for n, count in enumerate(link_counts):
        link_degens[link_pos == n] = np.arange(count, dtype=np.int64)

    all_eigvals = np.zeros(link_pos.size, dtype=matrix.dtype)
    for n in range(len(link_pos)):
        all_eigvals[n] = eigvals[link_pos[n]][link_degens[n]]

    e_charge = charges[blk_common[link_pos]]
    E = ChargeArray(all_eigvals, [e_charge], [False])

    charges_v = [e_charge] + [matrix._charges[o] for o in matrix._order[0]]
    order_v = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]
    flows_v = [True] + [matrix._flows[o] for o in matrix._order[0]]

    if len(v_blocks) > 0:
        all_v_blocks = np.concatenate([
            v_blocks[link_pos[n]][link_degens[n], :]
            for n in range(len(all_eigvals))
        ])
    else:
        all_v_blocks = np.empty(0, dtype=matrix.dtype)

    fin_shape = [*m_shape[:pivot], len(all_eigvals)]
    V = BlockSparseTensor(
        all_v_blocks,
        charges=charges_v,
        flows=flows_v,
        order=order_v,
        check_consistency=False).transpose().reshape(fin_shape)

    return E, V
Code Example #13
File: linalg.py Project: mikechen66/TensorNetwork
def qr(matrix: BlockSparseTensor, mode: Optional[Text] = 'reduced') -> Any:
  """
  Compute the qr decomposition of an `M` by `N` matrix `matrix`.
  The matrix is factorized into `q*r`, with 
  `q` an orthogonal matrix and `r` an upper triangular matrix.
  Args:
    matrix: A matrix (i.e. a rank-2 tensor) of type  `BlockSparseTensor`
    mode : Can take values {'reduced', 'complete', 'r', 'raw'}.
    If K = min(M, N), then

    * 'reduced'  : returns q, r with dimensions (M, K), (K, N) (default)
    * 'complete' : returns q, r with dimensions (M, M), (M, N)
    * 'r'        : returns r only with dimensions (K, N)

  Returns:
    (BlockSparseTensor,BlockSparseTensor): If mode = `reduced` or `complete`
    BlockSparseTensor: If mode = `r`.
  """
  if mode == 'raw':
    raise NotImplementedError('mode `raw` currently not supported')
  if matrix.ndim != 2:
    raise NotImplementedError("qr currently supports only rank-2 tensors.")

  flat_charges = matrix._charges
  flat_flows = matrix._flows
  flat_order = matrix.flat_order
  tr_partition = len(matrix._order[0])
  blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
      flat_charges, flat_flows, tr_partition, flat_order)

  q_blocks = []
  r_blocks = []
  for n, block in enumerate(blocks):
    out = np.linalg.qr(np.reshape(matrix.data[block], shapes[:, n]), mode)
    if mode in ('reduced', 'complete'):
      q_blocks.append(out[0])
      r_blocks.append(out[1])
    elif mode == 'r':
      r_blocks.append(out)
    else:
      raise ValueError('unknown value {} for input `mode`'.format(mode))

  tmp_r_charge_labels = [
      np.full(r_blocks[n].shape[0], fill_value=n, dtype=np.int16)
      for n in range(len(r_blocks))
  ]
  if len(tmp_r_charge_labels) > 0:
    left_r_charge_labels = np.concatenate(tmp_r_charge_labels)
  else:
    left_r_charge_labels = np.empty(0, dtype=np.int16)

  left_r_charge = charges[left_r_charge_labels]
  charges_r = [left_r_charge] + [matrix._charges[o] for o in matrix._order[1]]
  flows_r = [False] + [matrix._flows[o] for o in matrix._order[1]]
  order_r = [[0]] + [list(np.arange(1, len(matrix._order[1]) + 1))]
  if len(r_blocks) > 0:
    all_r_blocks = np.concatenate([np.ravel(r) for r in r_blocks])
  else:
    all_r_blocks = np.empty(0, dtype=matrix.dtype)
  R = BlockSparseTensor(
      all_r_blocks,
      charges=charges_r,
      flows=flows_r,
      order=order_r,
      check_consistency=False)

  if mode in ('reduced', 'complete'):
    tmp_right_q_charge_labels = [
        np.full(q_blocks[n].shape[1], fill_value=n, dtype=np.int16)
        for n in range(len(q_blocks))
    ]
    if len(tmp_right_q_charge_labels) > 0:
      right_q_charge_labels = np.concatenate(tmp_right_q_charge_labels)
    else:
      right_q_charge_labels = np.empty(0, dtype=np.int16)

    right_q_charge = charges[right_q_charge_labels]
    charges_q = [
        right_q_charge,
    ] + [matrix._charges[o] for o in matrix._order[0]]
    order_q = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]
    flows_q = [True] + [matrix._flows[o] for o in matrix._order[0]]
    if len(q_blocks) > 0:
      all_q_blocks = np.concatenate([np.ravel(q.T) for q in q_blocks])
    else:
      all_q_blocks = np.empty(0, dtype=matrix.dtype)
    return BlockSparseTensor(
        all_q_blocks,
        charges=charges_q,
        flows=flows_q,
        order=order_q,
        check_consistency=False).transpose((1, 0)), R

  return R
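
A sketch verifying the factorization: contracting `q` with `r` reproduces the matrix (dense comparison; the imports, `tensordot`, and `todense` are assumptions).

# Usage sketch (assumed import paths).
import numpy as np
from tensornetwork.block_sparse import (U1Charge, Index, BlockSparseTensor,
                                        tensordot)
from tensornetwork.block_sparse.linalg import qr

c = U1Charge.random(dimension=15, minval=-1, maxval=1)
m = BlockSparseTensor.random([Index(c, False), Index(c, True)],
                             dtype=np.float64)
q, r = qr(m, mode='reduced')
np.testing.assert_allclose(tensordot(q, r, ([1], [0])).todense(),
                           m.todense(), atol=1e-12)
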
Code Example #14
File: linalg.py Project: mikechen66/TensorNetwork
def svd(matrix: BlockSparseTensor,
        full_matrices: Optional[bool] = True,
        compute_uv: Optional[bool] = True,
        hermitian: Optional[bool] = False) -> Any:
  """
  Compute the singular value decomposition of `matrix`.
  The matrix if factorized into `u * s * vh`, with 
  `u` and `vh` the left and right singular vectors of `matrix`,
  and `s` its singular values.
  Args:
    matrix: A matrix (i.e. an order-2 tensor) of type  `BlockSparseTensor`
    full_matrices: If `True`, expand `u` and `v` to square matrices
      If `False` return the "economic" svd, i.e. `u.shape[1]=s.shape[0]`
      and `v.shape[0]=s.shape[1]`
    compute_uv: If `True`, return `u` and `v`.
    hermitian: If `True`, assume hermiticity of `matrix`.
  Returns:
    If `compute_uv` is `True`: Three BlockSparseTensors `U,S,V`.
    If `compute_uv` is `False`: A BlockSparseTensors `S` containing the 
      singular values.
  """

  if matrix.ndim != 2:
    raise NotImplementedError("svd currently supports only tensors of order 2.")

  flat_charges = matrix._charges
  flat_flows = matrix._flows
  flat_order = matrix.flat_order
  tr_partition = len(matrix._order[0])
  blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
      flat_charges, flat_flows, tr_partition, flat_order)

  u_blocks = []
  singvals = []
  v_blocks = []
  for n, block in enumerate(blocks):
    out = np.linalg.svd(
        np.reshape(matrix.data[block], shapes[:, n]), full_matrices, compute_uv,
        hermitian)
    if compute_uv:
      u_blocks.append(out[0])
      singvals.append(out[1])
      v_blocks.append(out[2])

    else:
      singvals.append(out)

  tmp_labels = [
      np.full(len(singvals[n]), fill_value=n, dtype=np.int16)
      for n in range(len(singvals))
  ]
  if len(tmp_labels) > 0:
    left_singval_charge_labels = np.concatenate(tmp_labels)
  else:
    left_singval_charge_labels = np.empty(0, dtype=np.int16)
  left_singval_charge = charges[left_singval_charge_labels]
  if len(singvals) > 0:
    all_singvals = np.concatenate(singvals)
  else:
    all_singvals = np.empty(0, dtype=get_real_dtype(matrix.dtype))
  S = ChargeArray(all_singvals, [left_singval_charge], [False])

  if compute_uv:
    #define the new charges on the two central bonds
    tmp_left_labels = [
        np.full(u_blocks[n].shape[1], fill_value=n, dtype=np.int16)
        for n in range(len(u_blocks))
    ]
    if len(tmp_left_labels) > 0:
      left_charge_labels = np.concatenate(tmp_left_labels)
    else:
      left_charge_labels = np.empty(0, dtype=np.int16)

    tmp_right_labels = [
        np.full(v_blocks[n].shape[0], fill_value=n, dtype=np.int16)
        for n in range(len(v_blocks))
    ]
    if len(tmp_right_labels) > 0:
      right_charge_labels = np.concatenate(tmp_right_labels)
    else:
      right_charge_labels = np.empty(0, dtype=np.int16)
    new_left_charge = charges[left_charge_labels]
    new_right_charge = charges[right_charge_labels]

    charges_u = [new_left_charge
                ] + [matrix._charges[o] for o in matrix._order[0]]
    order_u = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]
    flows_u = [True] + [matrix._flows[o] for o in matrix._order[0]]
    charges_v = [new_right_charge
                ] + [matrix._charges[o] for o in matrix._order[1]]
    flows_v = [False] + [matrix._flows[o] for o in matrix._order[1]]
    order_v = [[0]] + [list(np.arange(1, len(matrix._order[1]) + 1))]
    # We fill in data into the transposed U
    # note that transposing is essentially free
    if len(u_blocks) > 0:
      all_u_blocks = np.concatenate([np.ravel(u.T) for u in u_blocks])
      all_v_blocks = np.concatenate([np.ravel(v) for v in v_blocks])
    else:
      all_u_blocks = np.empty(0, dtype=matrix.dtype)
      all_v_blocks = np.empty(0, dtype=matrix.dtype)

    return BlockSparseTensor(
        all_u_blocks,
        charges=charges_u,
        flows=flows_u,
        order=order_u,
        check_consistency=False).transpose((1, 0)), S, BlockSparseTensor(
            all_v_blocks,
            charges=charges_v,
            flows=flows_v,
            order=order_v,
            check_consistency=False)

  return S
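
Since the dense matrix is block diagonal up to a permutation, the union of the block singular values equals the dense singular values. A sketch checking this (imports assumed):

# Usage sketch (assumed import paths).
import numpy as np
from tensornetwork.block_sparse import U1Charge, Index, BlockSparseTensor
from tensornetwork.block_sparse.linalg import svd

c = U1Charge.random(dimension=20, minval=-1, maxval=1)
m = BlockSparseTensor.random([Index(c, False), Index(c, True)],
                             dtype=np.float64)
S = svd(m, compute_uv=False)
np.testing.assert_allclose(np.sort(S.data)[::-1],
                           np.linalg.svd(m.todense(), compute_uv=False),
                           atol=1e-12)
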
Code Example #15
def svd(bt,
        tensor: BlockSparseTensor,
        pivot_axis: int,
        max_singular_values: Optional[int] = None,
        max_truncation_error: Optional[float] = None,
        relative: Optional[bool] = False
        ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """
  Computes the singular value decomposition (SVD) of a tensor.
  See tensornetwork.backends.tensorflow.decompositions for details.
  """

    left_dims = tensor.shape[:pivot_axis]
    right_dims = tensor.shape[pivot_axis:]

    matrix = bt.reshape(tensor, [np.prod(left_dims), np.prod(right_dims)])

    flat_charges = matrix._charges
    flat_flows = matrix._flows
    flat_order = matrix.flat_order
    tr_partition = len(matrix._order[0])
    blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
        flat_charges, flat_flows, tr_partition, flat_order)

    u_blocks = []
    singvals = []
    v_blocks = []
    for n, b in enumerate(blocks):
        out = np.linalg.svd(np.reshape(matrix.data[b], shapes[:, n]),
                            full_matrices=False,
                            compute_uv=True)
        u_blocks.append(out[0])
        singvals.append(out[1])
        v_blocks.append(out[2])

    orig_num_singvals = np.int64(np.sum([len(s) for s in singvals]))
    discarded_singvals = np.zeros(0, dtype=get_real_dtype(tensor.dtype))
    if (max_singular_values
            is not None) and (max_singular_values >= orig_num_singvals):
        max_singular_values = None

    if (max_truncation_error is not None) or (max_singular_values is not None):
        max_D = np.max([len(s) for s in singvals]) if len(singvals) > 0 else 0

        #extend singvals of all blocks into a matrix by padding each block with 0
        if len(singvals) > 0:
            extended_singvals = np.stack([
                np.append(s, np.zeros(max_D - len(s), dtype=s.dtype))
                for s in singvals
            ],
                                         axis=1)
        else:
            extended_singvals = np.empty((0, 0),
                                         dtype=get_real_dtype(tensor.dtype))

        extended_flat_singvals = np.ravel(extended_singvals)
        #sort singular values
        inds = np.argsort(extended_flat_singvals, kind='stable')
        discarded_inds = np.zeros(0, dtype=SIZE_T)
        if inds.shape[0] > 0:
            maxind = inds[-1]
        else:
            maxind = 0
        if max_truncation_error is not None:
            if relative and (len(singvals) > 0):
                max_truncation_error = max_truncation_error * np.max(
                    [s[0] for s in singvals])

            kept_inds_mask = np.sqrt(
                np.cumsum(np.square(
                    extended_flat_singvals[inds]))) > max_truncation_error
            trunc_inds_mask = np.logical_not(kept_inds_mask)
            discarded_inds = inds[trunc_inds_mask]
            inds = inds[kept_inds_mask]
        if max_singular_values is not None:
            #if the original number of non-zero singular values
            #is smaller than `max_singular_values` we need to reset
            #`max_singular_values` (we were filling in 0.0 into singular
            #value blocks to facilitate truncation steps, thus we could end up
            #with more singular values than were originally there).
            if max_singular_values > orig_num_singvals:
                max_singular_values = orig_num_singvals
            if max_singular_values < len(inds):
                discarded_inds = np.append(discarded_inds,
                                           inds[:(-1) * max_singular_values])
                inds = inds[(-1) * max_singular_values::]

        if len(inds) == 0:
            #special case of truncation to 0 dimension;
            warnings.warn("svd_decomposition truncated to 0 dimensions. "
                          "Adjusting to `max_singular_values = 1`")
            inds = np.asarray([maxind])

        if extended_singvals.shape[1] > 0:
            #pylint: disable=no-member
            keep = np.divmod(inds, extended_singvals.shape[1])
        else:
            keep = (np.zeros(1, dtype=SIZE_T), np.zeros(1, dtype=SIZE_T))
        newsingvals = [
            extended_singvals[keep[0][keep[1] == n],
                              keep[1][keep[1] == n]][::-1]
            for n in range(extended_singvals.shape[1])
        ]

        discarded_singvals = extended_flat_singvals[discarded_inds]
        singvals = newsingvals
    if len(singvals) > 0:
        left_singval_charge_labels = np.concatenate([
            np.full(singvals[n].shape[0], fill_value=n, dtype=np.int16)
            for n in range(len(singvals))
        ])
        all_singvals = np.concatenate(singvals)
        #define the new charges on the two central bonds
        left_charge_labels = np.concatenate([
            np.full(len(singvals[n]), fill_value=n, dtype=np.int16)
            for n in range(len(u_blocks))
        ])
        right_charge_labels = np.concatenate([
            np.full(len(singvals[n]), fill_value=n, dtype=np.int16)
            for n in range(len(v_blocks))
        ])
        all_ublocks = np.concatenate([
            np.ravel(np.transpose(u_blocks[n][:, 0:len(singvals[n])]))
            for n in range(len(u_blocks))
        ])
        all_vblocks = np.concatenate([
            np.ravel(v_blocks[n][0:len(singvals[n]), :])
            for n in range(len(v_blocks))
        ])
    else:
        left_singval_charge_labels = np.empty(0, dtype=np.int16)
        all_singvals = np.empty(0, dtype=get_real_dtype(tensor.dtype))
        left_charge_labels = np.empty(0, dtype=np.int16)
        right_charge_labels = np.empty(0, dtype=np.int16)
        all_ublocks = np.empty(0, dtype=get_real_dtype(tensor.dtype))
        all_vblocks = np.empty(0, dtype=get_real_dtype(tensor.dtype))
    left_singval_charge = charges[left_singval_charge_labels]
    S = ChargeArray(all_singvals, [left_singval_charge], [False])

    new_left_charge = charges[left_charge_labels]
    new_right_charge = charges[right_charge_labels]

    #get the indices of the new tensors U,S and V
    charges_u = [new_left_charge
                 ] + [matrix._charges[o] for o in matrix._order[0]]
    order_u = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]
    flows_u = [True] + [matrix._flows[o] for o in matrix._order[0]]
    charges_v = [new_right_charge
                 ] + [matrix._charges[o] for o in matrix._order[1]]
    flows_v = [False] + [matrix._flows[o] for o in matrix._order[1]]
    order_v = [[0]] + [list(np.arange(1, len(matrix._order[1]) + 1))]

    #We fill in data into the transposed U
    U = BlockSparseTensor(all_ublocks,
                          charges=charges_u,
                          flows=flows_u,
                          order=order_u,
                          check_consistency=False).transpose((1, 0))

    V = BlockSparseTensor(all_vblocks,
                          charges=charges_v,
                          flows=flows_v,
                          order=order_v,
                          check_consistency=False)
    left_shape = left_dims + (S.shape[0], )
    right_shape = (S.shape[0], ) + right_dims
    return U.reshape(left_shape), S, V.reshape(
        right_shape), discarded_singvals[discarded_singvals > 0.0]
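
A sketch of the truncating interface. The first argument `bt` is the block-sparse module itself (only its `reshape` is used above); passing `tensornetwork.block_sparse` for it is an assumption, as are the import paths.

# Usage sketch (assumed import paths; `bs` plays the role of `bt`).
import numpy as np
import tensornetwork.block_sparse as bs
from tensornetwork.block_sparse import U1Charge, Index, BlockSparseTensor

charges = [U1Charge.random(8, -1, 1) for _ in range(4)]
flows = [True, False, True, False]
t = BlockSparseTensor.random([Index(c, f) for c, f in zip(charges, flows)],
                             dtype=np.float64)
U, S, V, discarded = svd(bs, t, pivot_axis=2, max_singular_values=6)
print(U.shape, S.shape, V.shape)  # (8, 8, k), (k,), (k, 8, 8) with k <= 6
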
Code Example #16
File: eigh.py Project: gevenbly/test_codes
def _eigh_free(
        matrix: BlockSparseTensor,
        which: Optional[Text] = 'LM',
        full_sort: Optional[bool] = True,
        threshold: Optional[float] = None,
        max_kept: Optional[int] = None,
        UPLO: Optional[Text] = 'L') -> Tuple[ChargeArray, BlockSparseTensor]:
    """
  Compute eigenvectors and eigenvalues of a hermitian matrix where the output
  charge order is free.
  """
    # reshape into matrix if needed
    pivot = matrix.ndim // 2
    m_shape = matrix.shape
    matrix = matrix.reshape(
        [np.prod(m_shape[:pivot]),
         np.prod(m_shape[pivot:])])

    if max_kept is None:
        max_kept = matrix.shape[0]
    max_kept = min(max_kept, matrix.shape[0])

    if threshold is None:
        if which == 'LM' or which == 'LA':
            threshold = -float('inf')
        elif which == 'SM' or which == 'SA':
            threshold = float('inf')

    # compute info about each block
    flat_charges = matrix._charges
    flat_flows = matrix._flows
    flat_order = matrix.flat_order
    tr_partition = len(matrix._order[0])
    blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
        flat_charges, flat_flows, tr_partition, flat_order)
    num_blocks = len(blocks)

    # diagonalize each block
    eigvals = [0] * num_blocks
    v_blocks = [0] * num_blocks
    for n, block in enumerate(blocks):
        etemp, vtemp = np.linalg.eigh(
            np.reshape(matrix.data[block], shapes[:, n]), UPLO)
        # sort within each block
        if which == 'LM':
            ord_temp = np.flip(np.argsort(abs(etemp)))
        elif which == 'LA':
            ord_temp = np.flip(np.argsort(etemp))
        elif which == 'SM':
            ord_temp = np.argsort(abs(etemp))
        elif which == 'SA':
            ord_temp = np.argsort(etemp)

        eigvals[n] = etemp[ord_temp]
        v_blocks[n] = (vtemp[:, ord_temp].T)

    # combine and sort eigenvalues from all symmetry blocks
    tmp_labels = [
        np.full(len(eigvals[n]), fill_value=n, dtype=np.int16)
        for n in range(len(eigvals))
    ]
    tmp_degens = [
        np.arange(len(eigvals[n]), dtype=np.int16) for n in range(len(eigvals))
    ]
    all_eigvals = np.concatenate(eigvals)
    all_labels = np.concatenate(tmp_labels)

    if which == 'LM':
        eig_ord = np.flip(np.argsort(np.abs(all_eigvals)))
        num_kept = min(sum(np.abs(all_eigvals) >= threshold), max_kept)
    elif which == 'LA':
        eig_ord = np.flip(np.argsort(all_eigvals))
        num_kept = min(sum(all_eigvals >= threshold), max_kept)
    elif which == 'SM':
        eig_ord = np.argsort(np.abs(all_eigvals))
        num_kept = min(sum(np.abs(all_eigvals) <= threshold), max_kept)
    elif which == 'SA':
        eig_ord = np.argsort(all_eigvals)
        num_kept = min(sum(all_eigvals <= threshold), max_kept)

    if full_sort:
        e_labels = np.concatenate(tmp_labels)[eig_ord[:num_kept]]
        e_degens = np.concatenate(tmp_degens)[eig_ord[:num_kept]]
        e_charge = charges[e_labels]
        E = ChargeArray(all_eigvals[eig_ord[:num_kept]], [e_charge], [False])
    else:
        num_per_block = [
            sum(all_labels[eig_ord[:num_kept]] == n) for n in range(num_blocks)
        ]
        e_labels = np.concatenate([
            np.full(num_per_block[n], fill_value=n, dtype=np.int16)
            for n in range(num_blocks)
        ])
        e_degens = np.concatenate([
            np.arange(num_per_block[n], dtype=np.int16)
            for n in range(num_blocks)
        ])
        e_charge = charges[e_labels]
        new_eigvals = np.concatenate(
            [eigvals[n][:num_per_block[n]] for n in range(num_blocks)])
        E = ChargeArray(new_eigvals, [e_charge], [False])

    charges_v = [e_charge] + [matrix._charges[o] for o in matrix._order[0]]
    order_v = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]
    flows_v = [True] + [matrix._flows[o] for o in matrix._order[0]]

    all_v_blocks = np.concatenate(
        [v_blocks[e_labels[n]][e_degens[n], :] for n in range(num_kept)])
    fin_shape = [*m_shape[:pivot], num_kept]

    V = BlockSparseTensor(
        all_v_blocks,
        charges=charges_v,
        flows=flows_v,
        order=order_v,
        check_consistency=False).transpose().reshape(fin_shape)

    return E, V
Code Example #17
    def eigs(
            self,  #pylint: disable=arguments-differ
            A: Callable,
            args: Optional[List] = None,
            initial_state: Optional[Tensor] = None,
            shape: Optional[Tuple[Index, ...]] = None,
            dtype: Optional[Type[numpy.number]] = None,
            num_krylov_vecs: int = 50,
            numeig: int = 6,
            tol: float = 1E-8,
            which: Text = 'LR',
            maxiter: Optional[int] = None,
            enable_caching: bool = True) -> Tuple[Tensor, List]:
        """
    Arnoldi method for finding the lowest eigenvector-eigenvalue pairs
    of a linear operator `A`.
    If no `initial_state` is provided then `shape`  and `dtype` are required
    so that a suitable initial state can be randomly generated.
    This is a wrapper for scipy.sparse.linalg.eigs which only supports
    a subset of the arguments of scipy.sparse.linalg.eigs.
    Note: read notes for `enable_caching` carefully.

    Args:
      A: A (sparse) implementation of a linear operator
      args: A list of arguments to `A`.  `A` will be called as
        `res = A(initial_state, *args)`.
      initial_state: An initial vector for the algorithm. If `None`,
        a random initial `Tensor` is created using the `numpy.random.randn`
        method.
      shape: The shape of the input-dimension of `A`.
      dtype: The dtype of the input `A`. If both no `initial_state` is provided,
        a random initial state with shape `shape` and dtype `dtype` is created.
      num_krylov_vecs: The number of iterations (number of krylov vectors).
      numeig: The number of eigenvector-eigenvalue pairs to be computed.
      tol: The desired precision of the eigenvalues.
      which : ['LM' | 'SM' | 'LR' | 'SR' | 'LI']
        Which `k` eigenvectors and eigenvalues to find:
            'LM' : largest magnitude
            'SM' : smallest magnitude
            'LR' : largest real part
            'SR' : smallest real part
            'LI' : largest imaginary part
      maxiter: The maximum number of iterations.
      enable_caching: If `True`, block-data during calls to `matvec` are cached
        for later reuse. Note: usually it is safe to enable caching, unless
        `matvec` uses matrix decompositions like SVD, QR, eigh, eig or similar.
        In this case, if one does a large number of Krylov steps, this can lead
        to memory clutter and/or OOM errors.

    Returns:
       `np.ndarray`: An array of `numeig` lowest eigenvalues
       `list`: A list of `numeig` lowest eigenvectors
    """

        if args is None:
            args = []

        if which in ('SI', 'LI'):
            raise ValueError(f'which = {which} is currently not supported.')

        if numeig + 1 >= num_krylov_vecs:
            raise ValueError("`num_krylov_vecs` > `numeig + 1` required")

        if initial_state is None:
            if (shape is None) or (dtype is None):
                raise ValueError(
                    "if no `initial_state` is passed, then `shape` and "
                    "`dtype` have to be provided")
            initial_state = self.randn(shape, dtype)

        if not isinstance(initial_state, BlockSparseTensor):
            raise TypeError("Expected a `BlockSparseTensor`. Got {}".format(
                type(initial_state)))

        initial_state.contiguous(inplace=True)
        dim = len(initial_state.data)

        def matvec(vector):
            tmp.data = vector
            res = A(tmp, *args)
            res.contiguous(inplace=True)
            return res.data

        tmp = BlockSparseTensor(numpy.empty(0, dtype=initial_state.dtype),
                                initial_state._charges,
                                initial_state._flows,
                                check_consistency=False)
        lop = sp.sparse.linalg.LinearOperator(dtype=initial_state.dtype,
                                              shape=(dim, dim),
                                              matvec=matvec)

        former_caching_status = self.bs.get_caching_status()
        self.bs.set_caching_status(enable_caching)
        if enable_caching:
            cache_was_empty = self.bs.get_cacher().is_empty
        try:
            eta, U = sp.sparse.linalg.eigs(A=lop,
                                           k=numeig,
                                           which=which,
                                           v0=initial_state.data,
                                           ncv=num_krylov_vecs,
                                           tol=tol,
                                           maxiter=maxiter)
        finally:
            #set caching status back to what it was
            self.bs.set_caching_status(former_caching_status)
            if enable_caching and cache_was_empty:
                self.bs.clear_cache()

        eVs = [
            BlockSparseTensor(U[:, n],
                              initial_state._charges,
                              initial_state._flows,
                              check_consistency=False) for n in range(numeig)
        ]

        return eta, eVs
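
A closing sketch for `eigs`, mirroring the `gmres` example: the operator is a tensor contraction, and the method is assumed to live on TensorNetwork's "symmetric" backend (the backend name and import paths are assumptions).

# Usage sketch (backend name and import paths are assumptions).
import numpy as np
from tensornetwork.backends.backend_factory import get_backend
from tensornetwork.block_sparse import (U1Charge, Index, BlockSparseTensor,
                                        tensordot)

backend = get_backend("symmetric")
c = U1Charge.random(dimension=60, minval=-1, maxval=1)
H = BlockSparseTensor.random([Index(c, False), Index(c, True)],
                             dtype=np.float64)
x0 = BlockSparseTensor.random([Index(c, False)], dtype=np.float64)

def A(x, mat):
    # linear operator: contract the matrix with the vector
    return tensordot(mat, x, ([1], [0]))

eta, eVs = backend.eigs(A, args=[H], initial_state=x0,
                        num_krylov_vecs=10, numeig=1, which='LR')
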