Example #1
def test_cache():
    D = 10
    mpsinds = [
        Index(U1Charge(np.random.randint(-5, 5, D, dtype=np.int16)), False),
        Index(U1Charge(np.random.randint(-5, 5, D, dtype=np.int16)), False),
        Index(U1Charge(np.random.randint(-5, 5, D, dtype=np.int16)), False),
        Index(U1Charge(np.random.randint(-5, 5, D, dtype=np.int16)), True)
    ]
    A = BlockSparseTensor.random(mpsinds)
    B = A.conj()
    res_charges = [
        A.flat_charges[2], A.flat_charges[3], B.flat_charges[2],
        B.flat_charges[3]
    ]
    res_flows = [
        A.flat_flows[2], A.flat_flows[3], B.flat_flows[2], B.flat_flows[3]
    ]

    enable_caching()
    ncon([A, B], [[1, 2, -1, -2], [1, 2, -3, -4]], backend='symmetric')
    cacher = get_cacher()
    sA = _to_string(A.flat_charges, A.flat_flows, 2, [2, 3, 0, 1])
    sB = _to_string(B.flat_charges, B.flat_flows, 2, [0, 1, 2, 3])
    sC = _to_string(res_charges, res_flows, 2, [0, 1, 2, 3])
    blocksA, chargesA, dimsA = _find_transposed_diagonal_sparse_blocks(
        A.flat_charges, A.flat_flows, 2, [2, 3, 0, 1])
    blocksB, chargesB, dimsB = _find_transposed_diagonal_sparse_blocks(
        B.flat_charges, B.flat_flows, 2, [0, 1, 2, 3])
    blocksC, chargesC, dimsC = _find_transposed_diagonal_sparse_blocks(
        res_charges, res_flows, 2, [0, 1, 2, 3])

    assert sA in cacher.cache
    assert sB in cacher.cache
    assert sC in cacher.cache

    for b1, b2 in zip(cacher.cache[sA][0], blocksA):
        np.testing.assert_allclose(b1, b2)
    for b1, b2 in zip(cacher.cache[sB][0], blocksB):
        np.testing.assert_allclose(b1, b2)
    for b1, b2 in zip(cacher.cache[sC][0], blocksC):
        np.testing.assert_allclose(b1, b2)
    assert charge_equal(cacher.cache[sA][1], chargesA)
    assert charge_equal(cacher.cache[sB][1], chargesB)
    assert charge_equal(cacher.cache[sC][1], chargesC)

    np.testing.assert_allclose(cacher.cache[sA][2], dimsA)
    np.testing.assert_allclose(cacher.cache[sB][2], dimsB)
    np.testing.assert_allclose(cacher.cache[sC][2], dimsC)
    disable_caching()
    clear_cache()
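Below is a minimal sketch of the caching workflow this test exercises; the `tensornetwork.block_sparse.caching` import path is an assumption based on the names used above.

import numpy as np
from tensornetwork import ncon
from tensornetwork.block_sparse import U1Charge, Index, BlockSparseTensor
# assumed module path for the caching helpers used in the test above
from tensornetwork.block_sparse.caching import (enable_caching,
                                                disable_caching, clear_cache,
                                                get_cacher)

c = U1Charge(np.random.randint(-2, 3, 10, dtype=np.int16))
A = BlockSparseTensor.random([Index(c, False), Index(c, True)])

enable_caching()  # from here on, block-decomposition lookups are memoized
ncon([A, A.conj()], [[1, -1], [1, -2]], backend='symmetric')
print(len(get_cacher().cache))  # number of cached block decompositions
disable_caching()
clear_cache()  # drop the cached entries once they are no longer needed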
Example #2
def inv(matrix: BlockSparseTensor) -> BlockSparseTensor:
  """
  Compute the matrix inverse of `matrix`.
  Returns:
    BlockSparseTensor: The inverse of `matrix`.
  """
  if matrix.ndim != 2:
    raise ValueError("`inv` can only be taken for matrices, "
                     "found tensor.ndim={}".format(matrix.ndim))
  flat_charges = matrix._charges
  flat_flows = matrix._flows
  flat_order = matrix.flat_order
  tr_partition = len(matrix._order[0])
  blocks, _, shapes = _find_transposed_diagonal_sparse_blocks(
      flat_charges, flat_flows, tr_partition, flat_order)

  data = np.empty(np.sum(np.prod(shapes, axis=0)), dtype=matrix.dtype)
  for n, block in enumerate(blocks):
    data[block] = np.ravel(
        np.linalg.inv(np.reshape(matrix.data[block], shapes[:, n])).T)

  return BlockSparseTensor(
      data=data,
      charges=matrix._charges,
      flows=np.logical_not(matrix._flows),
      order=matrix._order,
      check_consistency=False).transpose((1, 0))  #pytype: disable=bad-return-type
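A short usage sketch for `inv`. The import paths and the `todense` helper are assumptions; equal charges with opposite flows on the two legs give square, invertible blocks.

import numpy as np
from tensornetwork.block_sparse import (U1Charge, Index, BlockSparseTensor,
                                        tensordot)
from tensornetwork.block_sparse.linalg import inv  # assumed module path

c = U1Charge(np.random.randint(-2, 3, 20, dtype=np.int16))
A = BlockSparseTensor.random([Index(c, False), Index(c, True)])
A_inv = inv(A)
# the inverse is computed blockwise, so A @ A_inv densifies to the identity
np.testing.assert_allclose(
    tensordot(A, A_inv, ([1], [0])).todense(), np.eye(20), atol=1e-8)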
Example #3
def pinv(matrix: BlockSparseTensor,
         rcond: Optional[float] = 1E-15,
         hermitian: Optional[bool] = False) -> BlockSparseTensor:
  """
  Compute the Moore-Penrose pseudo inverse of `matrix`.
  Args:
    rcond: Pseudo inverse cutoff.
  Returns:
    BlockSparseTensor: The pseudo inverse of `matrix`.
  """
  if matrix.ndim != 2:
    raise ValueError("`pinv` can only be taken for matrices, "
                     "found tensor.ndim={}".format(matrix.ndim))

  flat_charges = matrix._charges
  flat_flows = matrix._flows
  flat_order = matrix.flat_order
  tr_partition = len(matrix._order[0])
  blocks, _, shapes = _find_transposed_diagonal_sparse_blocks(
      flat_charges, flat_flows, tr_partition, flat_order)

  data = np.empty(np.sum(np.prod(shapes, axis=0)), dtype=matrix.dtype)
  for n, block in enumerate(blocks):
    data[block] = np.ravel(
        np.linalg.pinv(
            np.reshape(matrix.data[block], shapes[:, n]),
            rcond=rcond,
            hermitian=hermitian).T)

  return BlockSparseTensor(
      data=data,
      charges=matrix._charges,
      flows=np.logical_not(matrix._flows),
      order=matrix._order,
      check_consistency=False).transpose((1, 0))  #pytype: disable=bad-return-type
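A usage sketch for `pinv` on a rectangular block-sparse matrix; import paths are assumptions.

import numpy as np
from tensornetwork.block_sparse import (U1Charge, Index, BlockSparseTensor,
                                        tensordot)
from tensornetwork.block_sparse.linalg import pinv  # assumed module path

r = U1Charge(np.random.randint(-2, 3, 20, dtype=np.int16))
c = U1Charge(np.random.randint(-2, 3, 12, dtype=np.int16))
A = BlockSparseTensor.random([Index(r, False), Index(c, True)])
A_pinv = pinv(A, rcond=1E-15)
# Moore-Penrose property A @ A+ @ A == A, checked on the dense level
B = tensordot(tensordot(A, A_pinv, ([1], [0])), A, ([1], [0]))
np.testing.assert_allclose(B.todense(), A.todense(), atol=1e-8)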
Example #4
def eig(matrix: BlockSparseTensor) -> Tuple[ChargeArray, BlockSparseTensor]:
    """
  Compute the eigen decomposition of an `M` by `M` matrix `matrix`.
  Args:
    matrix: A matrix (i.e. a rank-2 tensor) of type  `BlockSparseTensor`

  Returns:
    (ChargeArray,BlockSparseTensor): The eigenvalues and eigenvectors

  """
    if matrix.ndim != 2:
        raise NotImplementedError(
            "eig currently supports only rank-2 tensors.")

    flat_charges = matrix._charges
    flat_flows = matrix.flat_flows
    flat_order = matrix.flat_order
    tr_partition = len(matrix._order[0])
    blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
        flat_charges, flat_flows, tr_partition, flat_order)

    eigvals = []
    v_blocks = []
    for n, block in enumerate(blocks):
        e, v = np.linalg.eig(np.reshape(matrix.data[block], shapes[:, n]))
        eigvals.append(e)
        v_blocks.append(v)
    tmp_labels = [
        np.full(len(eigvals[n]), fill_value=n, dtype=np.int16)
        for n in range(len(eigvals))
    ]
    if len(tmp_labels) > 0:
        eigvalscharge_labels = np.concatenate(tmp_labels)
    else:
        eigvalscharge_labels = np.empty(0, dtype=np.int16)

    eigvalscharge = charges[eigvalscharge_labels]

    if len(eigvals) > 0:
        all_eigvals = np.concatenate(eigvals)
    else:
        all_eigvals = np.empty(0, dtype=get_real_dtype(matrix.dtype))

    E = ChargeArray(all_eigvals, [eigvalscharge], [False])
    charges_v = [eigvalscharge
                 ] + [matrix._charges[o] for o in matrix._order[0]]
    order_v = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]
    flows_v = [True] + [matrix._flows[o] for o in matrix._order[0]]
    if len(v_blocks) > 0:
        all_v_blocks = np.concatenate([np.ravel(v.T) for v in v_blocks])
    else:
        all_v_blocks = np.empty(0, dtype=matrix.dtype)

    V = BlockSparseTensor(all_v_blocks,
                          charges=charges_v,
                          flows=flows_v,
                          order=order_v,
                          check_consistency=False).transpose()

    return E, V  #pytype: disable=bad-return-type
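A usage sketch for `eig`; the matrix needs square diagonal blocks, i.e. the same charge on both legs with opposite flows. Import paths are assumptions.

import numpy as np
from tensornetwork.block_sparse import U1Charge, Index, BlockSparseTensor
from tensornetwork.block_sparse.linalg import eig  # assumed module path

c = U1Charge(np.random.randint(-2, 3, 16, dtype=np.int16))
A = BlockSparseTensor.random([Index(c, False), Index(c, True)])
E, V = eig(A)
# E is a ChargeArray of (generally complex) eigenvalues, one entry per
# eigenvalue of each diagonal block; V holds the eigenvectors.
print(E.shape, V.shape)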
Example #5
    def contiguous(self,
                   permutation: Optional[Union[Tuple, List,
                                               np.ndarray]] = None,
                   inplace: Optional[bool] = False) -> Any:
        """
    Transpose the tensor data in place such that the linear order 
    of the elements in `BlockSparseTensor.data` corresponds to the 
    current order of tensor indices. 
    Consider a tensor with current order given by `_order=[[1,2],[3],[0]]`,
    i.e. `data` was initialized according to order [0,1,2,3], and the tensor
    has since been reshaped and transposed. The linear oder of `data` does not
    match the desired order [1,2,3,0] of the tensor. `contiguous` fixes this
    by permuting `data` into this order, transposing `_charges` and `_flows`,
    and changing `_order` to `[[0,1],[2],[3]]`.
    Args:
      permutation: An optional alternative order to be used to transposed the 
        tensor. If `None` defaults to `BlockSparseTensor.permutation`.
    """
        flat_charges = self._charges
        flat_flows = self._flows
        if permutation is None:
            permutation = self.flat_order

        if np.array_equal(permutation, np.arange(len(permutation))):
            return self
        tr_partition = _find_best_partition(
            [flat_charges[n].dim for n in permutation])

        tr_sparse_blocks, tr_charges, _ = _find_transposed_diagonal_sparse_blocks(
            flat_charges, flat_flows, tr_partition, permutation)

        sparse_blocks, charges, _ = _find_diagonal_sparse_blocks(
            [flat_charges[n] for n in permutation],
            [flat_flows[n] for n in permutation], tr_partition)
        data = np.empty(len(self.data), dtype=self.dtype)
        for n, sparse_block in enumerate(sparse_blocks):
            ind = np.nonzero(tr_charges == charges[n])[0][0]
            perm = tr_sparse_blocks[ind]
            data[sparse_block] = self.data[perm]

        _, inds = np.unique(permutation, return_index=True)
        new_flat_order = inds[self.flat_order]
        tmp = np.append(0, np.cumsum([len(o) for o in self._order]))
        order = [
            list(new_flat_order[tmp[n]:tmp[n + 1]])
            for n in range(len(tmp) - 1)
        ]
        charges = [self._charges[o] for o in permutation]
        flows = [self._flows[o] for o in permutation]
        if not inplace:
            return BlockSparseTensor(data,
                                     charges=charges,
                                     flows=flows,
                                     order=order,
                                     check_consistency=False)
        self.data = data
        self._order = order
        self._charges = charges
        self._flows = flows
        return self
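A small sketch of how `contiguous` is typically used: `transpose` only changes the index bookkeeping, while `contiguous` actually permutes the data buffer. Import paths are assumptions.

import numpy as np
from tensornetwork.block_sparse import U1Charge, Index, BlockSparseTensor

charges = [U1Charge(np.random.randint(-2, 3, 8, dtype=np.int16))
           for _ in range(3)]
A = BlockSparseTensor.random([Index(charges[0], False),
                              Index(charges[1], False),
                              Index(charges[2], True)])
B = A.transpose((2, 0, 1))  # lazy: only the `_order` bookkeeping changes
C = B.contiguous()          # permutes `data` to match the new index order
np.testing.assert_allclose(B.todense(), C.todense())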
Example #6
def tensordot(
    tensor1: BlockSparseTensor,
    tensor2: BlockSparseTensor,
    axes: Optional[Union[Sequence[Sequence[int]],
                         int]] = 2) -> BlockSparseTensor:
    """
  Contract two `BlockSparseTensor`s along `axes`.
  Args:
    tensor1: First tensor.
    tensor2: Second tensor.
    axes: The axes to contract.
  Returns:
      BlockSparseTensor: The result of the tensor contraction.
  """
    #process scalar input for `axes`
    if isinstance(axes, (np.integer, int)):
        axes = [
            np.arange(tensor1.ndim - axes, tensor1.ndim, dtype=np.int16),
            np.arange(0, axes, dtype=np.int16)
        ]
    elif isinstance(axes[0], (np.integer, int)):
        if len(axes) > 1:
            raise ValueError(
                "invalid input `axes = {}` to tensordot".format(axes))
        axes = [np.array(axes, dtype=np.int16), np.array(axes, dtype=np.int16)]
    axes1 = axes[0]
    axes2 = axes[1]

    if len(axes1) != len(axes2):
        raise ValueError(
            "`axes1 = {}` and `axes2 = {}` have to be of same length. ".format(
                axes1, axes2))

    if len(axes1) > len(tensor1.shape):
        raise ValueError(
            "`axes1 = {}` is incompatible with `tensor1.shape = {}`.".format(
                axes1, tensor1.shape))

    if len(axes2) > len(tensor2.shape):
        raise ValueError(
            "`axes2 = {}` is incompatible with `tensor2.shape = {}`.".format(
                axes2, tensor2.shape))

    if not np.all(np.unique(axes1) == np.sort(axes1)):
        raise ValueError(
            "Some values in axes[0] = {} appear more than once!".format(axes1))
    if not np.all(np.unique(axes2) == np.sort(axes2)):
        raise ValueError(
            "Some values in axes[1] = {} appear more than once!".format(axes2))

    #special case outer product
    if len(axes1) == 0:
        return outerproduct(tensor1, tensor2)

    #more checks
    if max(axes1) >= len(tensor1.shape):
        raise ValueError(
            "rank of `tensor1` is smaller than `max(axes1) = {}`.".format(
                max(axes1)))

    if max(axes2) >= len(tensor2.shape):
        raise ValueError(
            "rank of `tensor2` is smaller than `max(axes2) = {}`.".format(
                max(axes2)))

    contr_flows_1 = []
    contr_flows_2 = []
    contr_charges_1 = []
    contr_charges_2 = []
    for a in axes1:
        contr_flows_1.extend(tensor1._flows[tensor1._order[a]])
        contr_charges_1.extend(
            [tensor1._charges[n] for n in tensor1._order[a]])
    for a in axes2:
        contr_flows_2.extend(tensor2._flows[tensor2._order[a]])
        contr_charges_2.extend(
            [tensor2._charges[n] for n in tensor2._order[a]])

    if len(contr_charges_2) != len(contr_charges_1):
        raise ValueError(
            "`axes1 = {}` and `axes2 = {}` have incompatible elementary"
            " shapes {} and {}".format(axes1, axes2,
                                       [e.dim for e in contr_charges_1],
                                       [e.dim for e in contr_charges_2]))
    if not np.all(
            np.asarray(contr_flows_1) == np.logical_not(
                np.asarray(contr_flows_2))):

        raise ValueError(
            "`axes1 = {}` and `axes2 = {}` have incompatible elementary"
            " flows {} and {}".format(axes1, axes2, contr_flows_1,
                                      contr_flows_2))
    charge_check = [
        charge_equal(c1, c2)
        for c1, c2 in zip(contr_charges_1, contr_charges_2)
    ]
    if not np.all(charge_check):
        inds = np.nonzero(np.logical_not(charge_check))[0]
        raise ValueError(
            "`axes = {}` of tensor1 and `axes = {}` of tensor2 have incompatible charges"
            " {} and {}".format(
                np.array(axes1)[inds],
                np.array(axes2)[inds], [contr_charges_1[i] for i in inds],
                [contr_charges_2[i] for i in inds]))

    #checks finished

    #special case inner product
    if (len(axes1) == tensor1.ndim) and (len(axes2) == tensor2.ndim):
        t1 = tensor1.transpose(axes1).contiguous()
        t2 = tensor2.transpose(axes2).contiguous()
        data = np.dot(t1.data, t2.data)
        charge = tensor1._charges[0]
        final_charge = charge.__new__(type(charge))

        final_charge.__init__(np.empty((charge.num_symmetries, 0),
                                       dtype=np.int16),
                              charge_labels=np.empty(0, dtype=np.int16),
                              charge_types=charge.charge_types)
        return BlockSparseTensor(data=data,
                                 charges=[final_charge],
                                 flows=[False],
                                 order=[[0]],
                                 check_consistency=False)

    #in all other cases we perform a regular tensordot
    free_axes1 = sorted(set(np.arange(tensor1.ndim)) - set(axes1))
    free_axes2 = sorted(set(np.arange(tensor2.ndim)) - set(axes2))

    new_order1 = [tensor1._order[n]
                  for n in free_axes1] + [tensor1._order[n] for n in axes1]
    new_order2 = [tensor2._order[n]
                  for n in axes2] + [tensor2._order[n] for n in free_axes2]

    flat_order_1 = flatten(new_order1)
    flat_order_2 = flatten(new_order2)

    flat_charges_1, flat_flows_1 = tensor1._charges, tensor1.flat_flows
    flat_charges_2, flat_flows_2 = tensor2._charges, tensor2.flat_flows

    left_charges = []
    right_charges = []
    left_flows = []
    right_flows = []
    left_order = []
    right_order = []

    s = 0
    for n in free_axes1:
        left_charges.extend([tensor1._charges[o] for o in tensor1._order[n]])
        left_order.append(list(np.arange(s, s + len(tensor1._order[n]))))
        s += len(tensor1._order[n])
        left_flows.extend([tensor1._flows[o] for o in tensor1._order[n]])

    s = 0
    for n in free_axes2:
        right_charges.extend([tensor2._charges[o] for o in tensor2._order[n]])
        right_order.append(
            list(len(left_charges) + np.arange(s, s + len(tensor2._order[n]))))
        s += len(tensor2._order[n])
        right_flows.extend([tensor2._flows[o] for o in tensor2._order[n]])

    tr_sparse_blocks_1, charges1, shapes_1 = _find_transposed_diagonal_sparse_blocks(
        flat_charges_1, flat_flows_1, len(left_charges), flat_order_1)

    tr_sparse_blocks_2, charges2, shapes_2 = _find_transposed_diagonal_sparse_blocks(
        flat_charges_2, flat_flows_2, len(contr_charges_2), flat_order_2)

    common_charges, label_to_common_1, label_to_common_2 = intersect(
        charges1.unique_charges,
        charges2.unique_charges,
        axis=1,
        return_indices=True)

    #Note: `cs` may contain charges that are not present in `common_charges`
    charges = left_charges + right_charges
    flows = left_flows + right_flows

    sparse_blocks, cs, _ = _find_diagonal_sparse_blocks(
        charges, flows, len(left_charges))
    num_nonzero_elements = np.int64(np.sum([len(v) for v in sparse_blocks]))

    #Note that empty is not a viable choice here.
    data = np.zeros(num_nonzero_elements,
                    dtype=np.result_type(tensor1.dtype, tensor2.dtype))

    label_to_common_final = intersect(cs.unique_charges,
                                      common_charges,
                                      axis=1,
                                      return_indices=True)[1]

    for n in range(common_charges.shape[1]):
        n1 = label_to_common_1[n]
        n2 = label_to_common_2[n]
        nf = label_to_common_final[n]
        data[sparse_blocks[nf].ravel()] = np.ravel(
            np.matmul(
                tensor1.data[tr_sparse_blocks_1[n1].reshape(shapes_1[:, n1])],
                tensor2.data[tr_sparse_blocks_2[n2].reshape(shapes_2[:, n2])]))

    res = BlockSparseTensor(data=data,
                            charges=charges,
                            flows=flows,
                            order=left_order + right_order,
                            check_consistency=False)
    return res
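A usage sketch for `tensordot`, checked against the dense contraction; import paths and the `todense` helper are assumptions.

import numpy as np
from tensornetwork.block_sparse import (U1Charge, Index, BlockSparseTensor,
                                        tensordot)

a = U1Charge(np.random.randint(-2, 3, 10, dtype=np.int16))
b = U1Charge(np.random.randint(-2, 3, 8, dtype=np.int16))
t1 = BlockSparseTensor.random([Index(a, False), Index(b, True)])
t2 = BlockSparseTensor.random([Index(b, False), Index(a, True)])
res = tensordot(t1, t2, ([1], [0]))  # contract leg 1 of t1 with leg 0 of t2
np.testing.assert_allclose(res.todense(),
                           t1.todense() @ t2.todense(), atol=1e-12)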
Example #7
def svd_decomposition(
        bt,
        tensor: BlockSparseTensor,
        split_axis: int,
        max_singular_values: Optional[int] = None,
        max_truncation_error: Optional[float] = None,
        relative: Optional[bool] = False
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """
  Computes the singular value decomposition (SVD) of a tensor.
  See tensornetwork.backends.tensorflow.decompositions for details.
  """

    left_dims = tensor.shape[:split_axis]
    right_dims = tensor.shape[split_axis:]

    matrix = bt.reshape(tensor, [np.prod(left_dims), np.prod(right_dims)])

    flat_charges = matrix._charges
    flat_flows = matrix.flat_flows
    flat_order = matrix.flat_order
    tr_partition = len(matrix._order[0])
    blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
        flat_charges, flat_flows, tr_partition, flat_order)

    u_blocks = []
    singvals = []
    v_blocks = []
    for n, b in enumerate(blocks):
        out = np.linalg.svd(np.reshape(matrix.data[b], shapes[:, n]),
                            full_matrices=False,
                            compute_uv=True)
        u_blocks.append(out[0])
        singvals.append(out[1])
        v_blocks.append(out[2])

    orig_num_singvals = np.int64(np.sum([len(s) for s in singvals]))
    discarded_singvals = np.zeros(0, dtype=get_real_dtype(tensor.dtype))
    if (max_singular_values
            is not None) and (max_singular_values >= orig_num_singvals):
        max_singular_values = None

    if (max_truncation_error is not None) or (max_singular_values is not None):
        max_D = np.max([len(s) for s in singvals]) if len(singvals) > 0 else 0

        #extend singvals of all blocks into a matrix by padding each block with 0
        if len(singvals) > 0:
            extended_singvals = np.stack([
                np.append(s, np.zeros(max_D - len(s), dtype=s.dtype))
                for s in singvals
            ],
                                         axis=1)
        else:
            extended_singvals = np.empty((0, 0),
                                         dtype=get_real_dtype(tensor.dtype))

        extended_flat_singvals = np.ravel(extended_singvals)
        #sort singular values
        inds = np.argsort(extended_flat_singvals, kind='stable')
        discarded_inds = np.zeros(0, dtype=SIZE_T)
        if inds.shape[0] > 0:
            maxind = inds[-1]
        else:
            maxind = 0
        if max_truncation_error is not None:
            if relative and (len(singvals) > 0):
                max_truncation_error = max_truncation_error * np.max(
                    [s[0] for s in singvals])

            kept_inds_mask = np.sqrt(
                np.cumsum(np.square(
                    extended_flat_singvals[inds]))) > max_truncation_error
            trunc_inds_mask = np.logical_not(kept_inds_mask)
            discarded_inds = inds[trunc_inds_mask]
            inds = inds[kept_inds_mask]
        if max_singular_values is not None:
            #if the original number of non-zero singular values
            #is smaller than `max_singular_values` we need to reset
            #`max_singular_values` (we were filling in 0.0 into singular
            #value blocks to facilitate truncation steps, thus we could end up
            #with more singular values than originally there).
            if max_singular_values > orig_num_singvals:
                max_singular_values = orig_num_singvals
            if max_singular_values < len(inds):
                discarded_inds = np.append(discarded_inds,
                                           inds[:(-1) * max_singular_values])
                inds = inds[(-1) * max_singular_values::]

        if len(inds) == 0:
            #special case of truncation to 0 dimension;
            warnings.warn("svd_decomposition truncated to 0 dimensions. "
                          "Adjusting to `max_singular_values = 1`")
            inds = np.asarray([maxind])

        if extended_singvals.shape[1] > 0:
            #pylint: disable=no-member
            keep = np.divmod(inds, extended_singvals.shape[1])
        else:
            keep = (np.zeros(1, dtype=SIZE_T), np.zeros(1, dtype=SIZE_T))
        newsingvals = [
            extended_singvals[keep[0][keep[1] == n],
                              keep[1][keep[1] == n]][::-1]
            for n in range(extended_singvals.shape[1])
        ]

        discarded_singvals = extended_flat_singvals[discarded_inds]
        singvals = newsingvals
    if len(singvals) > 0:
        left_singval_charge_labels = np.concatenate([
            np.full(singvals[n].shape[0], fill_value=n, dtype=np.int16)
            for n in range(len(singvals))
        ])
        all_singvals = np.concatenate(singvals)
        #define the new charges on the two central bonds
        left_charge_labels = np.concatenate([
            np.full(len(singvals[n]), fill_value=n, dtype=np.int16)
            for n in range(len(u_blocks))
        ])
        right_charge_labels = np.concatenate([
            np.full(len(singvals[n]), fill_value=n, dtype=np.int16)
            for n in range(len(v_blocks))
        ])
        all_ublocks = np.concatenate([
            np.ravel(np.transpose(u_blocks[n][:, 0:len(singvals[n])]))
            for n in range(len(u_blocks))
        ])
        all_vblocks = np.concatenate([
            np.ravel(v_blocks[n][0:len(singvals[n]), :])
            for n in range(len(v_blocks))
        ])
    else:
        left_singval_charge_labels = np.empty(0, dtype=np.int16)
        all_singvals = np.empty(0, dtype=get_real_dtype(tensor.dtype))
        left_charge_labels = np.empty(0, dtype=np.int16)
        right_charge_labels = np.empty(0, dtype=np.int16)
        all_ublocks = np.empty(0, dtype=get_real_dtype(tensor.dtype))
        all_vblocks = np.empty(0, dtype=get_real_dtype(tensor.dtype))
    left_singval_charge = charges[left_singval_charge_labels]
    S = ChargeArray(all_singvals, [left_singval_charge], [False])

    new_left_charge = charges[left_charge_labels]
    new_right_charge = charges[right_charge_labels]

    #get the indices of the new tensors U,S and V
    charges_u = [new_left_charge
                 ] + [matrix._charges[o] for o in matrix._order[0]]
    order_u = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]
    flows_u = [True] + [matrix._flows[o] for o in matrix._order[0]]
    charges_v = [new_right_charge
                 ] + [matrix._charges[o] for o in matrix._order[1]]
    flows_v = [False] + [matrix._flows[o] for o in matrix._order[1]]
    order_v = [[0]] + [list(np.arange(1, len(matrix._order[1]) + 1))]

    #We fill in data into the transposed U
    U = BlockSparseTensor(all_ublocks,
                          charges=charges_u,
                          flows=flows_u,
                          order=order_u,
                          check_consistency=False).transpose((1, 0))

    V = BlockSparseTensor(all_vblocks,
                          charges=charges_v,
                          flows=flows_v,
                          order=order_v,
                          check_consistency=False)
    left_shape = left_dims + (S.shape[0], )
    right_shape = (S.shape[0], ) + right_dims
    return U.reshape(left_shape), S, V.reshape(
        right_shape), discarded_singvals[discarded_singvals > 0.0]
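A hypothetical call sketch for `svd_decomposition` as defined above. The `bt` argument is assumed to be the `tensornetwork.block_sparse` module, which provides the `reshape` used internally; other import paths are assumptions as well.

import numpy as np
import tensornetwork.block_sparse as bs  # assumed to provide `reshape`
from tensornetwork.block_sparse import U1Charge, Index, BlockSparseTensor

charges = [U1Charge(np.random.randint(-2, 3, 8, dtype=np.int16))
           for _ in range(3)]
T = BlockSparseTensor.random([Index(charges[0], False),
                              Index(charges[1], False),
                              Index(charges[2], True)])
# split legs (0,) | (1, 2) and keep at most 4 singular values
U, S, V, discarded = svd_decomposition(bs, T, split_axis=1,
                                       max_singular_values=4)
print(U.shape, S.shape, V.shape, discarded.shape)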
Example #8
def test_find_transposed_diagonal_sparse_blocks(num_charges, order, D):
    order = list(order)
    num_legs = len(order)
    np.random.seed(10)
    np_charges = [
        np.random.randint(-5, 5, (num_charges, D), dtype=np.int16)
        for _ in range(num_legs)
    ]
    tr_charge_list = []
    charge_list = []
    for c in range(num_charges):

        tr_charge_list.append(
            fuse_ndarrays(
                [np_charges[order[n]][c, :] for n in range(num_legs)]))
        charge_list.append(
            fuse_ndarrays([np_charges[n][c, :] for n in range(num_legs)]))

    tr_fused = np.stack(tr_charge_list, axis=0)
    fused = np.stack(charge_list, axis=0)

    dims = [c.shape[1] for c in np_charges]
    strides = _get_strides(dims)
    transposed_linear_positions = fuse_stride_arrays(
        dims, [strides[o] for o in order])
    left_charges = np.stack([
        fuse_ndarrays(
            [np_charges[order[n]][c, :] for n in range(num_legs // 2)])
        for c in range(num_charges)
    ],
                            axis=0)
    right_charges = np.stack([
        fuse_ndarrays([
            np_charges[order[n]][c, :] for n in range(num_legs // 2, num_legs)
        ]) for c in range(num_charges)
    ],
                             axis=0)
    #pylint: disable=no-member
    mask = np.logical_and.reduce(fused.T == np.zeros((1, num_charges)), axis=1)
    nz = np.nonzero(mask)[0]
    dense_to_sparse = np.empty(len(mask), dtype=np.int64)
    dense_to_sparse[mask] = np.arange(len(nz))
    #pylint: disable=no-member
    tr_mask = np.logical_and.reduce(tr_fused.T == np.zeros((1, num_charges)),
                                    axis=1)
    tr_nz = np.nonzero(tr_mask)[0]
    tr_linear_locs = transposed_linear_positions[tr_nz]
    # pylint: disable=no-member
    left_inds, _ = np.divmod(tr_nz, right_charges.shape[1])
    left = left_charges[:, left_inds]
    unique_left = np.unique(left, axis=1)
    blocks = []
    for n in range(unique_left.shape[1]):
        ul = unique_left[:, n][None, :]
        #pylint: disable=no-member
        blocks.append(dense_to_sparse[tr_linear_locs[np.nonzero(
            np.logical_and.reduce(left.T == ul, axis=1))[0]]])

    charges = [
        BaseCharge(c, charge_types=[U1Charge] * num_charges)
        for c in np_charges
    ]
    flows = [False] * num_legs
    bs, cs, ss = _find_transposed_diagonal_sparse_blocks(
        charges, flows, tr_partition=num_legs // 2, order=order)
    np.testing.assert_allclose(cs.charges, unique_left)
    for b1, b2 in zip(blocks, bs):
        assert np.all(b1 == b2)

    assert np.sum(np.prod(ss, axis=0)) == np.sum([len(b) for b in bs])
    np.testing.assert_allclose(unique_left, cs.charges)
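For orientation, a direct call to the helper this test validates. It is private API; the `blocksparse_utils` import path is an assumption.

import numpy as np
from tensornetwork.block_sparse import U1Charge
from tensornetwork.block_sparse.blocksparse_utils import (  # assumed path
    _find_transposed_diagonal_sparse_blocks)

charges = [U1Charge(np.random.randint(-2, 3, 6, dtype=np.int16))
           for _ in range(4)]
flows = [False, False, True, True]
blocks, block_charges, shapes = _find_transposed_diagonal_sparse_blocks(
    charges, flows, tr_partition=2, order=[2, 3, 0, 1])
# blocks[i] indexes the sparse data of the transposed tensor belonging to
# block i; shapes[:, i] is its (rows, cols) shape.
print(len(blocks), shapes.shape)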
Example #9
def diag(tensor: ChargeArray) -> Any:
  """
  Return a diagonal `BlockSparseTensor` from a `ChargeArray`, or
  return the diagonal of a `BlockSparseTensor` as a `ChargeArray`.
  For input of type `BlockSparseTensor`:
    The full diagonal is obtained from finding the diagonal blocks of the
    `BlockSparseTensor`, taking the diagonal elements of those and packing
    the result into a ChargeArray. Note that the computed diagonal elements
    are usually different from the diagonal elements obtained from
    converting the `BlockSparseTensor` to dense storage and taking the diagonal.
    Note that the flow of the resulting 1d `ChargeArray` object is `False`.
  Args:
    tensor: A `ChargeArray`.
  Returns:
    ChargeArray: A 1d `ChargeArray` containing the diagonal of `tensor`,
      or a diagonal matrix of type `BlockSparseTensor` containing `tensor`
      on its diagonal.

  """
  if tensor.ndim > 2:
    raise ValueError("`diag` currently only implemented for matrices, "
                     "found `ndim={}`".format(tensor.ndim))
  if not isinstance(tensor, BlockSparseTensor):
    if tensor.ndim > 1:
      raise ValueError(
          "`diag` currently only implemented for `ChargeArray` with ndim=1, "
          "found `ndim={}`".format(tensor.ndim))
    flat_charges = tensor._charges + tensor._charges
    flat_flows = list(tensor._flows) + list(np.logical_not(tensor._flows))
    flat_order = list(tensor.flat_order) + list(
        np.asarray(tensor.flat_order) + len(tensor._charges))
    tr_partition = len(tensor._order[0])
    blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
        flat_charges, flat_flows, tr_partition, flat_order)
    data = np.zeros(
        np.int64(np.sum(np.prod(shapes, axis=0))), dtype=tensor.dtype)
    lookup, unique, labels = compute_sparse_lookup(tensor._charges,
                                                   tensor._flows, charges)
    for n, block in enumerate(blocks):
      label = labels[np.nonzero(unique == charges[n])[0][0]]
      data[block] = np.ravel(
          np.diag(tensor.data[np.nonzero(lookup == label)[0]]))

    order = [
        tensor._order[0],
        list(np.asarray(tensor._order[0]) + len(tensor._charges))
    ]
    new_charges = [tensor._charges[0].copy(), tensor._charges[0].copy()]
    return BlockSparseTensor(
        data,
        charges=new_charges,
        flows=list(tensor._flows) + list(np.logical_not(tensor._flows)),
        order=order,
        check_consistency=False)

  flat_charges = tensor._charges
  flat_flows = tensor._flows
  flat_order = tensor.flat_order
  tr_partition = len(tensor._order[0])
  sparse_blocks, charges, block_shapes = _find_transposed_diagonal_sparse_blocks(
      flat_charges, flat_flows, tr_partition, flat_order)

  shapes = np.min(block_shapes, axis=0)
  if len(sparse_blocks) > 0:
    data = np.concatenate([
        np.diag(np.reshape(tensor.data[sparse_blocks[n]], block_shapes[:, n]))
        for n in range(len(sparse_blocks))
    ])
    charge_labels = np.concatenate([
        np.full(shapes[n], fill_value=n, dtype=np.int16)
        for n in range(len(sparse_blocks))
    ])

  else:
    data = np.empty(0, dtype=tensor.dtype)
    charge_labels = np.empty(0, dtype=np.int16)
  newcharges = [charges[charge_labels]]
  flows = [False]
  return ChargeArray(data, newcharges, flows)
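A short usage sketch for `diag` in both directions. The import paths, and `ChargeArray` being exposed at package level, are assumptions.

import numpy as np
from tensornetwork.block_sparse import U1Charge, ChargeArray  # assumed names
from tensornetwork.block_sparse.linalg import diag  # assumed module path

c = U1Charge(np.random.randint(-2, 3, 10, dtype=np.int16))
v = ChargeArray(np.random.randn(10), [c], [False])
M = diag(v)  # rank-2 BlockSparseTensor with the entries of `v` on its blocks
w = diag(M)  # back to a 1d ChargeArray holding the block diagonals
print(M.shape, w.shape)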
Example #10
def qr(matrix: BlockSparseTensor, mode: Optional[Text] = 'reduced') -> Any:
  """
  Compute the qr decomposition of an `M` by `N` matrix `matrix`.
  The matrix is factorized into `q*r`, with 
  `q` an orthogonal matrix and `r` an upper triangular matrix.
  Args:
    matrix: A matrix (i.e. a rank-2 tensor) of type  `BlockSparseTensor`
    mode : Can take values {'reduced', 'complete', 'r', 'raw'}.
    If K = min(M, N), then

    * 'reduced'  : returns q, r with dimensions (M, K), (K, N) (default)
    * 'complete' : returns q, r with dimensions (M, M), (M, N)
    * 'r'        : returns r only with dimensions (K, N)

  Returns:
    (BlockSparseTensor,BlockSparseTensor): If mode = `reduced` or `complete`
    BlockSparseTensor: If mode = `r`.
  """
  if mode == 'raw':
    raise NotImplementedError('mode `raw` currently not supported')
  if matrix.ndim != 2:
    raise NotImplementedError("qr currently supports only rank-2 tensors.")

  flat_charges = matrix._charges
  flat_flows = matrix._flows
  flat_order = matrix.flat_order
  tr_partition = len(matrix._order[0])
  blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
      flat_charges, flat_flows, tr_partition, flat_order)

  q_blocks = []
  r_blocks = []
  for n, block in enumerate(blocks):
    out = np.linalg.qr(np.reshape(matrix.data[block], shapes[:, n]), mode)
    if mode in ('reduced', 'complete'):
      q_blocks.append(out[0])
      r_blocks.append(out[1])
    elif mode == 'r':
      r_blocks.append(out)
    else:
      raise ValueError('unknown value {} for input `mode`'.format(mode))

  tmp_r_charge_labels = [
      np.full(r_blocks[n].shape[0], fill_value=n, dtype=np.int16)
      for n in range(len(r_blocks))
  ]
  if len(tmp_r_charge_labels) > 0:
    left_r_charge_labels = np.concatenate(tmp_r_charge_labels)
  else:
    left_r_charge_labels = np.empty(0, dtype=np.int16)

  left_r_charge = charges[left_r_charge_labels]
  charges_r = [left_r_charge] + [matrix._charges[o] for o in matrix._order[1]]
  flows_r = [False] + [matrix._flows[o] for o in matrix._order[1]]
  order_r = [[0]] + [list(np.arange(1, len(matrix._order[1]) + 1))]
  if len(r_blocks) > 0:
    all_r_blocks = np.concatenate([np.ravel(r) for r in r_blocks])
  else:
    all_r_blocks = np.empty(0, dtype=matrix.dtype)
  R = BlockSparseTensor(
      all_r_blocks,
      charges=charges_r,
      flows=flows_r,
      order=order_r,
      check_consistency=False)

  if mode in ('reduced', 'complete'):
    tmp_right_q_charge_labels = [
        np.full(q_blocks[n].shape[1], fill_value=n, dtype=np.int16)
        for n in range(len(q_blocks))
    ]
    if len(tmp_right_q_charge_labels) > 0:
      right_q_charge_labels = np.concatenate(tmp_right_q_charge_labels)
    else:
      right_q_charge_labels = np.empty(0, dtype=np.int16)

    right_q_charge = charges[right_q_charge_labels]
    charges_q = [
        right_q_charge,
    ] + [matrix._charges[o] for o in matrix._order[0]]
    order_q = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]
    flows_q = [True] + [matrix._flows[o] for o in matrix._order[0]]
    if len(q_blocks) > 0:
      all_q_blocks = np.concatenate([np.ravel(q.T) for q in q_blocks])
    else:
      all_q_blocks = np.empty(0, dtype=matrix.dtype)
    return BlockSparseTensor(
        all_q_blocks,
        charges=charges_q,
        flows=flows_q,
        order=order_q,
        check_consistency=False).transpose((1, 0)), R

  return R
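A usage sketch for `qr`, verifying that Q @ R reproduces the matrix on the dense level; import paths are assumptions.

import numpy as np
from tensornetwork.block_sparse import (U1Charge, Index, BlockSparseTensor,
                                        tensordot)
from tensornetwork.block_sparse.linalg import qr  # assumed module path

r = U1Charge(np.random.randint(-2, 3, 20, dtype=np.int16))
c = U1Charge(np.random.randint(-2, 3, 12, dtype=np.int16))
A = BlockSparseTensor.random([Index(r, False), Index(c, True)])
Q, R = qr(A, mode='reduced')
np.testing.assert_allclose(
    tensordot(Q, R, ([1], [0])).todense(), A.todense(), atol=1e-12)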
Example #11
def svd(matrix: BlockSparseTensor,
        full_matrices: Optional[bool] = True,
        compute_uv: Optional[bool] = True,
        hermitian: Optional[bool] = False) -> Any:
  """
  Compute the singular value decomposition of `matrix`.
  The matrix if factorized into `u * s * vh`, with 
  `u` and `vh` the left and right singular vectors of `matrix`,
  and `s` its singular values.
  Args:
    matrix: A matrix (i.e. an order-2 tensor) of type  `BlockSparseTensor`
    full_matrices: If `True`, expand `u` and `v` to square matrices
      If `False` return the "economic" svd, i.e. `u.shape[1]=s.shape[0]`
      and `v.shape[0]=s.shape[1]`
    compute_uv: If `True`, return `u` and `v`.
    hermitian: If `True`, assume hermiticity of `matrix`.
  Returns:
    If `compute_uv` is `True`: Three BlockSparseTensors `U,S,V`.
    If `compute_uv` is `False`: A BlockSparseTensors `S` containing the 
      singular values.
  """

  if matrix.ndim != 2:
    raise NotImplementedError("svd currently supports only tensors of order 2.")

  flat_charges = matrix._charges
  flat_flows = matrix._flows
  flat_order = matrix.flat_order
  tr_partition = len(matrix._order[0])
  blocks, charges, shapes = _find_transposed_diagonal_sparse_blocks(
      flat_charges, flat_flows, tr_partition, flat_order)

  u_blocks = []
  singvals = []
  v_blocks = []
  for n, block in enumerate(blocks):
    out = np.linalg.svd(
        np.reshape(matrix.data[block], shapes[:, n]), full_matrices, compute_uv,
        hermitian)
    if compute_uv:
      u_blocks.append(out[0])
      singvals.append(out[1])
      v_blocks.append(out[2])

    else:
      singvals.append(out)

  tmp_labels = [
      np.full(len(singvals[n]), fill_value=n, dtype=np.int16)
      for n in range(len(singvals))
  ]
  if len(tmp_labels) > 0:
    left_singval_charge_labels = np.concatenate(tmp_labels)
  else:

    left_singval_charge_labels = np.empty(0, dtype=np.int16)
  left_singval_charge = charges[left_singval_charge_labels]
  if len(singvals) > 0:
    all_singvals = np.concatenate(singvals)
  else:
    all_singvals = np.empty(0, dtype=get_real_dtype(matrix.dtype))
  S = ChargeArray(all_singvals, [left_singval_charge], [False])

  if compute_uv:
    #define the new charges on the two central bonds
    tmp_left_labels = [
        np.full(u_blocks[n].shape[1], fill_value=n, dtype=np.int16)
        for n in range(len(u_blocks))
    ]
    if len(tmp_left_labels) > 0:
      left_charge_labels = np.concatenate(tmp_left_labels)
    else:
      left_charge_labels = np.empty(0, dtype=np.int16)

    tmp_right_labels = [
        np.full(v_blocks[n].shape[0], fill_value=n, dtype=np.int16)
        for n in range(len(v_blocks))
    ]
    if len(tmp_right_labels) > 0:
      right_charge_labels = np.concatenate(tmp_right_labels)
    else:
      right_charge_labels = np.empty(0, dtype=np.int16)
    new_left_charge = charges[left_charge_labels]
    new_right_charge = charges[right_charge_labels]

    charges_u = [new_left_charge
                ] + [matrix._charges[o] for o in matrix._order[0]]
    order_u = [[0]] + [list(np.arange(1, len(matrix._order[0]) + 1))]
    flows_u = [True] + [matrix._flows[o] for o in matrix._order[0]]
    charges_v = [new_right_charge
                ] + [matrix._charges[o] for o in matrix._order[1]]
    flows_v = [False] + [matrix._flows[o] for o in matrix._order[1]]
    order_v = [[0]] + [list(np.arange(1, len(matrix._order[1]) + 1))]
    # We fill in data into the transposed U
    # note that transposing is essentially free
    if len(u_blocks) > 0:
      all_u_blocks = np.concatenate([np.ravel(u.T) for u in u_blocks])
      all_v_blocks = np.concatenate([np.ravel(v) for v in v_blocks])
    else:
      all_u_blocks = np.empty(0, dtype=matrix.dtype)
      all_v_blocks = np.empty(0, dtype=matrix.dtype)

    return BlockSparseTensor(
        all_u_blocks,
        charges=charges_u,
        flows=flows_u,
        order=order_u,
        check_consistency=False).transpose((1, 0)), S, BlockSparseTensor(
            all_v_blocks,
            charges=charges_v,
            flows=flows_v,
            order=order_v,
            check_consistency=False)

  return S
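A usage sketch for `svd`, reconstructing the matrix as U @ diag(S) @ V on the dense level; import paths are assumptions.

import numpy as np
from tensornetwork.block_sparse import (U1Charge, Index, BlockSparseTensor,
                                        tensordot)
from tensornetwork.block_sparse.linalg import svd, diag  # assumed paths

r = U1Charge(np.random.randint(-2, 3, 20, dtype=np.int16))
c = U1Charge(np.random.randint(-2, 3, 12, dtype=np.int16))
A = BlockSparseTensor.random([Index(r, False), Index(c, True)])
U, S, V = svd(A, full_matrices=False)
# S is a 1d ChargeArray; diag(S) lifts it to a diagonal BlockSparseTensor
M = tensordot(tensordot(U, diag(S), ([1], [0])), V, ([1], [0]))
np.testing.assert_allclose(M.todense(), A.todense(), atol=1e-12)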