Example 1
def test_split_node_qr_unitarity(dtype, num_charges):
    """Check that the Q factor of a symmetric QR decomposition is unitary.

    Both q @ conj(q) and conj(q) @ q must reduce to the identity on every
    symmetry block of the resulting matrix.

    Args:
        dtype: Data type of the random input matrix (test fixture).
        num_charges: Number of U(1) charges on each leg (test fixture).
    """

    def _assert_blocks_are_identity(unitary):
        # Every diagonal charge block of `unitary` must equal the
        # (possibly rectangular) identity matrix.
        blocks, _, shapes = _find_diagonal_sparse_blocks(
            unitary.tensor.flat_charges, unitary.tensor.flat_flows,
            len(unitary.tensor._order[0]))
        for n, block in enumerate(blocks):
            np.testing.assert_almost_equal(
                np.reshape(unitary.tensor.data[block], shapes[:, n]),
                np.eye(N=shapes[0, n], M=shapes[1, n]))

    np.random.seed(10)
    a = tn.Node(get_square_matrix(50, num_charges, dtype=dtype),
                backend='symmetric')
    q, r = tn.split_node_qr(a, [a[0]], [a[1]])
    # Disconnect r from q so q can be contracted with its conjugate alone.
    r[0] | q[1]
    qbar = tn.conj(q)
    q[1] ^ qbar[1]
    u1 = q @ qbar
    qbar[0] ^ q[0]
    u2 = qbar @ q
    _assert_blocks_are_identity(u1)
    _assert_blocks_are_identity(u2)
def test_pinv(dtype, num_charges):
    """Check that `pinv(A)` is a two-sided inverse of a full-rank matrix.

    Both `pinv(A) @ A` and `A @ pinv(A)` must be (numerically) the identity
    on every symmetry block.

    Args:
        dtype: Data type of the random input tensor (test fixture).
        num_charges: Number of U(1) charges on each leg (test fixture).
    """

    def _assert_block_identity(matrix):
        # Every diagonal charge block of `matrix` must be close to identity.
        blocks, _, shapes = _find_diagonal_sparse_blocks(
            matrix.flat_charges, matrix.flat_flows, 1)
        for n, block in enumerate(blocks):
            t = np.reshape(matrix.data[block], shapes[:, n])
            assert np.linalg.norm(t - np.eye(t.shape[0], t.shape[1])) < 1E-12

    np.random.seed(10)
    R = 2
    D = 10
    charge = BaseCharge(np.random.randint(-5,
                                          6, (num_charges, D),
                                          dtype=np.int16),
                        charge_types=[U1Charge] * num_charges)
    flows = [True, False]
    A = BlockSparseTensor.random([Index(charge, flows[n]) for n in range(R)],
                                 (-0.5, 0.5),
                                 dtype=dtype)
    invA = pinv(A)
    _assert_block_identity(invA @ A)
    _assert_block_identity(A @ invA)
Example 3
def test_get_diag(dtype, num_charges, Ds, flow):
  """Check `diag` of a rank-2 BlockSparseTensor.

  The extracted diagonal data must match the per-block numpy diagonals, and
  the diagonal's charges must be the unique charges present on the
  symmetry-allowed diagonal of the input.

  Args:
    dtype: Data type of the random input tensor (test fixture).
    num_charges: Number of U(1) charges on each leg (test fixture).
    Ds: Dimensions of the two legs (test fixture).
    flow: Common flow of both legs (test fixture).
  """
  np.random.seed(10)
  # Map flow to a sign: +1 for flow=False, -1 for flow=True.
  # NOTE: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
  # the builtin `int` is the drop-in replacement.
  np_flow = -int((int(flow) - 0.5) * 2)
  indices = [
      Index(
          BaseCharge(
              np.random.randint(-2, 3, (num_charges, Ds[n])),
              charge_types=[U1Charge] * num_charges), flow) for n in range(2)
  ]
  arr = BlockSparseTensor.random(indices, dtype=dtype)
  fused = fuse_charges(arr.flat_charges, arr.flat_flows)
  # Linear positions of the zero-charge (symmetry-allowed) elements.
  inds = np.nonzero(fused == np.zeros((num_charges, 1), dtype=np.int16))[0]
  # pylint: disable=no-member
  left, _ = np.divmod(inds, Ds[1])
  unique = np.unique(
      np_flow * (indices[0]._charges[0].charges[:, left]), axis=1)
  diagonal = diag(arr)
  sparse_blocks, _, block_shapes = _find_diagonal_sparse_blocks(
      arr.flat_charges, arr.flat_flows, 1)
  # Brute-force expected diagonal: numpy diag of each reshaped charge block.
  data = np.concatenate([
      np.diag(np.reshape(arr.data[sparse_blocks[n]], block_shapes[:, n]))
      for n in range(len(sparse_blocks))
  ])
  np.testing.assert_allclose(data, diagonal.data)
  np.testing.assert_allclose(unique, diagonal.flat_charges[0].unique_charges)
Example 4
def outerproduct(tensor1: BlockSparseTensor,
                 tensor2: BlockSparseTensor) -> BlockSparseTensor:
  """
  Compute the outer product of two `BlockSparseTensor`.
  The first `tensor1.ndim` indices of the resulting tensor are the
  indices of `tensor1`, the last `tensor2.ndim` indices are those
  of `tensor2`.
  Args:
    tensor1: A tensor.
    tensor2: A tensor.
  Returns:
    BlockSparseTensor: The result of taking the outer product.
  """
  combined_charges = tensor1._charges + tensor2._charges
  combined_flows = tensor1.flat_flows + tensor2.flat_flows
  # Shift tensor2's order entries past tensor1's elementary charges.
  shifted_order2 = [
      list(np.asarray(o) + len(tensor1._charges)) for o in tensor2._order
  ]

  data = np.zeros(
      compute_num_nonzero(combined_charges, combined_flows),
      dtype=tensor1.dtype)
  if (len(data) > 0) and (len(tensor1.data) > 0) and (len(tensor2.data) > 0):
    # Locate the identity-charge block of the output; the outer product of
    # the two data vectors lands entirely inside it.
    block_maps, block_charges, _ = _find_diagonal_sparse_blocks(
        combined_charges, combined_flows, len(tensor1._charges))
    zero_block = np.nonzero(
        block_charges == block_charges.identity_charges)[0][0]
    data[block_maps[zero_block].ravel()] = np.outer(tensor1.data,
                                                    tensor2.data).ravel()

  return BlockSparseTensor(
      data,
      charges=combined_charges,
      flows=combined_flows,
      order=tensor1._order + shifted_order2,
      check_consistency=False)
Example 5
    def contiguous(self,
                   permutation: Optional[Union[Tuple, List,
                                               np.ndarray]] = None,
                   inplace: Optional[bool] = False) -> Any:
        """
        Transpose the tensor data such that the linear order of the
        elements in `BlockSparseTensor.data` corresponds to the current
        order of the tensor indices.

        Consider a tensor with current order given by `_order=[[1,2],[3],[0]]`,
        i.e. `data` was initialized according to order [0,1,2,3], and the
        tensor has since been reshaped and transposed. The linear order of
        `data` does not match the desired order [1,2,3,0] of the tensor.
        `contiguous` fixes this by permuting `data` into this order,
        transposing `_charges` and `_flows`, and changing `_order` to
        `[[0,1],[2],[3]]`.

        Args:
          permutation: An optional alternative order to be used to transpose
            the tensor. If `None`, defaults to `BlockSparseTensor.flat_order`.
          inplace: If `True`, modify this tensor in place and return it;
            otherwise return a new `BlockSparseTensor`.

        Returns:
          BlockSparseTensor: the contiguous tensor (`self` or a new instance,
            depending on `inplace`).
        """
        flat_charges = self._charges
        flat_flows = self._flows
        if permutation is None:
            permutation = self.flat_order

        # Data already in the requested order: nothing to do.
        # NOTE(review): `self` is returned here even when `inplace=False`;
        # callers should not rely on receiving a copy in this case.
        if np.array_equal(permutation, np.arange(len(permutation))):
            return self
        # Bipartition the permuted legs; all block bookkeeping below is
        # relative to this partition.
        tr_partition = _find_best_partition(
            [flat_charges[n].dim for n in permutation])

        # Block layout of the existing data as seen through the permutation.
        tr_sparse_blocks, tr_charges, _ = _find_transposed_diagonal_sparse_blocks(
            flat_charges, flat_flows, tr_partition, permutation)

        # Native block layout of the permuted charges (target layout).
        sparse_blocks, charges, _ = _find_diagonal_sparse_blocks(
            [flat_charges[n] for n in permutation],
            [flat_flows[n] for n in permutation], tr_partition)
        data = np.empty(len(self.data), dtype=self.dtype)
        # Move each charge block from its transposed location into its
        # contiguous target location.
        for n, sparse_block in enumerate(sparse_blocks):
            ind = np.nonzero(tr_charges == charges[n])[0][0]
            perm = tr_sparse_blocks[ind]
            data[sparse_block] = self.data[perm]

        # Re-express `_order` relative to the new (permuted) charge ordering.
        _, inds = np.unique(permutation, return_index=True)
        new_flat_order = inds[self.flat_order]
        tmp = np.append(0, np.cumsum([len(o) for o in self._order]))
        order = [
            list(new_flat_order[tmp[n]:tmp[n + 1]])
            for n in range(len(tmp) - 1)
        ]
        charges = [self._charges[o] for o in permutation]
        flows = [self._flows[o] for o in permutation]
        if not inplace:
            return BlockSparseTensor(data,
                                     charges=charges,
                                     flows=flows,
                                     order=order,
                                     check_consistency=False)
        self.data = data
        self._order = order
        self._charges = charges
        self._flows = flows
        return self
Example 6
def test_eye(dtype, num_charges, D):
  """`eye` must produce the identity matrix on every symmetry block."""
  charge = BaseCharge(
      np.random.randint(-5, 6, (num_charges, D), dtype=np.int16),
      charge_types=[U1Charge] * num_charges)
  flow = False
  index = Index(charge, flow)
  A = eye(index, dtype=dtype)
  blocks, _, shapes = _find_diagonal_sparse_blocks(A.flat_charges, A.flat_flows,
                                                   1)
  for n, block in enumerate(blocks):
    dense_block = np.reshape(A.data[block], shapes[:, n])
    expected = np.eye(dense_block.shape[0], dense_block.shape[1])
    np.testing.assert_almost_equal(dense_block, expected)
Example 7
def test_find_diagonal_sparse_blocks(num_legs, num_charges):
    """Compare `_find_diagonal_sparse_blocks` with a brute-force computation.

    The zero-charge blocks, their charges and their total sizes are computed
    directly with numpy and checked against the library implementation.

    Args:
        num_legs: Number of tensor legs (test fixture).
        num_charges: Number of U(1) charges per leg (test fixture).
    """
    np.random.seed(10)
    np_charges = [
        np.random.randint(-5, 5, (60, num_charges), dtype=np.int16)
        for _ in range(num_legs)
    ]
    fused = np.stack([
        fuse_ndarrays([np_charges[n][:, c] for n in range(num_legs)])
        for c in range(num_charges)
    ],
                     axis=1)

    # Fuse the first and second halves of the legs separately; these form
    # the row and column charges of the matricized tensor.
    left_charges = np.stack([
        fuse_ndarrays([np_charges[n][:, c] for n in range(num_legs // 2)])
        for c in range(num_charges)
    ],
                            axis=1)
    right_charges = np.stack([
        fuse_ndarrays(
            [np_charges[n][:, c] for n in range(num_legs // 2, num_legs)])
        for c in range(num_charges)
    ],
                             axis=1)
    #pylint: disable=no-member
    # Linear positions where the total fused charge is zero.
    nz = np.nonzero(
        np.logical_and.reduce(fused == np.zeros((1, num_charges)), axis=1))[0]
    linear_locs = np.arange(len(nz))
    # pylint: disable=no-member
    left_inds, _ = np.divmod(nz, right_charges.shape[0])
    left = left_charges[left_inds, :]
    unique_left = np.unique(left, axis=0)
    # Brute-force blocks: group the zero-charge positions by row charge.
    blocks = []
    for n in range(unique_left.shape[0]):
        ul = unique_left[n, :][None, :]
        #pylint: disable=no-member
        blocks.append(linear_locs[np.nonzero(
            np.logical_and.reduce(left == ul, axis=1))[0]])

    charges = [
        BaseCharge(left_charges, charge_types=[U1Charge] * num_charges),
        BaseCharge(right_charges, charge_types=[U1Charge] * num_charges)
    ]
    bs, cs, ss = _find_diagonal_sparse_blocks(charges, [False, False], 1)
    np.testing.assert_allclose(cs.charges, unique_left)
    for b1, b2 in zip(blocks, bs):
        assert np.all(b1 == b2)

    # The block shapes must account for every nonzero element exactly once.
    assert np.sum(np.prod(ss, axis=0)) == np.sum([len(b) for b in bs])
def test_create_diag(dtype, num_charges):
    """`diag` of a rank-1 ChargeArray builds a block-diagonal matrix whose
    per-block diagonals reproduce the original data."""
    np.random.seed(10)
    D = 200
    index = Index(
        BaseCharge(np.random.randint(-2, 3, (num_charges, D)),
                   charge_types=[U1Charge] * num_charges), False)

    arr = ChargeArray.random([index], dtype=dtype)
    diagarr = diag(arr)
    # The nonzero entries of the dense matrix must be exactly the nonzero
    # entries of the sparse data (order-independent comparison).
    dense = np.ravel(diagarr.todense())
    dense_nonzero = np.sort(dense[dense != 0.0])
    sparse_nonzero = np.sort(diagarr.data[diagarr.data != 0.0])
    np.testing.assert_allclose(dense_nonzero, sparse_nonzero)

    sparse_blocks, charges, block_shapes = _find_diagonal_sparse_blocks(
        diagarr.flat_charges, diagarr.flat_flows, 1)
    for n, block in enumerate(sparse_blocks):
        # Diagonal of the n-th charge block must equal the entries of `arr`
        # carrying that block's charge.
        block_matrix = np.reshape(diagarr.data[block], block_shapes[:, n])
        mask = np.squeeze(index._charges[0] == charges[n])
        np.testing.assert_allclose(arr.data[mask], np.diag(block_matrix))
Example 9
def eye(column_index: Index,
        row_index: Optional[Index] = None,
        dtype: Optional[Type[np.number]] = None) -> BlockSparseTensor:
  """
  Return an identity matrix.
  Args:
    column_index: The column index of the matrix.
    row_index: The row index of the matrix.
    dtype: The dtype of the matrix.
  Returns:
    BlockSparseTensor
  """
  # Default row index: same charges as the columns with reversed flow,
  # so that the matrix carries zero total charge.
  if row_index is None:
    row_index = column_index.copy().flip_flow()
  if dtype is None:
    dtype = np.float64

  charges = column_index.flat_charges + row_index.flat_charges
  flows = column_index.flat_flows + row_index.flat_flows
  num_cols = len(column_index.flat_charges)
  num_rows = len(row_index.flat_charges)
  blocks, _, shapes = _find_diagonal_sparse_blocks(charges, flows, num_cols)
  # Total number of stored elements is the sum of the block areas.
  data = np.empty(np.int64(np.sum(np.prod(shapes, axis=0))), dtype=dtype)
  # Fill every charge block with a (possibly rectangular) identity.
  for n, block in enumerate(blocks):
    data[block] = np.ravel(np.eye(shapes[0, n], shapes[1, n], dtype=dtype))
  # Two matrix legs: columns first, then rows.
  order = [
      list(np.arange(0, num_cols)),
      list(np.arange(num_cols, num_cols + num_rows))
  ]
  return BlockSparseTensor(
      data=data,
      charges=charges,
      flows=flows,
      order=order,
      check_consistency=False)
def tensordot(
    tensor1: BlockSparseTensor,
    tensor2: BlockSparseTensor,
    axes: Optional[Union[Sequence[Sequence[int]],
                         int]] = 2) -> BlockSparseTensor:
    """
    Contract two `BlockSparseTensor`s along `axes`.

    Args:
      tensor1: First tensor.
      tensor2: Second tensor.
      axes: The axes to contract. Either an integer `k` (contract the last
        `k` axes of `tensor1` with the first `k` axes of `tensor2`) or a
        pair of sequences `[axes1, axes2]` of equal length.
    Returns:
        BlockSparseTensor: The result of the tensor contraction.
    Raises:
      ValueError: If `axes` is malformed, refers to out-of-range or repeated
        axes, or the selected legs have incompatible shapes, flows or
        charges.
    """
    #process scalar input for `axes`
    if isinstance(axes, (np.integer, int)):
        axes = [
            np.arange(tensor1.ndim - axes, tensor1.ndim, dtype=np.int16),
            np.arange(0, axes, dtype=np.int16)
        ]
    elif isinstance(axes[0], (np.integer, int)):
        if len(axes) > 1:
            raise ValueError(
                "invalid input `axes = {}` to tensordot".format(axes))
        axes = [np.array(axes, dtype=np.int16), np.array(axes, dtype=np.int16)]
    axes1 = axes[0]
    axes2 = axes[1]

    if len(axes1) != len(axes2):
        raise ValueError(
            "`axes1 = {}` and `axes2 = {}` have to be of same length. ".format(
                axes1, axes2))

    if len(axes1) > len(tensor1.shape):
        raise ValueError(
            "`axes1 = {}` is incompatible with `tensor1.shape = {}. ".format(
                axes1, tensor1.shape))

    if len(axes2) > len(tensor2.shape):
        raise ValueError(
            "`axes2 = {}` is incompatible with `tensor2.shape = {}. ".format(
                axes2, tensor2.shape))

    if not np.all(np.unique(axes1) == np.sort(axes1)):
        raise ValueError(
            "Some values in axes[0] = {} appear more than once!".format(axes1))
    if not np.all(np.unique(axes2) == np.sort(axes2)):
        raise ValueError(
            "Some values in axes[1] = {} appear more than once!".format(axes2))

    #special case outer product
    if len(axes1) == 0:
        return outerproduct(tensor1, tensor2)

    #more checks
    if max(axes1) >= len(tensor1.shape):
        raise ValueError(
            "rank of `tensor1` is smaller than `max(axes1) = {}.`".format(
                max(axes1)))

    if max(axes2) >= len(tensor2.shape):
        # BUGFIX: the original formatted `max(axes1)` into this message,
        # producing a misleading diagnostic about `tensor2`.
        raise ValueError(
            "rank of `tensor2` is smaller than `max(axes2) = {}`".format(
                max(axes2)))

    # Collect the elementary flows/charges of the contracted legs of
    # both tensors (each tensor leg may consist of several elementary legs).
    contr_flows_1 = []
    contr_flows_2 = []
    contr_charges_1 = []
    contr_charges_2 = []
    for a in axes1:
        contr_flows_1.extend(tensor1._flows[tensor1._order[a]])
        contr_charges_1.extend(
            [tensor1._charges[n] for n in tensor1._order[a]])
    for a in axes2:
        contr_flows_2.extend(tensor2._flows[tensor2._order[a]])
        contr_charges_2.extend(
            [tensor2._charges[n] for n in tensor2._order[a]])

    if len(contr_charges_2) != len(contr_charges_1):
        raise ValueError(
            "`axes1 = {}` and `axes2 = {}` have incompatible elementary"
            " shapes {} and {}".format(axes1, axes2,
                                       [e.dim for e in contr_charges_1],
                                       [e.dim for e in contr_charges_2]))
    # Contracted legs must have opposite flows.
    if not np.all(
            np.asarray(contr_flows_1) == np.logical_not(
                np.asarray(contr_flows_2))):

        raise ValueError(
            "`axes1 = {}` and `axes2 = {}` have incompatible elementary"
            " flows {} and {}".format(axes1, axes2, contr_flows_1,
                                      contr_flows_2))
    charge_check = [
        charge_equal(c1, c2)
        for c1, c2 in zip(contr_charges_1, contr_charges_2)
    ]
    if not np.all(charge_check):
        inds = np.nonzero(np.logical_not(charge_check))[0]
        raise ValueError(
            "`axes = {}` of tensor1 and `axes = {}` of tensor2 have incompatible charges"
            " {} and {}".format(
                np.array(axes1)[inds],
                np.array(axes2)[inds], [contr_charges_1[i] for i in inds],
                [contr_charges_2[i] for i in inds]))

    #checks finished

    #special case inner product
    if (len(axes1) == tensor1.ndim) and (len(axes2) == tensor2.ndim):
        t1 = tensor1.transpose(axes1).transpose_data()
        t2 = tensor2.transpose(axes2).transpose_data()
        data = np.dot(t1.data, t2.data)
        charge = tensor1._charges[0]
        # Build a scalar (rank-0) result: a charge object of the same type
        # with zero charges.
        final_charge = charge.__new__(type(charge))

        final_charge.__init__(np.empty((charge.num_symmetries, 0),
                                       dtype=np.int16),
                              charge_labels=np.empty(0, dtype=np.int16),
                              charge_types=charge.charge_types)
        return BlockSparseTensor(data=data,
                                 charges=[final_charge],
                                 flows=[False],
                                 order=[[0]],
                                 check_consistency=False)

    #in all other cases we perform a regular tensordot
    free_axes1 = sorted(set(np.arange(tensor1.ndim)) - set(axes1))
    free_axes2 = sorted(set(np.arange(tensor2.ndim)) - set(axes2))

    # Bring tensor1 into (free, contracted) and tensor2 into
    # (contracted, free) leg order so the contraction is a matrix product.
    new_order1 = [tensor1._order[n]
                  for n in free_axes1] + [tensor1._order[n] for n in axes1]
    new_order2 = [tensor2._order[n]
                  for n in axes2] + [tensor2._order[n] for n in free_axes2]

    flat_order_1 = flatten(new_order1)
    flat_order_2 = flatten(new_order2)

    flat_charges_1, flat_flows_1 = tensor1._charges, tensor1.flat_flows
    flat_charges_2, flat_flows_2 = tensor2._charges, tensor2.flat_flows

    left_charges = []
    right_charges = []
    left_flows = []
    right_flows = []
    left_order = []
    right_order = []

    # Elementary charges/flows/order of the free legs of tensor1 (result
    # legs 0..len(free_axes1)-1) ...
    s = 0
    for n in free_axes1:
        left_charges.extend([tensor1._charges[o] for o in tensor1._order[n]])
        left_order.append(list(np.arange(s, s + len(tensor1._order[n]))))
        s += len(tensor1._order[n])
        left_flows.extend([tensor1._flows[o] for o in tensor1._order[n]])

    # ... and of the free legs of tensor2 (remaining result legs).
    s = 0
    for n in free_axes2:
        right_charges.extend([tensor2._charges[o] for o in tensor2._order[n]])
        right_order.append(
            list(len(left_charges) + np.arange(s, s + len(tensor2._order[n]))))
        s += len(tensor2._order[n])
        right_flows.extend([tensor2._flows[o] for o in tensor2._order[n]])

    tr_sparse_blocks_1, charges1, shapes_1 = _find_transposed_diagonal_sparse_blocks(
        flat_charges_1, flat_flows_1, len(left_charges), flat_order_1)

    tr_sparse_blocks_2, charges2, shapes_2 = _find_transposed_diagonal_sparse_blocks(
        flat_charges_2, flat_flows_2, len(contr_charges_2), flat_order_2)

    # Only blocks whose charge appears in both tensors contribute.
    common_charges, label_to_common_1, label_to_common_2 = intersect(
        charges1.unique_charges,
        charges2.unique_charges,
        axis=1,
        return_indices=True)

    #Note: `cs` may contain charges that are not present in `common_charges`
    charges = left_charges + right_charges
    flows = left_flows + right_flows

    sparse_blocks, cs, _ = _find_diagonal_sparse_blocks(
        charges, flows, len(left_charges))
    num_nonzero_elements = np.int64(np.sum([len(v) for v in sparse_blocks]))

    #Note that empty is not a viable choice here.
    data = np.zeros(num_nonzero_elements,
                    dtype=np.result_type(tensor1.dtype, tensor2.dtype))

    label_to_common_final = intersect(cs.unique_charges,
                                      common_charges,
                                      axis=1,
                                      return_indices=True)[1]

    # Block-wise matrix multiplication: for each common charge, multiply the
    # corresponding blocks of tensor1 and tensor2 and scatter the result.
    for n in range(common_charges.shape[1]):
        n1 = label_to_common_1[n]
        n2 = label_to_common_2[n]
        nf = label_to_common_final[n]
        data[sparse_blocks[nf].ravel()] = np.ravel(
            np.matmul(
                tensor1.data[tr_sparse_blocks_1[n1].reshape(shapes_1[:, n1])],
                tensor2.data[tr_sparse_blocks_2[n2].reshape(shapes_2[:, n2])]))

    res = BlockSparseTensor(data=data,
                            charges=charges,
                            flows=flows,
                            order=left_order + right_order,
                            check_consistency=False)
    return res