def trace(tensor: BlockSparseTensor,
          axes: Optional[Tuple[int, ...]] = None) -> BlockSparseTensor:
  """
  Compute the trace of a matrix or tensor.
  Args:
    tensor: A `BlockSparseTensor`.
    axes: The axes over which the trace should be computed.
      Defaults to the last two indices of the tensor.
  Returns:
    BlockSparseTensor: The result of taking the trace.
  """
  if tensor.ndim > 1:
    if axes is None:
      axes = (tensor.ndim - 2, tensor.ndim - 1)
    if len(axes) != 2:
      raise ValueError(f"`len(axes)` has to be 2, found `axes = {axes}`")
    if not np.array_equal(tensor.flows[axes[0]],
                          np.logical_not(tensor.flows[axes[1]])):
      raise ValueError(
          f"trace indices for axes {axes} have non-matching flows.")

    sparse_shape = tensor.sparse_shape
    if sparse_shape[axes[0]].copy().flip_flow() != sparse_shape[axes[1]]:
      raise ValueError(f"trace indices for axes {axes} are not matching")

    #flatten the shape of `tensor`
    out = tensor.reshape(
        flatten([[tensor._charges[n].dim for n in o] for o in tensor._order]))
    _, _, labels0 = np.intersect1d(
        tensor._order[axes[0]], flatten(out._order), return_indices=True)
    _, _, labels1 = np.intersect1d(
        tensor._order[axes[1]], flatten(out._order), return_indices=True)

    a0 = list(labels0[np.argsort(tensor._order[axes[0]])])
    a1 = list(labels1[np.argsort(tensor._order[axes[1]])])

    while len(a0) > 0:
      i = a0.pop(0)
      j = a1.pop(0)
      identity = eye(
          Index([out._charges[out._order[i][0]]],
                [not out._flows[out._order[i][0]]]))
      out = tensordot(out, identity, ([i, j], [0, 1]))  # pytype: disable=wrong-arg-types
      a0ar = np.asarray(a0)

      mask_min = a0ar > np.min([i, j])
      mask_max = a0ar > np.max([i, j])
      a0ar[np.logical_and(mask_min, mask_max)] -= 2
      a0ar[np.logical_xor(mask_min, mask_max)] -= 1

      a1ar = np.asarray(a1)
      mask_min = a1ar > np.min([i, j])
      mask_max = a1ar > np.max([i, j])
      a1ar[np.logical_and(mask_min, mask_max)] -= 2
      a1ar[np.logical_xor(mask_min, mask_max)] -= 1
      a0 = list(a0ar)
      a1 = list(a1ar)
    return out  # pytype: disable=bad-return-type
  raise ValueError("trace can only be taken for tensors with ndim > 1")
  def todense(self) -> np.ndarray:
    """
    Map the sparse tensor to dense storage.
    Returns:
      np.ndarray: The dense array.
    """
    if len(self.shape) == 0:
      return self.data
    out = np.asarray(np.zeros(self.shape, dtype=self.dtype).flat)
    out[np.nonzero(
        fuse_charges(self._charges, self._flows) ==
        self._charges[0].identity_charges)[0]] = self.data
    result = np.reshape(out, [c.dim for c in self._charges])
    flat_order = flatten(self._order)
    return result.transpose(flat_order).reshape(self.shape)
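
# Standalone sketch (same assumptions as the `trace` example above): `todense`
# scatters `tensor.data` into the symmetry-allowed entries of a dense array and
# leaves every other entry at zero, so the totals of both representations match.
def example_todense():
  i = Index(U1Charge(np.random.randint(-2, 3, 6)), False)
  j = Index(U1Charge(np.random.randint(-2, 3, 6)), True)
  tensor = BlockSparseTensor.random([i, j])
  dense = tensor.todense()
  assert dense.shape == tensor.shape
  np.testing.assert_allclose(np.sum(dense), np.sum(tensor.data))
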
def tensordot(
    tensor1: BlockSparseTensor,
    tensor2: BlockSparseTensor,
    axes: Optional[Union[Sequence[Sequence[int]],
                         int]] = 2) -> BlockSparseTensor:
    """
  Contract two `BlockSparseTensor`s along `axes`.
  Args:
    tensor1: First tensor.
    tensor2: Second tensor.
    axes: The axes to contract.
  Returns:
      BlockSparseTensor: The result of the tensor contraction.
  """
    #process scalar input for `axes`
    if isinstance(axes, (np.integer, int)):
        axes = [
            np.arange(tensor1.ndim - axes, tensor1.ndim, dtype=np.int16),
            np.arange(0, axes, dtype=np.int16)
        ]
    elif isinstance(axes[0], (np.integer, int)):
        if len(axes) > 1:
            raise ValueError(
                "invalid input `axes = {}` to tensordot".format(axes))
        axes = [np.array(axes, dtype=np.int16), np.array(axes, dtype=np.int16)]
    axes1 = axes[0]
    axes2 = axes[1]

    if len(axes1) != len(axes2):
        raise ValueError(
            "`axes1 = {}` and `axes2 = {}` have to be of same length. ".format(
                axes1, axes2))

    if len(axes1) > len(tensor1.shape):
        raise ValueError(
            "`axes1 = {}` is incompatible with `tensor1.shape = {}`.".format(
                axes1, tensor1.shape))

    if len(axes2) > len(tensor2.shape):
        raise ValueError(
            "`axes2 = {}` is incompatible with `tensor2.shape = {}`.".format(
                axes2, tensor2.shape))

    if not np.all(np.unique(axes1) == np.sort(axes1)):
        raise ValueError(
            "Some values in axes[0] = {} appear more than once!".format(axes1))
    if not np.all(np.unique(axes2) == np.sort(axes2)):
        raise ValueError(
            "Some values in axes[1] = {} appear more than once!".format(axes2))

    #special case outer product
    if len(axes1) == 0:
        return outerproduct(tensor1, tensor2)

    #more checks
    if max(axes1) >= len(tensor1.shape):
        raise ValueError(
            "rank of `tensor1` is smaller than `max(axes1) = {}`.".format(
                max(axes1)))

    if max(axes2) >= len(tensor2.shape):
        raise ValueError(
            "rank of `tensor2` is smaller than `max(axes2) = {}`.".format(
                max(axes2)))

    contr_flows_1 = []
    contr_flows_2 = []
    contr_charges_1 = []
    contr_charges_2 = []
    for a in axes1:
        contr_flows_1.extend(tensor1._flows[tensor1._order[a]])
        contr_charges_1.extend(
            [tensor1._charges[n] for n in tensor1._order[a]])
    for a in axes2:
        contr_flows_2.extend(tensor2._flows[tensor2._order[a]])
        contr_charges_2.extend(
            [tensor2._charges[n] for n in tensor2._order[a]])

    if len(contr_charges_2) != len(contr_charges_1):
        raise ValueError(
            "`axes1 = {}` and `axes2 = {}` have incompatible elementary"
            " shapes {} and {}".format(axes1, axes2,
                                       [e.dim for e in contr_charges_1],
                                       [e.dim for e in contr_charges_2]))
    if not np.all(
            np.asarray(contr_flows_1) == np.logical_not(
                np.asarray(contr_flows_2))):
        raise ValueError(
            "`axes1 = {}` and `axes2 = {}` have incompatible elementary"
            " flows {} and {}".format(axes1, axes2, contr_flows_1,
                                      contr_flows_2))
    charge_check = [
        charge_equal(c1, c2)
        for c1, c2 in zip(contr_charges_1, contr_charges_2)
    ]
    if not np.all(charge_check):
        inds = np.nonzero(np.logical_not(charge_check))[0]
        raise ValueError(
            "`axes = {}` of tensor1 and `axes = {}` of tensor2 have incompatible charges"
            " {} and {}".format(
                np.array(axes1)[inds],
                np.array(axes2)[inds], [contr_charges_1[i] for i in inds],
                [contr_charges_2[i] for i in inds]))

    #checks finished

    #special case inner product
    if (len(axes1) == tensor1.ndim) and (len(axes2) == tensor2.ndim):
        t1 = tensor1.transpose(axes1).transpose_data()
        t2 = tensor2.transpose(axes2).transpose_data()
        data = np.dot(t1.data, t2.data)
        charge = tensor1._charges[0]
        final_charge = charge.__new__(type(charge))

        final_charge.__init__(np.empty((charge.num_symmetries, 0),
                                       dtype=np.int16),
                              charge_labels=np.empty(0, dtype=np.int16),
                              charge_types=charge.charge_types)
        return BlockSparseTensor(data=data,
                                 charges=[final_charge],
                                 flows=[False],
                                 order=[[0]],
                                 check_consistency=False)

    #in all other cases we perform a regular tensordot
    free_axes1 = sorted(set(np.arange(tensor1.ndim)) - set(axes1))
    free_axes2 = sorted(set(np.arange(tensor2.ndim)) - set(axes2))

    new_order1 = [tensor1._order[n]
                  for n in free_axes1] + [tensor1._order[n] for n in axes1]
    new_order2 = [tensor2._order[n]
                  for n in axes2] + [tensor2._order[n] for n in free_axes2]

    flat_order_1 = flatten(new_order1)
    flat_order_2 = flatten(new_order2)

    flat_charges_1, flat_flows_1 = tensor1._charges, tensor1.flat_flows
    flat_charges_2, flat_flows_2 = tensor2._charges, tensor2.flat_flows

    left_charges = []
    right_charges = []
    left_flows = []
    right_flows = []
    left_order = []
    right_order = []

    s = 0
    for n in free_axes1:
        left_charges.extend([tensor1._charges[o] for o in tensor1._order[n]])
        left_order.append(list(np.arange(s, s + len(tensor1._order[n]))))
        s += len(tensor1._order[n])
        left_flows.extend([tensor1._flows[o] for o in tensor1._order[n]])

    s = 0
    for n in free_axes2:
        right_charges.extend([tensor2._charges[o] for o in tensor2._order[n]])
        right_order.append(
            list(len(left_charges) + np.arange(s, s + len(tensor2._order[n]))))
        s += len(tensor2._order[n])
        right_flows.extend([tensor2._flows[o] for o in tensor2._order[n]])

    tr_sparse_blocks_1, charges1, shapes_1 = _find_transposed_diagonal_sparse_blocks(
        flat_charges_1, flat_flows_1, len(left_charges), flat_order_1)

    tr_sparse_blocks_2, charges2, shapes_2 = _find_transposed_diagonal_sparse_blocks(
        flat_charges_2, flat_flows_2, len(contr_charges_2), flat_order_2)

    common_charges, label_to_common_1, label_to_common_2 = intersect(
        charges1.unique_charges,
        charges2.unique_charges,
        axis=1,
        return_indices=True)

    #Note: `cs` may contain charges that are not present in `common_charges`
    charges = left_charges + right_charges
    flows = left_flows + right_flows

    sparse_blocks, cs, _ = _find_diagonal_sparse_blocks(
        charges, flows, len(left_charges))
    num_nonzero_elements = np.int64(np.sum([len(v) for v in sparse_blocks]))

    #Note that empty is not a viable choice here.
    data = np.zeros(num_nonzero_elements,
                    dtype=np.result_type(tensor1.dtype, tensor2.dtype))

    label_to_common_final = intersect(cs.unique_charges,
                                      common_charges,
                                      axis=1,
                                      return_indices=True)[1]

    for n in range(common_charges.shape[1]):
        n1 = label_to_common_1[n]
        n2 = label_to_common_2[n]
        nf = label_to_common_final[n]
        data[sparse_blocks[nf].ravel()] = np.ravel(
            np.matmul(
                tensor1.data[tr_sparse_blocks_1[n1].reshape(shapes_1[:, n1])],
                tensor2.data[tr_sparse_blocks_2[n2].reshape(shapes_2[:, n2])]))

    res = BlockSparseTensor(data=data,
                            charges=charges,
                            flows=flows,
                            order=left_order + right_order,
                            check_consistency=False)
    return res
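
# Standalone sketch (same assumptions as the examples above): contracting two
# block-sparse tensors over matching legs should reproduce `np.tensordot`
# applied to their dense counterparts.
def example_tensordot():
  i = Index(U1Charge(np.random.randint(-2, 3, 8)), False)
  j = Index(U1Charge(np.random.randint(-2, 3, 8)), True)
  A = BlockSparseTensor.random([i, j])
  # the contracted leg of `B` must carry the same charges as `j` with opposite flow
  B = BlockSparseTensor.random([j.copy().flip_flow(), i.copy().flip_flow()])
  res = tensordot(A, B, ([1], [0]))
  np.testing.assert_allclose(
      res.todense(), np.tensordot(A.todense(), B.todense(), ([1], [0])))
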
  def flat_order(self) -> List:
    """
    The flattened `ChargeArray._order`.
    """
    return flatten(self._order)
def test_flatten():
    listoflist = [[1, 2], [3, 4], [5]]
    flat = flatten(listoflist)
    np.testing.assert_allclose(flat, [1, 2, 3, 4, 5])