def test_fuse_charges_raises():
  """`fuse_charges` must reject a flow list shorter than the charge list."""
  n_legs = 5
  bound = 6
  dim = 10
  raw = []
  for _ in range(n_legs):
    raw.append(
        np.random.randint(-bound // 2, bound // 2 + 1, dim).astype(np.int16))
  u1_charges = [U1Charge(arr) for arr in raw]
  # only four flows for five charges -- a deliberate length mismatch
  mismatched_flows = [True, False, True, False]
  with pytest.raises(ValueError):
    fuse_charges(u1_charges, mismatched_flows)
def compute_sparse_lookup(
    charges: List[BaseCharge], flows: Union[np.ndarray, List[bool]],
    target_charges: BaseCharge) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
  """
  Compute a lookup table mapping dense index positions to sparse index
  positions, treating only those elements as non-zero whose charges
  fuse to `target_charges`.

  Args:
    charges: List of `BaseCharge` objects.
    flows: A list of `bool`; the flow directions.
    target_charges: A `BaseCharge`; the target charges for which
      the fusion of `charges` is non-zero.
  Returns:
    lookup: An np.ndarray of positive numbers between `0` and
      `len(unique_charges)`. The position of values `n` in `lookup`
      are positions with charge values `unique_charges[n]`.
    unique_charges: The unique charges of fusion of `charges`.
    label_to_unique: The integer labels of the unique charges.
  """
  fused_charges = fuse_charges(charges, flows)
  unique_charges, inverse = fused_charges.unique(
      return_inverse=True, sort=False)
  # labels of the unique fused charges that also appear in `target_charges`
  _, label_to_unique, _ = unique_charges.intersect(
      target_charges, return_indices=True)

  # relabeling table: kept labels map to themselves, everything else to -1
  relabel = np.full(
      len(unique_charges), fill_value=-1, dtype=charges[0].label_dtype)
  relabel[label_to_unique] = label_to_unique
  dense_labels = relabel[inverse]
  # drop the positions whose charge is not in `target_charges`
  lookup = dense_labels[dense_labels >= 0]
  return lookup, unique_charges, np.sort(label_to_unique)
def test_BlockSparseTensor_init():
  """Constructing a BlockSparseTensor stores data and charges; a data vector
  of the wrong length is rejected when `check_consistency=True`."""
  np.random.seed(10)
  dim = 10
  rank = 4
  flows = np.random.choice([True, False], size=rank, replace=True)
  charges = [
      U1Charge.random(dimension=dim, minval=-5, maxval=5) for _ in range(rank)
  ]
  fused = fuse_charges(charges, flows)
  num_nonzero = len(np.nonzero(fused == np.zeros((1, 1)))[0])
  data = np.random.uniform(0, 1, size=num_nonzero)
  order = [[axis] for axis in range(rank)]
  tensor = BlockSparseTensor(data, charges, flows, order=order)
  np.testing.assert_allclose(data, tensor.data)
  for expected, actual in zip(charges, tensor.charges):
    assert charge_equal(expected, actual[0])
  for expected, actual in zip(charges, tensor._charges):
    assert charge_equal(expected, actual)
  # one element too many -> the consistency check must fail
  bad_data = np.random.uniform(0, 1, size=num_nonzero + 1)
  with pytest.raises(ValueError):
    tensor = BlockSparseTensor(
        bad_data, charges, flows, order=order, check_consistency=True)
def test_diagonal(Ds, dtype, num_charges, flow):
  """`backend.diagonal` of a rank-2 tensor matches manual block-wise np.diag,
  and non-default axis/offset arguments raise NotImplementedError."""
  np.random.seed(10)
  backend = symmetric_backend.SymmetricBackend()
  # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
  # builtin `int` is the drop-in replacement.  Maps flow=False -> +1,
  # flow=True -> -1.
  np_flow = -int((int(flow) - 0.5) * 2)
  indices = [
      Index(
          BaseCharge(np.random.randint(-2, 3, (Ds[n], num_charges)),
                     charge_types=[U1Charge] * num_charges), flow)
      for n in range(2)
  ]
  arr = BlockSparseTensor.random(indices, dtype=dtype)
  fused = fuse_charges(arr.flat_charges, arr.flat_flows)
  # dense positions whose total charge is zero (the allowed elements)
  inds = np.nonzero(fused == np.zeros((1, num_charges), dtype=np.int16))[0]
  # pylint: disable=no-member
  left, _ = np.divmod(inds, Ds[1])
  unique = np.unique(
      np_flow * (indices[0]._charges[0].charges[left, :]), axis=0)
  diagonal = backend.diagonal(arr)

  # reference: concatenate the diagonal of every symmetry block
  sparse_blocks, _, block_shapes = _find_diagonal_sparse_blocks(
      arr.flat_charges, arr.flat_flows, 1)
  data = np.concatenate([
      np.diag(np.reshape(arr.data[sparse_blocks[n]], block_shapes[:, n]))
      for n in range(len(sparse_blocks))
  ])
  np.testing.assert_allclose(data, diagonal.data)
  np.testing.assert_allclose(unique, diagonal.flat_charges[0].unique_charges)

  # only the main diagonal of the default matrix view is supported
  with pytest.raises(NotImplementedError):
    diagonal = backend.diagonal(arr, axis1=0)
  with pytest.raises(NotImplementedError):
    diagonal = backend.diagonal(arr, axis2=1)
  with pytest.raises(NotImplementedError):
    diagonal = backend.diagonal(arr, offset=1)
def test_get_diag(dtype, num_charges, Ds, flow):
  """`diag` of a rank-2 tensor matches manual block-wise np.diag."""
  np.random.seed(10)
  # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
  # builtin `int` is the drop-in replacement.  Maps flow=False -> +1,
  # flow=True -> -1.
  np_flow = -int((int(flow) - 0.5) * 2)
  indices = [
      Index(
          BaseCharge(
              np.random.randint(-2, 3, (num_charges, Ds[n])),
              charge_types=[U1Charge] * num_charges), flow) for n in range(2)
  ]
  arr = BlockSparseTensor.random(indices, dtype=dtype)
  fused = fuse_charges(arr.flat_charges, arr.flat_flows)
  # dense positions whose total charge is zero (the allowed elements)
  inds = np.nonzero(fused == np.zeros((num_charges, 1), dtype=np.int16))[0]
  # pylint: disable=no-member
  left, _ = np.divmod(inds, Ds[1])
  unique = np.unique(
      np_flow * (indices[0]._charges[0].charges[:, left]), axis=1)
  diagonal = diag(arr)

  # reference: concatenate the diagonal of every symmetry block
  sparse_blocks, _, block_shapes = _find_diagonal_sparse_blocks(
      arr.flat_charges, arr.flat_flows, 1)
  data = np.concatenate([
      np.diag(np.reshape(arr.data[sparse_blocks[n]], block_shapes[:, n]))
      for n in range(len(sparse_blocks))
  ])
  np.testing.assert_allclose(data, diagonal.data)
  np.testing.assert_allclose(unique, diagonal.flat_charges[0].unique_charges)
def test_index_charges():
  """`Index.charges` returns the fusion of its constituent charges/flows."""
  dim = 10
  bound = 4
  dtype = np.int16
  np.random.seed(10)
  first = U1Charge(
      np.random.randint(-bound // 2, bound // 2 + 1, dim).astype(dtype))
  second = U1Charge(
      np.random.randint(-bound // 2, bound // 2 + 1, dim).astype(dtype))
  index = Index(charges=[first, second], flow=[False, True])
  expected = fuse_charges([first, second], [False, True])
  np.testing.assert_allclose(index.charges.charges, expected.charges)
def compute_fused_charge_degeneracies(
    charges: List[BaseCharge],
    flows: Union[np.ndarray, List[bool]]) -> Tuple[BaseCharge, np.ndarray]:
  """
  For a list of charges, computes all possible fused charges resulting
  from fusing `charges` and their respective degeneracies
  Args:
    charges: List of `BaseCharge`, one for each leg of a
      tensor.
    flows: A list of bool, one for each leg of a tensor.
      with values `False` or `True` denoting inflowing and
      outflowing charge direction, respectively.
  Returns:
    BaseCharge: The unique fused charges.
    np.ndarray: The degeneracies of each unique fused charge.
  """
  # single leg: the fused charges are just the (dualed) charges themselves
  if len(charges) == 1:
    return (charges[0] * flows[0]).unique(return_counts=True)
  dims = [c.dim for c in charges]
  # for small dims is faster to fuse all and use unique
  # directly
  if reduce(mul, dims, 1) < 20000:
    fused = fuse_charges(charges, flows)
    return fused.unique(return_counts=True)

  # large case: fuse the two halves separately and combine only the
  # unique charges of each half, weighting by their degeneracies
  partition = _find_best_partition(dims)
  fused_left = fuse_charges(charges[:partition], flows[:partition])
  fused_right = fuse_charges(charges[partition:], flows[partition:])
  left_unique, left_degens = fused_left.unique(return_counts=True)
  right_unique, right_degens = fused_right.unique(return_counts=True)
  fused = left_unique + right_unique
  unique_charges, charge_labels = fused.unique(return_inverse=True)
  fused_degeneracies = fuse_degeneracies(left_degens, right_degens)
  # Segment-sum of `fused_degeneracies` grouped by `charge_labels`:
  # sort by label, take a running cumulative sum, then read it off at the
  # boundaries between consecutive distinct labels.  Differencing the
  # boundary values yields the per-label totals.
  new_ord = np.argsort(charge_labels)
  all_degens = np.cumsum(fused_degeneracies[new_ord])
  cum_degens = all_degens[np.flatnonzero(np.diff(charge_labels[new_ord]))]
  final_degeneracies = np.append(cum_degens, all_degens[-1]) - np.append(
      0, cum_degens)
  return unique_charges, final_degeneracies
def todense(self) -> np.ndarray:
  """Convert the block-sparse tensor into a dense numpy array.

  The stored data is scattered into the positions whose fused charge
  equals the identity charge; all other entries are zero.
  """
  # scalar (rank-0) tensors are already dense
  if len(self.shape) == 0:
    return self.data
  dense = np.asarray(np.zeros(self.shape, dtype=self.dtype).flat)
  fused = fuse_charges(self._charges, self._flows)
  allowed = np.nonzero(fused == self._charges[0].identity_charges)[0]
  dense[allowed] = self.data
  unordered = np.reshape(dense, [c.dim for c in self._charges])
  # undo the internal leg ordering before returning
  perm = flatten(self._order)
  return unordered.transpose(perm).reshape(self.shape)
def test_fuse_charges():
  """Fusing U1 charges agrees with fusing the raw integer arrays."""
  n_legs = 5
  bound = 6
  dim = 10
  raw_charges = []
  for _ in range(n_legs):
    raw_charges.append(
        np.random.randint(-bound // 2, bound // 2 + 1, dim).astype(np.int16))
  charges = [U1Charge(arr) for arr in raw_charges]
  flows = [True, False, True, False, True]
  # outflowing legs (flow=True) flip the sign of their charges
  signs = np.ones(5, dtype=np.int16)
  signs[flows] = -1
  fused = fuse_charges(charges, flows)
  expected = fuse_ndarrays([arr * s for arr, s in zip(raw_charges, signs)])
  np.testing.assert_allclose(np.squeeze(fused.charges), expected)
def test_todense(num_charges, chargetype):
  """`todense` places the data at symmetry-allowed positions, zeros elsewhere."""
  np.random.seed(10)
  dims = [8, 9, 10, 11]
  rank = 4
  flows = np.random.choice([True, False], size=rank, replace=True)
  charges = [get_charge(chargetype, num_charges, dims[n]) for n in range(rank)]
  fused = fuse_charges(charges, flows)
  allowed = fused == np.zeros((num_charges, 1))
  allowed_pos = np.nonzero(allowed)[0]
  forbidden_pos = np.nonzero(np.logical_not(allowed))[0]
  indices = [Index(charges[n], flows[n]) for n in range(rank)]
  tensor = BlockSparseTensor.randn(indices)
  dense = np.array(tensor.todense().flat)
  np.testing.assert_allclose(dense[allowed_pos], tensor.data)
  np.testing.assert_allclose(dense[forbidden_pos], 0)
def test_fromdense(num_charges, chargetype):
  """`fromdense` keeps symmetry-allowed entries and discards the rest."""
  np.random.seed(10)
  dims = [8, 9, 10, 11]
  rank = 4
  flows = np.random.choice([True, False], size=rank, replace=True)
  charges = [get_charge(chargetype, num_charges, dims[n]) for n in range(rank)]
  fused = fuse_charges(charges, flows)
  allowed = fused == np.zeros((1, num_charges))
  allowed_pos = np.nonzero(allowed)[0]
  forbidden_pos = np.nonzero(np.logical_not(allowed))[0]
  indices = [Index(charges[n], flows[n]) for n in range(rank)]
  dense = np.random.random_sample(dims)
  tensor = BlockSparseTensor.fromdense(indices, dense)
  roundtrip = tensor.todense()
  np.testing.assert_allclose(np.ravel(dense)[allowed_pos], tensor.data)
  np.testing.assert_allclose(np.ravel(roundtrip)[forbidden_pos], 0)
def _find_transposed_diagonal_sparse_blocks(
    charges: List[BaseCharge],
    flows: Union[np.ndarray, List[bool]],
    tr_partition: int,
    order: Optional[Union[List, np.ndarray]] = None,
) -> Tuple[List, BaseCharge, np.ndarray]:
  """
  Find the diagonal blocks of a transposed tensor with
  meta-data `charges` and `flows`. `charges` and `flows`
  are the charges and flows of the untransposed tensor,
  `order` is the final transposition, and `tr_partition`
  is the partition of the transposed tensor according to
  which the diagonal blocks should be found.
  Args:
    charges: List of `BaseCharge`, one for each leg of a tensor.
    flows: A list of bool, one for each leg of a tensor.
      with values `False` or `True` denoting inflowing and
      outflowing charge direction, respectively.
    tr_partition: Location of the transposed tensor partition
      (i.e. such that the tensor is viewed as a matrix between
      `charges[order[:partition]]` and `charges[order[partition:]]`).
    order: Order with which to permute the tensor axes.
  Returns:
    block_maps (List[np.ndarray]): list of integer arrays, which each
      containing the location of a symmetry block in the data vector.
    block_qnums (BaseCharge): The charges of the corresponding blocks.
    block_dims (np.ndarray): 2-by-m array of matrix dimensions of each block.
  """
  flows = np.asarray(flows)
  # results are memoized per (charges, flows, tr_partition, order) key
  cacher = get_cacher()
  if cacher.do_caching:
    hash_val = _to_string(charges, flows, tr_partition, order)
    if hash_val in cacher.cache:
      return cacher.cache[hash_val]

  if np.array_equal(order, None) or (np.array_equal(
      np.array(order), np.arange(len(charges)))):
    # no transpose order
    return _find_diagonal_sparse_blocks(charges, flows, tr_partition)

  # general case: non-trivial transposition is required
  num_inds = len(charges)
  tensor_dims = np.array([charges[n].dim for n in range(num_inds)], dtype=int)
  # row-major strides of the untransposed dense tensor
  strides = np.append(np.flip(np.cumprod(np.flip(tensor_dims[1:]))), 1)

  # compute qnums of row/cols in original tensor
  orig_partition = _find_best_partition(tensor_dims)
  orig_width = np.prod(tensor_dims[orig_partition:])

  orig_unique_row_qnums = compute_unique_fused_charges(
      charges[:orig_partition], flows[:orig_partition])
  orig_unique_col_qnums, orig_col_degen = compute_fused_charge_degeneracies(
      charges[orig_partition:], np.logical_not(flows[orig_partition:]))

  # charges that appear on both the row and column side form the blocks
  orig_block_qnums, row_map, col_map = intersect(
      orig_unique_row_qnums.unique_charges,
      orig_unique_col_qnums.unique_charges,
      axis=0,
      return_indices=True)
  orig_num_blocks = orig_block_qnums.shape[0]
  if orig_num_blocks == 0:
    # special case: trivial number of non-zero elements
    obj = charges[0].__new__(type(charges[0]))
    obj.__init__(
        np.empty((0, charges[0].num_symmetries), dtype=charges[0].dtype),
        np.arange(0, dtype=charges[0].label_dtype), charges[0].charge_types)
    return [], obj, np.empty((2, 0), dtype=SIZE_T)

  orig_row_ind = fuse_charges(charges[:orig_partition],
                              flows[:orig_partition])
  orig_col_ind = fuse_charges(charges[orig_partition:],
                              np.logical_not(flows[orig_partition:]))

  # inv_row_map: unique-row-charge label -> block number (-1 if no block)
  inv_row_map = -np.ones(
      orig_unique_row_qnums.unique_charges.shape[0],
      dtype=charges[0].label_dtype)
  inv_row_map[row_map] = np.arange(len(row_map), dtype=charges[0].label_dtype)

  # per dense row: number of sparse elements it contributes (0 for rows
  # whose charge matches no block -- the trailing appended 0 handles -1)
  all_degens = np.append(orig_col_degen[col_map],
                         0)[inv_row_map[orig_row_ind.charge_labels]]
  # sparse offset at the start of each dense row
  all_cumul_degens = np.cumsum(np.insert(all_degens[:-1], 0, 0)).astype(SIZE_T)
  # per dense column: offset of that column within its block's row
  dense_to_sparse = np.empty(orig_width, dtype=SIZE_T)
  for n in range(orig_num_blocks):
    dense_to_sparse[orig_col_ind.charge_labels == col_map[n]] = np.arange(
        orig_col_degen[col_map[n]], dtype=SIZE_T)

  # define properties of new tensor resulting from transposition
  new_strides = strides[order]
  new_row_charges = [charges[n] for n in order[:tr_partition]]
  new_col_charges = [charges[n] for n in order[tr_partition:]]
  new_row_flows = flows[order[:tr_partition]]
  new_col_flows = flows[order[tr_partition:]]

  if tr_partition == 0:
    # special case: reshape into row vector

    # compute qnums of row/cols in transposed tensor
    unique_col_qnums, new_col_degen = compute_fused_charge_degeneracies(
        new_col_charges, np.logical_not(new_col_flows))
    identity_charges = charges[0].identity_charges(dim=1)
    block_qnums, new_row_map, new_col_map = intersect(
        identity_charges.unique_charges,
        unique_col_qnums.unique_charges,
        axis=0,
        return_indices=True)
    block_dims = np.array([[1], new_col_degen[new_col_map]], dtype=SIZE_T)
    num_blocks = 1
    col_ind, col_locs = reduce_charges(
        new_col_charges,
        np.logical_not(new_col_flows),
        block_qnums,
        return_locations=True,
        strides=new_strides[tr_partition:])

    # find location of blocks in transposed tensor (w.r.t positions in original)
    #pylint: disable=no-member
    orig_row_posR, orig_col_posR = np.divmod(
        col_locs[col_ind.charge_labels == 0], orig_width)
    block_maps = [(all_cumul_degens[orig_row_posR] +
                   dense_to_sparse[orig_col_posR]).ravel()]
    obj = charges[0].__new__(type(charges[0]))
    obj.__init__(block_qnums,
                 np.arange(block_qnums.shape[0], dtype=charges[0].label_dtype),
                 charges[0].charge_types)

  elif tr_partition == len(charges):
    # special case: reshape into col vector

    # compute qnums of row/cols in transposed tensor
    unique_row_qnums, new_row_degen = compute_fused_charge_degeneracies(
        new_row_charges, new_row_flows)
    identity_charges = charges[0].identity_charges(dim=1)
    block_qnums, new_row_map, new_col_map = intersect(
        unique_row_qnums.unique_charges,
        identity_charges.unique_charges,
        axis=0,
        return_indices=True)
    block_dims = np.array([new_row_degen[new_row_map], [1]], dtype=SIZE_T)
    num_blocks = 1
    row_ind, row_locs = reduce_charges(
        new_row_charges,
        new_row_flows,
        block_qnums,
        return_locations=True,
        strides=new_strides[:tr_partition])

    # find location of blocks in transposed tensor (w.r.t positions in original)
    #pylint: disable=no-member
    orig_row_posL, orig_col_posL = np.divmod(
        row_locs[row_ind.charge_labels == 0], orig_width)
    block_maps = [(all_cumul_degens[orig_row_posL] +
                   dense_to_sparse[orig_col_posL]).ravel()]
    obj = charges[0].__new__(type(charges[0]))
    obj.__init__(block_qnums,
                 np.arange(block_qnums.shape[0], dtype=charges[0].label_dtype),
                 charges[0].charge_types)

  else:
    # general matrix case: blocks are the charges common to the fused
    # rows and fused columns of the transposed view
    unique_row_qnums, new_row_degen = compute_fused_charge_degeneracies(
        new_row_charges, new_row_flows)
    unique_col_qnums, new_col_degen = compute_fused_charge_degeneracies(
        new_col_charges, np.logical_not(new_col_flows))
    block_qnums, new_row_map, new_col_map = intersect(
        unique_row_qnums.unique_charges,
        unique_col_qnums.unique_charges,
        axis=0,
        return_indices=True)
    block_dims = np.array(
        [new_row_degen[new_row_map], new_col_degen[new_col_map]], dtype=SIZE_T)
    num_blocks = len(new_row_map)
    row_ind, row_locs = reduce_charges(
        new_row_charges,
        new_row_flows,
        block_qnums,
        return_locations=True,
        strides=new_strides[:tr_partition])
    col_ind, col_locs = reduce_charges(
        new_col_charges,
        np.logical_not(new_col_flows),
        block_qnums,
        return_locations=True,
        strides=new_strides[tr_partition:])

    block_maps = [0] * num_blocks
    for n in range(num_blocks):
      # split the dense locations of block `n` into (row, col) positions
      # of the *original* tensor, then map those to sparse offsets
      #pylint: disable=no-member
      orig_row_posL, orig_col_posL = np.divmod(
          row_locs[row_ind.charge_labels == n], orig_width)
      #pylint: disable=no-member
      orig_row_posR, orig_col_posR = np.divmod(
          col_locs[col_ind.charge_labels == n], orig_width)
      block_maps[n] = (
          all_cumul_degens[np.add.outer(orig_row_posL, orig_row_posR)] +
          dense_to_sparse[np.add.outer(orig_col_posL, orig_col_posR)]).ravel()
    obj = charges[0].__new__(type(charges[0]))
    obj.__init__(block_qnums,
                 np.arange(block_qnums.shape[0], dtype=charges[0].label_dtype),
                 charges[0].charge_types)

  if cacher.do_caching:
    cacher.cache[hash_val] = (block_maps, obj, block_dims)
    return cacher.cache[hash_val]
  return block_maps, obj, block_dims
def reduce_charges(charges: List[BaseCharge],
                   flows: Union[np.ndarray, List[bool]],
                   target_charges: np.ndarray,
                   return_locations: Optional[bool] = False,
                   strides: Optional[np.ndarray] = None) -> Any:
  """
  Add quantum numbers arising from combining two or more charges into a
  single index, keeping only the quantum numbers that appear in
  `target_charges`. Equivalent to using "combine_charges" followed
  by "reduce", but is generally much more efficient.
  Args:
    charges: List of `BaseCharge`, one for each leg of a
      tensor.
    flows: A list of bool, one for each leg of a tensor.
      with values `False` or `True` denoting inflowing and
      outflowing charge direction, respectively.
    target_charges: n-by-D array of charges which should be kept,
      with `n` the number of symmetries.
    return_locations: If `True` return the location of the kept
      values of the fused charges
    strides: Index strides with which to compute the
      retured locations of the kept elements. Defaults to trivial
      strides (based on row major order).
  Returns:
    BaseCharge: the fused index after reduction.
    np.ndarray: Locations of the fused BaseCharge charges that were kept.
  """
  tensor_dims = [len(c) for c in charges]

  if len(charges) == 1:
    # reduce single index
    if strides is None:
      strides = np.array([1], dtype=SIZE_T)
    return charges[0].dual(flows[0]).reduce(
        target_charges, return_locations=return_locations, strides=strides[0])

  # find size-balanced partition of charges
  partition = _find_best_partition(tensor_dims)

  # compute quantum numbers for each partition
  left_ind = fuse_charges(charges[:partition], flows[:partition])
  right_ind = fuse_charges(charges[partition:], flows[partition:])

  # compute combined qnums
  comb_qnums = fuse_ndarray_charges(left_ind.unique_charges,
                                    right_ind.unique_charges,
                                    charges[0].charge_types)
  #special case of empty charges
  #pylint: disable=unsubscriptable-object
  if (comb_qnums.shape[0] == 0) or (len(left_ind.charge_labels) == 0) or (len(
      right_ind.charge_labels) == 0):
    obj = charges[0].__new__(type(charges[0]))
    obj.__init__(
        np.empty((0, charges[0].num_symmetries), dtype=charges[0].dtype),
        np.empty(0, dtype=charges[0].label_dtype), charges[0].charge_types)
    if return_locations:
      return obj, np.empty(0, dtype=SIZE_T)
    return obj

  unique_comb_qnums, comb_labels = np.unique(
      comb_qnums, return_inverse=True, axis=0)
  num_unique = unique_comb_qnums.shape[0]

  # intersect combined qnums and target_charges
  reduced_qnums, label_to_unique, _ = intersect(
      unique_comb_qnums, target_charges, axis=0, return_indices=True)
  # map_to_kept: combined-charge label -> kept label, or -1 if dropped
  map_to_kept = -np.ones(num_unique, dtype=charges[0].label_dtype)
  map_to_kept[label_to_unique] = np.arange(len(label_to_unique))
  # new_comb_labels is a matrix of shape
  # (left_ind.num_unique, right_ind.num_unique)
  # each row new_comb_labels[n,:] contains integer values.
  # Positions where values >= 0
  # denote labels of right-charges that are kept.
  new_comb_labels = map_to_kept[comb_labels].reshape(
      [left_ind.num_unique, right_ind.num_unique])
  # for each unique left charge, the kept labels along the right index
  reduced_rows = [0] * left_ind.num_unique
  for n in range(left_ind.num_unique):
    temp_label = new_comb_labels[n, right_ind.charge_labels]
    reduced_rows[n] = temp_label[temp_label >= 0]

  reduced_labels = np.concatenate(
      [reduced_rows[n] for n in left_ind.charge_labels])
  obj = charges[0].__new__(type(charges[0]))
  obj.__init__(reduced_qnums, reduced_labels, charges[0].charge_types)

  if return_locations:
    row_locs = [0] * left_ind.num_unique
    if strides is not None:
      # computed locations based on non-trivial strides
      row_pos = fuse_stride_arrays(tensor_dims[:partition],
                                   strides[:partition])
      col_pos = fuse_stride_arrays(tensor_dims[partition:],
                                   strides[partition:])
    # per unique left charge: the (strided or row-major) column positions
    # of the kept elements
    for n in range(left_ind.num_unique):
      temp_label = new_comb_labels[n, right_ind.charge_labels]
      temp_keep = temp_label >= 0
      if strides is not None:
        row_locs[n] = col_pos[temp_keep]
      else:
        row_locs[n] = np.where(temp_keep)[0]

    if strides is not None:
      reduced_locs = np.concatenate([
          row_pos[n] + row_locs[left_ind.charge_labels[n]]
          for n in range(left_ind.dim)
      ])
    else:
      # trivial strides: positions are row-major offsets
      reduced_locs = np.concatenate([
          n * right_ind.dim + row_locs[left_ind.charge_labels[n]]
          for n in range(left_ind.dim)
      ])
    return obj, reduced_locs

  return obj
def charges(self) -> BaseCharge:
  """
  Return the fused charges of all legs; flow directions are
  folded into the result by `fuse_charges`.
  """
  return fuse_charges(self.flat_charges, self.flat_flows)
def charges(self) -> BaseCharge:
  """
  The fused charges of the index. Flow directions are
  folded into the returned charges.
  """
  fused = fuse_charges(self.flat_charges, self.flat_flows)
  return fused