def test_sparse_multiply(self, algebra):
    """Check grade-restricted (sparse) geometric products against the dense product.

    For every pair of grade subsets, a specialized multiply function is
    generated via ``layout.gmt_func_generator`` and its output is compared
    numerically with the ordinary dense product of the same grade-projected
    multivectors.
    """
    layout = algebra[0]

    # Two random multivectors to project from.
    mv_left = layout.randomMV()
    mv_right = layout.randomMV()

    # Choose the grades we care about.
    # We skip the cases of:
    #  - all grades
    #  - no grades
    #  - any multiplications including the pseudoscalar
    grades_possibilities = list(_powerset(range(layout.dims)))[1:-1]
    n_cases = len(grades_possibilities)

    for i, grades_a in enumerate(grades_possibilities):
        # Restrict the left operand to the chosen grades.
        sparse_left = sum((mv_left(k) for k in grades_a), layout.MultiVector())
        for j, grades_b in enumerate(grades_possibilities):
            # Restrict the right operand likewise.
            sparse_right = sum((mv_right(k) for k in grades_b), layout.MultiVector())

            # Sparse path: a product function specialized to these grades.
            gp = layout.gmt_func_generator(grades_a=grades_a, grades_b=grades_b)
            result_sparse = gp(sparse_left.value, sparse_right.value)
            # Dense path: the ordinary full geometric product.
            result_dense = (sparse_left * sparse_right).value

            # Both paths must agree numerically.
            np.testing.assert_almost_equal(result_sparse, result_dense)
            # Progress indicator: current case index out of the total count.
            print(j + i * n_cases, n_cases**2)
def __init__(self, n_vectors: int):
    """Build the index/bitmap/grade lookup tables for a basis of ``n_vectors`` vectors.

    Each of the ``2**n_vectors`` basis blades is identified both by a dense
    index (enumeration order of the powerset) and by a bitmap whose set bits
    mark the basis vectors it contains; the tables map between the two and
    record each blade's grade.
    """
    # deliberately skip the base class init, we can do a little better
    self._n = n_vectors

    # could attempt to optimize this by avoiding going via python integers
    # and copying the raw logic from
    # python/cpython.git:Modules/itertoolsmodule.c@combinations_next.
    from clifford import _powerset

    n_blades = 2**n_vectors
    self.index_to_bitmap = np.empty(n_blades, dtype=int)
    self.grades = np.empty(n_blades, dtype=int)
    self.bitmap_to_index = np.empty(n_blades, dtype=int)

    single_bit_masks = [1 << b for b in range(n_vectors)]
    for index, combo in enumerate(_powerset(single_bit_masks)):
        # OR together the per-vector bits to get this blade's bitmap.
        blade_bitmap = functools.reduce(operator.or_, combo, 0)
        self.index_to_bitmap[index] = blade_bitmap
        self.grades[index] = len(combo)
        self.bitmap_to_index[blade_bitmap] = index
        del combo  # enables an optimization inside itertools.combinations