def __mul__(self, l):
    if type(l) == expr:
        lhs = gpt.apply_expr_unary(self)
        rhs = gpt.apply_expr_unary(l)
        # Attempt to close before product to avoid exponential growth of terms.
        # This does not work for sub-expressions without lattice fields, so
        # lhs and rhs may still contain multiple terms.
        if len(lhs.val) > 1:
            lhs = expr(gpt.eval(lhs))
        if len(rhs.val) > 1:
            rhs = expr(gpt.eval(rhs))
        return expr([(a[0] * b[0], a[1] + b[1]) for a in lhs.val for b in rhs.val])
    elif type(l) == gpt.tensor and self.is_single(gpt.tensor):
        ue, uf, to = self.get_single()
        if ue == 0 and uf & factor_unary.BIT_TRANS != 0:
            tag = l.otype.__name__
            assert tag in to.otype.itab
            mt = to.otype.itab[tag]
            lhs = to.array
            if uf & gpt.factor_unary.BIT_CONJ != 0:
                lhs = lhs.conj()
            res = gpt.tensor(np.tensordot(lhs, l.array, axes=mt[1]), mt[0]())
            if res.otype == gpt.ot_singlet:
                res = complex(res.array)
            return res
        assert 0
    else:
        return self.__mul__(expr(l))
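
# Hedged sketch (plain Python, not gpt itself) of the list comprehension above:
# each term is assumed to be a (coefficient, factor_list) pair, so the product of
# two expressions multiplies coefficients and concatenates factor lists term by term.
lhs_terms = [(2.0, ["U_mu"]), (1.0, ["V_nu"])]
rhs_terms = [(0.5, ["psi"])]
product_terms = [(a[0] * b[0], a[1] + b[1]) for a in lhs_terms for b in rhs_terms]
assert product_terms == [(1.0, ["U_mu", "psi"]), (0.5, ["V_nu", "psi"])]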
def __mul__(self, l):
    if type(l) == expr:
        lhs = gpt.apply_expr_unary(self)
        rhs = gpt.apply_expr_unary(l)
        # close before product to avoid exponential growth of terms
        if len(lhs.val) > 1:
            lhs = expr(gpt.eval(lhs))
        if len(rhs.val) > 1:
            rhs = expr(gpt.eval(rhs))
        assert len(lhs.val) == 1 or len(rhs.val) == 1
        return expr([(a[0] * b[0], a[1] + b[1]) for a in lhs.val for b in rhs.val])
    elif type(l) == gpt.tensor and self.is_single(gpt.tensor):
        ue, uf, to = self.get_single()
        if ue == 0 and uf & factor_unary.BIT_TRANS != 0:
            tag = (to.otype, l.otype)
            assert tag in gpt.otype.itab
            mt = gpt.otype.itab[tag]
            lhs = to.array
            if uf & gpt.factor_unary.BIT_CONJ != 0:
                lhs = lhs.conj()
            res = gpt.tensor(np.tensordot(lhs, l.array, axes=mt[1]), mt[0])
            if res.otype == gpt.ot_complex:
                res = complex(res.array)
            return res
        assert 0
    else:
        return self.__mul__(expr(l))
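
# Hedged illustration of the np.tensordot call above (plain numpy, no gpt): the
# index-table entry mt is assumed to carry (result_otype, axes), where axes tells
# tensordot which indices of the two tensors to contract.
import numpy as np

a = np.arange(12.0).reshape(3, 4)   # stands in for to.array
b = np.arange(8.0).reshape(4, 2)    # stands in for l.array
axes = ([1], [0])                   # contract axis 1 of a with axis 0 of b
res = np.tensordot(a, b, axes=axes)
assert res.shape == (3, 2)
assert np.allclose(res, a @ b)      # for matrices this is an ordinary matrix product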
def gpt_object(first, ot):
    if type(first) == gpt.grid:
        return gpt.lattice(first, ot)
    elif type(first) == list or type(first) == numpy.ndarray:
        return gpt.tensor(numpy.array(first, dtype=numpy.complex128), ot)
    else:
        assert 0
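
# Hedged sketch of the list/ndarray branch above (plain numpy, no gpt): nested
# Python lists are converted to a complex128 numpy array before being wrapped
# in gpt.tensor.
import numpy as np

first = [[1, 0], [0, 1]]
arr = np.array(first, dtype=np.complex128)
assert arr.dtype == np.complex128 and arr.shape == (2, 2)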
def __rmul__(self, other):
    if type(other) == gpt.tensor:
        return gpt.tensor(
            cgpt.gamma_tensor_mul(other.array, other.otype.v_otype[0], self.gamma, 0),
            other.otype,
        )
    else:
        return super().__rmul__(other)
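
# Hedged sketch (toy classes, not gpt) of the reflected-multiplication fallback
# used above: Python tries other.__mul__(gamma) first, and when that returns
# NotImplemented it calls gamma.__rmul__(other), which can defer to super().__rmul__
# for operand types it does not special-case.
class Base:
    def __rmul__(self, other):
        return ("base-rmul", other)

class Gamma(Base):
    def __rmul__(self, other):
        if isinstance(other, float):      # special-cased operand type
            return ("gamma-rmul", other)
        return super().__rmul__(other)    # everything else falls through

g5 = Gamma()
assert (2.0 * g5) == ("gamma-rmul", 2.0)
assert (3 * g5) == ("base-rmul", 3)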
def __setitem__(self, key, value):
    if type(key) == slice:
        if key == slice(None, None, None):
            key = ()

    if type(key) == tuple:
        if len(self.v_obj) == 1:
            cgpt.lattice_set_val(self.v_obj[0], key, gpt.util.tensor_to_value(value))
        elif type(value) == int and value == 0:
            for i in self.otype.v_idx:
                cgpt.lattice_set_val(self.v_obj[i], key, 0)
        else:
            for i in self.otype.v_idx:
                cgpt.lattice_set_val(
                    self.v_obj[i],
                    key,
                    gpt.tensor(
                        value.array[self.otype.v_n0[i]:self.otype.v_n1[i]],
                        self.otype.v_otype[i],
                    ).array,
                )
    elif type(key) == numpy.ndarray:
        cgpt.lattice_import(self.v_obj, key, value)
    else:
        assert 0
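
# Hedged illustration of two details in __setitem__ above (plain numpy, no gpt):
# a full slice lat[:] = ... arrives as slice(None, None, None) and is treated like
# an empty tuple key, and for multi-component otypes the flat value array is assumed
# to be split into per-component ranges [v_n0[i]:v_n1[i]].
import numpy as np

key = slice(None, None, None)
if isinstance(key, slice) and key == slice(None, None, None):
    key = ()        # "assign everywhere"
assert key == ()

value_array = np.arange(6.0)
v_n0, v_n1 = [0, 2], [2, 6]
components = [value_array[v_n0[i]:v_n1[i]] for i in range(2)]
assert [c.size for c in components] == [2, 4]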
def tensor(self):
    assert self.gamma in matrices
    return gpt.tensor(matrices[self.gamma], self.otype)
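
# Hedged sketch of the lookup above (toy data, not the real gamma matrices):
# `matrices` is assumed to map a gamma index to its numpy matrix representation.
import numpy as np

matrices = {5: np.diag([1.0, 1.0, -1.0, -1.0])}   # placeholder standing in for gamma_5
gamma_index = 5
assert gamma_index in matrices
mat = matrices[gamma_index]
assert mat.shape == (4, 4)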
def gpt_object(first, ot):
    if type(first) == gpt.grid:
        return gpt.lattice(first, ot)
    return gpt.tensor(numpy.array(first, dtype=numpy.complex128), ot)
def value_to_tensor(val, otype):
    if otype == gpt.ot_singlet:
        return complex(val)
    return gpt.tensor(val, otype)
def __init__(self, like):
    self.value = g.tensor(
        np.zeros(like.otype.shape, dtype=np.complex128), like.otype
    )
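
# Hedged sketch of the zero-initialised accumulator above (plain numpy, no gpt):
# the starting value has the same shape as the template object and complex dtype.
import numpy as np

class Like:
    shape = (4, 3)   # stands in for like.otype.shape

acc = np.zeros(Like.shape, dtype=np.complex128)
assert acc.shape == (4, 3) and acc.dtype == np.complex128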
def value_to_tensor(val, otype):
    if otype.data_otype() == gpt.ot_singlet:
        # This is not ideal; could we use a subclass of complex that preserves otype info?
        return complex(val)
    return gpt.tensor(val, otype)
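
# Hedged illustration of the singlet special case above (plain numpy, no gpt):
# a singlet value is unwrapped to a plain Python complex, everything else keeps
# its array/otype wrapping.
import numpy as np

val = np.array(1.5 - 0.25j)   # 0-d complex array, stands in for a singlet value
res = complex(val)            # unwrap to a plain Python complex
assert isinstance(res, complex) and res == 1.5 - 0.25j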