def expr_eval(first, second=None, ac=False):
    if second is not None:
        # two-argument form: evaluate the expression into the existing lattice `first`
        t_obj = first.v_obj
        e = gpt.expr(second)
    else:
        # one-argument form: a bare lattice evaluates to itself
        if type(first) == gpt.lattice:
            return first
        e = gpt.expr(first)
        lat = get_lattice(e)
        grid = lat.grid
        otype = lat.otype
        n = len(otype.v_idx)
        t_obj = None

    if gpt.default.is_verbose("eval"):
        gpt.message("GPT::verbose::eval: " + str(e))

    if t_obj is not None:
        # in-place evaluation (accumulating into the destination if ac is True)
        for i, t in enumerate(t_obj):
            assert 0 == cgpt.eval(t, e.val, e.unary, ac, i)
        return first
    else:
        # allocate the result: evaluate each virtual index and reconstruct
        # the object type from the otype names returned by cgpt
        assert ac is False
        t_obj, s_ot, s_pr = [0] * n, [0] * n, [0] * n
        for i in otype.v_idx:
            t_obj[i], s_ot[i], s_pr[i] = cgpt.eval(t_obj[i], e.val, e.unary, False, i)
        if len(s_ot) == 1:
            otype = eval("gpt.otype." + s_ot[0])
        else:
            otype = gpt.otype.from_v_otype(s_ot)
        return gpt.lattice(grid, otype, t_obj)
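# Usage sketch (hedged; assumes a working gpt/cgpt build): expr_eval is what
# gpt.eval and the gpt(...) shorthand dispatch to. The one-argument form
# allocates the result lattice, the two-argument form writes into an existing
# one (the t_obj path above). Grid size and seed below are arbitrary.
import gpt as g

grid = g.grid([4, 4, 4, 4], g.double)
U, V = g.mcolor(grid), g.mcolor(grid)
g.random("demo").cnormal([U, V])

W = g.eval(U * V + 2.0 * U)  # builds a gpt.expr, then evaluates into a new lattice
g.eval(W, U * V)             # in-place evaluation into the existing lattice W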
def transpose(l):
    if type(l) == gpt.expr:
        return gpt.expr(
            [
                (a[0], [(x[0] ^ gpt.factor_unary.BIT_TRANS, x[1]) for x in reversed(a[1])])
                for a in l.val
            ]
        )
    elif type(l) == gpt.tensor and l.transposable():
        return l.transpose()
    else:
        return transpose(gpt.expr(l))
def conj(l):
    if type(l) == gpt.expr:
        return gpt.expr(
            [
                (
                    complex(a[0]).conjugate(),
                    [(x[0] ^ gpt.factor_unary.BIT_CONJ, x[1]) for x in a[1]],
                )
                for a in l.val
            ]
        )
    elif type(l) == gpt.tensor:
        return l.conj()
    else:
        return conj(gpt.expr(l))
def adj(l):
    if type(l) == gpt.expr:
        return gpt.expr(
            [
                (
                    complex(a[0]).conjugate(),
                    [
                        (x[0] ^ (gpt.factor_unary.BIT_TRANS | gpt.factor_unary.BIT_CONJ), x[1])
                        for x in reversed(a[1])
                    ],
                )
                for a in l.val
            ]
        )
    elif type(l) == gpt.tensor and l.transposable():
        return l.adj()
    else:
        return adj(gpt.expr(l))
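# Usage sketch (assumes gpt): the three unaries act on whole expressions by
# toggling per-factor bits and reversing the factor order where needed, so
# gpt.adj(U * V) evaluates to adj(V) * adj(U) without temporary fields.
import gpt as g

grid = g.grid([4, 4, 4, 4], g.double)
U, V = g.mcolor(grid), g.mcolor(grid)
g.random("demo").cnormal([U, V])

lhs = g.eval(g.adj(U * V))
rhs = g.eval(g.adj(V) * g.adj(U))
g.message(g.norm2(lhs - rhs))  # should be numerically zero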
def __mul__(self, other):
    if type(other) == matrix_operator:
        # mat = self * other
        # mat^dag = other^dag self^dag
        # (mat^dag)^-1 = (other^dag self^dag)^-1 = self^dag^-1 other^dag^-1
        # TODO: depending on other.accept_guess and whether self.inv_mat is set,
        #       we should attempt to properly propagate dst as well.
        adj_other = other.adj()
        adj_self = self.adj()
        inv_other = other.inv()
        inv_self = self.inv()
        adj_inv_other = adj_other.inv()
        adj_inv_self = adj_self.inv()
        return matrix_operator(
            mat=lambda dst, src: self(dst, other(src)),
            adj_mat=lambda dst, src: adj_other(dst, adj_self(src)),
            inv_mat=lambda dst, src: inv_other(dst, inv_self(src)),
            adj_inv_mat=lambda dst, src: adj_inv_self(dst, adj_inv_other(src)),
            otype=(self.otype[0], other.otype[1]),
            grid=(self.grid[0], other.grid[1]),
            accept_guess=(self.accept_guess[0], other.accept_guess[1]),
            cb=(self.cb[0], other.cb[1]),
            accept_list=True,
        )
    else:
        return gpt.expr(other).__rmul__(self)
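# Hedged sketch of operator composition: two trivial matrix_operator instances
# (scalar rescalings, chosen purely for illustration) composed with *, so that
# (A B)^-1 = B^-1 A^-1 as encoded above. Constructor keywords beyond mat and
# inv_mat are left at their defaults; this is a sketch, not the library's
# reference usage.
import gpt as g

grid = g.grid([4, 4, 4, 4], g.double)

def scale(a):
    return g.matrix_operator(
        mat=lambda dst, src: g.eval(dst, a * src),
        inv_mat=lambda dst, src: g.eval(dst, (1.0 / a) * src),
    )

A, B = scale(2.0), scale(4.0)
C = A * B                 # applies B first, then A

src = g.complex(grid)
g.random("demo").cnormal(src)

dst = g.lattice(src)
C(dst, src)               # dst = 8 * src
back = g.lattice(src)
C.inv()(back, dst)        # recovers src up to roundoff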
def trace(l, t=None):
    if isinstance(l, gpt.expr):
        l = gpt.eval(l)
    if t is None:
        t = gpt.expr_unary.BIT_SPINTRACE | gpt.expr_unary.BIT_COLORTRACE
    if type(l) == gpt.tensor:
        return l.trace(t)
    return gpt.expr(l, t)
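# Usage sketch (assumes gpt): the optional second argument selects which
# traces to take; by default both spin and color are traced. The trace stays
# deferred in the returned gpt.expr until evaluation.
import gpt as g

grid = g.grid([4, 4, 4, 4], g.double)
Q = g.mspincolor(grid)
g.random("demo").cnormal(Q)

t_full = g.eval(g.trace(Q))                                # spin + color trace
t_color = g.eval(g.trace(Q, g.expr_unary.BIT_COLORTRACE))  # color trace only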
def exp_ixp(self, mom, origin):
    r = gpt.expr(None)
    if origin is None:
        for x, p in zip(self.coordinate_lattices(), mom):
            r = r + x * p * 1j
    else:
        for _x, p, _o, _l in zip(
            self.coordinate_lattices(), mom, origin, self.grid.fdimensions
        ):
            lhalf, l, o = int(_l // 2), int(_l), int(_o)
            # recenter the coordinate relative to origin, wrapping it
            # periodically into [-l/2, l/2)
            x = gpt(
                gpt.component.mod(l)(_x + self.one_mask() * (l + lhalf - o))
                - lhalf * self.one_mask()
            )
            r = r + x * p * 1j
    return gpt.component.exp(r)
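# Usage sketch (hedged): in gpt's public API this functionality is reachable
# as g.exp_ixp (an assumption about how the surrounding class is exposed).
# The plane-wave field exp(i x.p) injects momentum by multiplication; here one
# unit of momentum along the time direction.
import gpt as g
import numpy as np

grid = g.grid([4, 4, 4, 8], g.double)
src = g.complex(grid)
g.random("demo").cnormal(src)

p = 2.0 * np.pi * np.array([0, 0, 0, 1]) / grid.fdimensions
dst = g.eval(g.exp_ixp(p) * src)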
def projected_gradient(self, weights, layer_input, left):
    assert len(weights) == self.n_weights
    shifts = [self.shift(layer_input, j) for j in range(self.n_weights)]

    # forward accumulation: s = w_0 + sum_i w_i * shift_i(input)
    s = g.expr(weights[0])
    for w, sh in zip(weights[1:], shifts[1:]):
        s += w * sh

    # chain rule through the activation
    dactivation = self.activation.gradient(g(s))
    left_dactivation = g.component.multiply(left, dactivation)

    # gradient with respect to the layer input: the i = 1 term is used
    # directly, the remaining terms are pulled back through the adjoint
    # weight and the inverse shift
    dinput = g.adj(weights[1]) * left_dactivation
    for i in range(2, len(weights)):
        dinput += g(self.ishift(g(g.adj(weights[i]) * left_dactivation), i))

    # gradients with respect to the weights
    r = [left_dactivation]
    for i in range(1, self.n_weights):
        o = g.group.cartesian(weights[i])
        o @= left_dactivation * g.adj(shifts[i])
        r.append(o)
    r.append(g(dinput))
    return r
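# Scalar toy mirror of this chain rule (plain numpy; hypothetical, not the gpt
# API): weights = [bias, local weight, neighbor weight], activation = tanh.
# The same pattern appears above: left_dactivation, per-weight gradients via
# the shifted inputs, and the input gradient via adjoint weights + inverse shift.
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(8)
b, w1, w2 = rng.standard_normal(3)
shift = lambda v, j: np.roll(v, -j)   # self.shift; ishift is np.roll(v, +j)

s = b + w1 * x + w2 * shift(x, 1)
out = np.tanh(s)

left = rng.standard_normal(8)         # upstream gradient dL/d(out)
left_dact = left * (1.0 - np.tanh(s) ** 2)

db = np.sum(left_dact)                # bias gradient (r[0] analogue)
dw1 = left_dact @ x                   # left_dactivation * adj(shift_i)
dw2 = left_dact @ shift(x, 1)
dinput = w1 * left_dact + w2 * np.roll(left_dact, 1)  # adjoint weights + ishift

# finite-difference check of dw2
eps = 1e-6
f = lambda w: np.sum(left * np.tanh(b + w1 * x + w * shift(x, 1)))
assert abs((f(w2 + eps) - f(w2 - eps)) / (2 * eps) - dw2) < 1e-5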
def __rmul__(self, other):
    return gpt.expr(other).__mul__(self)

def __neg__(self):
    return gpt.expr(self) * (-1.0)

def __sub__(self, l):
    return gpt.expr(self) - gpt.expr(l)

def __add__(self, l):
    return gpt.expr(self) + gpt.expr(l)

def __truediv__(self, l):
    assert gpt.util.isnum(l)
    return gpt.expr(self) * (1.0 / l)
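# The overloads above are thin sugar: arithmetic on lattices never computes
# immediately, it only assembles a gpt.expr that one evaluation call collapses.
# A sketch (assumes gpt):
import gpt as g

grid = g.grid([4, 4, 4, 4], g.double)
U, V = g.mcolor(grid), g.mcolor(grid)
g.random("demo").cnormal([U, V])

e = 2.0 * U - V / 2.0 + (-U)  # __rmul__, __sub__, __truediv__, __neg__
W = g(e)                      # one fused evaluation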
def __call__(self, weights, layer_input):
    assert len(weights) == self.n_weights
    s = g.expr(weights[0])
    for i in range(1, self.n_weights):
        s += weights[i] * self.shift(layer_input, i)
    return self.activation(g(s))
def apply_expr_unary(l):
    if l.unary == gpt.expr_unary.NONE:
        return l
    return gpt.expr(gpt.eval(l))
def transpose(self):
    if not self.transposable():
        return gpt.transpose(gpt.expr(self))
    return tensor(np.transpose(self.array, self.otype.transposed), self.otype)

def adj(self):
    if not self.transposable():
        return gpt.adj(gpt.expr(self))
    return tensor(np.transpose(self.array.conj(), self.otype.transposed), self.otype)
def __mul__(self, l):
    return gpt.expr(self) * gpt.expr(l)
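# Usage sketch: site values of a matrix-valued lattice are gpt.tensor objects,
# so the same algebra works pointwise off-lattice (assumes gpt).
import gpt as g

grid = g.grid([4, 4, 4, 4], g.double)
U = g.mcolor(grid)
g.random("demo").cnormal(U)

a = U[0, 0, 0, 0]   # gpt.tensor: the 3x3 color matrix at one site
b = g.adj(a)        # tensor.adj(): conjugate transpose
e = a * b           # deferred gpt.expr via tensor.__mul__ above
t = g.trace(a)      # complex number via the tensor.trace path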