def __addsub__(self, y, sign):
    g0 = pyobs.gradient(lambda x: x, self.mean, gtype='diag')
    if isinstance(y, observable):
        g1 = pyobs.gradient(lambda x: sign * x, y.mean, gtype='diag')
        return pyobs.derobs([self, y], self.mean + sign * y.mean, [g0, g1])
    else:
        return pyobs.derobs([self], self.mean + sign * y, [g0])
def __mul__(self, y):
    if isinstance(y, observable):
        if self.shape == y.shape:
            g0 = pyobs.gradient(lambda x: x * y.mean, self.mean, gtype='diag')
            g1 = pyobs.gradient(lambda x: self.mean * x, y.mean, gtype='diag')
        elif self.shape == (1,):
            g0 = pyobs.gradient(lambda x: x * y.mean, self.mean, gtype='full')
            g1 = pyobs.gradient(lambda x: self.mean * x, y.mean, gtype='diag')
        elif y.shape == (1,):
            g0 = pyobs.gradient(lambda x: x * y.mean, self.mean, gtype='diag')
            g1 = pyobs.gradient(lambda x: self.mean * x, y.mean, gtype='full')
        else:
            raise pyobs.PyobsError('Shape mismatch, cannot multiply')
        return pyobs.derobs([self, y], self.mean * y.mean, [g0, g1])
    else:
        # If the gradient below were 'full', it would allow scalar_obs * array([4,5,6]),
        # which would create a vector obs. Right now that generates an error,
        # but 'diag' is faster for large gradients.
        g0 = pyobs.gradient(lambda x: x * y, self.mean, gtype='diag')
        return pyobs.derobs([self], self.mean * y, [g0])
def __matmul__(self, y):
    if isinstance(y, observable):
        g0 = pyobs.gradient(lambda x: x @ y.mean, self.mean)
        g1 = pyobs.gradient(lambda x: self.mean @ x, y.mean)
        return pyobs.derobs([self, y], self.mean @ y.mean, [g0, g1])
    else:
        g0 = pyobs.gradient(lambda x: x @ y, self.mean)
        return pyobs.derobs([self], self.mean @ y, [g0])
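# Hedged usage sketch (not part of the original source): the overloaded
# operators above propagate errors through derobs automatically. The two
# scalar observables are generated with pyobs.random.acrand as in the tests
# further down; the ensemble name, central values, errors and tau are assumptions.
import pyobs

obsA = pyobs.observable()
obsA.create('EnsA', pyobs.random.acrand(1.0, 0.1, 0.0, 1000))
obsB = pyobs.observable()
obsB.create('EnsA', pyobs.random.acrand(2.0, 0.2, 0.0, 1000))

shifted = obsA + 1.0    # __addsub__ with a plain number, sign = +1
scaled = obsA * 2.0     # __mul__ with a plain number, 'diag' gradient
product = obsA * obsB   # __mul__ between observables of equal shape
print(shifted.error(), scaled.error(), product.error())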
def einsum(subscripts, *operands):
    """
    Evaluates the Einstein summation convention on the input observables or arrays.

    Non-observable operands are treated as constants and contribute no gradient.
    Please check the documentation of `numpy.einsum` for the subscript syntax.
    """
    inps = []
    means = []
    for o in operands:
        if isinstance(o, pyobs.observable):
            inps.append(o)
            means.append(o.mean)
        else:
            means.append(numpy.array(o))
    grads = []
    for i in range(len(operands)):
        if not isinstance(operands[i], pyobs.observable):
            continue
        f = lambda x: numpy.einsum(
            subscripts, *[means[j] for j in range(i)], x,
            *[means[j] for j in range(i + 1, len(operands))])
        grads.append(pyobs.gradient(
            f, operands[i].mean))  # non-optimized for large number of observables
    new_mean = numpy.einsum(subscripts, *means)
    return pyobs.derobs(inps, new_mean, grads)
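# Hedged usage sketch: pyobs.einsum follows the numpy.einsum subscript
# convention. 'obsA' and 'obsB' are the scalar observables created in the
# operator sketch above; the constant array is an arbitrary assumption.
import numpy

elementwise = pyobs.einsum('i,i->i', obsA, obsB)              # element-wise product
rescaled = pyobs.einsum('i,i->i', obsA, numpy.array([2.0]))   # contraction with a constant
print(elementwise.error(), rescaled.error())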
def slice(self, *args):
    na = len(args)
    if na != len(self.shape):
        raise pyobs.PyobsError('Unexpected argument')
    f = lambda x: pyobs.slice_ndarray(x, *args)
    g0 = pyobs.gradient(f, self.mean, gtype='slice')
    return pyobs.derobs([self], f(self.mean), [g0])
def trace(x, offset=0, axis1=0, axis2=1):
    """
    Return the sum along diagonals of the array.

    Parameters:
       x (obs): observable whose diagonal elements are taken
       offset (int, optional): offset of the diagonal from the main diagonal;
          can be positive or negative. Defaults to 0.
       axis1, axis2 (int, optional): axes to be used as the first and second
          axis of the 2-D sub-arrays whose diagonals are taken; defaults are
          the first two axes of `x`.

    Returns:
       obs : the sum of the diagonal elements

    Notes:
       If `x` is 2-D, the sum along its diagonal with the given offset is
       returned, i.e., the sum of elements `x[i,i+offset]` for all i. If `x`
       has more than two dimensions, then the axes specified by `axis1` and
       `axis2` are used to determine the 2-D sub-arrays whose traces are
       returned. The shape of the resulting array is the same as that of `x`
       with `axis1` and `axis2` removed.

    Examples:
       >>> tr = pyobs.trace(mat)
    """
    new_mean = numpy.trace(x.mean, offset, axis1, axis2)
    g = pyobs.gradient(lambda x: numpy.trace(x, offset, axis1, axis2), x.mean)
    return pyobs.derobs(
        [x], new_mean, [g],
        description=f'trace for axes ({axis1},{axis2}) of {x.description}')
def sum(x, axis=None):
    """
    Sum of array elements over a given axis.

    Parameters:
       x (obs): array with elements to sum
       axis (None or int or tuple of ints, optional): Axis or axes along
          which a sum is performed. The default, axis=None, will sum all
          elements of the input array.

    Returns:
       obs: sum along the axis

    Examples:
       >>> import pyobs
       >>> pyobs.sum(a)
       >>> pyobs.sum(a,axis=0)
    """
    if axis is None:
        f = lambda a: numpy.reshape(numpy.sum(a, axis=axis), (1,))
        t = f'sum all elements of {x.description}'
    else:
        f = lambda a: numpy.sum(a, axis=axis)
        t = f'sum over axis {axis} of {x.description}'
    g = pyobs.gradient(f, x.mean)
    return pyobs.derobs([x], f(x.mean), [g], description=t)
def inv(x):
    """
    Compute the inverse of a square matrix.

    Parameters:
       x (obs): Matrix to be inverted

    Returns:
       obs: (Multiplicative) inverse of `x`

    Examples:
       >>> from pyobs.linalg import inv
       >>> a = pyobs.observable()
       >>> a.create('A',data,shape=(2,2))
       >>> ainv = pyobs.inv(a)

    Notes:
       If the number of dimensions is bigger than 2, `x` is treated as a
       stack of matrices residing in the last two indexes and broadcast
       accordingly.
    """
    if x.shape[-2] != x.shape[-1]:  # pragma: no cover
        raise pyobs.PyobsError(
            f'Unexpected matrix for inverse with shape={x.shape}')
    mean = numpy.linalg.inv(x.mean)
    # V Vinv = 1, dV Vinv + V dVinv = 0, dVinv = - Vinv dV Vinv
    g = pyobs.gradient(lambda x: -mean @ x @ mean, x.mean)
    return pyobs.derobs([x], mean, [g])
def eig(x):
    """
    Computes the eigenvalues and eigenvectors of a square matrix observable.
    The central values are computed using the `numpy.linalg.eig` routine.

    Parameters:
       x (obs): a symmetric square matrix (observable) with dimensions `NxN`

    Returns:
       list of obs: a vector observable with the eigenvalues and a matrix
       observable whose columns correspond to the eigenvectors

    Notes:
       The error on the eigenvectors is based on the assumption that the
       input matrix is symmetric. If this is not respected, the returned
       eigenvectors will have under- or over-estimated errors.

    Examples:
       >>> [w,v] = pyobs.linalg.eig(mat)
       >>> for i in range(N):
       >>>     # check eigenvalue equation
       >>>     print(mat @ v[:,i] - v[:,i] * w[i])
    """
    if len(x.shape) > 2:  # pragma: no cover
        raise pyobs.PyobsError(
            f'Unexpected matrix with shape {x.shape}; only 2-D arrays are supported'
        )
    if numpy.any(numpy.fabs(x.mean - x.mean.T) > 1e-10):  # pragma: no cover
        raise pyobs.PyobsError('Unexpected non-symmetric matrix: use eigLR')

    [w, v] = numpy.linalg.eig(x.mean)

    # d l_n = (v_n, dA v_n)
    gw = pyobs.gradient(lambda x: numpy.diag(v.T @ x @ v), x.mean)

    # d v_n = sum_{m \neq n} (v_m, dA v_n) / (l_n - l_m) v_m
    def gradv(y):
        tmp = v.T @ y @ v
        h = []
        for m in range(x.shape[0]):
            h.append((w != w[m]) * 1.0 / (w - w[m] + 1e-16))
        h = numpy.array(h)
        return v @ (tmp * h)

    gv = pyobs.gradient(gradv, x.mean)
    return [pyobs.derobs([x], w, [gw]), pyobs.derobs([x], v, [gv])]
def __getitem__(self, args):
    if isinstance(args, (int, numpy.int32, numpy.int64, slice, numpy.ndarray)):
        args = [args]
    na = len(args)
    if na != len(self.shape):
        raise pyobs.PyobsError('Unexpected argument')
    if self.mean[tuple(args)].size == 1:
        f = lambda x: numpy.reshape(x[tuple(args)], (1,))
    else:
        f = lambda x: x[tuple(args)]
    g0 = pyobs.gradient(f, self.mean, gtype='slice')
    return pyobs.derobs([self], f(self.mean), [g0])
def __call__(self, yobs, p0=None, min_search=None):
    if len(self.csq) > 1:
        pyobs.check_type(yobs, 'yobs', list)
    else:
        if isinstance(yobs, pyobs.observable):
            yobs = [yobs]
    if len(yobs) != len(self.csq):
        raise pyobs.PyobsError(
            f'Unexpected number of observables for {len(self.csq)} fits')
    if p0 is None:
        p0 = [1.0] * len(self.pdict)
    if min_search is None:
        min_search = lm

    def csq(p0):
        res = 0.0
        for i in range(len(yobs)):
            self.csq[i].set_pars(self.pdict, p0)
            res += self.csq[i](yobs[i].mean)
        return res

    dcsq = lambda x: sum([
        self.csq[i].grad(yobs[i].mean, self.pdict) for i in range(len(yobs))
    ])
    ddcsq = lambda x: sum([
        self.csq[i].hess(yobs[i].mean, self.pdict) for i in range(len(yobs))
    ])

    t0 = time()
    res = min_search(csq, p0, jac=dcsq, hess=ddcsq)

    # properly create gradients
    H = self.csq[0].Hmat(self.pdict, res.x)
    for i in range(1, len(yobs)):
        H += self.csq[i].Hmat(self.pdict, res.x)
    Hinv = numpy.linalg.inv(H)
    g = []
    for i in range(len(yobs)):
        tmp = self.csq[i].gvec(self.pdict, res.x)
        g.append(pyobs.gradient(Hinv @ tmp))

    if pyobs.is_verbose('mfit.run') or pyobs.is_verbose('mfit'):
        print(f'chisquare = {res.fun}')
        print(f'minimizer iterations = {res.nit}')
        print(f'minimizer status: {res.message}')
        print(f'mfit.run executed in {time()-t0:g} secs')
    return pyobs.derobs(yobs, res.x, g)
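# Hedged usage sketch, mirroring the example in the eval() docstring further
# down: 'xax' are the fit coordinates, 'W' a weight matrix, 'f' the model
# function and 'df' its gradient with respect to the parameters (all defined
# by the user elsewhere).
#
#   fit1 = mfit(xax, W, f, df)
#   pars = fit1(yobs1)                                # this __call__: chi^2 minimization
#   yeval = fit1.eval(numpy.arange(0, 10, 0.2), pars)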
def besselk(v, x):
    """
    Modified Bessel function of the second kind of real order `v`, element-wise.

    Parameters:
       v (float): order of the Bessel function
       x (obs): real observable where to evaluate the Bessel function

    Returns:
       obs : the modified Bessel function computed for the input observable
    """
    new_mean = scipy.special.kv(v, x.mean)
    # dK_v(x)/dx = -(K_{v-1}(x) + K_{v+1}(x)) / 2
    aux = scipy.special.kv(v - 1, x.mean) + scipy.special.kv(v + 1, x.mean)
    g = pyobs.gradient(lambda x: -0.5 * aux * x, x.mean, gtype='diag')
    return pyobs.derobs([x], new_mean, [g],
                        description=f'BesselK[{v}] of {x.description}')
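# Hedged usage sketch: besselk is evaluated element-wise on an observable with
# a positive central value. The ensemble name and numerical values below are
# assumptions, generated as in the tests further down.
import pyobs

obsX = pyobs.observable()
obsX.create('EnsA', pyobs.random.acrand(1.5, 0.05, 0.0, 1000))
k1 = pyobs.besselk(1.0, obsX)   # K_1 evaluated on obsX, with propagated error
print(k1.error())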
def exp(x):
    """
    Return the exponential element-wise.

    Parameters:
       x (obs): input observable

    Returns:
       obs : the exponential of the input observable, element-wise.

    Examples:
       >>> expA = pyobs.exp(obsA)
    """
    new_mean = numpy.exp(x.mean)
    g = pyobs.gradient(lambda xx: xx * new_mean, x.mean, gtype='diag')
    return pyobs.derobs([x], new_mean, [g],
                        description=f'exp of {x.description}')
def sinh(x):
    """
    Return the hyperbolic sine element-wise.

    Parameters:
       x (obs): input observable

    Returns:
       obs : the hyperbolic sine of the input observable, element-wise.

    Examples:
       >>> B = pyobs.sinh(obsA)
    """
    new_mean = numpy.sinh(x.mean)
    aux = numpy.cosh(x.mean)
    g = pyobs.gradient(lambda xx: xx * aux, x.mean, gtype='diag')
    return pyobs.derobs([x], new_mean, [g],
                        description=f'sinh of {x.description}')
def log(x):
    """
    Return the natural logarithm element-wise.

    Parameters:
       x (obs): input observable

    Returns:
       obs : the logarithm of the input observable, element-wise.

    Examples:
       >>> logA = pyobs.log(obsA)
    """
    new_mean = numpy.log(x.mean)
    aux = numpy.reciprocal(x.mean)
    g = pyobs.gradient(lambda xx: xx * aux, x.mean, gtype='diag')
    return pyobs.derobs([x], new_mean, [g],
                        description=f'log of {x.description}')
def arccosh(x):
    """
    Return the inverse hyperbolic cosine element-wise.

    Parameters:
       x (obs): input observable

    Returns:
       obs : the inverse hyperbolic cosine of the input observable, element-wise.

    Examples:
       >>> B = pyobs.arccosh(obsA)
    """
    new_mean = numpy.arccosh(x.mean)
    aux = numpy.reciprocal(numpy.sqrt(x.mean**2 - numpy.ones(x.shape)))  # 1/sqrt(x^2-1)
    g = pyobs.gradient(lambda xx: xx * aux, x.mean, gtype='diag')
    return pyobs.derobs([x], new_mean, [g],
                        description=f'arccosh of {x.description}')
def eval(self, xax, pars):
    r"""
    Evaluates the function on a list of coordinates using the parameters
    obtained from a :math:`\chi^2` minimization.

    Parameters:
       xax (array or list of arrays) : the coordinates :math:`x_i^\mu` where
          the function must be evaluated. For combined fits, a list of
          arrays must be passed, one for each fit.
       pars (obs) : the observable returned by calling this class

    Returns:
       list of obs : observables corresponding to the functions evaluated
       at the coordinates `xax`.

    Examples:
       >>> fit1 = mfit(xax,W,f,df)
       >>> pars = fit1(yobs1)
       >>> print(pars)
       0.925(35) 2.050(19)
       >>> xax = numpy.arange(0,10,0.2)
       >>> yeval = fit1.eval(xax, pars)
    """
    if not isinstance(xax, list):
        xax = [xax]
    pyobs.check_type(pars, 'pars', pyobs.observable)
    N = len(xax)
    if N != len(self.csq):
        raise pyobs.PyobsError(
            'Coordinates and parameters do not match the number of internal functions'
        )
    out = []
    for ic in self.csq:
        [m, g] = self.csq[ic].eval(xax[ic], self.pdict, pars.mean)
        out.append(pyobs.derobs([pars], m, [pyobs.gradient(g)]))
    return out
N = 1000
tau = 0.0
data = pyobs.random.acrand(val, sig, tau, N)
obsA = pyobs.observable()
obsA.create('EnsA', data)
logobsA = pyobs.log(obsA)
print('obsA = ', obsA)
print('log(obsA) =', logobsA)

[a, da] = logobsA.error()
dda = logobsA.error_of_error()


def func(x):
    return numpy.log(x)


b4 = pyobs.error_bias4(obsA, func)
print(f'Error log(obsA) {da}; 4th moment {b4}; ratio {da/b4}')
print(f'Error of error log(obsA) {dda}; 4th moment {b4}; ratio {dda/b4}')
assert b4 < dda

try:
    pyobs.derobs([obsA], a, [])
except pyobs.PyobsError:
    print('Error caught')
def __pow__(self, a):
    new_mean = self.mean**a
    g0 = pyobs.gradient(lambda x: a * x * self.mean**(a - 1),
                        self.mean, gtype='diag')
    return pyobs.derobs([self], new_mean, [g0])
def __rmatmul__(self, y):
    g0 = pyobs.gradient(lambda x: y @ x, self.mean)
    return pyobs.derobs([self], y @ self.mean, [g0])
def reciprocal(self):
    new_mean = numpy.reciprocal(self.mean)
    g0 = pyobs.gradient(lambda x: -x * (new_mean**2), self.mean, gtype='diag')
    return pyobs.derobs([self], new_mean, [g0])
def __neg__(self):
    g0 = pyobs.gradient(lambda x: -x, self.mean, gtype='diag')
    return pyobs.derobs([self], -self.mean, [g0])
def eigLR(x):
    """
    Computes the eigenvalues and the left and right eigenvectors of a
    square matrix observable. The central values are computed using the
    `numpy.linalg.eig` routine.

    Parameters:
       x (obs): a square matrix (observable) with dimensions `NxN`

    Returns:
       list of obs: a vector observable with the eigenvalues and two matrix
       observables whose columns correspond to the right and left
       eigenvectors, respectively.

    Notes:
       The input matrix is not expected to be symmetric. If it is symmetric,
       the usage of `eig` is recommended for better performance.

    Examples:
       >>> [l,v,w] = pyobs.linalg.eigLR(mat)
       >>> for i in range(N):
       >>>     # check eigenvalue equation
       >>>     print(mat @ v[:,i] - v[:,i] * l[i])
       >>>     print(w[:,i] @ mat - w[:,i] * l[i])
    """
    if len(x.shape) > 2:  # pragma: no cover
        raise pyobs.PyobsError(
            f'Unexpected matrix with shape {x.shape}; only 2-D arrays are supported'
        )
    # left and right eigenvectors
    [l, v] = numpy.linalg.eig(x.mean)
    [l, w] = numpy.linalg.eig(x.mean.T)

    # d l_n = (w_n, dA v_n) / (w_n, v_n)
    gl = pyobs.gradient(
        lambda x: numpy.diag(w.T @ x @ v) / numpy.diag(w.T @ v), x.mean)

    # d v_n = sum_{m \neq n} (w_m, dA v_n) / (l_n - l_m) w_m
    def gradv(y):
        tmp = w.T @ y @ v
        gv = numpy.zeros(x.shape)
        for n in range(x.shape[0]):
            for m in range(x.shape[1]):
                if n != m:
                    gv[:, n] += tmp[m, n] / (l[n] - l[m]) * w[:, m]
        return gv

    gv = pyobs.gradient(gradv, x.mean)

    # d w_n = sum_{m \neq n} (v_m, dA^T w_n) / (l_n - l_m) v_m
    def gradw(y):
        tmp = v.T @ y.T @ w
        gw = numpy.zeros(x.shape)
        for n in range(x.shape[0]):
            for m in range(x.shape[1]):
                if n != m:
                    gw[:, n] += tmp[m, n] / (l[n] - l[m]) * v[:, m]
        return gw

    gw = pyobs.gradient(gradw, x.mean)
    return [
        pyobs.derobs([x], l, [gl]),
        pyobs.derobs([x], v, [gv]),
        pyobs.derobs([x], w, [gw])
    ]
data = pyobs.random.acrandn(mat, cov, 1.0, 4000)
omat = pyobs.observable()
omat.create('test', data.flatten(), shape=(2, 2))
[v, e] = omat.error()
print(pyobs.valerr(v, e))


# check inverse
def func(x):
    return numpy.linalg.inv(x)


g = pyobs.num_grad(omat, func)
g0 = pyobs.gradient(g)
[v0, e0] = pyobs.derobs([omat], func(omat.mean), [g0]).error()
[v1, e1] = pyobs.linalg.inv(omat).error()
assert numpy.all(numpy.fabs(v0 - v1) < 1e-12)
assert numpy.all(numpy.fabs(e0 - e1) < 1e-10)


# check eigenvalues, both symmetric and non-symmetric cases
def func(x):
    return numpy.linalg.eig(x)[0]


omatsym = (omat + pyobs.transpose(omat)) * 0.5
g = pyobs.num_grad(omatsym, func)
g0 = pyobs.gradient(g)
[v0, e0] = pyobs.derobs([omatsym], func(omatsym.mean), [g0]).error()
corr = pyobs.observable()
corr.create(f'm{mass:g}-{L}x{T}', data.flatten(), shape=(len(xax),))
print(corr)
[c, dc] = corr.error()

mat = pyobs.reshape(corr, (T // 8, T // 8))

flist = ['sum', 'trace', 'log', 'exp', 'cosh', 'sinh', 'arccosh']
for f in ['sum']:
    [v0, e0] = pyobs.__dict__[f](mat, axis=0).error()
    func = lambda x: numpy.__dict__[f](x, axis=0)
    mean = func(mat.mean)
    g = pyobs.num_grad(mat, func)
    g0 = pyobs.gradient(g)
    [v1, e1] = pyobs.derobs([mat], mean, [g0]).error()
    assert numpy.all(numpy.fabs(e1 - e0) < 1e-10)

for f in flist:
    [v0, e0] = pyobs.__dict__[f](mat).error()
    mean = numpy.__dict__[f](mat.mean)
    g = pyobs.num_grad(mat, numpy.__dict__[f])
    g0 = pyobs.gradient(g)
    [v1, e1] = pyobs.derobs([mat], mean, [g0]).error()
    assert numpy.all(numpy.fabs(e1 - e0) < 1e-10)

mat = pyobs.reshape(corr, (T // 8, T // 8))
flist = ['besselk']
slist = ['kv']
args = [1.0]