def precompute(z, gamma, alpha, run_time, x, taus):
    """Precompute Kzz, its Cholesky factor Lzz, and the Psi statistics."""
    precomp = wh.RaisingDotDict()
    taus = np.concatenate([tau.flatten() for tau in taus]).reshape((1, -1))
    precomp.Kzz = k(z, None, gamma, alpha)
    try:
        precomp.Lzz = np.linalg.cholesky(precomp.Kzz)
        precomp.Lzzinv = np.linalg.inv(precomp.Lzz)
        precomp.Kzzinv = precomp.Lzzinv.T @ precomp.Lzzinv
    except Exception:
        print('gamma:', gamma, 'alpha:', alpha, 'Kzz:', precomp.Kzz)
        raise
    tmin = 0
    exp = ((np.pi * alpha / 4) ** (1 / 2)) * (gamma ** 2) * np.exp(-wh.sqdist(z, None) / (4 * alpha))
    zy = z
    zbar = 0.5 * (z.reshape(z.shape[0], z.shape[1], 1) + zy.reshape(zy.shape[0], 1, zy.shape[1]))
    dmin_array = special.erf((zbar - tmin) / np.sqrt(alpha))
    dprod_sum = np.sum(
        np.array([np.prod(dmin_array - special.erf((zbar - (run_time - x[i])) / np.sqrt(alpha)), axis=0)
                  for i in range(len(x))]),
        axis=0)
    r = exp * dprod_sum
    r = 0.5 * (r + r.T)  # symmetrize
    precomp.psi_sum = r + 2 * gamma * __nugget_scalar + np.eye(r.shape[0]) * __nugget_scalar ** 2
    precomp.Kzzinv_psi_sum = precomp.Kzzinv @ precomp.psi_sum
    precomp.Kzzinv_psi_sum_Kzzinv = precomp.Kzzinv @ precomp.psi_sum @ precomp.Kzzinv
    precomp.Kxz = k(taus, z, gamma, alpha)
    precomp.Kzzinv_kzx = precomp.Kzzinv @ precomp.Kxz.T
    precomp.sigmas = kdiag(taus, gamma)
    return precomp
def kernelpdf(scale, sigma, dataset, datasetGen):
    # dataset is binned as eta1, eta2, mass, pt2, pt1
    maxR = np.full((100), 3.3)
    minR = np.full((100), 2.9)
    valsReco = np.linspace(minR[0], maxR[0], 100)
    valsGen = valsReco
    # 5D array of all possible combinations of kinematics and gen-mass values
    h = np.tensordot(scale, valsGen, axes=0)
    h_ext = np.swapaxes(np.swapaxes(h, 2, 4), 3, 4)[:, :, np.newaxis, :, :, :]
    sigma_ext = sigma[:, :, np.newaxis, np.newaxis, :, :]
    xscale = np.sqrt(2.) * sigma_ext
    maxR_ext = maxR[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
    minR_ext = minR[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
    maxZ = (maxR_ext - h_ext.astype('float64')) / xscale
    minZ = (minR_ext - h_ext.astype('float64')) / xscale
    arg = np.sqrt(np.pi / 2.) * sigma_ext * (erf(maxZ) - erf(minZ))
    # take the tensor product between the mass and genMass dimensions, sum over
    # gen masses, and divide each bin by the sum of gen events in that bin
    den = np.where(np.sum(datasetGen, axis=2) > 1000.,
                   np.sum(datasetGen, axis=2), -1)[:, :, np.newaxis, :, :]
    I = np.sum(arg * datasetGen[:, :, np.newaxis, :, :, :], axis=3) / den
    # give vals the right shape -> add a dimension for gen mass (axis=3)
    vals_ext = valsReco[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
    gaus = np.exp(-np.power(vals_ext - h_ext.astype('float64'), 2.) / (2 * np.power(sigma_ext, 2.)))
    # same contraction over gen masses and per-bin normalisation for the Gaussian numerator
    den2 = np.where(np.sum(datasetGen, axis=2) > 1000.,
                    np.sum(datasetGen, axis=2), 1)[:, :, np.newaxis, :, :]
    pdf = np.sum(gaus * datasetGen[:, :, np.newaxis, :, :, :], axis=3) / den2 / np.where(I > 0., I, -1)
    pdf = np.where(pdf > 0., pdf, 0.)
    massbinwidth = (maxR[0] - minR[0]) / 100
    pdf = pdf * massbinwidth
    return pdf
def psi(x, gamma, alpha, trange):
    tmin, tmax = trange
    y = x
    d = x.shape[0]
    exp1 = ((np.pi * alpha / 4) ** (d / 2)) * (gamma ** 2) * np.exp(-wh.sqdist(x, None) / (4 * alpha))
    xbar = 0.5 * (x.reshape(x.shape[0], x.shape[1], 1) + y.reshape(y.shape[0], 1, y.shape[1]))
    d = special.erf((xbar - tmin) / np.sqrt(alpha)) - special.erf((xbar - tmax) / np.sqrt(alpha))
    prodd = np.prod(d, axis=0)
    rval = exp1 * prodd
    rval = 0.5 * (rval + rval.T)  # symmetrize
    rval += 2 * gamma * __nugget_scalar
    rval += np.eye(rval.shape[0]) * __nugget_scalar ** 2
    return rval
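# Hedged sanity check of the one-dimensional Gaussian integral identity behind
# the erf differences used in psi() and precompute() above (variable names here
# are illustrative only; SciPy is assumed to be available):
#   int_{tmin}^{tmax} exp(-(t - xbar)**2 / alpha) dt
#     = sqrt(pi * alpha) / 2 * (erf((xbar - tmin) / sqrt(alpha))
#                               - erf((xbar - tmax) / sqrt(alpha)))
import numpy as np
from scipy import integrate, special

alpha, xbar, tmin, tmax = 0.7, 0.3, 0.0, 2.0
numeric, _ = integrate.quad(lambda t: np.exp(-(t - xbar) ** 2 / alpha), tmin, tmax)
closed = np.sqrt(np.pi * alpha) / 2 * (special.erf((xbar - tmin) / np.sqrt(alpha))
                                       - special.erf((xbar - tmax) / np.sqrt(alpha)))
print(numeric, closed)  # the two values should agree to quadrature accuracy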
def visit_Function(self, node):
    f = node.value
    if f == EXP:
        return np.exp(self.visit(node.expr))
    if (f == LOG) or (f == LN):
        return np.log(self.visit(node.expr))
    if f == LOG10:
        return np.log10(self.visit(node.expr))
    if f == SQRT:
        return np.sqrt(self.visit(node.expr))
    if f == ABS:
        return np.abs(self.visit(node.expr))
    if f == SIGN:
        return np.sign(self.visit(node.expr))
    if f == SIN:
        return np.sin(self.visit(node.expr))
    if f == COS:
        return np.cos(self.visit(node.expr))
    if f == TAN:
        return np.tan(self.visit(node.expr))
    if f == ASIN:
        return np.arcsin(self.visit(node.expr))
    if f == ACOS:
        return np.arccos(self.visit(node.expr))
    if f == ATAN:
        return np.arctan(self.visit(node.expr))
    if f == MAX:
        raise NotImplementedError(MAX)
    if f == MIN:
        raise NotImplementedError(MIN)
    if f == NORMCDF:
        raise NotImplementedError(NORMCDF)
    if f == NORMPDF:
        raise NotImplementedError(NORMPDF)
    if f == ERF:
        return erf(self.visit(node.expr))
def _ncx2_cdf(self, t, k_, nc):
    """Approximation of the cumulative distribution function for a
    noncentral chi-squared distributed variable."""
    r1 = k_ + nc
    r2 = 2 * (k_ + 2 * nc)
    r3 = 8 * (k_ + 3 * nc)
    m = 1 - r1 * r3 / (3 * r2**2)
    z = (t / (k_ + nc))**m
    alpha = 1 + m * (m - 1) * (r2 / (2 * r1**2)
                               - (2 - m) * (1 - 3 * m) * r2**2 / (8 * r1**4))
    rho = m * np.sqrt(r2) / r1 * (1 - (1 - m) * (1 - 3 * m) / (4 * r1**2) * r2)
    norm_cdf = 0.5 * (1 + erf((z - alpha) / (rho * np.sqrt(2))))
    return norm_cdf
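# Quick check of the approximation above (a sketch, assuming SciPy is available
# and that np and erf resolve as the function expects): since `self` is unused
# in _ncx2_cdf, it can be called directly with None and compared against the
# reference scipy.stats.ncx2 CDF.
from scipy.stats import ncx2

t, k_, nc = 12.0, 4.0, 3.0
print(_ncx2_cdf(None, t, k_, nc))  # normal-approximation CDF
print(ncx2.cdf(t, k_, nc))         # reference value; should be close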
def fprime_m_miller_troyer(mu, s2):
    # derivative with respect to mu of the firing-rate function below
    # (a Gaussian convolved with a ReLU), derived in Miller and Troyer 2002
    u = mu / np.sqrt(2 * s2)
    A = 0.5 * (1 + ssp.erf(u))
    return A
def f_miller_troyer(mu, s2):
    # firing-rate function: a Gaussian convolved with a ReLU,
    # derived in Miller and Troyer 2002
    u = mu / np.sqrt(2 * s2)
    A = 0.5 * mu * (1 + ssp.erf(u))
    B = np.sqrt(s2) / np.sqrt(2 * np.pi) * np.exp(-u**2)
    return A + B
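# Minimal usage sketch (assuming numpy and scipy.special are imported as np and
# ssp, as the functions above expect): fprime_m_miller_troyer should match a
# central finite-difference derivative of f_miller_troyer with respect to mu.
mu, s2, eps = 0.3, 0.5, 1e-6
fd = (f_miller_troyer(mu + eps, s2) - f_miller_troyer(mu - eps, s2)) / (2 * eps)
print(fd, fprime_m_miller_troyer(mu, s2))  # the two values should agree closely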
def normal_log_pdf(x, mu, sigma2, max_d=1e100):
    '''Truncated normal log pdf with mean mu, variance sigma2, and maximum
    distance max_d from the mean.'''
    return -0.5 * ((x - mu)**2 / sigma2 + np.log(sigma2 * 2.0 * np.pi)) \
        - np.log(erf(max_d / np.sqrt(sigma2 * 2.0)))
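# Hedged check (assuming numpy and scipy are available, and that the erf used by
# normal_log_pdf is scipy.special.erf): the truncated density
# exp(normal_log_pdf(x, mu, sigma2, max_d)) should integrate to ~1 over
# [mu - max_d, mu + max_d].
from scipy import integrate

mu, sigma2, max_d = 1.0, 0.8, 2.5
total, _ = integrate.quad(lambda x: np.exp(normal_log_pdf(x, mu, sigma2, max_d)),
                          mu - max_d, mu + max_d)
print(total)  # expected to be ~1.0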
def standard_normal_cdf(x):
    '''Standard normal CDF.'''
    return (1.0 + erf(x / np.sqrt(2.0))) / 2.0
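# Usage sketch (assuming numpy and scipy.special.erf are imported as the
# function above expects): the erf-based CDF matches scipy.stats.norm.cdf.
from scipy.stats import norm

xs = np.array([-1.0, 0.0, 1.96])
print(standard_normal_cdf(xs))  # approx [0.1587, 0.5, 0.975]
print(norm.cdf(xs))             # reference values from SciPy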
def erf(a: Numeric):
    return asps.erf(a)
def nllJ(x, etas, phis, pts, dataset, datasetGen):
    # NB: the binning arguments are overridden with hard-coded bin edges below
    #etas = np.arange(-0.8, 1.2, 0.4)
    #pts = np.array((3., 7., 15., 20.))
    etas = np.array((-0.8, 0.8))
    pts = np.array((3., 20.))
    #phis = np.arange(-np.pi, np.pi + 2.*np.pi/6., 2.*np.pi/6.)
    #etas = np.array((-0.8, -0.4))
    phis = np.array((-np.pi, np.pi))
    # dataset is binned as eta1, eta2, mass, phi2, phi1, pt2, pt1
    # retrieve parameter values (functions of eta and phi only)
    A = x[:len(etas) - 1, np.newaxis]
    e = x[(len(etas) - 1):2 * (len(etas) - 1)]
    M = x[2 * (len(etas) - 1):3 * (len(etas) - 1)]
    # assuming 1 bin in phi
    shape = dataset.shape[0] * dataset.shape[1] * dataset.shape[2] * dataset.shape[3] \
        * dataset.shape[4] * dataset.shape[5] * dataset.shape[6]
    #sigma = x[3*(len(etas)-1):3*(len(etas)-1)+shape].reshape(dataset.shape)
    #nsig = x[4*(len(etas)-1)+shape:,]
    sigma = x[3 * (len(etas) - 1):3 * (len(etas) - 1) + 1]
    nsig = x[3 * (len(etas) - 1) + 1:]
    etasC = (etas[:-1] + etas[1:]) / 2.
    ptsC = (pts[:-1] + pts[1:]) / 2.
    #s = np.sin(2*np.arctan(np.exp(-etas)))  # evaluated at the center of the eta bin
    s = 1.
    c = 1. / ptsC
    term1 = A - s * np.tensordot(e, c, axes=0) + np.tensordot(M, 1. / c, axes=0)  # vector -> matrix (eta1, pt1)
    term2 = A - s * np.tensordot(e, c, axes=0) - np.tensordot(M, 1. / c, axes=0)  # vector -> matrix (eta2, pt2)
    # combinations of all possible parameters in eta1, eta2, pt1, pt2 space
    combos = np.swapaxes(np.tensordot(term1, term2, axes=0), 1, 2)
    vals = np.linspace(2.9, 3.3, 100)
    # 7D array of all possible combinations of kinematics and mass values:
    # (eta1, eta2, mass, phi1, phi2, pt1, pt2)
    h = np.tensordot(np.sqrt(combos), vals, axes=0)
    h_ext = np.swapaxes(np.swapaxes(h, 2, 4), 3, 4)[:, :, :, np.newaxis, np.newaxis, :, :]
    xscale = np.sqrt(2.) * sigma
    maxZ = (3.3 - h_ext.astype('float64')) / xscale
    minZ = (2.9 - h_ext.astype('float64')) / xscale
    arg = np.sqrt(np.pi / 2.) * sigma * (erf(maxZ) - erf(minZ))
    # take the tensor product between the mass and genMass dimensions, sum over
    # gen masses, and divide each bin by the sum of gen events in that bin
    den = np.where(np.sum(datasetGen, axis=2) != 0.,
                   np.sum(datasetGen, axis=2), -1)[:, :, np.newaxis, :, :, :, :]
    I = np.sum(np.einsum("ijplmnk,ijqlmnk->ijpqlmnk", arg, datasetGen),
               axis=3) / den  # eta1, eta2, mass, phi2, phi1, pt2, pt1
    print(A, e, M, sigma, "pars")
    # give vals the right shape
    vals_ext = vals[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis, np.newaxis, np.newaxis]
    gaus = np.exp(-np.power(vals_ext - h_ext.astype('float64'), 2.) / (2 * np.power(sigma, 2.)))
    # same contraction over gen masses and per-bin normalisation for the Gaussian numerator
    den2 = np.where(np.sum(datasetGen, axis=2) != 0.,
                    np.sum(datasetGen, axis=2), 1)[:, :, np.newaxis, :, :, :, :]
    pdf = np.sum(np.einsum("ijplmnk,ijqlmnk->ijpqlmnk", gaus, datasetGen),
                 axis=3) / den2 / np.where(I > 0., I, -1)
    pdf = np.where(pdf > 0., pdf, 0.)
    massbinwidth = (3.3 - 2.9) / 100
    norm_pdf = nsig * pdf * massbinwidth
    # nexp - nobs*ln(nexp), with nexp = Nsig * masspdf(mass | parameters) * massbinwidth
    nll = np.sum(norm_pdf - dataset * np.log(np.where(norm_pdf > 0., norm_pdf, 1.)), axis=2)
    return np.sum(nll)
def normal_cdf(x):
    return (1 + sp.erf(x / anp.sqrt(2))) / 2