def speed_test(self):
    '''Overall speed test'''
    N = int(2e3)
    xmax = 5
    x = xmax * (np.random.rand(N) - 0.5)
    T = {}
    tic = time.perf_counter()
    erfcx(x)
    T['Benchmark (erfcx)'] = time.perf_counter() - tic
    tic = time.perf_counter()
    self.int_brute_force(x)
    T['Brute force integration'] = time.perf_counter() - tic
    tic = time.perf_counter()
    self.int_exact(x)
    T['Simplified integration'] = time.perf_counter() - tic
    tic = time.perf_counter()
    self.int_fast(x)
    T['Fast approximation'] = time.perf_counter() - tic
    rep = [
        'Speed Test Result',
        'Number of samples: {}'.format(N),
        'Sample range: [-{}, {}]'.format(xmax, xmax),
        'Time elapsed | Relative to benchmark',
    ]
    for k in T:
        rep.append('{}: {:.1e} | {:.1e}'.format(
            k, T[k], T[k] / T['Benchmark (erfcx)']))
    print('\n'.join(rep))
def _bemg(x_in, la, lb, u, s, A):
    a = math.exp(la)  # la, lb default to 2
    b = math.exp(lb)
    x = (x_in - u) / s
    Ea = a / 2 + x
    Eb = b / 2 - x
    # clamp: erfcx(t) grows like 2*exp(t**2) for large negative t and would overflow
    Ea[Ea < -25] = -25
    Eb[Eb < -25] = -25
    return A * a * b / (a + b) * np.exp(-x**2) * (
        erfcx(Eb) + erfcx(Ea)) / 1.1273434771994788
def __call__(self, w):
    sigma = self.p[3]
    x0 = self.p[1]
    g = self.p[2]
    a = sqrt(w**2 - 1j * g * w)
    a = a.real - 1j * a.imag  # we want the imaginary part of the root to be positive
    prefac = 1j * sqrt(pi) * self.p[0] * x0**2 / (sqrt(22) * sigma)
    eps = prefac * exp(-.5) * (1 / a) * (
        sp.erfcx(-1j * (a - x0) / sigma) + sp.erfcx(-1j * (a + x0) / sigma))
    # self.f = 2*prefac*exp(-x0**2/(2*sigma**2))*(1 + sp.erf(1j*x0/sigma))
    self.f = eps.real[1]
    return (eps.real, eps.imag)
def func_dawson(self, x: Tensor) -> Tensor:
    if x.is_cuda:
        if x.numel() > mnn_config.get_value('cpu_or_gpu'):
            return self._gpu_dawson(x)
        else:
            device = x.device
            return torch.from_numpy(
                scipy.erfcx(-x.cpu().numpy()) * math.sqrt(math.pi) / 2).to(device=device)
    else:
        return torch.from_numpy(scipy.erfcx(-x.numpy()) * math.sqrt(math.pi) / 2)
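# A standalone sanity check of the identity the wrapper above relies on
# (an illustrative sketch assuming only numpy/scipy, not the mnn classes):
# (sqrt(pi)/2) * erfcx(-x) equals the Dawson-like integral
# g(x) = exp(x**2) * int_{-inf}^{x} exp(-t**2) dt.
import numpy as np
from scipy.special import erfcx
from scipy.integrate import quad

def g_reference(x):
    val, _ = quad(lambda t: np.exp(-t * t), -np.inf, x)
    return np.exp(x * x) * val

for x in (-2.0, -0.5, 0.0, 0.7, 1.5):
    assert np.isclose(np.sqrt(np.pi) / 2 * erfcx(-x), g_reference(x))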
def test():
    relative_error = 0
    for i in range(100):
        x = -1 + i * (10 - (-1)) / 100
        my_erfcx = erfcx(torch.FloatTensor([x]))
        relative_error = relative_error + np.abs(
            my_erfcx.item() - special.erfcx(x)) / special.erfcx(x)
    average_error = relative_error / 100
    print(average_error)
    normal = Normal(loc=torch.Tensor([0.0]), scale=torch.Tensor([1.0]))
    # standard normal cdf at x, and the inverse cdf at 0.95
    print(normal.cdf(1.6449))
    print(normal.icdf(torch.Tensor([0.95])))
def int_exact(self, X):
    '''Integral of the 2nd order Dawson function (with a change of order of integration)'''
    q = np.zeros(X.size)
    fun1 = lambda x: np.power(erfcx(-x), 2) * dawsn(x)
    fun2 = lambda x: np.exp(-x * x) * np.power(erfcx(-x), 2)
    for i, x in enumerate(X):
        if x < -25:  # effectively -inf: use the asymptotic expansion
            q[i] = self.int_asym_neginf(x)
        else:
            y1, _ = quad(fun1, -np.inf, x)
            y2, _ = quad(fun2, -np.inf, x)
            q[i] = -np.pi / 4 * y1 + np.power(np.sqrt(np.pi) / 2, 3) * erfi(x) * y2
    return q
def mushroom_math(x, delta=0.1, vf=0.1):
    """
    New method, rewritten for numerical stability at high delta.
    delta=0 causes a divide-by-zero error!! Compensate elsewhere.
    http://ab-initio.mit.edu/wiki/index.php/Faddeeva_Package
    """
    from scipy.special import erfc, erfcx
    x_half = x / 2.0
    delta_double = 2.0 * delta
    return ((erfc(x_half)
             - erfcx(delta_double + x_half) / exp(x_half * x_half)
             - erfc(x)
             + ((0.25 - delta * (x + delta_double)) * erfcx(delta_double + x)
                + delta / SQRT_PI) * 4.0 / exp(x * x))
            * vf / (delta_double * erfcx(delta_double)))
def kai(model, altmp):
    # approximated 1/Psi
    M = model.M
    kaitmp = np.where(altmp / sqrt(2.0) < -10.0, -altmp, np.zeros((M)))
    kaitmp2 = np.where(
        abs(altmp / sqrt(2.0)) < 10.0,
        sqrt(2.0 / pi) / scisp.erfcx(-altmp / sqrt(2.0)),
        kaitmp)
    return kaitmp2
def _erfcx_integral(a, b, order):
    """Fixed-order Gauss-Legendre quadrature of erfcx from a to b."""
    assert np.all(a >= 0) and np.all(b >= 0)
    x, w = roots_legendre(order)
    x = x[:, np.newaxis]
    w = w[:, np.newaxis]
    return (b - a) * np.sum(w * erfcx((b - a) * x / 2 + (b + a) / 2), axis=0) / 2
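# A quick check of the fixed-order rule against adaptive quadrature
# (a standalone sketch using the function above; only numpy/scipy assumed):
import numpy as np
from scipy.special import erfcx, roots_legendre
from scipy.integrate import quad

a, b = 0.5, 3.0
reference, _ = quad(erfcx, a, b)
approx = _erfcx_integral(np.float64(a), np.float64(b), order=20)
assert np.isclose(approx, reference)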
def score_sum(sum_u, k, l, data):
    m, n = data.shape
    cnr = gammaln(m + 1) - gammaln(k + 1) - gammaln(m - k + 1)  # log C(m, k)
    cnc = gammaln(n + 1) - gammaln(l + 1) - gammaln(n - l + 1)  # log C(n, l)
    ar = sum_u / np.sqrt(k * l)
    # log of the upper-tail normal probability, written via erfcx to avoid underflow
    rest2 = -(ar * ar) / 2.0 + np.log(erfcx(ar / np.sqrt(2)) * 0.5)
    sc = -rest2 - cnr - cnc
    return sc
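# The rest2 term above is log P(Z > ar) for a standard normal Z, written in
# an underflow-safe form: erfc(t) = exp(-t**2) * erfcx(t) gives
# log(erfc(ar/sqrt(2))/2) = -ar**2/2 + log(erfcx(ar/sqrt(2))/2).
# A small check against scipy.stats (numpy/scipy only):
import numpy as np
from scipy.special import erfcx
from scipy.stats import norm

for ar in (0.5, 5.0, 20.0, 38.0):  # naive log(1 - cdf) fails well before 38
    stable = -(ar * ar) / 2.0 + np.log(erfcx(ar / np.sqrt(2)) * 0.5)
    assert np.isclose(stable, norm.logsf(ar))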
def log_erfc(x):
    fx = np.zeros(x.shape)
    ind = (x > 8)
    # for large x, erfc underflows; use log(erfcx(x)) - x**2 instead
    fx[ind] = np.log(erfcx(x[ind])) - x[ind]**2
    ind = (x <= 8)
    fx[ind] = np.log(erfc(x[ind]))
    return fx
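# Why the switch at x > 8 matters: erfc(x) underflows to 0 well before
# x = 30, so np.log(erfc(x)) returns -inf there, while the erfcx form stays
# finite. Minimal demonstration (numpy/scipy only):
import numpy as np
from scipy.special import erfc, erfcx

x = 30.0
with np.errstate(divide='ignore'):
    print(np.log(erfc(x)))        # -inf: erfc(30) has underflowed to 0
print(np.log(erfcx(x)) - x * x)   # approx -903.97, the correct value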
def cvoigt(omega, cent, delta, gamma=0.0):
    """Complex normalized Voigt line shape."""
    a = (delta**2) / (4.0 * numpy.log(2))
    z = (gamma - 1j * (omega - cent)) / (2.0 * numpy.sqrt(a))
    return numpy.real(special.erfcx(z)) * numpy.sqrt(numpy.pi / a) / 2.0
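# erfcx and the Faddeeva function w are related by erfcx(z) = w(iz), so the
# line shape above can equally be written with scipy.special.wofz. A small
# equivalence check (a sketch; only numpy/scipy assumed):
import numpy as np
from scipy.special import erfcx, wofz

rng = np.random.default_rng(0)
z = rng.normal(size=8) + 1j * rng.normal(size=8)
assert np.allclose(erfcx(z), wofz(1j * z))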
def channel(y, w, v, var_noise):
    """Compute g and g' for the probit channel."""
    phi = -y * w / np.sqrt(2 * (v + var_noise))
    g = 2 * y / (np.sqrt(2 * np.pi * (v + var_noise)) * erfcx(phi))
    dg = -g * (w / (v + var_noise) + g)
    return g, dg
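# With zeta = y*w/sqrt(v + var_noise), g above equals
# y * pdf(zeta) / (cdf(zeta) * sqrt(v + var_noise)); the erfcx form avoids
# underflow when cdf(zeta) is tiny. A hedged numerical check (numpy/scipy only):
import numpy as np
from scipy.special import erfcx
from scipy.stats import norm

y, w, v, var_noise = 1.0, -2.5, 0.3, 0.1
s = np.sqrt(v + var_noise)
zeta = y * w / s
naive = y * norm.pdf(zeta) / (norm.cdf(zeta) * s)
stable = 2 * y / (np.sqrt(2 * np.pi) * s * erfcx(-zeta / np.sqrt(2)))
assert np.isclose(naive, stable)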
def forward_cpu(self, x):
    if not available_cpu:
        raise ImportError('SciPy is not available. Forward computation'
                          ' of erfcx in CPU cannot be done. ' +
                          str(_import_error))
    self.retain_inputs((0,))
    self.retain_outputs((0,))
    return utils.force_array(special.erfcx(x[0]), dtype=x[0].dtype),
def lnDifErf(z1, z2):
    # z2 is always positive
    logdiferf = np.zeros(z1.shape)
    ind = np.where(z1 > 0.)
    ind2 = np.where(z1 <= 0.)
    if ind[0].size > 0:
        z1i = z1[ind]
        z12 = z1i * z1i
        z2i = z2[ind]
        logdiferf[ind] = -z12 + np.log(erfcx(z1i) - erfcx(z2i) * np.exp(z12 - z2i**2))
    if ind2[0].size > 0:
        z1i = z1[ind2]
        z2i = z2[ind2]
        logdiferf[ind2] = np.log(erf(z2i) - erf(z1i))
    return logdiferf
def testLogErfcx(self, dtype):
    x = tf.random.uniform(
        shape=[int(1e5)],
        minval=-3.,
        maxval=3.,
        dtype=dtype,
        seed=test_util.test_seed())
    x_, logerfcx_ = self.evaluate([x, tfp.math.logerfcx(x)])
    self.assertAllClose(np.log(scipy_special.erfcx(x_)), logerfcx_)
def testErfcxLargeNegative(self, dtype):
    x = tf.random.uniform(
        shape=[int(1e5)],
        minval=-100.,
        maxval=-20.,
        dtype=dtype,
        seed=test_util.test_seed())
    x_, erfcx_ = self.evaluate([x, tfp.math.erfcx(x)])
    self.assertAllClose(scipy_special.erfcx(x_), erfcx_)
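# Note on the range used above: erfcx(-x) = 2*exp(x**2) - erfcx(x), so for
# large negative arguments erfcx overflows float64 once x**2 exceeds
# log(DBL_MAX) ~ 709.78, i.e. around x = -26.64 (numpy/scipy only):
import numpy as np
from scipy.special import erfcx

print(erfcx(-26.5))  # ~1.9e305, still finite
print(erfcx(-27.0))  # inf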
def int_brute_force(self, X):
    '''2nd order Dawson function (direct integration)'''
    q = np.zeros(X.size)
    for i, x in enumerate(X):
        q[i], _ = quad(lambda u: erfcx(-u), 0, x)
    return q * np.sqrt(np.pi) / 2
def dbl_dawson(x):
    assert isinstance(x, np.ndarray)
    # assert x.ndim == 1
    length = x.size
    shape = x.shape
    x_flatten = x.flatten()
    res = np.empty([length], dtype=float)
    for i in range(length):
        res[i] = pi / 4 * quad(
            lambda u: np.exp(x_flatten[i]**2 - u**2) * erfcx(-u)**2,
            -np.inf, x_flatten[i])[0]
    return res.reshape(shape)
def g_prec(I, theta, sigm):
    range_grid = np.meshgrid(np.linspace(0, 1, nint), range(len(I)))[0]
    dint = (theta - V_r) / (sigm * nint)
    diff_grid = np.meshgrid(range(nint), (theta - V_r) / sigm)[1]
    low_value_grid = np.meshgrid(range(nint), (V_r - E_l - I * tau_m) / sigm)[1]
    int_mesh = erfcx(-(low_value_grid + range_grid * diff_grid))
    return 1. / (pi_sqrt_tau * dint * int_mesh.sum(axis=1))
def compute_F(t, x_bar, h):
    if x_bar < 1e-30:
        return math.sqrt(h) * sp.erf(np.pi / (2 * h * t))
    else:
        z = complex(np.pi / (2 * h * t), t * x_bar)
        w_iz = sp.erfcx(z)
        f = math.exp(-t**2 * x_bar**2)
        f = f - np.real(np.exp(-t**2 * x_bar**2 - z * z) * w_iz)
        f = f * math.sqrt(h)
        return f
def phi_noise(I, V_t, V_r, E_l, tau_m, sigm, nint):
    range_grid = np.meshgrid(np.linspace(0, 1, nint), range(len(I)))[0]
    dint = (V_t - V_r) / (sigm * nint)
    diff_grid = np.meshgrid(range(nint), (V_t - V_r) / sigm)[1]
    low_value_grid = np.meshgrid(range(nint), (V_r - E_l - I * tau_m) / sigm)[1]
    int_mesh = erfcx(-(low_value_grid + range_grid * diff_grid))
    return 1. / (np.sqrt(np.pi) * tau_m * dint * int_mesh.sum(axis=1))
def integrand(u_arr):
    """Integrand of self-consistency equation"""
    integrand_all = erfcx(-u_arr)
    # Equivalent piecewise form, kept for reference:
    #   for u < -4:  -1/sqrt(pi) * (1/u - 1/(2*u**3) + 3/(4*u**5) - 15/(8*u**7))
    #   otherwise:   exp(u**2) * (1 + erf(u))
    return integrand_all
def compute_expectation_spec(beta, mu, sigma):
    r"""Compute \int exp(-beta*x) * phi(x; mu, sigma^2) dx from mu to infty."""
    print(beta, mu, sigma)
    # written via erfcx because the exp(u**2) factor on its own is liable to overflow
    u = beta * sigma / sqrt(2)
    prefactor = 1 / 2.0 * exp(-beta * mu)
    # first_term_ref = exp(u**2)
    # second_term_ref = erfc(u)
    # ans_ref = prefactor * first_term_ref * second_term_ref
    ans = prefactor * erfcx(u)
    return ans
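# The closed form follows from completing the square:
#   int_mu^inf exp(-beta*x) * N(x; mu, sigma^2) dx
#     = 0.5 * exp(-beta*mu) * erfcx(beta*sigma/sqrt(2)).
# A numerical check of that identity (numpy/scipy only):
import numpy as np
from scipy.special import erfcx
from scipy.integrate import quad
from scipy.stats import norm

beta, mu, sigma = 2.0, 0.3, 1.5
reference, _ = quad(lambda x: np.exp(-beta * x) * norm.pdf(x, mu, sigma), mu, np.inf)
closed_form = 0.5 * np.exp(-beta * mu) * erfcx(beta * sigma / np.sqrt(2))
assert np.isclose(reference, closed_form)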
def CreateData(y_range=(0., 2.), npoints=1000000, filename='erfcxinv.h5'):
    # Create a table of values for erfcx(x), so that we can numerically estimate the inverse.
    # We're only interested in a limited range of values.
    # erfcx monotonically decreases, and we want f(x) to increase with index, hence the flip.
    x = np.flip(np.linspace(*y_range, npoints))
    y = spec.erfcx(x)
    result = np.column_stack((y, x))
    f = h5.File(filename, 'w')
    dset = f.create_dataset('erfcxinv', data=result, compression='gzip', compression_opts=7)
    f.close()
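# One way such a table can be used (an illustrative sketch: it assumes h5 is
# h5py and spec is scipy.special, matching the snippet above, and inverts
# y = erfcx(x) by linear interpolation with np.interp):
import numpy as np
import h5py as h5
from scipy import special as spec

CreateData(npoints=10000)
with h5.File('erfcxinv.h5', 'r') as f:
    table = f['erfcxinv'][...]
y_query = 0.5
x_est = np.interp(y_query, table[:, 0], table[:, 1])
assert np.isclose(spec.erfcx(x_est), y_query)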
def sigmoid_lif(i0, tau=1.0, sigma=1.0, vr=-68.0, vth=-48.0, vrevers=-50.0, dt=10E-3):
    x1 = (vr - vrevers - i0) / sigma
    x2 = (vth - vrevers - i0) / sigma
    dx = (x2 - x1) / 2000.0
    x = np.arange(x1, x2 + dx, dx, dtype=np.float64)
    fx = erfcx(-x)
    # mean first-passage time of the LIF neuron (Siegert integral), via a Riemann sum
    t = tau * np.sqrt(pi) * np.sum(fx * dx) + dt * tau
    return 1.0 / t
def test_erfc(self):
    # In the application to STO-GTO integration, the argument of erfcx
    # becomes a/(2.0*sqrt(b)), where a and b are the orbital exponents of
    # the STO and the GTO:
    #   a ~ 1.0, 0.5 or 1/3
    #   b ~ 0.01 - 30.0
    # so the argument ranges from about 5 down to 0.027.
    for theta in [0, 1, 45, 90, 270]:
        t = theta * np.pi / 180.0
        for a in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]:
            z = a * np.exp(1.0j * t)
            self.assertAlmostEqual(sp.erfc(z), erfc(z), places=15)
            self.assertAlmostEqual(sp.erfcx(z), erfcx(z), places=15)
def xGaussFn(mode, x, h, mu, sig, tau):
    if mode == 1:
        return (h * sig * np.sqrt(np.pi / 2.) / tau) * np.exp(
            (sig / tau)**2. / 2. - (x - mu) / tau) * sp.erfc(
                (sig / tau - (x - mu) / sig) / np.sqrt(2.))
    elif mode == 2:
        return (h * sig * np.sqrt(np.pi / 2.) / tau) * np.exp(
            (-1. / 2.) * ((x - mu) / sig)**2.) * sp.erfcx(
                (sig / tau - (x - mu) / sig) / np.sqrt(2.))
    elif mode == 3:
        return (h / (1 - (x - mu) * tau / sig**2.)) * np.exp(
            (-1. / 2.) * ((x - mu) / sig)**2.)
    else:
        print("unknown mode!")
        return -1
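# Modes 1 and 2 are the same exponentially modified Gaussian written two
# ways: since erfcx(v) = exp(v**2) * erfc(v), the Gaussian prefactor in
# mode 2 absorbs the exponential of mode 1, avoiding overflow when sig/tau
# is large. A quick equivalence check (assumes np/sp as in the snippet above):
import numpy as np
import scipy.special as sp

x = np.linspace(-5.0, 5.0, 11)
m1 = xGaussFn(1, x, h=1.0, mu=0.0, sig=1.0, tau=2.0)
m2 = xGaussFn(2, x, h=1.0, mu=0.0, sig=1.0, tau=2.0)
assert np.allclose(m1, m2)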
def analyticDistribution(x, args):
    # args[0] = time, args[1] = absorption coefficient, args[2] = material
    t = args[0]
    alpha = args[1]
    mat = args[2]
    z = mat.D * t
    x1 = alpha * np.sqrt(z) - x / (2 * np.sqrt(z))
    x2 = alpha * np.sqrt(z) + x / (2 * np.sqrt(z))
    x3 = mat.s * np.sqrt(t / mat.D) + x / 2. / np.sqrt(z)
    phi = np.array(
        0.5 * (erfcx(x1) +
               (alpha * mat.D + mat.s) / (alpha * mat.D - mat.s) * erfcx(x2))
        - mat.s * erfcx(x3) / (alpha * mat.D - mat.s))
    phi[phi == np.inf] = 0
    np.seterr(under='ignore')
    dat = phi * np.exp(-x * x / (4 * z) - t / mat.tau)
    np.seterr(under='raise')
    dat[np.where(dat == 0)] = np.exp(-alpha * x[np.where(dat == 0)])
    return dat
def trunc_norm_param_residuals(params):
    mean = params[:d]
    chol = params[d:]
    alpha = -mean * chol
    # inverse Mills ratio term, written via erfcx for numerical stability
    erf_term = 1 / erfcx(alpha / 2**0.5)
    std = 1 / chol
    var = std**2
    const = (2 / np.pi)**0.5
    trunc_mean = mean + const * erf_term * std
    trunc_var = var * (1 + const * alpha * erf_term - (const * erf_term)**2)
    mean_res = sample_mean - trunc_mean  # (d,)
    var_res = sample_var - trunc_var  # (d,)
    return np.concatenate((mean_res, var_res))
def _npcr(self, x):
    """
    Return the pdf/cdf ratio of a standard normal RV evaluated at x.

    Parameters
    ----------
    x : ndarray
        The values to evaluate the ratio at.

    Returns
    -------
    npcr_values : ndarray
        The evaluated ratios.
    """
    convert = x.dtype.type
    npcr_values = (2 / np.sqrt(2 * np.pi)) / convert(
        special.erfcx(-np.float64(x / np.sqrt(2))))
    return npcr_values
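# The identity behind _npcr: pdf(x)/cdf(x) = 2 / (sqrt(2*pi) * erfcx(-x/sqrt(2))),
# since cdf(x) = erfc(-x/sqrt(2))/2 and erfc(t) = exp(-t**2)*erfcx(t). The erfcx
# form stays finite deep in the left tail where cdf underflows. A small check
# (numpy/scipy only):
import numpy as np
from scipy.special import erfcx
from scipy.stats import norm

for x in (-5.0, -1.0, 0.0, 2.0):
    stable = (2 / np.sqrt(2 * np.pi)) / erfcx(-x / np.sqrt(2))
    assert np.isclose(stable, norm.pdf(x) / norm.cdf(x))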
def ln_diff_erfs(x1, x2, return_sign=False):
    """Stably compute the log of the difference of two erfs.

    :param x1: argument of the positive erf
    :type x1: ndarray
    :param x2: argument of the negative erf
    :type x2: ndarray
    :return: tuple containing (log(abs(erf(x1) - erf(x2))),
        sign(erf(x1) - erf(x2)))

    Based on MATLAB code that was written by Antti Honkela and modified by
    David Luengo, originally derived from code by Neil Lawrence.
    """
    x1 = np.require(x1).real
    x2 = np.require(x2).real
    if x1.size == 1:
        x1 = np.reshape(x1, (1, 1))
    if x2.size == 1:
        x2 = np.reshape(x2, (1, 1))

    if x1.shape == x2.shape:
        v = np.zeros_like(x1)
    else:
        if x1.size == 1:
            v = np.zeros(x2.shape)
        elif x2.size == 1:
            v = np.zeros(x1.shape)
        else:
            raise ValueError("This function does not broadcast unless provided with a scalar.")
    if x1.size == 1:
        x1 = np.tile(x1, x2.shape)
    if x2.size == 1:
        x2 = np.tile(x2, x1.shape)

    # ensure x1 >= x2 elementwise, remembering the sign of the difference
    sign = np.sign(x1 - x2)
    if x1.size == 1:
        if sign == -1:
            x1, x2 = x2, x1
    else:
        I = sign == -1
        swap = x1[I]
        x1[I] = x2[I]
        x2[I] = swap

    with np.errstate(divide='ignore'):  # silence log-of-zero warnings
        # Case 0: arguments of different sign, no problems with loss of accuracy.
        I0 = np.logical_or(np.logical_and(x1 > 0, x2 < 0),
                           np.logical_and(x2 > 0, x1 < 0))
        # Case 1: x1 == x2, so we have a log of zero.
        I1 = (x1 == x2)
        # Case 2: both arguments are non-negative.
        I2 = np.logical_and(x1 > 0, np.logical_and(np.logical_not(I0),
                                                   np.logical_not(I1)))
        # Case 3: both arguments are non-positive.
        I3 = np.logical_and(np.logical_and(np.logical_not(I0),
                                           np.logical_not(I1)),
                            np.logical_not(I2))

        _x2 = x2.flatten()
        _x1 = x1.flatten()
        for group, flags in zip((0, 1, 2, 3), (I0, I1, I2, I3)):
            if np.any(flags):
                if not x1.size == 1:
                    _x1 = x1[flags]
                if not x2.size == 1:
                    _x2 = x2[flags]
                if group == 0:
                    v[flags] = np.log(erf(_x1) - erf(_x2))
                elif group == 1:
                    v[flags] = -np.inf
                elif group == 2:
                    v[flags] = np.log(erfcx(_x2)
                                      - erfcx(_x1) * np.exp(_x2**2 - _x1**2)) - _x2**2
                elif group == 3:
                    v[flags] = np.log(erfcx(-_x1)
                                      - erfcx(-_x2) * np.exp(_x1**2 - _x2**2)) - _x1**2

    if return_sign:
        return v, sign
    else:
        if v.size == 1:
            if sign == -1:
                # add a complex part because the difference is negative
                v = v.astype(np.complex128)
                v += np.pi * 1j
        else:
            # add a complex part where the difference is negative
            v = v.astype(np.complex128)
            v[I] += np.pi * 1j
        return v
def differfln(x0, x1):
    # A (hopefully!) numerically more stable variant of
    # log(erf(x0) - erf(x1)) = log(erfc(x1) - erfc(x0)).
    return np.where(
        x0 > x1,
        -x1 * x1 + np.log(erfcx(x1) - np.exp(-x0**2 + x1**2) * erfcx(x0)),
        -x0 * x0 + np.log(np.exp(-x1**2 + x0**2) * erfcx(x1) - erfcx(x0)))
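# Quick comparison against the naive form in a regime where erf(x0) and
# erf(x1) both round to 1.0 and the difference cancels to zero
# (numpy/scipy only; differfln as defined above):
import numpy as np
from scipy.special import erf, erfcx

x0, x1 = 12.0, 11.0
with np.errstate(divide='ignore'):
    naive = np.log(erf(x0) - erf(x1))   # -inf: the difference cancels to 0
stable = differfln(np.float64(x0), np.float64(x1))
print(naive, stable)                     # stable is finite, near -124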
def _erfcx_cpu(x, dtype):
    from scipy import special
    return special.erfcx(x).astype(dtype)
def normcdfln(x):
    # log of the standard normal CDF; for x < 0 use erfcx so the result stays
    # finite where normcdf(x) underflows
    return np.where(
        x < 0,
        -.5 * x * x + np.log(.5) + np.log(erfcx(-x / np.sqrt(2))),
        np.log(normcdf(x)))
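# Check against scipy's log-CDF, including a point where naive log(cdf)
# would be -inf (numpy/scipy only; the x < 0 branch is inlined here so the
# snippet does not depend on the normcdf helper above):
import numpy as np
from scipy.special import erfcx
from scipy.stats import norm

for x in (-40.0, -3.0, 0.5, 4.0):
    if x < 0:
        stable = -.5 * x * x + np.log(.5) + np.log(erfcx(-x / np.sqrt(2)))
    else:
        stable = np.log(norm.cdf(x))
    assert np.isclose(stable, norm.logcdf(x))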