def test_lambertw_ufunc_loop_selection():
    # see https://github.com/scipy/scipy/issues/4895
    dt = np.dtype(np.complex128)
    assert_equal(lambertw(0, 0, 0).dtype, dt)
    assert_equal(lambertw([0], 0, 0).dtype, dt)
    assert_equal(lambertw(0, [0], 0).dtype, dt)
    assert_equal(lambertw(0, 0, [0]).dtype, dt)
    assert_equal(lambertw([0], [0], [0]).dtype, dt)
def minimum_Ns(x, arg='mutation'):
    if arg == 'variance':
        var = x
        guess = sqrt(2*var/lambertw(2*var)).real
        return newton(lambda x: condition(x, var), guess)
    elif arg == 'mutation':
        NUd = x
        guess = NUd/lambertw(NUd).real
        return newton(lambda x: condition(x, NUd*x), guess)
def find_N(spar, cond_num):
    """Find the data size for which HHL is more convenient than Conjugate
    Gradient, according to the Lambert W solution."""
    from scipy.special import lambertw
    import numpy as np
    w = lambertw(-np.log(2) / (spar * cond_num * np.sqrt(cond_num)), k=-1)
    return (-spar * cond_num * np.sqrt(cond_num) * w / np.log(2))
def calculate_delta_hoeffding(t_bound_coeff, W, K, alpha):
    # this * log(1/delta) * delta is the result of union bound over time and arms
    delta_term_coeff = t_bound_coeff * K * 2
    alpha_term = alpha / delta_term_coeff  # this must equal log(1/delta) * delta
    # two real solutions on branches 0 and -1; the one with larger delta requires t < 1 so use smaller
    log_delta = np.real(lambertw(-alpha_term, -1))
    assert (log_delta < np.real(lambertw(-alpha_term, 0)))
    delta = math.exp(log_delta)
    print("delta:", delta)
    max_t = alpha / (delta * 2 * K)
    assert (max_t >= 20)
    print("t cannot be more than", max_t)
    return delta
def exp_prof(r, totflux=1, Reff=1):
    from scipy.special import lambertw
    # the parenthetical below is non-trivial, but it's ~1.67835
    rs = Reff / -(lambertw(-1/2/np.exp(1), -1).real + 1.)
    rs = Reff/1.67835
    # print rs
    return totflux*np.exp(-r/rs)/np.pi/2./rs/rs
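# Quick check added for illustration (not part of the original snippet): the
# Lambert W expression above reproduces the hard-coded constant ~1.67835, the
# half-light radius of an exponential profile in units of the scale radius.
import numpy as np
from scipy.special import lambertw

const = -(lambertw(-1 / 2 / np.exp(1), -1).real + 1.)
print(const)  # ~1.67835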
def __init__(self, dens_gen):
    self.dgen = dens_gen
    cen = self.dgen.size // 2
    # Maximum res_edge for LLM limits
    self.res_max = float(1. / max(self.dgen.qrad[0, cen[1], cen[2]],
                                  self.dgen.qrad[cen[0], 0, cen[2]],
                                  self.dgen.qrad[cen[0], cen[1], 0]))
    # u-vectors for LLM
    uvox = np.linalg.inv(self.dgen.qvox.T) / self.dgen.size
    #x, y, z = self.dgen.x, self.dgen.y, self.dgen.z
    x, y, z = np.indices(self.dgen.qrad.shape)
    x -= self.dgen.qrad.shape[0] // 2
    y -= self.dgen.qrad.shape[1] // 2
    z -= self.dgen.qrad.shape[2] // 2
    self.urad = cp.array(
        np.linalg.norm(np.dot(uvox, np.array([x, y, z]).reshape(3, -1)),
                       axis=0).reshape(x.shape))
    self.abc = tuple([
        int(_) for _ in self.dgen.config.get(
            'parameters', 'rlatt_vox', fallback='0 0 0').split()
    ])
    self.slimits = [
        np.real(
            np.sqrt(
                special.lambertw(
                    -(1.e-3 * special.factorial(n))**(1. / n) / n, k=0))
            * np.sqrt(n) * -1j)
        for n in range(1, 150)
    ]
    self.slimits = np.array(self.slimits)
def depth_dependent_clearance(I, k, c, swim_speed, min_vis):
    """k specifies the attenuation rate, c is the light detection threshold,
    I specifies the light level."""
    D = 2 * special.lambertw(1 / 2 * k**2 * np.sqrt(
        (c * I + c * k) / (I * k**2)) * (1 / c - k / (c * (I + k))))
    beta = swim_speed * D**2 + swim_speed * min_vis**2
    return beta
def F(v=None, z=None):
    if v is None:
        return 0, matrix(0.0, (m, 1))
    u = -D.T*v  # compute u
    # === define some auxiliary variables ===
    Y2 = mul(ki, Y**2)
    z1 = u - ki + rho*alpha
    W = matrix(np.real(lambertw(mul((Y2/rho), exp(-z1/rho)))))
    h_opt = W + (z1/rho)
    dh_to_du = (1/rho)*(div(1, 1+W))
    z2 = mul(Y2, exp(-h_opt))
    z3 = z2 + z1 - rho*h_opt
    # === compute f ===
    f = sum(mul(u, h_opt) - mul(ki, h_opt) - z2 - (rho/2)*(h_opt-alpha)**2)
    # === compute Jacobian ===
    df_to_du = h_opt + mul(dh_to_du, z3)
    Df = -df_to_du.T*D.T
    if z is None:
        return f, Df
    # === compute Hessian ===
    d2h_to_du2 = (1/rho**2)*div(W, (1+W)**3)
    d2f_to_du2 = mul(d2h_to_du2, z3) + mul(dh_to_du, 2 - mul(z2+rho, dh_to_du))
    H = D*spdiag(mul(z[0], d2f_to_du2))*D.T
    return f, Df, H
def q_(sb, sd, u, k, i):
    mk = m_(sb, sd, u, k)
    q_kk = (-1. / mk) * lambertw(-mk * np.exp(-mk)).real
    if i > k:
        return 1.
    if k == i:
        return q_kk
    else:
        m_i = m_(sb, sd, u, i)
        q_ki = (-1 / m_i) * lambertw(-m_i * np.exp(
            -m_i * (1 + sum([((u**(j - i)) / factorial((j - i), exact=True))
                             * (1 - q_(sb, sd, u, k, int(j)))
                             for j in np.arange(i + 1, k + 1, step=1)])))).real
        return q_ki
def rho_calculator(av_w, agg=False, indist=False, M=1, **kwargs):
    """
    Computes the Lagrange multiplier for the case of fixed k and a desired
    average existing weight av_w = T/<E> (where T is the total number of
    events). It does so by inverting the equation <t | t>0>(rho) -> rho(av_w)
    according to each case.

    Input:
        av_w: total number of events divided by total number of binary edges.
        agg: set to True to analyze the case of aggregation of binary networks.
        indist: set to True to analyze the case of weighted networks.
        M: number of aggregated layers (ignored for the ME case).

    Output:
        rho: Lagrange multiplier for the generation of random network ensembles.
    """
    av_w = 1.0*av_w
    if not indist and not agg:
        # ME case, fully analytical
        from scipy import special
        x = -av_w*np.exp(-av_w)
        rho = (special.lambertw(x, 0) + av_w).real
    else:
        from scipy import optimize as opt
        if indist:
            if M == 1:
                # fully analytical
                rho = 1. - 1/av_w
            else:
                # must solve equation numerically
                rho = opt.brentq(rho_ZINB, 1e-16, 1-1e-14, args=(av_w, M), **kwargs)
        else:
            if M == 1:
                rho = 1  # pretty absurd, it is a binary network!
            else:
                # must solve equation numerically
                rho = opt.brentq(rho_ZIB, 1e-12, 2e20, args=(av_w, M), **kwargs)
    return rho
def get_min_d_n(self):
    for n in range(self.number_of_user):
        w = (self.tasks[n].local_to_remote_size * math.log(2) /
             (self.W * self.tasks[n].DAG.D / 100)) - 1
        d = 0.000005
        x = d * math.pow(self.tasks[n].H[self.id], 2) / (
            self.get_ch_number(n) * self.N_0 * np.exp(1)) - 1 / np.exp(1)
        current = lambertw(x, 0)
        while current.real < w:
            d = d + 0.000005
            x = d * math.pow(self.tasks[n].H[self.id], 2) / (
                self.get_ch_number(n) * self.N_0 * np.exp(1)) - 1 / np.exp(1)
            current = lambertw(x, 0)
            # print(d, current.real, w)
        self.d_n_min[n] = d
def dimension_uniform_sphere(py, alphas):
    '''
    Gives an estimation of the dimension of a uniformly sampled n-sphere
    corresponding to the average probability of being inseparable and a
    margin value.

    Inputs:
        py - average fraction of data points which are INseparable.
        alphas - set of values (margins), must be in the range (0;1).
            It is assumed that py and alphas have the same length.

    Outputs:
        n - effective dimension profile as a function of alpha
        n_single_estimate - a single estimate for the effective dimension
        alfa_single_estimate - the alpha corresponding to n_single_estimate.
    '''
    if len(py) != len(alphas[0, :]):
        raise ValueError('length of py (%i) and alpha (%i) does not match'
                         % (len(py), len(alphas[0, :])))
    if np.sum(alphas <= 0) > 0 or np.sum(alphas >= 1) > 0:
        raise ValueError('"alphas" must be a real vector whose values lie '
                         'within the (0,1) interval')

    # Calculate dimension for each alpha
    n = np.zeros((len(alphas[0, :])))
    for i in range(len(alphas[0, :])):
        if py[i] == 0:
            # All points are separable. Nothing to do and not interesting
            n[i] = np.nan
        else:
            p = py[i]
            a2 = alphas[0, i]**2
            w = np.log(1 - a2)
            n[i] = np.real(lambertw(
                -(w / (2 * np.pi * p * p * a2 * (1 - a2))))) / (-w)
    n[n == np.inf] = float('nan')

    # Find indices of alphas which are not completely separable
    inds = np.where(~np.isnan(n))[0]
    if len(inds) == 0:
        warnings.warn('All points are fully separable for any of the chosen alphas')
        return n, np.array([np.nan]), np.nan

    # Find the maximal value of such alpha
    alpha_max = max(alphas[0, inds])
    # The reference alpha is the closest to 90% of the maximal partially separable alpha
    alpha_ref = alpha_max * 0.9
    k = np.where(abs(alphas[0, inds] - alpha_ref)
                 == min(abs(alphas[0, :] - alpha_ref)))[0]
    # Get corresponding values
    alfa_single_estimate = alphas[0, inds[k]]
    n_single_estimate = n[inds[k]]
    return n, n_single_estimate, alfa_single_estimate
def current(self, volts, temp, flux):
    V = volts
    T = temp
    k = self.k
    q = self.q
    n = self.n
    I_L = self.isc * flux / 1000
    V_T = k * T / q
    ns = self.nseries  # number of cells in series
    Vc = V / ns  # cell voltage
    Rs = self.Rs
    Rsh = self.Rsh
    I0 = self.a_parameter * self.area * exp(-self.eg / V_T)
    if Rs == 0:
        Iout = I_L - I0 * (exp(Vc / (n * V_T)) - 1) - Vc / Rsh
    else:
        term1 = ((I_L + I0) - Vc / Rsh) / (1 + Rs / Rsh)
        term2 = -n * V_T / Rs
        term3 = I0 * Rs / (n * V_T * (1 + Rs / Rsh))
        term4 = Vc / (n * V_T)
        term5 = (1 - Rs / (Rs + Rsh))
        term6 = (I_L + I0) * Rs / (n * V_T * (1 + Rs / Rsh))
        Iout = term1 + term2 * np.real(
            lambertw(term3 * exp(term4 * term5 + term6)))
    return Iout
def _rand_pdf(n_in, em_gain, x_max, size):
    """Draw samples from the EM gain distribution."""
    x = np.random.random(size)

    # Use exact solutions for n_in == 1 and 2
    if n_in == 1:
        n_out = -em_gain * np.log(1 - x)
    elif n_in == 2:
        n_out = -em_gain * special.lambertw((x - 1) / np.exp(1), -1).real - em_gain
    else:
        # For n > 2 use CDF approximation
        # Use x values ranging from 0 to maximum allowable x output
        x_axis = np.arange(0, x_max).astype(float)
        x_axis[0] = np.finfo(float).eps  # Use epsilon to avoid divide by 0
        cdf = _get_cdf(n_in, em_gain, x_axis)

        if cdf is None:
            # If cdf maxes out, return maximum value
            n_out = np.ones_like(x) * x_max
        else:
            # Draw random samples from the CDF
            cdf_lookups = (cdf.max() - cdf.min()) * x + cdf.min()
            n_out = x_axis[np.searchsorted(cdf, cdf_lookups)]  # XXX This could be made more accurate

    return np.round(n_out)
def function_h_minus_one(x):
    r"""The inverse function of :math:`h(u)`, that is
    :math:`h^{-1}(x) = u \Leftrightarrow h(u) = x`.
    It is given by the Lambert W function, see :func:`scipy.special.lambertw`:

    .. math:: h^{-1}(x) = - \mathcal{W}(- \exp(-x)).

    - Example:

    >>> np.random.seed(105)
    >>> y = np.random.randn() ** 2
    >>> print(f"y = {y}")
    y = 0.060184682907834595
    >>> x = function_h(y)
    >>> print(f"h(y) = {x}")
    h(y) = 2.8705220786966508
    >>> z = function_h_minus_one(x)
    >>> print(f"h^-1(x) = {z}")
    h^-1(x) = 0.060184682907834595
    >>> assert np.isclose(z, y), "Error: h^-1(h(y)) = z = {} should be very close to y = {}...".format(z, y)
    """
    if x <= 1:
        raise ValueError(
            "Error: the function h inverse only accepts values larger than 1, not x = {}".format(x))
    sol = root_scalar(lambda u: function_h(u) - x, x0=x, x1=2 * x)
    if sol.converged:
        return sol.root
    else:
        z = -lambertw(-exp(-x))
        return z.real
def solve_chareq_rate_boxcar(branch, k, tau, W_rate, width, delay):
    """
    Solve the characteristic equation for the linearized rate model for one
    branch analytically. Requires a spatially organized network with boxcar
    connectivity profile.

    Parameters
    ----------
    branch : float
        Branch number.
    k : float
        Wavenumber in 1/mm.
    tau : float
        Time constant from fit in s.
    W_rate : np.ndarray
        Weights from fit.
    width : np.ndarray
        Spatial widths of boxcar connectivity profile in mm.
    delay : float
        Delay in s.

    Returns
    -------
    eigenval : complex

    Notes
    -----
    delay and tau must be floats; W_rate and width are vectors.
    """
    M = W_rate * p_hat_boxcar(k, width)
    xi = determinant(M)
    eigenval = -1./tau + 1./delay * \
        lambertw(xi * delay/tau * np.exp(delay/tau), branch)
    return eigenval
def d_time_to_ground(initial_velocity, initial_position):
    K = (initial_velocity.z - A) / DECAY
    const_A = (K - FPS * (initial_position.z - BALL_RADIUS)) / A
    W = lambertw((DECAY * K * math.e**(DECAY * const_A)) / A)
    ret = const_A - (W / DECAY)
    return ret.real / FPS
def envelope_circuit(signal, cap=220e-12, res=50):
    v_c = 0
    v_out = []

    r_d = 25
    i_s = 3e-6
    n = 1.06
    v_t = 26e-3

    charge_exp = np.exp(-signal.dt/(res*cap))
    discharge = i_s*res*(1-charge_exp)
    lambert_factor = n*v_t*res/r_d*(1-charge_exp)
    frac = i_s*r_d/n/v_t
    lambert_exponent = np.log(frac) + frac

    for v_in in signal.values:
        a = lambert_exponent + (v_in - v_c)/n/v_t
        if a > 100:
            b = np.log(a)
            lambert_term = a - b + b/a
        else:
            lambert_term = np.real(lambertw(np.exp(a)))
            if np.isnan(lambert_term):
                lambert_term = 0
        v_c = v_c*charge_exp - discharge + lambert_factor*lambert_term
        v_out.append(v_c)

    return pyrex.Signal(signal.times, v_out,
                        value_type=pyrex.Signal.ValueTypes.voltage)
def calculate_jcell(self, vcell, params):
    """
    Calculate current density as a function of cell voltage (vcell) using the
    exact expression of the jV curves.

    Params:
        jph  -- photocurrent density (A/cm^2)
        jnot -- dark-saturation current density (A/cm^2)
        Rs   -- series resistance (Ohm.cm^2)
        Rsh  -- shunt resistance (Ohm.cm^2)

    Returns cell current density (jcell in A/cm^2).
    """
    qE = CHARGE_ELEM
    kB = BOLTZMANN_CONST
    T = self.temperature

    # unpack other parameters
    jph, jnot, m, Rs, Rsh = params

    # calculate jcell as a function of vcell through the exact analytical
    # expression of the diode model equation
    jcell = (qE * vcell
             + (-lambertw(-qE * Rs * jnot * Rsh
                          * np.exp(Rsh * qE * (Rs * jph + Rs * jnot - vcell)
                                   / (m * kB * T * (Rsh + Rs)))
                          / (-Rs * m * kB * T - Rsh * m * kB * T)).real
                + Rsh * qE * (Rs * jph + Rs * jnot - vcell)
                / (m * kB * T * (Rsh + Rs)))
             * m * kB * T) / (qE * Rs)
    return jcell
def rhs_sce_alpha(self, w, adim=False):
    '''
    Right-hand side of the self-consistent equation for w in the alpha model.
    '''
    w_tmp = w
    if not adim:
        w_tmp = w/self.dimW if isinstance(w, float) else w.copy()/self.dimW
    w_min = self.w_minus(adim=True)
    cst = self.EL + self.Ie - self.Vr
    cst2 = self.c*self.k*np.e*self.ts
    if isinstance(w, float):
        if np.isclose(w, w_min):
            w_tmp = np.nextafter(w_min, w_min+1.)
    else:
        w_tmp[np.isclose(w_tmp, w_min)] = np.nextafter(w_min, w_min+1.)
    # test
    shift = self.EL + self.Ie - w_tmp
    Vd = shift + (self.Vr - shift)*np.exp(-self.d)
    # between d and d+4\tau_s
    I = self.c*self.k*np.e / 4.
    V_int = Vd + 4*self.ts*(self.Ie + I - Vd + self.EL - w_tmp)
    LW = np.real(lambertw(-np.exp(shift), -1))
    DT, Vth = self._param["Delta_T"], self._param["V_th"]
    rhs = cst + np.exp(self.d)*(LW + cst2)/(4*self.ts-1)
    return rhs if adim else rhs*self.dimW
def estimate_library_complexity(nseq, ndup, nopticaldup=0):
    """Estimate library complexity, accounting for optical/clustering duplicates.

    Parameters
    ----------
    nseq : int
        Total number of sequences
    ndup : int
        Total number of duplicates
    nopticaldup : int, optional
        Number of non-PCR duplicates, by default 0

    Returns
    -------
    float
        Estimated complexity
    """
    nseq = nseq - nopticaldup
    if nseq == 0:
        logger.warning("Empty or fully duplicated library, can't estimate complexity")
        return 0
    ndup = ndup - nopticaldup
    u = (nseq - ndup) / nseq
    if u == 0:
        logger.warning(
            "All the sequences are duplicates. Did you run complexity estimation on a duplicates file?"
        )
        return 0
    seq_to_complexity = special.lambertw(-np.exp(-1 / u) / u).real + 1 / u
    complexity = float(nseq / seq_to_complexity)  # float() cleans np.int64 data type
    return complexity
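# Hypothetical usage sketch (my addition, not from the original source): for a
# library of C distinct molecules sampled with nseq reads, the expected unique
# fraction is (C/nseq)*(1 - exp(-nseq/C)); the Lambert W expression above
# inverts that relation, so feeding the expected duplicate count back in
# should roughly recover C.
import numpy as np

C_true = 1_000_000
nseq = 1_000_000
unique = C_true * (1 - np.exp(-nseq / C_true))
ndup = int(round(nseq - unique))
print(estimate_library_complexity(nseq, ndup))  # ~1e6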
def distance_before_contraction(self, x, d_2):
    # d_1 = self.beta_inv_(self.eta, self.circle.radius, self.gamma, d_2)
    l = lambertw(
        self.eta * self.gamma * np.exp(self.circle.radius - self.gamma * d_2)).real
    d_1 = l / self.gamma + d_2
    return d_1
def computePSPnorm(tauMem, CMem, tauSyn):
    a = (tauMem / tauSyn)
    b = (1.0 / tauSyn - 1.0 / tauMem)
    t_max = 1.0 / b * (-lambertw(-exp(-1.0 / a) / a, k=-1).real - 1.0 / a)
    return (exp(1.0) / (tauSyn * CMem * b)
            * ((exp(-t_max / tauMem) - exp(-t_max / tauSyn)) / b
               - t_max * exp(-t_max / tauSyn)))
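# Illustrative check (my addition): t_max from the closed-form Lambert W
# expression above should be the time at which the unnormalized PSP shape in
# the return statement peaks. Parameter values here are arbitrary examples.
import numpy as np
from scipy.special import lambertw

tauMem, CMem, tauSyn = 20.0, 250.0, 0.5
a = tauMem / tauSyn
b = 1.0 / tauSyn - 1.0 / tauMem
t_max = 1.0 / b * (-lambertw(-np.exp(-1.0 / a) / a, k=-1).real - 1.0 / a)

t = np.linspace(0.01, 10.0, 10000)
psp = (np.exp(-t / tauMem) - np.exp(-t / tauSyn)) / b - t * np.exp(-t / tauSyn)
assert abs(t[np.argmax(psp)] - t_max) < 1e-2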
def voltage(self, current, temp, flux):
    I = current
    T = temp
    k = self.k
    q = self.q
    n = self.n
    ns = self.nseries  # number of cells in series
    V_T = k * T / q
    I_L = self.isc * flux / 1000
    I0 = self.a_parameter * self.area * exp(-self.eg / V_T)
    Rs = self.Rs
    Rsh = self.Rsh
    term1 = (I_L + I0) * Rsh
    term2 = -I * (Rs + Rsh)
    term3 = -n * V_T
    term4 = I0 * Rsh / (n * V_T)
    term5 = (I_L + I0 - I) * Rsh / (n * V_T)
    try:
        Vout = ns * (term1 + term2
                     + term3 * np.real(lambertw(term4 * exp(term5))))
    except Exception:
        print('Ignoring shunt resistance')
        Vout = ns * (n * V_T * log((I_L - I) / I0 + 1) - I * Rs)
    return Vout
def my_i_from_v(resistance_shunt, resistance_series, nNsVth, voltage,
                saturation_current, photocurrent):
    '''
    Internal function to estimate module output current at a given voltage.

    * Slightly adapted from PVLIB to better fit my use case
    TODO: Add citation to PVLib
    **Should be hidden**
    '''
    # This transforms Gsh=1/Rsh, including ideal Rsh=np.inf into Gsh=0., which
    # is generally more numerically stable
    conductance_shunt = 1./resistance_shunt

    # Ensure that we are working with read-only views of numpy arrays
    # Turns Series into arrays so that we don't have to worry about
    # multidimensional broadcasting failing
    Gsh, Rs, a, V, I0, IL = (conductance_shunt, resistance_series, nNsVth,
                             voltage, saturation_current, photocurrent)

    # LambertW argument, cannot be float128, may overflow to np.inf
    argW = Rs*I0/(a*(Rs*Gsh + 1.)) * np.exp((Rs*(IL + I0) + V) / (a*(Rs*Gsh + 1.)))

    # lambertw typically returns complex value with zero imaginary part
    # may overflow to np.inf
    lambertwterm = lambertw(argW).real

    # Eqn. 2 in Jain and Kapoor, 2004
    # I = -V/(Rs + Rsh) - (a/Rs)*lambertwterm + Rsh*(IL + I0)/(Rs + Rsh)
    # Recast in terms of Gsh=1/Rsh for better numerical stability.
    I = (IL + I0 - V*Gsh) / (Rs*Gsh + 1.) - (a/Rs)*lambertwterm

    return I
def equations(p):
    v, f = p
    return (((p_delta * 2 * R * 2) / (length * density * f))**(1 / 2) - v,
            (1 / (0.838 * lambertw(0.629 * (2 * v * R * density) / viscosity)))**2 - f)
def incubation_time(sb, sd, u, B, infection_nb, c=400):
    kk = K(sb, sd, u)
    states = stateSpace(B, kk)
    n = states[infection_nb]
    for i in range(kk + 1):
        if n[i] != 0:
            k_0 = i
            break
    p_k = [clickProba(sb, sd, u, k, n) for k in range(k_0, kk + 1)] + [extinctionProb(sb, sd, u, n)]
    k = np.random.choice(list(range(k_0, kk + 1)) + [100],
                         p=np.array(p_k) / sum(p_k))
    mk = m_(sb, sd, u, k)
    qk = (-1. / mk) * lambertw(-mk * np.exp(-mk)).real
    if k == 100:
        return 0, k
    if k == k_0:
        n_t = n[k]
        prob_l = []
        for l in range(1, n_t + 1, 1):
            prob_l.append((comb(n_t, l, exact=False) * ((1 - qk)**l)
                           * (qk**(n_t - l))) / (1 - qk**n_t))
        l = np.random.choice(range(1, n_t + 1), 1, p=prob_l)[0]
        Wk = np.random.gamma(shape=l, scale=1 / (1 - qk))
        if Wk > c * np.exp(-u / sd):
            return 0, 100
        return np.ceil((np.log(c) - np.log(Wk) - (u / sd)) / np.log(mk)), k
    elif k > k_0:
        sum_ = 0
        for i in range(k_0, k + 1):
            sum_ += ((n[i] * m_(sb, sd, u, i) * (u**(k - i)))
                     / factorial(k - i, exact=True)) * (
                1 + q_(sb, sd, u, k, i)
                * ((1 - q_(sb, sd, u, k, k))
                   / (q_(sb, sd, u, k - 1, i) - q_(sb, sd, u, k, i))))
        n_t = int(np.ceil(sum_))
        prob_l = []
        for l in range(1, n_t + 1, 1):
            prob_l.append((comb(n_t, l, exact=False) * ((1 - qk)**l)
                           * (qk**(n_t - l))) / (1 - qk**n_t))
        l = np.random.choice(range(1, n_t + 1), 1, p=prob_l)[0]
        Wk = np.random.gamma(shape=l, scale=1 / (1 - qk))
        if Wk > c * np.exp(-u / sd):
            return 0, 100
        return np.ceil((np.log(c) - np.log(Wk) - (u / sd)) / np.log(mk)) + 1, k
def kEG1(beta, tau):
    k = np.real(-beta * lambertw(-np.exp(-(beta + tau) / beta), k=-1) - beta - tau)
    if np.isscalar(k):
        k = k if k > 0 else 0
    else:
        k[k < 0] = 0
    return k
def test_value(self):
    z = np.random.uniform(high=10, size=(100,))
    ssw = ss.lambertw(z)
    w = lambertw(z)
    with tf.Session() as sess:
        w = sess.run(w)
    np.testing.assert_allclose(w, ssw)
def ionised_bondi(r):
    global r_ion, v2, cs_i, cs2_i, bondi_r_i, rho2
    omega = -(v2 / cs_i)**2 * (r_ion / r)**4 * \
        np.exp(4. * (bondi_r_i / r_ion - bondi_r_i / r) - v2**2 / cs_i**2)
    v = -cs_i * np.sqrt(-lambertw(omega, -1).real)
    rho = r_ion**2 * v2 * rho2 / r**2 / v
    P = cs2_i * rho
    return rho, v, P, rho * 0.
def get_tmax(self, p, cutoff=None):
    # approximate formula for tmax
    if cutoff is None:
        cutoff = self.cutoff
    rho = p[1]
    cS = p[2]
    k0rho = k0(rho)
    return lambertw(1 / ((1 - cutoff) * k0rho)).real * cS
def SM(x, km, vmax):
    '''
    Implementation of the Schnell-Mendoza equation using the scipy
    lambertw function.
    '''
    t = x[0]
    so = x[1]
    z = so / km * np.exp(so / km - vmax / km * t)
    return km * lambertw(z)
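# Consistency sketch (my addition, assuming SM from the snippet above is in
# scope): the closed-form Schnell-Mendoza solution should match a direct
# numerical integration of the Michaelis-Menten rate law dS/dt = -vmax*S/(km+S).
# Parameter values are arbitrary examples.
import numpy as np
from scipy.integrate import solve_ivp

km, vmax, s0 = 2.0, 1.0, 5.0
t_eval = np.linspace(0, 10, 11)
sol = solve_ivp(lambda t, s: -vmax * s / (km + s), (0, 10), [s0],
                t_eval=t_eval, rtol=1e-10, atol=1e-12)
closed_form = np.array([SM((t, s0), km, vmax).real for t in t_eval])
print(np.max(np.abs(closed_form - sol.y[0])))  # ~0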
def lambertDecay(t, alpha, tau, sigma_21, n, n20):
    arg = -alpha*sigma_21*n20*exp(-(t+alpha*sigma_21*n20*tau)/tau)
    # Check that result is real
    assert min(arg) >= -1/e, \
        'Lambert W Argument will give an imaginary result.'
    return -lambertw(arg).real/(alpha*sigma_21)
def _evalf_(self, n, z, parent=None, algorithm=None):
    """
    EXAMPLES::

        sage: N(lambert_w(1))
        0.567143290409784
        sage: lambert_w(RealField(100)(1))
        0.56714329040978387299996866221

    SciPy is used to evaluate for float, RDF, and CDF inputs::

        sage: lambert_w(RDF(1))
        0.5671432904097838
        sage: lambert_w(float(1))
        0.5671432904097838
        sage: lambert_w(CDF(1))
        0.5671432904097838
        sage: lambert_w(complex(1))
        (0.5671432904097838+0j)
        sage: lambert_w(RDF(-1))  # abs tol 2e-16
        -0.31813150520476413 + 1.3372357014306895*I
        sage: lambert_w(float(-1))  # abs tol 2e-16
        (-0.31813150520476413+1.3372357014306895j)
    """
    R = parent or s_parent(z)
    if R is float or R is RDF:
        from scipy.special import lambertw
        res = lambertw(z, n)
        # SciPy always returns a complex value, make it real if possible
        if not res.imag:
            return R(res.real)
        elif R is float:
            return complex(res)
        else:
            return CDF(res)
    elif R is complex or R is CDF:
        from scipy.special import lambertw
        return R(lambertw(z, n))
    else:
        import mpmath
        return mpmath_utils.call(mpmath.lambertw, z, n, parent=R)
def lambertloc(self, rss, numtx=0):
    """Inverse function of the RSM. Returns estimated range in [cm].

    Keyword arguments:
    :param rss -- received power values [dB]
    :param numtx -- number of the tx whose rss is processed. Required to use
                    the corresponding alpha and gamma values.
    """
    z = 20 / (np.log(10) * self.__alpha[numtx]) * lambertw(
        np.log(10) * self.__alpha[numtx] / 20 * np.exp(
            -np.log(10) / 20 * (rss + self.__gamma[numtx])))
    return z.real  # [mm]
def lambertDecayPopInv(t, *p):
    """Lifetime for population inversion regime."""
    sigma_21 = 1
    tau = 10
    alpha, n20 = p
    arg = -alpha*sigma_21*n20*exp(-(t+alpha*sigma_21*n20*tau)/tau)
    return -lambertw(arg).real/(alpha*sigma_21)
def lambert_model(V, mu, eta):
    """
    Model for diodes using the Lambert W function.

    @param V: Voltage
    @param mu: Fit parameter
    @param eta: Fit parameter
    @return: Fit evaluated at V with parameters mu and eta
    """
    return mu * lambertw(eta * V)
def invgamma(x):
    r"""
    Inverse gamma function.

    See: http://mathoverflow.net/a/28977
    """
    k = 1.461632
    c = 0.036534
    L = np.log((x+c)/np.sqrt(2*np.pi))
    W = special.lambertw(L/np.exp(1))
    return L/W + 0.5
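# Small sanity check (my addition, assuming invgamma from the snippet above is
# in scope): the Lambert W approximation should roughly invert the gamma
# function on its increasing branch, with accuracy improving as x grows.
import numpy as np
from scipy import special

for x in (3.0, 5.0, 10.0):
    y = special.gamma(x)
    x_est = invgamma(y)
    assert abs(np.real(x_est) - x) < 1e-2, (x, x_est)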
def lambertDecay(t, alpha, tau, sigma_12, sigma_21, n, n20):
    B = 1 + alpha*sigma_12*n
    A = (alpha*(sigma_12+sigma_21)) / B
    arg = -A*n20*exp(-(t/(B*tau))-(A*n20))
    # Check that result is real
    assert min(arg) >= -1/e, \
        'Lambert W Argument will give an imaginary result.'
    return -lambertw(arg, -1).real/A
def distance_to_base(self, percentage):
    # here is the calibration code that may use self.parameters
    # The inverse function is
    #   I = (1/(b*c)) * (-c*W((a*b/c)*exp((b/c)*(P+a))) + b*P + b*a)
    # where W is the product log (Lambert W) function
    amps = (1.0 / (self.parameters['b'] * self.parameters['c'])
            * (-self.parameters['c']
               * lambertw((self.parameters['a'] * self.parameters['b'] / self.parameters['c'])
                          * exp((self.parameters['b'] / self.parameters['c'])
                                * (percentage + self.parameters['a'])))
               + self.parameters['b'] * percentage
               + self.parameters['b'] * self.parameters['a']))
    volts = amps / self.parameters['current_cal']
    return (volts > 0) * abs(volts)
def lambertDecay(t, tau, sigma_12, sigma_21, n, n20, r, rho, d):
    # alpha = r/2
    alpha = (1 + r/2)
    A = alpha*rho*d*(sigma_12+sigma_21)
    B = 1 + alpha*rho*d*sigma_12*n
    x = (t/(B*tau)) + (A*n20)
    arg = -A*n20*exp(-x)/B
    # Check that result is real
    assert min(arg) >= -1/e, \
        'Lambert W Argument will give an imaginary result.'
    return -B*lambertw(arg).real/A
def JVdiode(vv, Rs, Rp, m, i0):
    kT = __main__.kT
    log_ii_teo = []
    for v in vv:
        r = (v - i0*Rp)/(Rp + Rs)
        a0 = (Rp + Rs)/(i0*Rp)*np.exp(-v/(m*kT))
        c = Rs/(m*kT)
        z = c/a0*np.exp(-c*r)
        i = r + (1/c)*special.lambertw(z)
        if abs(i.real) > abs(i0):
            log_ii_teo.append(np.log10(abs(i.real)))
        else:
            log_ii_teo.append(np.log10(i0))
    nlog_ii_teo = np.array(log_ii_teo)
    return nlog_ii_teo
def main():
    parser = argparse.ArgumentParser(
        description="Estimates parameter for scheduled sampling.")
    parser.add_argument("--value", type=float, required=True,
                        help="The value the threshold should achieve.")
    parser.add_argument("--step", type=int, required=True,
                        help="Step when you want to achieve the value.")
    args = parser.parse_args()

    x = args.step
    c = args.value

    coeff = c * np.exp(lambertw((1 - c) / c * x)) / (1 - c)
    print(coeff.real)
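# Illustrative check (my addition): the printed coefficient k satisfies
# k / (k + exp(step / k)) == value at the requested step, i.e. presumably the
# inverse sigmoid decay schedule used for scheduled sampling. The step/value
# pair below is an arbitrary example.
import numpy as np
from scipy.special import lambertw

step, value = 10000, 0.5
k = (value * np.exp(lambertw((1 - value) / value * step)) / (1 - value)).real
print(k / (k + np.exp(step / k)))  # ~0.5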
def shell_max(amplitude, radius, ratio):
    '''
    Compute the amplitude of the gaussians given the requested amplitude
    of the shell.
    '''
    small_rad = radius
    large_rad = ratio * radius

    # Solve for the position of the maximum. Verified with Wolfram Alpha.
    lamb_arg = (small_rad*large_rad)**2 / (large_rad**2 - small_rad**2)
    x = 0.5*(small_rad * large_rad) * \
        np.sqrt(lambertw(lamb_arg).real / (large_rad**2 - small_rad**2))

    func_val = np.exp(-0.5 * x**2 / large_rad**2) - \
        np.exp(-0.5 * x**2 / small_rad**2)

    return amplitude / func_val
def max_energy_to_temperature(energy):
    """
    Convert the peak energy to the corresponding peak temperature.

    This formula is derived by noting that the functional dependence is in
    the term x^2/(Exp[x]-1), where x = E/kT. From Mathematica, we find:

        In[0]:= ArgMax[x^2/(Exp[x] - 1), x]
        Out[13]= 2 + ProductLog[-(2/E^2)]

    which is approximately 1.59362. So the peak energy is at x ~ 1.6,
    or kT ~ E/1.6.

    Implementation note: ProductLog in Mathematica is the Lambert W
    function (scipy.special.lambertw).
    """
    print('fix documentation')
    return energy / float(2 + lambertw(-2 / exp(2)).real)
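# Quick numerical check (my addition) that the constant 2 + W(-2/e^2) quoted
# in the docstring above evaluates to roughly 1.59362.
import numpy as np
from scipy.special import lambertw

x_peak = 2 + lambertw(-2 / np.exp(2)).real
print(x_peak)  # ~1.59362, so kT ~ E / 1.59362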
def I_from_V(Rsh, Rs, nNsVth, V, I0, IL):
    '''
    Calculates I from V per Eq 2 of Jain and Kapoor (2004), using the
    Lambert W function (implemented in wapr_vec.m in the MATLAB version).

    Rsh, nNsVth, V, I0, IL can all be DataFrames;
    Rs can be a DataFrame, but should be a scalar.
    '''
    try:
        from scipy.special import lambertw
    except ImportError:
        raise ImportError('The I_from_V function requires scipy')

    argW = Rs*I0*Rsh*np.exp(Rsh*(Rs*(IL+I0)+V)/(nNsVth*(Rs+Rsh)))/(nNsVth*(Rs+Rsh))
    inputterm = lambertw(argW)

    # Eqn. 4 in Jain and Kapoor, 2004
    I = -V/(Rs + Rsh) - (nNsVth/Rs)*inputterm + Rsh*(IL + I0)/(Rs + Rsh)
    return I.real
def lambertDecay(t, *p):
    """Lifetime for general regime."""
    alpha, n20 = p
    tau = 10
    n = 1
    sigma_12 = 1
    sigma_21 = 1
    B = 1 + alpha*sigma_12*n
    A = (alpha*(sigma_12+sigma_21)) / B
    arg = -A*n20*exp(-(t/(B*tau))-(A*n20))
    # Check that result is real
    assert min(arg) >= -1/e, \
        'Lambert W Argument will give an imaginary result.'
    return -lambertw(arg).real/A
def __init__(self, m, n, iterations):
    self.n = n
    self.m = m
    self.iterations = iterations
    phi0 = 1.*m/n
    smallPores = [m]
    for i in range(1, iterations):
        if uniform(0, smallPores[-1] + n) < n:
            smallPores.append(smallPores[-1])
        else:
            smallPores.append(smallPores[-1] - 1)
    self.smallPores = array(smallPores)
    self.simphi = double(smallPores)/n
    T = 1.0*iterations/n
    simphi = double(smallPores)/n
    self.simp = simphi/(1. + simphi)
    self.sims = r_[0:T:iterations*1.j]
    self.s = r_[0:T:1000j]
    c = log(phi0*exp(phi0))
    self.phi = lambertw(exp(-self.s + c)).real
    self.p = self.phi/(1. + self.phi)
def solvfun(x, y, z):
    # helper function to simulate default times
    return np.real((x*z - y*z + spec.lambertw(y*z*np.exp(-x*z + y*z)))/z)
def test_ufunc():
    assert_array_almost_equal(
        lambertw(r_[0., e, 1.]), r_[0., 1., 0.567143290409783873])
def w(x, y):
    return lambertw(x, y.real.astype(int))
def test_values():
    assert isnan(lambertw(nan))
    assert_equal(lambertw(inf, 1).real, inf)
    assert_equal(lambertw(inf, 1).imag, 2*pi)
    assert_equal(lambertw(-inf, 1).real, inf)
    assert_equal(lambertw(-inf, 1).imag, 3*pi)

    assert_equal(lambertw(1.), lambertw(1., 0))

    data = [
        (0, 0, 0),
        (0+0j, 0, 0),
        (inf, 0, inf),
        (0, -1, -inf),
        (0, 1, -inf),
        (0, 3, -inf),
        (e, 0, 1),
        (1, 0, 0.567143290409783873),
        (-pi/2, 0, 1j*pi/2),
        (-log(2)/2, 0, -log(2)),
        (0.25, 0, 0.203888354702240164),
        (-0.25, 0, -0.357402956181388903),
        (-1./10000, 0, -0.000100010001500266719),
        (-0.25, -1, -2.15329236411034965),
        (0.25, -1, -3.00899800997004620-4.07652978899159763j),
        (-0.25, -1, -2.15329236411034965),
        (0.25, 1, -3.00899800997004620+4.07652978899159763j),
        (-0.25, 1, -3.48973228422959210+7.41405453009603664j),
        (-4, 0, 0.67881197132094523+1.91195078174339937j),
        (-4, 1, -0.66743107129800988+7.76827456802783084j),
        (-4, -1, 0.67881197132094523-1.91195078174339937j),
        (1000, 0, 5.24960285240159623),
        (1000, 1, 4.91492239981054535+5.44652615979447070j),
        (1000, -1, 4.91492239981054535-5.44652615979447070j),
        (1000, 5, 3.5010625305312892+29.9614548941181328j),
        (3+4j, 0, 1.281561806123775878+0.533095222020971071j),
        (-0.4+0.4j, 0, -0.10396515323290657+0.61899273315171632j),
        (3+4j, 1, -0.11691092896595324+5.61888039871282334j),
        (3+4j, -1, 0.25856740686699742-3.85211668616143559j),
        (-0.5, -1, -0.794023632344689368-0.770111750510379110j),
        (-1./10000, 1, -11.82350837248724344+6.80546081842002101j),
        (-1./10000, -1, -11.6671145325663544),
        (-1./10000, -2, -11.82350837248724344-6.80546081842002101j),
        (-1./100000, 4, -14.9186890769540539+26.1856750178782046j),
        (-1./100000, 5, -15.0931437726379218666+32.5525721210262290086j),
        ((2+1j)/10, 0, 0.173704503762911669+0.071781336752835511j),
        ((2+1j)/10, 1, -3.21746028349820063+4.56175438896292539j),
        ((2+1j)/10, -1, -3.03781405002993088-3.53946629633505737j),
        ((2+1j)/10, 4, -4.6878509692773249+23.8313630697683291j),
        (-(2+1j)/10, 0, -0.226933772515757933-0.164986470020154580j),
        (-(2+1j)/10, 1, -2.43569517046110001+0.76974067544756289j),
        (-(2+1j)/10, -1, -3.54858738151989450-6.91627921869943589j),
        (-(2+1j)/10, 4, -4.5500846928118151+20.6672982215434637j),
        (pi, 0, 1.073658194796149172092178407024821347547745350410314531),

        # Former bug in generated branch
        (-0.5+0.002j, 0, -0.78917138132659918344 + 0.76743539379990327749j),
        (-0.5-0.002j, 0, -0.78917138132659918344 - 0.76743539379990327749j),
        (-0.448+0.4j, 0, -0.11855133765652382241 + 0.66570534313583423116j),
        (-0.448-0.4j, 0, -0.11855133765652382241 - 0.66570534313583423116j),
    ]
    data = array(data, dtype=complex_)

    def w(x, y):
        return lambertw(x, y.real.astype(int))

    FuncData(w, data, (0, 1), 2, rtol=1e-10, atol=1e-13).check()
def planckPeak(T):
    # [J] energy location of peak in Planck function
    return np.real((5 + special.lambertw(-5 / np.e**5)) * sp.k * T)  # [J]
def test_lambertw(self):
    assert_mpmath_equal(lambda x, k: sc.lambertw(x, int(k)),
                        lambda x, k: mpmath.lambertw(x, int(k)),
                        [Arg(), IntArg(0, 10)])
def __init__(self, T, beta, lambda0, alpha, fnorm, wavenorm=500.0,
             noalpha=False, opthin=False):
    """Initializer

    Parameters
    ----------
    T : float
        Temperature/(1+z) in K
    beta : float
        Extinction slope
    lambda0 : float
        Wavelength where emission becomes optically thick * (1+z), in microns
    alpha : float
        Blue side power law slope
    fnorm : float
        Normalization flux, in mJy
    wavenorm : float
        Wavelength of normalization flux, in microns (def: 500)
    noalpha : bool
        Do not use blue side power law
    opthin : bool
        Assume emission is optically thin
    """
    self._T = float(T)
    self._beta = float(beta)
    if bool(noalpha):
        self._hasalpha = False
        self._alpha = None
    else:
        self._hasalpha = True
        self._alpha = float(alpha)
    self._fnorm = float(fnorm)
    self._wavenorm = float(wavenorm)
    if bool(opthin):
        self._opthin = True
        self._lambda0 = None
    else:
        self._opthin = False
        self._lambda0 = float(lambda0)

    if self._hasalpha and alpha <= 0.0:
        errmsg = "alpha must be positive. You gave: {:.5g}"
        raise ValueError(errmsg.format(self._alpha))
    if self._beta < 0.0:
        errmsg = "beta must be non-negative. You gave: {:.5g}"
        raise ValueError(errmsg.format(self._beta))

    # Some constants -- eventually, replace these with
    # astropy.constants, but that is in development, so hardwire for now
    self._hcokt = h * c / (k * self._T)

    # Convert wavelengths to x = h nu / k T
    if not self._opthin:
        self._x0 = self._hcokt / lambda0
    self._xnorm = self._hcokt / self._wavenorm

    # Two cases -- optically thin and not.
    # Each has two sub-cases -- with power law merge and without
    if self._opthin:
        if not self._hasalpha:
            # No merge to power law, easy
            self._normfac = self._fnorm * math.expm1(self._xnorm) / \
                self._xnorm**(3.0 + beta)
        else:
            # First, figure out the x (frequency) where the join happens.
            # Frequencies above this (x > xmerge) are on the blue,
            # alpha power law side.
            # The equation we are trying to find the root for is:
            #   x - (1-exp(-x))*(3+alpha+beta)
            # Amazingly, this has a special function solution
            #   A = (3+alpha+beta)
            #   xmerge = A + LambertW[ -A Exp[-A] ]
            # This has a positive solution for all A > 1 -- and since
            # we require alpha and beta > 0, this is always the case
            a = 3.0 + self._alpha + self._beta
            self._xmerge = a + lambertw(-a * math.exp(-a)).real

            # Get merge constant -- note this is -before- flux
            # normalization to allow for the case where wavenorm is
            # on the power law part
            self._kappa = self._xmerge**(3.0 + self._alpha + self._beta) / \
                math.expm1(self._xmerge)

            # Compute normalization constant
            if self._xnorm > self._xmerge:
                self._normfac = self._fnorm * self._xnorm**self._alpha / \
                    self._kappa
            else:
                self._normfac = self._fnorm * math.expm1(self._xnorm) / \
                    self._xnorm**(3.0 + self._beta)
    else:
        # Optically thick case
        if not self._hasalpha:
            self._normfac = -self._fnorm * math.expm1(self._xnorm) / \
                (math.expm1(-(self._xnorm / self._x0)**self._beta) *
                 self._xnorm**3)
        else:
            # This is harder, and does not have a special function
            # solution. Hence, we have to do this numerically.
            # The equation we need to find the root for is given by
            # alpha_merge_eqn.
            # First, we bracket. For positive alpha, beta we expect this
            # to be negative for small a and positive for large a.
            # We try to step out until we achieve that.
            maxiters = 100
            a = 0.1
            aval = alpha_merge_eqn(a, self._alpha, self._beta, self._x0)
            iter = 0
            while aval >= 0.0:
                a /= 2.0
                aval = alpha_merge_eqn(a, self._alpha, self._beta, self._x0)
                if iter > maxiters:
                    errmsg = "Couldn't bracket low alpha merge point for "\
                        "T: {:f} beta: {:f} lambda0: {:f} "\
                        "alpha {:f}, last a: {:f} value: {:f}"
                    raise ValueError(errmsg.format(self._T, self._beta,
                                                   self._lambda0, self._alpha,
                                                   a, aval))
                iter += 1

            b = 15.0
            bval = alpha_merge_eqn(b, self._alpha, self._beta, self._x0)
            iter = 0
            while bval <= 0.0:
                b *= 2.0
                bval = alpha_merge_eqn(b, self._alpha, self._beta, self._x0)
                if iter > maxiters:
                    errmsg = "Couldn't bracket high alpha merge point "\
                        "for T: {:f} beta: {:f} lambda0: {:f} "\
                        "alpha {:f}, last a: {:f} value: {:f}"
                    raise ValueError(errmsg.format(self._T, self._beta,
                                                   self._lambda0, self._alpha,
                                                   a, aval))
                iter += 1

            # Now find root
            args = (self._alpha, self._beta, self._x0)
            self._xmerge = scipy.optimize.brentq(alpha_merge_eqn, a, b,
                                                 args=args, disp=True)

            # Merge constant
            # Note this will overflow and crash for large xmerge, alpha
            self._kappa = -self._xmerge**(3 + self._alpha) * \
                math.expm1(-(self._xmerge / self._x0)**self._beta) / \
                math.expm1(self._xmerge)

            # Normalization factor
            if self._xnorm > self._xmerge:
                self._normfac = self._fnorm * self._xnorm**self._alpha / \
                    self._kappa
            else:
                expmfac = math.expm1(-(self._xnorm / self._x0)**self._beta)
                self._normfac = -self._fnorm * math.expm1(self._xnorm) / \
                    (self._xnorm**3 * expmfac)
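# Small check (my addition): the closed-form merge point quoted in the
# comments above, xmerge = A + W(-A*exp(-A)) with A = 3 + alpha + beta, should
# satisfy x = (1 - exp(-x)) * A for A > 1. The alpha/beta values are arbitrary
# examples.
import math
from scipy.special import lambertw

alpha, beta = 2.0, 1.5
A = 3.0 + alpha + beta
xmerge = A + lambertw(-A * math.exp(-A)).real
print(xmerge, (1 - math.exp(-xmerge)) * A)  # the two values agree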
def test_lambertw(self):
    # python-list/2010-December/594592.html
    xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0,
                         args=(), xtol=1e-12, maxiter=500)
    assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0)
    assert_allclose(xxroot, lambertw(1)/2)