def setTransitionsFork(dimension):
    """ setting transitions in the state space """
    space_size = np.power(2, dimension)
    transition = np.ndarray(shape=(space_size, space_size), dtype=bool)
    transition.fill(False)

    state1 = [0 for i in range(dimension)]
    state2 = state1[:]
    for i in range(dimension - 1):
        state2[1 + i] = 1
        state1[0] = 1
        state2[0] = 1
        transition[st2Ind(state1)][st2Ind(state2)] = True  # forward transition
        transition[st2Ind(state2)][st2Ind(state1)] = True  # backward transition
        state1 = state2[:]

    state1 = [0 for i in range(dimension)]
    state2 = state1[:]
    for i in range(dimension - 1):
        state2[dimension - i - 1] = 1
        state1[0] = 1
        state2[0] = 1
        transition[st2Ind(state1)][st2Ind(state2)] = True  # forward transition
        transition[st2Ind(state2)][st2Ind(state1)] = True  # backward transition
        state1 = state2[:]

    transition[0][np.power(2, (dimension - 1))] = True
    transition[np.power(2, (dimension - 1))][0] = True
    print('Fork transitions')
    printTransitions(transition, dimension)
    return transition

def DiffFunction(self, x, param):
    Sp, alpha, beta, Ta = param
    if x < Ta:
        return 0
    x2 = x - Ta
    y = Sp * scipy.power((scipy.e / (alpha * beta)), alpha) * (
        alpha * scipy.power(x2, alpha - 1) * scipy.exp(-x2 / beta)
        - scipy.power(x2, alpha) * scipy.exp(-x2 / beta) / beta)
    return y

def Function(self, x, param):
    Sp, alpha, beta, Ta = param
    S0 = self.CalcS0(Ta)
    x2 = scipy.greater_equal(x, Ta) * (x - Ta)
    #y = Sp * numpy.abs(scipy.power((scipy.e / (alpha*beta)), alpha)) * numpy.abs(scipy.power(x2, alpha)) * scipy.exp(-x2/beta) + S0
    y = Sp * numpy.abs(scipy.power((scipy.e / (alpha * beta)), alpha)
                       * scipy.power(x2, alpha) * scipy.exp(-x2 / beta)) + S0
    return y

def calc_volume(self):
    """calculate the volume over which the compound can move.
    We have Cavg = mass/volume
    """
    return sp.sum(sp.power(self.grid_edge[1:], 2)
                  - sp.power(self.grid_edge[:-1], 2)) * sp.pi

def backward_pass(self, a1L, a1R, a2L, a2LR, a2R, a3, z1Lb, z1LRb, z1Rb, z2b, xLb, xRb, t):
    # Third Layer
    if self.k == 2:
        r3 = -t * self.sigmoid(-t * a3)
    else:
        r3 = a3 - t
    grad3 = sp.dot(r3, z2b.T)

    # Second Layer
    r3w3T = sp.dot(self.w3[:, :-1].T, r3)
    r2L = r3w3T * a2LR * self.sigmoid(a2R) * self.divsigmoid(a2L)
    r2R = r3w3T * a2LR * self.sigmoid(a2L) * self.divsigmoid(a2R)
    r2LR = r3w3T * self.sigmoid(a2L) * self.sigmoid(a2R)
    grad2L = sp.dot(r2L, z1Lb.T)
    grad2LR = sp.dot(r2LR, z1LRb.T)
    grad2R = sp.dot(r2R, z1Rb.T)

    # First Layer
    r1L = sp.power(1.0 / sp.cosh(a1L), 2) * (sp.dot(self.w2l[:, :-1].T, r2L)
                                             + sp.dot(self.w2lr[:, :self.H1].T, r2LR))
    r1R = sp.power(1.0 / sp.cosh(a1R), 2) * (sp.dot(self.w2r[:, :-1].T, r2R)
                                             + sp.dot(self.w2lr[:, self.H1:-1].T, r2LR))
    grad1L = sp.dot(r1L, xLb.T)
    grad1R = sp.dot(r1R, xRb.T)
    return grad3, grad2L, grad2LR, grad2R, grad1L, grad1R

def r_ion_neutral(s, t, Ni, Nn, Ti, Tn):
    """ This will calculate resonant ion-neutral reaction collision frequencies.
    See table 4.5 in Schunk and Nagy.
    Inputs
        s - Ion name string
        t - Neutral name string
        Ni - Ion density cm^-3
        Nn - Neutral density cm^-3
        Ti - Ion temperature K
        Tn - Neutral temperature K
    Outputs
        nu_ineu - collision frequency s^-1
    """
    Tr = (Ti + Tn) * 0.5
    sp1 = (s, t)
    # from Schunk and Nagy table 4.5
    nudict = {('H+', 'H'): [2.65e-10, 0.083], ('He+', 'He'): [8.73e-11, 0.093],
              ('N+', 'N'): [3.84e-11, 0.063], ('O+', 'O'): [3.67e-11, 0.064],
              ('N2+', 'N'): [5.14e-11, 0.069], ('O2+', 'O2'): [2.59e-11, 0.073],
              ('H+', 'O'): [6.61e-11, 0.047], ('O+', 'H'): [4.63e-12, 0.],
              ('CO+', 'CO'): [3.42e-11, 0.085], ('CO2+', 'CO'): [2.85e-11, 0.083]}
    A = nudict[sp1][0]
    B = nudict[sp1][1]
    if sp1 == ('O+', 'H'):
        nu_ineu = A * Nn * sp.power(Ti / 16. + Tn, .5)
    elif sp1 == ('H+', 'O'):
        nu_ineu = A * Nn * sp.power(Ti, .5) * (1 - B * sp.log10(Ti))**2
    else:
        nu_ineu = A * Nn * sp.power(Tr, .5) * (1 - B * sp.log10(Tr))**2
    return nu_ineu

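# Usage sketch (not part of the original source): O+ colliding with atomic O
# at hypothetical F-region values. Assumes `scipy` is imported as `sp` and is
# an older version that still exposes the NumPy aliases (sp.power, sp.log10).
nu_example = r_ion_neutral('O+', 'O', Ni=1e5, Nn=1e8, Ti=1000., Tn=800.)
print('O+/O collision frequency: %.3g s^-1' % nu_example)
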
def __init__(self, *args, **kwargs):
    MultiModalFunction.__init__(self, *args, **kwargs)
    self._opts = (rand((self.numPeaks, self.xdim)) - 0.5) * 9.8
    self._opts[0] = (rand(self.xdim) - 0.5) * 8
    alphas = [power(self.maxCond, 2 * i / float(self.numPeaks - 2))
              for i in range(self.numPeaks - 1)]
    shuffle(alphas)
    self._covs = [generateDiags(alpha, self.xdim, shuffled=True) / power(alpha, 0.25)
                  for alpha in [self.optCond] + alphas]
    self._R = orth(rand(self.xdim, self.xdim))
    self._ws = [10] + [1.1 + 8 * i / float(self.numPeaks - 2)
                       for i in range(self.numPeaks - 1)]

def gvf(x, Sp, alpha, beta, Ta, S0):
    global scipy, numpy
    y = (Sp * numpy.abs(scipy.power((scipy.e / (alpha * beta)), alpha))
         * numpy.abs(scipy.power((x - Ta), alpha))
         * scipy.exp(-(x - Ta) / beta) + S0)
    return y

def freqs_b_by_freqs(numFilters, lowFreq, highFreq, c, d, order=1):
    # Find the center frequencies of the filters from the begin and end frequencies
    EarQ = c
    minBW = d
    vec = scipy.arange(numFilters, 0, -1)
    freqs = -(EarQ * minBW) + scipy.exp(
        vec * (-scipy.log(highFreq + EarQ * minBW)
               + scipy.log(lowFreq + EarQ * minBW)) / numFilters) * (highFreq + EarQ * minBW)
    ERB = scipy.power((scipy.power((freqs / EarQ), order)
                       + scipy.power(minBW, order)), (1.0 / order))
    B = 1.019 * 2.0 * scipy.pi * ERB
    return (freqs, B)

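# Usage sketch (not part of the original source): a hypothetical 32-channel
# gammatone layout between 100 Hz and 8 kHz. The c and d arguments play the
# role of the Glasberg & Moore ERB constants (EarQ ~9.26449, minBW ~24.7).
freqs_demo, B_demo = freqs_b_by_freqs(32, 100.0, 8000.0, c=9.26449, d=24.7)
print(freqs_demo[:3], B_demo[:3])  # center frequencies run from lowFreq upward
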
def plotdata(ionofile_in, ionofile_fit, madfile, time1):
    fig1, axmat = plt.subplots(2, 2, facecolor='w', figsize=(10, 10))
    axvec = axmat.flatten()

    paramlist = ['ne', 'te', 'ti', 'vo']
    paramlisti = ['Ne', 'Te', 'Ti', 'Vi']
    paramlistiname = ['$N_e$', '$T_e$', '$T_i$', '$V_i$']
    paramunit = ['$m^{-3}$', '$^\circ$ K', '$^\circ$ K', 'm/s']
    boundlist = [[0., 7e11], [500., 3200.], [500., 2500.], [-500., 500.]]

    IonoF = IonoContainer.readh5(ionofile_fit)
    IonoI = IonoContainer.readh5(ionofile_in)
    gfit = GeoData(readIono, [IonoF, 'spherical'])
    ginp = GeoData(readIono, [IonoI, 'spherical'])
    data1 = GeoData(readMad_hdf5, [madfile, ['nel', 'te', 'ti', 'vo', 'dnel', 'dte', 'dti', 'dvo']])
    data1.data['ne'] = sp.power(10., data1.data['nel'])
    data1.data['dne'] = sp.power(10., data1.data['dnel'])

    t1, t2 = data1.timelisting()[340]

    handlist = []
    for inum, iax in enumerate(axvec):
        ploth = rangevsparam(data1, data1.dataloc[0, 1:], time1, gkey=paramlist[inum],
                             fig=fig1, ax=iax, it=False)
        handlist.append(ploth[0])
        ploth = rangevsparam(ginp, ginp.dataloc[0, 1:], 0, gkey=paramlisti[inum],
                             fig=fig1, ax=iax, it=False)
        handlist.append(ploth[0])
        ploth = rangevsparam(gfit, gfit.dataloc[0, 1:], 0, gkey=paramlisti[inum],
                             fig=fig1, ax=iax, it=False)
        handlist.append(ploth[0])
        iax.set_xlim(boundlist[inum])
        iax.set_ylabel('Altitude in km')
        iax.set_xlabel(paramlistiname[inum] + ' in ' + paramunit[inum])
    plt.tight_layout()
    fig1.suptitle('Comparison Without Error Bars\nPFISR Data Times: {0} to {1}'.format(t1, t2))
    plt.subplots_adjust(top=0.9)
    plt.figlegend(handlist[:3], ['PFISR', 'SimISR Input', 'SimISR Fit'],
                  loc='lower center', ncol=5, labelspacing=0.)

    # with error bars
    fig2, axmat2 = plt.subplots(2, 2, facecolor='w', figsize=(10, 10))
    axvec2 = axmat2.flatten()
    handlist2 = []
    for inum, iax in enumerate(axvec2):
        ploth = rangevsparam(data1, data1.dataloc[0, 1:], time1, gkey=paramlist[inum],
                             gkeyerr='d' + paramlist[inum], fig=fig2, ax=iax, it=False)
        handlist2.append(ploth[0])
        ploth = rangevsparam(ginp, ginp.dataloc[0, 1:], 0, gkey=paramlisti[inum],
                             fig=fig2, ax=iax, it=False)
        handlist2.append(ploth[0])
        ploth = rangevsparam(gfit, gfit.dataloc[0, 1:], 0, gkey=paramlisti[inum],
                             gkeyerr='n' + paramlisti[inum], fig=fig2, ax=iax, it=False)
        handlist2.append(ploth[0])
        iax.set_xlim(boundlist[inum])
        iax.set_ylabel('Altitude in km')
        iax.set_xlabel(paramlistiname[inum] + ' in ' + paramunit[inum])
    plt.tight_layout()
    fig2.suptitle('Comparison With Error Bars\nPFISR Data Times: {0} to {1}'.format(t1, t2))
    plt.subplots_adjust(top=0.9)
    plt.figlegend(handlist2[:3], ['PFISR', 'SimISR Input', 'SimISR Fit'],
                  loc='lower center', ncol=5, labelspacing=0.)
    return (fig1, axvec, handlist, fig2, axvec2, handlist2)

def calc_mass(self, conc_r, split=0):
    r"""calculate the mass of component present given value in cell center
    This is given by :math:`2 \pi \int_{r_1}^{r_2} C(r)\, r\, dr`

    conc_r: concentration in self.grid
    """
    if split == 1:
        grid = self.grid_edge_sp1
    elif split == 2:
        grid = self.grid_edge_sp2
    else:
        grid = self.grid_edge
    return sp.sum(conc_r * (sp.power(grid[1:], 2) - sp.power(grid[:-1], 2))) * sp.pi

def f_L(q, a_1, a_2, a_3):
    '''integrand of the static geometrical tensor

    Parameters
    ----------
    'q' = free variable for the geometrical tensor
    'a_1,a_2,a_3' = three axes of the ellipsoid (in nm)

    Returns
    -------
    'L' = integrand of the static geometrical tensor'''
    L = (sp.power(q + a_1**2, -1.5) * sp.power(q + a_2**2, -0.5)
         * sp.power(q + a_3**2, -0.5))
    return L

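# Usage sketch (not part of the original source): the depolarization factor
# along the first axis is L_1 = (a_1*a_2*a_3/2) * integral_0^inf f_L(q) dq.
# For a sphere (all axes equal) this must come out to 1/3.
from scipy import integrate
a_sphere = 10.0  # nm, hypothetical sphere
val, err = integrate.quad(f_L, 0.0, float('inf'), args=(a_sphere, a_sphere, a_sphere))
print(0.5 * a_sphere**3 * val)  # ~0.3333
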
def likelihood(self, theta):
    X = self.X
    f = self.y
    Theta = 10**theta
    n = np.size(X, 0)
    one = np.ones((n, 1))

    # build correlation matrix
    R = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1, n):
            R[i, j] = np.exp(-sum(Theta * sci.power(abs(X[i, :] - X[j, :]), 2)))
    R = R + R.T + np.eye(n) + np.eye(n) * np.finfo(np.float32).eps

    # upper triangular matrix
    U = cholesky(R)
    LnDetPsi = 2 * sum(np.log(abs(np.diag(U))))
    mu = (np.dot(one.T, solve(U, solve(U.T, f)))) / (np.dot(one.T, solve(U, solve(U.T, one))))
    SigmaSqr = (np.dot((f - one * mu).T, solve(U, solve(U.T, f - one * mu)))) / n
    NegLnLike = -1 * (-(n / 2) * np.log(SigmaSqr) - .5 * LnDetPsi)
    return NegLnLike, U, mu, SigmaSqr

def addReward(self):
    """ A filtered mapping towards performAction of the underlying environment. """
    # by default, the cumulative reward is just the sum over the episode
    if self.discount:
        self.cumreward += power(self.discount, self.samples) * self.getReward()
    else:
        self.cumreward += self.getReward()

def filter(self):
    if self.level > self.max_dec_level():
        clevel = self.max_dec_level()
    else:
        clevel = self.level

    # decompose
    coeffs = pywt.wavedec(self.sig, pywt.Wavelet(self.wt),
                          mode=self.mode, level=clevel)

    # threshold evaluation
    th = sqrt(2 * log(len(self.sig)) * power(self.sigma, 2))

    # thresholding
    for (i, cAD) in enumerate(coeffs):
        if i == 0:
            continue
        coeffs[i] = sign(cAD) * pywt.thresholding.less(abs(cAD), th)

    # reconstruct
    rec_sig = pywt.waverec(coeffs, pywt.Wavelet(self.wt), mode=self.mode)
    if len(rec_sig) == (len(self.sig) + 1):
        self.sig = rec_sig[:-1]

def gauss_legendre(n):
    k = sp.arange(1.0, n)
    a_band = sp.zeros((2, n))
    a_band[1, 0:n - 1] = k / sp.sqrt(4 * k * k - 1)
    x, V = sp.linalg.eig_banded(a_band, lower=True)
    w = 2 * sp.real(sp.power(V[0, :], 2))
    return x, w

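# Sanity check (not part of the original source): this is the Golub-Welsch
# construction, so even a 5-point rule integrates x**2 exactly over [-1, 1].
x_demo, w_demo = gauss_legendre(5)
print(sp.sum(w_demo * x_demo**2))  # ~0.6667 == 2/3
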
def setNewScale(self, state):
    Names = ('Y_XScale', 'XLogScale', 'YLogScale', 'LogScale')
    Types = {'c': 0, 's': 1, 'r': 2}
    senderName = self.sender().objectName()
    t, Type = senderName[0], Types[senderName[0]]
    data = self.getData(Type)
    Scale = data.Scale()
    ui_obj = self.findUi([t + i for i in Names])
    if senderName[1:] == Names[0]:
        #ui_obj = getattr(self.ui, t + "LogScale")
        if state:
            Scale[1] = 2
            data[:, 1] = data[:, 1] / data[:, 0]
        else:
            Scale[1] = 0
            data[:, 1] = data[:, 1] * data[:, 0]
        ui_obj[3].setEnabled(not ui_obj[0].isChecked())
    else:
        index = bool(senderName[1] != "X")
        #ui_obj = getattr(self.ui, t + Names[0])
        if Scale[index] != state:
            if state == 1:
                data[:, index] = sp.log10(data[:, index])
            else:
                data[:, index] = sp.power(10., data[:, index])
            Scale[index] = int(state)
        ui_obj[0].setEnabled(not (ui_obj[1].isChecked() or ui_obj[2].isChecked()))
    self.updateData(array=Array(data, Type=Type, scale=Scale))

def asymmetrify(x, beta=0.2):
    res = x.copy()
    dim = len(x)
    for i, xi in enumerate(x):
        if xi > 0:
            res[i] = power(xi, 1 + beta * i / (dim - 1.) * sqrt(xi))
    return res

def cosine_alt(h1, h2):  # 17 us @array, 42 us @list \w 100 bins
    """
    Alternative implementation of the cosine distance measure.
    @note under development.
    """
    h1, h2 = __prepare_histogram(h1, h2)
    # note: unlike the standard cosine measure, the denominator here is the
    # product of the squared norms (no square roots) - see the @note above
    return -1 * float(scipy.sum(h1 * h2)) / (scipy.sum(scipy.power(h1, 2))
                                             * scipy.sum(scipy.power(h2, 2)))

def filter(self, mode='soft'):
    if self.level > self.max_dec_level():
        clevel = self.max_dec_level()
    else:
        clevel = self.level

    # decompose
    coeffs = pywt.wavedec(self.sig, pywt.Wavelet(self.wt),
                          mode=self.mode, level=clevel)

    # threshold evaluation
    th = sqrt(2 * log(len(self.sig)) * power(self.sigma, 2))

    # thresholding
    for (i, cAD) in enumerate(coeffs):
        if mode == 'soft':
            coeffs[i] = pywt.thresholding.soft(cAD, th)
        elif mode == 'hard':
            coeffs[i] = pywt.thresholding.hard(cAD, th)

    # reconstruct
    rec_sig = pywt.waverec(coeffs, pywt.Wavelet(self.wt), mode=self.mode)
    if len(rec_sig) == (len(self.sig) + 1):
        self.sig = rec_sig[:-1]

def generateNodesAdaptive(self):
    innerDomainSize = self.innerDomainSize
    innerMeshSize = self.innerMeshSize
    numberElementsInnerDomain = innerDomainSize / innerMeshSize
    assert(numberElementsInnerDomain < self.numberElements)
    domainCenter = (self.domainStart + self.domainEnd) / 2
    nodes0 = np.linspace(domainCenter, innerDomainSize / 2.0,
                         (numberElementsInnerDomain / 2.0) + 1.0)
    nodes0 = np.delete(nodes0, -1)
    numberOuterIntervalsFromDomainCenter = (self.numberElements - numberElementsInnerDomain) / 2.0
    const = np.log2(innerDomainSize / 2.0) / 0.5
    exp = np.linspace(const, np.log2(self.domainEnd * self.domainEnd),
                      numberOuterIntervalsFromDomainCenter + 1)
    nodes1 = np.power(np.sqrt(2), exp)
    nodesp = np.concatenate((nodes0, nodes1))
    nodesn = -nodesp[::-1]
    nodesn = np.delete(nodesn, -1)
    linNodalCoordinates = np.concatenate((nodesn, nodesp))
    nodalCoordinates = 0

    # Introduce higher order nodes
    if self.elementType == "quadratic" or self.elementType == "cubic":
        if self.elementType == "quadratic":
            numberNodesPerElement = 3
        elif self.elementType == "cubic":
            numberNodesPerElement = 4
        for i in range(0, len(linNodalCoordinates) - 1):
            newnodes = np.linspace(linNodalCoordinates[i], linNodalCoordinates[i + 1],
                                   numberNodesPerElement)
            nodalCoordinates = np.delete(nodalCoordinates, -1)
            nodalCoordinates = np.concatenate((nodalCoordinates, newnodes))
    else:
        nodalCoordinates = linNodalCoordinates
    return nodalCoordinates

def generateGaborMotherWavelet(self):
    pitch = 440.0
    sigma = 6.
    NL = 48
    NU = 39
    print('sampling rate:', self.fs, 'Hz')
    fs = float(self.fs)
    self.sample_duration = 10.
    #asigma = 0.3
    limit_t = 0.1
    #zurashi = 1.
    #NS = NL + NU + 1

    f = sp.array([2**(i / 12.) for i in range(NL + NU + 1)]) * pitch * 2**(-NL / 12.)
    f = f[:, sp.newaxis]
    sigmao = sigma * 10**(-3) * sp.sqrt(fs / f)
    t = sp.arange(-limit_t, limit_t + 1 / fs, 1 / fs)
    inv_sigmao = sp.power(sigmao, -1)
    inv_sigmao_t = inv_sigmao * t
    t_inv_sigmao2 = sp.multiply(inv_sigmao_t, inv_sigmao_t)
    omega_t = 2 * sp.pi * f * t
    gabor = (1 / sp.sqrt(2 * sp.pi))
    gabor = sp.multiply(gabor, sp.diag(inv_sigmao))
    # use the imaginary unit 1j directly; sp.sqrt(-1) returns nan for real input
    exps = -0.5 * t_inv_sigmao2 + 1j * omega_t
    self.gabor = gabor * sp.exp(exps)

def multivariateNormalPdf(z, x, sigma):
    """ The pdf of a multivariate normal distribution (not in scipy).
    The sample z and the mean x should be 1-dim-arrays, and sigma a square 2-dim-array. """
    assert len(z.shape) == 1 and len(x.shape) == 1 and len(x) == len(z) \
        and sigma.shape == (len(x), len(z))
    tmp = -0.5 * dot(dot((z - x), inv(sigma)), (z - x))
    res = (1. / power(2.0 * pi, len(z) / 2.)) * (1. / sqrt(det(sigma))) * exp(tmp)
    return res

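# Cross-check sketch (not part of the original source): scipy later gained
# scipy.stats.multivariate_normal (0.14+), so the two should agree. Assumes
# the module-level imports the function above relies on (dot, inv, det, ...).
from numpy import array, eye
from scipy.stats import multivariate_normal
z_demo, x_demo, sig_demo = array([0.5, -0.2]), array([0.0, 0.0]), eye(2)
print(multivariateNormalPdf(z_demo, x_demo, sig_demo))
print(multivariate_normal(mean=x_demo, cov=sig_demo).pdf(z_demo))  # should match
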
def minowski(h1, h2, p=2):  # 46..45..14,11..43..44 / 45 us for p=int(-inf..-24..-1,1..24..inf) / float @array, +20 us @list \w 100 bins
    r"""
    Minkowski distance.

    With :math:`p=2` equal to the Euclidean distance, with :math:`p=1` equal to the
    Manhattan distance, and the Chebyshev distance implementation represents the case
    of :math:`p=\pm inf`.

    The Minkowski distance between two histograms :math:`H` and :math:`H'` of size
    :math:`m` is defined as:

    .. math::

        d_p(H, H') = \left(\sum_{m=1}^M|H_m - H'_m|^p \right)^{\frac{1}{p}}

    *Attributes:*

    - a real metric

    *Attributes for normalized histograms:*

    - :math:`d(H, H')\in[0, \sqrt[p]{2}]`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`

    *Attributes for not-normalized histograms:*

    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`

    *Attributes for not-equal histograms:*

    - not applicable

    Parameters
    ----------
    h1 : sequence
        The first histogram.
    h2 : sequence
        The second histogram.
    p : float
        The :math:`p` value in the Minkowski distance formula.

    Returns
    -------
    minowski : float
        Minkowski distance.

    Raises
    ------
    ValueError
        If ``p`` is zero.
    """
    h1, h2 = __prepare_histogram(h1, h2)
    if 0 == p:
        raise ValueError('p can not be zero')
    elif int == type(p):
        if p > 0 and p < 25:
            return __minowski_low_positive_integer_p(h1, h2, p)
        elif p < 0 and p > -25:
            return __minowski_low_negative_integer_p(h1, h2, p)
    return math.pow(scipy.sum(scipy.power(scipy.absolute(h1 - h2), p)), 1. / p)

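# Usage sketch (not part of the original source): float p values take the
# generic math.pow branch above; integer p in (-25, 25) would instead dispatch
# to the private __minowski_* helpers from the same module.
h1_demo = scipy.array([0.5, 0.3, 0.2])
h2_demo = scipy.array([0.4, 0.4, 0.2])
print(minowski(h1_demo, h2_demo, p=1.0))  # Manhattan: ~0.2
print(minowski(h1_demo, h2_demo, p=2.0))  # Euclidean: ~0.141
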
def __init__(self, name="gaussian", position=0.5, width=10.0,
             peak_power=1e-3, offset_nu=0.0, m=1, C=0.0,
             initial_phase=0.0, channel=0, using_fwhm=False):

    if not (0.0 <= position <= 1.0):
        raise OutOfRangeError(
            "position is out of range. Must be in [0.0, 1.0]")
    if not (1e-3 < width < 1e3):
        raise OutOfRangeError(
            "width is out of range. Must be in (1e-3, 1e3)")
    if not (0.0 <= peak_power < 1e9):
        raise OutOfRangeError(
            "peak_power is out of range. Must be in [0.0, 1e9)")
    if not (-100.0 < offset_nu < 100.0):
        raise OutOfRangeError(
            "offset_nu is out of range. Must be in (-100.0, 100.0)")
    if not (0 < m < 50):
        raise OutOfRangeError(
            "m is out of range. Must be in (0, 50)")
    if not (-1e3 < C < 1e3):
        raise OutOfRangeError(
            "C is out of range. Must be in (-1e3, 1e3)")
    if not (0.0 <= initial_phase < 2.0 * pi):
        raise OutOfRangeError(
            "initial_phase is out of range. Must be in [0.0, 2.0 * pi)")
    if not (0 <= channel < 2):
        raise OutOfRangeError(
            "channel is out of range. Must be in [0, 2)")
    if int(m) != m:
        raise NotIntegerError("m must be an integer")
    if int(channel) != channel:
        raise NotIntegerError("channel must be an integer")

    self.name = name
    self.position = position
    self.width = width  # ps
    self.peak_power = peak_power  # W
    self.offset_nu = offset_nu  # THz
    self.m = m
    self.C = C  # rad
    self.initial_phase = initial_phase  # rad
    self.channel = channel
    self.fwhm = None

    # For a FWHM pulse width, store then convert to a HWIeM pulse width:
    if using_fwhm:
        self.fwhm = width  # store fwhm pulse width
        self.width *= 0.5 / power(log(2.0), 1.0 / (2 * m))

    self.field = None

def f(self, x):
    z = x[:]
    for i in range(self.xdim):
        e = i / (self.xdim - 1.) / 2.
        if x[i] <= 0 or i % 2 == 0:
            e += 1
        z[i] *= power(10, e)
    return dot(z, z) + 10 * self.xdim - 10 * sum(cos(2 * pi * z))

def magacf(tau, K, C, alpha, Om):
    """ magacf(tau,K,C,alpha,Om)
    by John Swoboda
    This function will create a single particle acf for a particle species with
    magnetic field but no collisions.
    Inputs
        tau: The time vector for the acf.
        K: Bragg scatter vector magnitude.
        C: Thermal speed of the species.
        alpha: Magnetic aspect angle in radians.
        Om: The gyrofrequency of the particle.
    Output
        acf - The single particle acf.
    """
    Kpar = sp.sin(alpha) * K
    Kperp = sp.cos(alpha) * K
    return sp.exp(-sp.power(C * Kpar * tau, 2.0) / 2.0
                  - 2.0 * sp.power(Kperp * C * sp.sin(Om * tau / 2.0) / Om, 2.0))

def f(self, x):
    f1 = sum(-10 * exp(-0.2 * sqrt(x[:-1]**2 + x[1:]**2)))
    f2 = sum(power(abs(x), 0.8) + 5 * sin(x**3))
    return -array([f1, f2])

def boundary_term_power(intensities):
    """
    Implementation of a power-based boundary term computation over an array.
    """
    # apply (1 / (1 + x))^sigma
    intensities = 1. / (intensities + 1)
    intensities = scipy.power(intensities, sigma)
    intensities[intensities <= 0] = sys.float_info.min
    return intensities

def mannings_n(self, area, hydrad, slope, disch):
    """ Calculates Manning's roughness from discharge.
    'area' - self.handarea (wet area), 'hydrad' - self.handrad (hydraulic radius),
    'slope' - self.handslope (bed slope), and 'disch' - any discharge values """
    res = 1.49 * area * scipy.power(hydrad, (2 / 3.0)) * scipy.sqrt(slope) / disch.T
    return res.T

def vmax(pCentre, pEnv, type="holland", beta=1.3, rho=1.15):
    """
    Calculate the maximum wind speed from the pressure difference.

    :param float pc: central pressure (Pa)
    :param float pe: environmental pressure (Pa)
    :param str type: which Vmax relation to use (Willoughby & Rahn,
                     Holland or Atkinson & Holliday)
    :param float beta: Holland's (1980) beta parameter. Only used for the
                       Holland estimation (type=holland)
    :param float rho: air density (default=1.15 kg/m^3)
    :return: maximum wind speed. For types 1 & 2, this is a gradient level
             wind. The relation used in type 3 (Atkinson & Holliday) was
             determined using surface wind observations so should be used
             with caution at the gradient level.
    :raises ValueError: if environmental pressure is lower than central
                        pressure

    Note: The pressure should ideally be passed in units of Pa, but the
    function will accept hPa and automatically convert to Pa.
    """
    # Convert from hPa to Pa if necessary:
    if pCentre < 10000:
        pCentre = metutils.convert(pCentre, "hPa", "Pa")
    if pEnv < 10000:
        pEnv = metutils.convert(pEnv, "hPa", "Pa")

    if pEnv < pCentre:
        raise ValueError("Error in vmax - environmental pressure is less than "
                         "central pressure. Check values and/or order of input arguments")

    dP = pEnv - pCentre

    if type == "willoughby":
        # Default: Most advanced estimation technique:
        # Willoughby & Rahn (2004), Parametric Representation of the
        # Primary Hurricane Vortex. Part I: Observations and
        # Evaluation of the Holland (1980) Model.
        # Mon. Wea. Rev., 132, 3033-3048
        vMax = 0.6252 * sqrt(dP)
    elif type == "holland":
        # Holland (1980), An Analytic Model of the Wind and Pressure
        # Profiles in Hurricanes. Mon. Wea. Rev, 108, 1212-1218
        # Density of air is assumed to be 1.15 kg/m^3.
        # beta is assumed to be 1.3. Other values can be specified.
        # Gradient level wind (assumed maximum).
        vMax = sqrt(beta * dP / (exp(1) * rho))
    elif type == "atkinson":
        # Atkinson and Holliday (1977), Tropical Cyclone Minimum Sea
        # Level Pressure / Maximum Sustained Wind Relationship for
        # the Western North Pacific. Mon. Wea. Rev., 105, 421-427
        # Maximum 10m, 1-minute wind speed. Uses pEnv as 1010 hPa.
        vMax = 3.04 * power(1010 - metutils.convert(pCentre, "Pa", "hPa"), 0.644)
    else:
        raise NotImplementedError("Vmax type " + type + " not implemented")

    return vMax

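# Usage sketch (not part of the original source): gradient-level Vmax for a
# hypothetical 55 hPa pressure deficit, with pressures given in Pa so the
# automatic hPa -> Pa conversion is not triggered.
v_demo = vmax(95000.0, 100500.0, type="holland")
print("%.1f m/s" % v_demo)  # ~47.8 m/s with beta=1.3, rho=1.15
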
def boundary_term_division(intensities):
    """
    Implementation of a division-based boundary term computation over an array.
    """
    # apply 1 / (1 + x/sigma)
    # (the original body duplicated the power variant; restored to the
    # division form the function name describes)
    intensities = 1. / (1. + intensities / float(sigma))
    intensities[intensities <= 0] = sys.float_info.min
    return intensities

def mannings_q(self, area, hydrad, slope, n):
    """ Calculates Manning's discharge from roughness.
    'area' - self.handarea (wet area), 'hydrad' - self.handrad (hydraulic radius),
    'slope' - self.handslope (bed slope), and 'n' - any roughness values """
    res = 1.49 * area * scipy.power(hydrad, (2 / 3.0)) * scipy.sqrt(slope) / n.T
    return res.T

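# Standalone check (not part of the original source) of the relation both
# methods above implement, Q = 1.49 * A * R^(2/3) * sqrt(S) / n (US customary
# units, hence the 1.49 factor), with hypothetical channel values.
A_demo, R_demo, S_demo, n_demo = 100.0, 5.0, 0.001, 0.03
Q_demo = 1.49 * A_demo * scipy.power(R_demo, 2 / 3.0) * scipy.sqrt(S_demo) / n_demo
print(Q_demo)  # ~459 cfs; feeding this Q to mannings_n recovers n = 0.03
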
def _calc_expfit(wptab):
    xdata = np.array(wptab)[:, 0]
    ydata = np.array(wptab)[:, 1]
    optfunc = lambda x: x[0] * np.power(xdata, x[1]) - x[2] - ydata
    xs1 = 5.0
    xs2 = -0.5
    xs3 = 0.0
    x1, x2, x3 = leastsq(optfunc, [xs1, xs2, xs3])[0]
    return [x1, x2, x3]

def ans_one_model(index, mweights, mmeans, mcovs):
    # init
    nummods = mweights.shape[0]
    initgaus = mweights.shape[1]
    finalgaus = 64
    lenvars = mmeans.shape[2]
    weights = numpy.zeros(finalgaus, dtype="float128")
    means = numpy.zeros((finalgaus, lenvars), dtype="float128")
    covs = numpy.zeros((finalgaus, lenvars, lenvars), dtype="float128")
    h = numpy.float128(numpy.random.randint(1, 10000, (finalgaus, nummods, initgaus)))
    h = h / h.sum(0)

    for _ in range(80):
        # M-step
        weights = h.sum(1).sum(1) / (nummods * initgaus)
        temp = h * mweights
        temp = temp / temp.sum(1).sum(1).reshape(finalgaus, 1, 1)
        tempcovs = (mmeans.reshape(1, nummods, initgaus, lenvars, 1)
                    - means.reshape(finalgaus, 1, 1, lenvars, 1)).reshape(
                        finalgaus * nummods * initgaus, lenvars, 1)
        tempcovs = numpy.array([tt.dot(tt.transpose()) for tt in tempcovs]).reshape(
            finalgaus, nummods, initgaus, lenvars, lenvars)
        tempcovs = tempcovs + mcovs
        covs = (temp.reshape(finalgaus, nummods, initgaus, 1, 1) * tempcovs).sum(1).sum(1)
        means = (temp.reshape(finalgaus, nummods, initgaus, 1)
                 * mmeans.reshape(1, nummods, initgaus, lenvars)).sum(1).sum(1)

        # E-step
        for m in range(finalgaus):
            gaus = mnormal(means[m], covs[m])
            invcovs = numpy.linalg.inv(numpy.float64(covs[m]))
            temp = numpy.zeros((nummods, initgaus), dtype="float128")
            for j in range(nummods):
                for k in range(initgaus):
                    temp[j, k] = -0.5 * invcovs.dot(mcovs[j][k]).trace()
            h[m, :, :] = sc.power(gaus.pdf(mmeans) * sc.power(sc.e, temp),
                                  mweights) * weights[m] + 1e-20
        h = h / h.sum(0)

    with open("final_model/%d.pickle" % (index,), "wb") as f:
        pickle.dump((weights, means, covs), f)

def compute_numerical_gradient(cost_func, theta):
    # use a float base: np.power(10, -4) with integer arguments raises an error
    e = sp.power(10., -4)
    numgrad = sp.zeros(theta.shape)
    perturb = sp.zeros(theta.shape)
    for i in range(0, theta.size):
        perturb[i] = e
        numgrad[i] = (cost_func(theta + perturb) - cost_func(theta - perturb)) / (2 * e)
        perturb[i] = 0
    return numgrad

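# Usage sketch (not part of the original source): central differences on
# f(theta) = sum(theta**2) should return approximately 2*theta.
theta_demo = sp.array([1.0, -2.0, 3.0])
print(compute_numerical_gradient(lambda v: sp.sum(v**2), theta_demo))
# ~[ 2. -4.  6.]
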
def bband(inwave, airmass=1., scale=0.85):
    path = spectra.__path__[0]
    file = path + "/data/bband.dat"
    bband = sio.read_array(file)
    wave = scipy.power(10., bband[:, 0])
    data = bband[:, 1].astype(scipy.float32)
    return get_correction(inwave, airmass, scale, wave, data)

def _calculate_RadiusofGyration(Coordinates):
    nAT = len(Coordinates)
    mass = c_mass * nAT
    masscenter = np.mean(Coordinates, axis=0)
    result = 0.0
    for i in range(nAT):
        dis = np.linalg.norm(Coordinates[i] - masscenter)
        # scipy.power takes the exponent positionally; it has no `p` keyword
        result = result + c_mass * scipy.power(dis, 2)
    return round(scipy.sqrt(float(result / mass)), 3)

def dNeurons(self, statevec, t):
    # Extract relevant parameters from the state vector
    train = training > 0 and t > training and t < training + train_dur
    x_i = statevec[0:Ng]
    w_i = statevec[Ng:2 * Ng]

    # generate noise patterns
    exp_noise_amp = 0.1
    if train:
        zeta = np.random.uniform(-exp_noise_amp, exp_noise_amp, 1)
    else:
        zeta = 0.0

    # Compute firing rates and feedback signals
    r_i = sp.tanh(x_i) + zeta_state(t)
    z_i = np.dot(w_i, r_i) + zeta

    # Compute next timestep depending on whether we are training or not
    if train:
        dxidt = (-x_i + lambd * np.dot(W_rec, r_i) + np.dot(W_in_left, self.uleft[t])
                 + np.dot(W_in_right, self.uright[t]) + z_i * W_fb) / tau
        x_new = x_i + dxidt * dt
        P = -1.0 * sp.power(z_i - self.f_target[t], 2)
        M = 1.0 * (P > self.P_avg)
        dwdt = eta(t) * (z_i - self.z_avg) * M * r_i
        w_new = w_i + dwdt * dt
        self.P_avg = (1 - (dt / tau_avg)) * self.P_avg + (dt / tau_avg) * P
        self.z_avg = (1 - (dt / tau_avg)) * self.z_avg + (dt / tau_avg) * z_i
    else:
        dxidt = (-x_i + lambd * np.dot(W_rec, r_i) + np.dot(W_in_left, self.uleft[t])
                 + np.dot(W_in_right, self.uright[t]) + z_i * W_fb) / tau
        x_new = x_i + dxidt * dt
        dwdt = np.zeros(np.shape(w_i))
        w_new = w_i

    # weight change magnitude:
    dwmag = LA.norm(dwdt)
    rmag = LA.norm(r_i)
    if t % 10000 == 0:
        print(str(t) + ' ' + str(z_i) + ' ' + str(train))
    self.zsave[t] = z_i
    self.rsave[t] = rmag
    return np.concatenate((x_new, w_new))

def time_step(r, t, h):
    def runge_kutta_step(r, t, h):
        '''
        :param r: current positions and velocities
        :param t: current t
        :param h: step size
        :return: a vector of the change in positions and velocities to get to t+h
        '''
        k1 = h * f(r, t)
        k2 = h * f(r + 0.5 * k1, t + 0.5 * h)
        k3 = h * f(r + 0.5 * k2, t + 0.5 * h)
        k4 = h * f(r + k3, t + h)
        return (k1 + 2 * k2 + 2 * k3 + k4) / 6

    # perform 2 RK steps of step size h
    delta_step_1 = runge_kutta_step(r, t, h)
    delta_step_2 = runge_kutta_step(r + delta_step_1, t + h, h)
    delta_r1 = delta_step_1 + delta_step_2

    # perform 1 RK step with step size 2h
    delta_r2 = runge_kutta_step(r, t, 2 * h)

    # Compute error estimate
    delta_x1 = delta_r1[0]
    delta_x2 = delta_r2[0]
    delta_y1 = delta_r1[2]
    delta_y2 = delta_r2[2]
    error = sqrt((delta_x1 - delta_x2) ** 2 + (delta_y1 - delta_y2) ** 2) / 30

    # Calculate rho
    rho = h * delta / error

    # Calculate factor to multiply h by
    factor = power(rho, 1 / 4)

    # If target accuracy met, move on to next step
    if rho >= 1:
        # update t
        t = t + 2 * h
        # Prevent h from getting too large
        if factor > 2:
            h *= 2
        else:
            h *= factor
        # Use local extrapolation to better our estimate of the positions
        delta_r1[0] += (delta_x1 - delta_x2) / 15
        delta_r1[2] += (delta_y1 - delta_y2) / 15
        return delta_r1, h, t
    # If target accuracy not met, must redo step with smaller h
    else:
        return time_step(r, t, factor * h)

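# Driver sketch (not part of the original source). time_step relies on a
# module-level derivative function f(r, t), a target accuracy `delta` per unit
# time, and numpy's sqrt/power; everything below is a hypothetical stand-in
# using simple harmonic motion with state layout r = [x, vx, y, vy].
from numpy import array, sqrt, power

delta = 1e-6  # target accuracy per unit time

def f(r, t):
    x, vx, y, vy = r
    return array([vx, -x, vy, -y])

r_demo, t_demo, h_demo = array([1.0, 0.0, 0.0, 1.0]), 0.0, 0.01
while t_demo < 10.0:
    dr, h_demo, t_demo = time_step(r_demo, t_demo, h_demo)
    r_demo = r_demo + dr
print(r_demo)
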
def calEquilibriumDisFuncLoc(self, macroDensity, macroVelocity):
    coeff1 = 3.0
    coeff2 = 9. / 2.
    coeff3 = -3. / 2.0
    tmpEquilibrium = np.ones(9, dtype='float64')
    for i in sp.arange(9):
        tmpEquilibrium[i] = self.weightsCoeff[i] * macroDensity * (
            1. + coeff1 * np.dot(self.microVelocity[i], macroVelocity)
            + coeff2 * sp.power(np.dot(self.microVelocity[i], macroVelocity), 2.0)
            + coeff3 * np.dot(macroVelocity, macroVelocity))
    return tmpEquilibrium

def transfer_function(self, nu, centre_nu):
    """
    :param Dvector nu: Spectral domain array.
    :param double centre_nu: Centre frequency.
    :return: Array of values.
    :rtype: Dvector

    Generate an array representing the filter power transfer function.
    """
    if len(nu) < 8:
        raise OutOfRangeError("Require spectral array with at least 8 values")

    delta_nu = nu - centre_nu - self.offset_nu
    factor1 = power(delta_nu / self.width_nu1, (2 * self.m))
    factor2 = power(delta_nu / self.width_nu2, (2 * self.m))
    self.shape = self.a1 * exp(-0.5 * factor1) + self.a2 * exp(-0.5 * factor2)
    return np.abs(self.shape)**2

def boundary_term_exponential(intensities):
    """
    Implementation of an exponential boundary term computation over an array.
    """
    # apply exp(-x**2 / sigma**2)
    intensities = scipy.power(intensities, 2)
    intensities /= math.pow(sigma, 2)
    intensities *= -1
    intensities = scipy.exp(intensities)
    intensities[intensities <= 0] = sys.float_info.min
    return intensities

def european_option_rho(self):
    """Rho (interest-rate sensitivity) of the European call and put options.
    The vectorized method can compute rho for multiple options held in arrays."""
    numerator = sp.add(
        sp.log(sp.divide(self.spot_price, self.strike_price)),
        sp.multiply(
            (self.interest_rate - self.dividend_yield + 0.5 * sp.power(self.sigma, 2)),
            self.time_to_maturity))
    d1 = sp.divide(
        numerator,
        sp.prod([self.sigma, sp.sqrt(self.time_to_maturity)], axis=0))
    d2 = sp.add(d1, -sp.multiply(self.sigma, sp.sqrt(self.time_to_maturity)))
    j = sp.product(
        [self.spot_price,
         self.time_to_maturity,
         sp.exp(sp.multiply(-self.interest_rate, self.time_to_maturity))],
        axis=0)
    c_rho = j * self.bls_erf_value(d2)
    p_rho = -j * self.bls_erf_value(-d2)
    return c_rho, p_rho

def calc_center(kp, img):
    x_key_point, y_key_point = kp.pt[:2]
    height, width = img.shape[:2]
    x_center = width / 2
    y_center = height / 2
    center_img = (x_center, y_center)
    xVector = x_center - x_key_point
    yVector = y_center - y_key_point
    vector = (xVector, yVector)
    module = scipy.sqrt(scipy.power((x_center - x_key_point), 2)
                        + scipy.power((y_center - y_key_point), 2))
    if (y_center - y_key_point) == 0:
        angle = 0
    else:
        angle = scipy.arctan((x_center - x_key_point) / (y_center - y_key_point))
    distance_center = (module, vector, angle, center_img)
    return distance_center

def __init__(self, *args, **kwargs):
    MultiModalFunction.__init__(self, *args, **kwargs)
    print(self.numPeaks, self.xdim)
    self._opts = [(rand(self.xdim) - 0.5) * 8]
    self._opts.extend([(rand(self.xdim) - 0.5) * 9.8 for _ in range(self.numPeaks - 1)])
    alphas = [power(self.maxCond, 2 * i / float(self.numPeaks - 2))
              for i in range(self.numPeaks - 1)]
    shuffle(alphas)
    self._covs = [generateDiags(alpha, self.xdim, shuffled=True) / power(alpha, 0.25)
                  for alpha in [self.optCond] + alphas]
    self._R = orth(rand(self.xdim, self.xdim))
    self._ws = [10] + [1.1 + 8 * i / float(self.numPeaks - 2)
                       for i in range(self.numPeaks - 1)]

def addReward(self, r=None):
    """ A filtered mapping towards performAction of the underlying environment. """
    r = self.getReward() if r is None else r

    # by default, the cumulative reward is just the sum over the episode
    if self.discount:
        self.cumulativeReward += power(self.discount, self.samples) * r
    else:
        self.cumulativeReward += r

def loss_to_pair(self, pair, atg_a, atg_b, pl_exp=4, gamma=1e2):
    dist = sp.sqrt(
        sp.add(sp.square(pair.tx_x),
               sp.add(sp.square(pair.tx_y), sp.square(self.h))))
    phi = sp.multiply(sp.divide(180, sp.pi),
                      sp.arcsin(sp.divide(self.h, dist)))
    pr_LOS = sp.divide(
        1,
        sp.add(1, sp.multiply(
            atg_a, sp.exp(sp.multiply(-atg_b, sp.subtract(phi, atg_a))))))
    pr_NLOS = sp.subtract(1, pr_LOS)
    total_loss = sp.add(
        sp.multiply(pr_LOS, sp.power(dist, -pl_exp)),
        sp.multiply(sp.multiply(pr_NLOS, gamma), sp.power(dist, -pl_exp)))
    return total_loss

def GetWHIM12(CoordinateMatrix, AtomLabel, proname='u'):
    """
    #################################################################
    WHIM descriptors --->E3u
    #################################################################
    """
    nAtom, kc = CoordinateMatrix.shape
    if proname == 'u':
        weight = scipy.matrix(scipy.eye(nAtom))
    else:
        weight = GetPropertyMatrix(AtomLabel, proname)
    S = XPreCenter(CoordinateMatrix)
    u, s, v = scipy.linalg.svd(S.T * weight * S / sum(scipy.diag(weight)))
    res = scipy.power(s[2], 2) * nAtom / sum(scipy.power(S * scipy.matrix(u[:, 2]).T, 4))
    return round(float(res.real), 3)

def split_forest(nb_part, dll, ll, de, diff, iv, first_pixel):
    ll_limit = [ll[first_pixel]]
    nb_bin = (len(ll) - first_pixel) // nb_part

    m_z_arr = []
    ll_arr = []
    de_arr = []
    diff_arr = []
    iv_arr = []

    ll_c = ll.copy()
    de_c = de.copy()
    diff_c = diff.copy()
    iv_c = iv.copy()

    for p in range(1, nb_part):
        ll_limit.append(ll[nb_bin * p + first_pixel])
    ll_limit.append(ll[len(ll) - 1] + 0.1 * dll)

    for p in range(nb_part):
        selection = (ll_c >= ll_limit[p]) & (ll_c < ll_limit[p + 1])
        ll_part = ll_c[selection]
        de_part = de_c[selection]
        diff_part = diff_c[selection]
        iv_part = iv_c[selection]

        lam_lya = constants.absorber_IGM["LYA"]
        m_z = (sp.power(10., ll_part[len(ll_part) - 1])
               + sp.power(10., ll_part[0])) / 2. / lam_lya - 1.0

        m_z_arr.append(m_z)
        ll_arr.append(ll_part)
        de_arr.append(de_part)
        diff_arr.append(diff_part)
        iv_arr.append(iv_part)

    return m_z_arr, ll_arr, de_arr, diff_arr, iv_arr

def eval(self, fill_SED=True, nu=None, get_model=False, loglog=False):
    """
    Evaluates the Template for the current parameter values
    """
    if nu is None:
        nu = np.copy(self.nu_template)
        if loglog == False:
            nu = np.power(10, nu)

    if loglog == False:
        log_nu = log10(nu)
        lin_nu = nu
    else:
        log_nu = nu
        lin_nu = power(10., log_nu)

    log_model = self.log_func(log_nu)
    model = power(10., log_model)

    if fill_SED == True:
        self.SED.fill(nu=lin_nu, nuFnu=model)

    if get_model == True:
        if loglog == False:
            return model
        else:
            return log_model
    else:
        return None

def convert_pulse_width(width, to_fwhm=True, shape="gaussian", m=1):
    """
    :param float width: Pulse width
    :param bool to_fwhm: Whether to convert to FWHM measure
    :param string shape: Shape of the pulse
    :param Uint m: Order parameter, used for super-Gaussian pulses

    Helper function to convert pulse widths between FWHM and HWIeM measures.
    """
    if shape.lower() == "gaussian":
        if to_fwhm:
            return width * 2.0 * power(log(2.0), 1.0 / (2 * m))
        else:
            return width * 0.5 / power(log(2.0), 1.0 / (2 * m))
    elif shape.lower() == "sech":
        if to_fwhm:
            return width * 2.0 * log(1.0 + sqrt(2.0))
        else:
            return width * 0.5 / log(1.0 + sqrt(2.0))
    else:
        print("Pulse shape not recognised: %s" % shape)

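# Round-trip sketch (not part of the original source): HWIeM -> FWHM -> HWIeM
# for a 10 ps Gaussian pulse with m=1; assumes numpy's power/log/sqrt are in
# scope as in the function above.
fwhm_demo = convert_pulse_width(10.0)  # ~16.65 ps (factor 2*sqrt(ln 2))
print(fwhm_demo, convert_pulse_width(fwhm_demo, to_fwhm=False))  # back to 10.0
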
def get_q_i(res_nam, pKa, pH):
    """Calculate the charge of a residue depending on its pKa and pH."""
    # CYS residues forming disulfide bonds are neutral.
    q_i = 0.0
    if pKa == 99.99:
        return q_i
    exponent = power(10, pKa - pH)
    q_i = exponent / (1.0 + exponent)
    # Acidic residues carry the deprotonated (negative) fraction.
    if res_nam in ['ASP', 'GLU', 'C-', 'TYR', 'Oco', 'CYS']:
        q_i -= 1.0
    return q_i

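# Usage sketch (not part of the original source), with hypothetical pKa values.
# Henderson-Hasselbalch: an ASP side chain (pKa ~3.9) is essentially fully
# deprotonated at pH 7, while a basic residue keeps its protonated fraction.
print(get_q_i('ASP', 3.9, 7.0))  # ~-0.999
print(get_q_i('HIS', 6.0, 7.0))  # ~+0.091 (fraction protonated)
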
def loss_to_pair(self, pair, gain=1e-3, exp_factor=sp.random.exponential(1), pl_exp=3):
    # NB: the exp_factor default is drawn once at function definition time,
    # not per call; pass a fresh sample explicitly for independent fading.
    dist = sp.sqrt(
        sp.add(sp.square(sp.subtract(self.tx_x, pair.rx_x)),
               sp.square(sp.subtract(self.tx_y, pair.rx_y))))
    loss = sp.multiply(
        gain, sp.multiply(sp.square(exp_factor), sp.power(dist, -pl_exp)))
    return loss

def select_from_galfit(catalog):
    try:
        mag_auto = catalog[:, 5]
        a_image = catalog[:, 16]
        mu0 = catalog[:, 20]
        r50 = catalog[:, 21]
        radius_eff = catalog[:, 52]
        magnitude = catalog[:, 49]
        sersic = catalog[:, 55]
        axis_ratio = catalog[:, 58]
    except IndexError:
        return None

    pixelscale = 0.168
    kappa = 2 * sersic - 0.33
    g = scipy.special.gamma(2 * sersic)
    m = magnitude
    f_tot = scipy.power(10., -0.4 * m)
    sig_e = f_tot / (2 * scipy.pi * (radius_eff**2) * ((scipy.e)**kappa)
                     * sersic * (kappa**(-2 * sersic)) * g * axis_ratio)
    sig_0 = sig_e * numpy.exp(kappa)
    surfbrite_0 = -2.5 * numpy.log10(sig_0) + 5 * numpy.log10(pixelscale)

    # earlier selections, kept for reference:
    # udg = (mag_auto < 24) & (a_image > 5) & (mu0 > 24) & (r50 > 6)
    udg = (mag_auto < 24) & (mu0 > 24) & (r50 > 6)
    # NB: the next line overwrites the selection above
    udg = (surfbrite_0 > 23) & (axis_ratio > 0.5) & (radius_eff > 9)

    criteria_count = (mag_auto < 24).astype(numpy.int) + \
        (a_image > 6).astype(numpy.int) + \
        (mu0 > 24).astype(numpy.int) + \
        (r50 > 6).astype(numpy.int)
    # udg = (criteria_count >= 3)
    # udg = (numpy.isfinite(surfbrite_0)) & (sersic > 0.8) & (sersic < 6) & (radius_eff > 8)

    return numpy.append(catalog, surfbrite_0.reshape((-1, 1)), axis=1)[udg]

def test_optdiv2(delta=0.1, mu=0.5, sigma=1.0, dt=1.0,
                 grid=scipy.linspace(0, 5, 200), useValueIter=True):
    time1 = time.time()
    localvars = {}

    def postVIterCallbackFn(nIter, currentVArray, newVArray, optControls, stoppingResult):
        global g_iterList
        (stoppingDecision, diff) = stoppingResult
        print("iter %d, diff %f" % (nIter, diff))
        localvars[0] = nIter

    def postPIterCallbackFn(nIter, newVArray, currentPolicyArrayList, greedyPolicyList, stoppingResult):
        (stoppingDecision, diff) = stoppingResult
        print("iter %d, diff %f" % (nIter, diff))
        localvars[0] = nIter

    initialVArray = grid        # initial guess for V: a linear fn
    initialPolicyArray = grid   # initial guess for d: pay out everything
    utilityFn = lambda x: x     # linear utility
    beta = scipy.power(scipy.e, -(delta * dt))
    print("beta = exp(- %f * %f) = %f" % (delta, dt, beta))
    zRV = scipy.stats.norm(loc=mu * dt, scale=sigma * scipy.sqrt(dt))
    print("income shock: mean %f, sd %f" % (mu * dt, sigma * dt))
    bstar = calc_opt_b(mu, sigma, delta)
    print("optimal barrier: %f" % bstar)

    params = OptDivParams2(utilityFn, beta, zRV, grid)
    # don't use parallel search with this, since it makes a callback to Python
    if useValueIter == True:
        result = bellman.grid_valueIteration([grid], initialVArray, params,
                                             postIterCallbackFn=postVIterCallbackFn,
                                             parallel=False)
        (nIter, currentVArray, newVArray, optControls) = result
    else:
        result = bellman.grid_policyIteration([grid], [initialPolicyArray], initialVArray,
                                              params, postIterCallbackFn=postPIterCallbackFn,
                                              parallel=False)
        (nIter, currentVArray, currentPolicyArrayList, greedyPolicyList) = result
        newVArray = currentVArray
        optControls = currentPolicyArrayList

    time2 = time.time()
    nIters = localvars[0]
    print("total time: %f, avg time: %f" % (time2 - time1, (time2 - time1) / nIters))

    # plot V
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(grid, newVArray)
    ax.set_xlabel("M")
    ax.set_ylabel("V")

    # plot optimal d
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(grid, optControls[0])
    ax.axvline(bstar, color='gray')
    ax.set_xlabel("M")
    ax.set_ylabel("optimal d")
    plt.show()
    return

def logrebin(spectrum, crval, crpix, cd1):
    from scipy import interpolate
    # First determine the "best" pixel scale
    start = crval + cd1 * (1 - crpix)
    inwave = scipy.arange(start, start + spectrum.size * cd1, cd1)
    sampwave = scipy.log10(inwave)
    pixscale = (sampwave[-1] - sampwave[0]) / sampwave.size
    outwave = scipy.arange(sampwave[0], sampwave[-1], pixscale)
    outwave = scipy.power(10, outwave)
    spline = interpolate.splrep(inwave, spectrum, s=0)
    newspec = interpolate.splev(outwave, spline)
    return outwave, newspec

def all_spline_stuff(xs, ys, density=2000):
    myspline = INTERP.UnivariateSpline(xs, ys)
    dense_xs = S.linspace(xs[0], xs[-1], density)
    dense_ys = myspline(dense_xs)
    dy_dx = myspline.derivative(1)
    dense_dy_dx_samples = dy_dx(dense_xs)
    # arclength element: dl/dx = sqrt(1 + (dy/dx)^2)
    dl_dx_samples = S.power(1.0 + S.power(dense_dy_dx_samples, 2.0), 0.5)
    dl_spline = INTERP.UnivariateSpline(dense_xs, dl_dx_samples)
    integrated = list()
    for i in dense_xs:
        integrated.append(dl_spline.integral(0.0, i))
    integrated = S.array(integrated)
    x_to_l = INTERP.UnivariateSpline(dense_xs, integrated)
    l_to_x = INTERP.UnivariateSpline(integrated, dense_xs)
    l_to_y = INTERP.UnivariateSpline(integrated, dense_ys)
    return myspline, {"x_to_l": x_to_l, "l_to_x": l_to_x, "l_to_y": l_to_y}
