Example #1
def extract_parameters(d_snm, e_snm, d_pnm=None, e_pnm=None, n0=None):
    #
    # input:
    #   d_snm, e_snm: densities and energies of symmetric nuclear matter
    #   d_pnm, e_pnm: densities and energies of pure neutron matter
    # output:
    #   n0: saturation density
    #   e0: saturation energy
    #    K: incompressibility
    #    M: skewness (cubic expansion coefficient)
    #    S: symmetry energy
    #    L: slope parameter
    # Ksym: curvature (incompressibility) of the symmetry energy
    from scipy.interpolate import UnivariateSpline
    from scipy.optimize import minimize
    snm_spl = UnivariateSpline(d_snm, e_snm, s=0, k=4)
    if n0 is None:
        res = minimize(snm_spl, (min(d_snm) + max(d_snm)) / 2,
                       bounds=((min(d_snm), max(d_snm)), ))
        n0 = res.x[0]
        e0 = res.fun[0]
    else:
        e0 = snm_spl(n0)
    K = snm_spl.derivatives(n0)[2] * 9 * n0**2
    M = snm_spl.derivatives(n0)[3] * 27 * n0**3  # cubic term
    if d_pnm is None and e_pnm is None:
        return n0, e0, K, M, 0, 0, 0

    pnm_spl = UnivariateSpline(d_pnm, e_pnm, s=0, k=4)
    S = pnm_spl(n0) - e0
    L = pnm_spl.derivatives(n0)[1] * 3 * n0
    Ksym = pnm_spl.derivatives(n0)[2] * 9 * n0**2 - K
    return n0, e0, K, M, S, L, Ksym
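
# A minimal usage sketch for extract_parameters with made-up toy curves (a
# parabolic SNM equation of state saturating at n0 = 0.16 fm^-3 with
# e0 = -16 MeV and K = 230 MeV, plus a linear symmetry-energy term); the
# numbers are illustrative, not data from the original project. Passing n0
# explicitly sidesteps the internal minimizer for this toy check.
import numpy as np

n = np.linspace(0.08, 0.24, 17)
e_snm_toy = -16.0 + (230.0 / 18.0) * ((n - 0.16) / 0.16) ** 2
e_pnm_toy = e_snm_toy + 32.0 + (50.0 / 3.0) * (n - 0.16) / 0.16

n0, e0, K, M, S, L, Ksym = extract_parameters(n, e_snm_toy, n, e_pnm_toy, n0=0.16)
print(n0, e0, K, S, L)  # expect roughly 0.16, -16, 230, 32, 50
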
def read_kinematics_data(kinematics_data_file):
    """read kinematics from file"""
    kinematics_arr = []
    with open(kinematics_data_file) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter='(')
        line_count = 0

        for row in csv_reader:
            if line_count <= 1:
                line_count += 1
            elif row[0] == ')':
                line_count += 1
            else:
                t_datai = row[1]
                # print(row)
                trans_datai0 = row[3].split()[0]
                trans_datai1 = row[3].split()[1]
                trans_datai2 = row[3].split()[2].split(')')[0]

                rot_datai0 = row[4].split()[0]
                rot_datai1 = row[4].split()[1]
                rot_datai2 = row[4].split()[2].split(')')[0]
                kinematics_arr.append([
                    float(t_datai),
                    float(trans_datai0),
                    float(trans_datai1),
                    float(trans_datai2),
                    float(rot_datai0),
                    float(rot_datai1),
                    float(rot_datai2)
                ])
                line_count += 1

        print(f'Processed {line_count} lines in {kinematics_data_file}')

    kinematics_arr = np.array(kinematics_arr)

    dkinematics_arr = np.array([kinematics_arr[:, 0]])
    ddkinematics_arr = np.array([kinematics_arr[:, 0]])
    for i in range(6):
        spl = UnivariateSpline(kinematics_arr[:, 0],
                               kinematics_arr[:, i + 1],
                               s=0)
        dk = []
        ddk = []
        for t in kinematics_arr[:, 0]:
            dki = spl.derivatives(t)[1]
            ddki = spl.derivatives(t)[2]
            dk.append(dki)
            ddk.append(ddki)
        dk = np.array([dk])
        ddk = np.array([ddk])

        dkinematics_arr = np.append(dkinematics_arr, dk, axis=0)
        ddkinematics_arr = np.append(ddkinematics_arr, ddk, axis=0)
    dkinematics_arr = np.transpose(dkinematics_arr)
    ddkinematics_arr = np.transpose(ddkinematics_arr)

    return kinematics_arr, dkinematics_arr, ddkinematics_arr
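
# Minimal sketch of the differentiation step above (the file parsing is
# omitted): given sampled kinematics, an interpolating spline supplies smooth
# first and second time derivatives at the sample times. The toy pitching
# motion below is illustrative, not data from the original project.
import numpy as np
from scipy.interpolate import UnivariateSpline

t = np.linspace(0.0, 1.0, 50)
angle = 30.0 * np.sin(2 * np.pi * t)                        # toy pitch angle [deg]
spl = UnivariateSpline(t, angle, s=0)
d_angle = np.array([spl.derivatives(ti)[1] for ti in t])    # angular rate
dd_angle = np.array([spl.derivatives(ti)[2] for ti in t])   # angular acceleration
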
Example #3
def eos_spline(v, e, tol):
    '''
    Get volume, energy, pressure, and bulk modulus on a refined grid using a
    smoothing spline, given v in A^3 and e in eV.
    '''
    from scipy.interpolate import UnivariateSpline
    s = UnivariateSpline(v, e, k=3, s=tol)
    vh = np.linspace(v[0], v[-1], 10 * len(v) - 1)
    eh = [s.derivatives(i)[0] for i in vh]
    ph = [-s.derivatives(i)[1] * units.eVA_GPa for i in vh]    # p = -dE/dV
    bh = [s.derivatives(i)[2] * i * units.eVA_GPa for i in vh]  # B = V * d2E/dV2
    return vh, eh, ph, bh
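
# Self-contained sketch of the same pressure/bulk-modulus relations on a toy
# quadratic E(V); the 160.2176634 constant stands in for the project's
# units.eVA_GPa and is the usual eV/A^3 -> GPa conversion factor.
import numpy as np
from scipy.interpolate import UnivariateSpline

eVA_GPa = 160.2176634
V = np.linspace(15.0, 20.0, 11)            # volumes in A^3
E = 0.05 * (V - 17.5) ** 2 - 3.0           # toy energies in eV, minimum at 17.5 A^3

s3 = UnivariateSpline(V, E, k=3, s=0)
p = [-s3.derivatives(v)[1] * eVA_GPa for v in V]     # p = -dE/dV, zero at the minimum
B = [s3.derivatives(v)[2] * v * eVA_GPa for v in V]  # B = V * d2E/dV2, roughly 0.1*V*160.2 GPa
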
Example #4
def calc_conductance_curve(V_list,T,R_T,C_sigma):
    #test_voltages = arange(-v_max,v_max,v_step)
    test_currents = []
    for V in V_list:
        test_currents.append(calc_current(V,T,R_T,C_sigma))
        #print "V: %g, current %g"%(V,test_currents[-1])

    ## calc conductances manually
    #test_conductances = []
    #for idx,V in enumerate (test_currents[1:-2]):
    #    if idx==0:
    #        print idx
    #    test_conductances.append((test_currents[idx+2]-test_currents[idx])/(2.0*v_step))
    #
    #test_voltages_G = test_voltages[1:-2]

    #
    # SPLINE
    #
    spline = UnivariateSpline(V_list,test_currents,s=0)
    #print "test_conductances"
    #indices = [x for x, y in enumerate(col1) if (y >0.7 or y<-0.7)]
    test_conductances = []
    for v_iter in V_list:
        test_conductances.append(spline.derivatives(v_iter)[1])
    return test_conductances
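
# Standalone illustration of the dI/dV step above, using a made-up ohmic
# current so the expected conductance is known exactly (G = 1/R_T).
import numpy as np
from scipy.interpolate import UnivariateSpline

R_T = 50e3                                  # hypothetical tunnel resistance [ohm]
V_list = np.linspace(-1e-3, 1e-3, 41)       # bias points [V]
I_list = V_list / R_T                       # toy linear I-V curve

iv_spline = UnivariateSpline(V_list, I_list, s=0)
G_list = [iv_spline.derivatives(v)[1] for v in V_list]  # dI/dV, ~2e-5 S everywhere
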
Example #5
def extract_symmetry_energy_parameters(d_snm, e_snm, d_pnm, e_pnm, n0=None):
    #
    # input:
    #   d_snm, e_snm: densities and energies of symmetric nuclear matter
    #   d_pnm, e_pnm: densities and energies of pure neutron matter
    # output:
    #   n0: saturation density
    #   e0: saturation energy
    #    K: incompressibility
    #    S: symmetry energy
    #    L: slope parameter
    from scipy.interpolate import UnivariateSpline
    from scipy.optimize import minimize
    snm_spl = UnivariateSpline(d_snm, e_snm, s=0, k=4)
    if n0 is None:
        res = minimize(snm_spl, (min(d_snm) + max(d_snm)) / 2,
                       bounds=((min(d_snm), max(d_snm)), ))
        n0 = res.x[0]
        e0 = res.fun[0]
    else:
        e0 = snm_spl(n0)
    d_sym, e_sym = [], []
    for i in range(min(len(e_snm), len(e_pnm))):
        if d_snm[i] != d_pnm[i]:
            continue
        d_sym.append(d_pnm[i])
        e_sym.append(e_pnm[i] - e_snm[i])
    sym_spl = UnivariateSpline(d_sym, e_sym, s=0, k=4)
    S = sym_spl(n0)
    L = sym_spl.derivatives(n0)[1] * 3 * n0
    K = sym_spl.derivatives(n0)[2] * 9 * n0**2
    return n0, S, L, K
Example #6
def my_curve_fit():
    path = [[5, 117], [12, 110], [12, 105], [13, 104], [15, 102], [20, 97], [22, 95], [22, 73], [24, 71], [29, 66], [30, 66], [31, 65], [32, 64], [37, 59], [41, 55], [41, 54], [42, 53], [44, 51], [44, 35], [45, 34], [50, 29], [57, 29], [58, 28], [63, 23], [70, 16], [84, 16], [85, 15]]

    points = np.array(path)
    x = points[:,0]
    x = [float(a) for a in x]
    b = set()
    for i in range(len(x)):
        if x[i] in b:
            x[i] = x[i] + 0.1
        else:
            b.add(x[i])
    y = points[:,1]
    plt.plot(x, y, "og")
    spl = UnivariateSpline(x, points[:,1], s=0)
    kn = spl.get_knots()
    lst = [1, 1, 1 / 2, 1 / 6]  # 1/n! factors for the cubic Taylor coefficients
    for i in range(len(kn)-1):
        cf = lst * spl.derivatives(kn[i])
        print("For {0} <= x <= {1}, p(x) = {5}*(x-{0})^3 + {4}*(x-{0})^2 + {3}*(x-{0}) + {2}".format(kn[i], kn[i+1], *cf))
        dx = np.linspace(kn[i], kn[i+1], 10)
        dy = []
        for item in dx:
            dy.append(pow((item-kn[i]),3)*cf[3]+pow((item-kn[i]),2)*cf[2]+(item-kn[i])*cf[1]+cf[0])
        plt.plot(dx, dy, "-r")
Example #7
    def curie_inflection(self, min_temp: float, max_temp: float)\
            -> Tuple[float, UnivariateSpline]:
        """Estimate Curie temperature by inflection point.
        
        Estimate Curie point by determining the inflection point of
        the curve segment starting at the Hopkinson peak. The curve
        segment must be specified.

        :param min_temp: start of curve segment
        :param max_temp: end of curve segment
        :return: (temp, spline) where
          temp is the estimated Curie temperature;
          spline is the scipy.interpolate.UnivariateSpline used to fit
            the data and determine the inflection point
        """

        # Fit a cubic spline to the data. Using the whole dataset gives
        # a better approximation at the endpoints of the selected range.
        spline = UnivariateSpline(self.data[0][0], self.data[0][1], s=.1)

        # Get the data points which lie within the selected range.
        temps, _ = MeasurementCycle.chop_data(self.data[0], min_temp, max_temp)

        # Evaluate the second derivative of the spline at each selected
        # temperature step.
        derivs = [spline.derivatives(t)[2] for t in temps]

        # Fit a new spline to the derivatives in order to calculate the
        # inflection point.
        spline2 = UnivariateSpline(temps, derivs, s=3)

        # The root of the 2nd-derivative spline gives the inflection point.
        return spline2.roots()[0], spline
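
# Self-contained sketch of the inflection-point idea above on synthetic data
# (not the class's measurement format): the second derivative of a falling
# sigmoid changes sign at the "Curie" temperature placed at 300.
import numpy as np
from scipy.interpolate import UnivariateSpline

T = np.linspace(200.0, 400.0, 201)
M = 1.0 / (1.0 + np.exp((T - 300.0) / 10.0))     # toy thermomagnetic curve

spline = UnivariateSpline(T, M, s=0)
sel = (T > 260.0) & (T < 340.0)                  # restrict to the falling segment
d2 = [spline.derivatives(t)[2] for t in T[sel]]
spline2 = UnivariateSpline(T[sel], d2, s=0)
print(spline2.roots())                           # expect a root near 300
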
Example #8
def QuasiPWeight(ReSE_A):
	''' calculating the Fermi-liquid quasiparticle weight (residue) Z '''
	N = len(En_A)
	#M = int(1e-3/dE) if dE < 1e-3 else 1	# very fine grids lead to oscillations
	# replace 1 with M below to dilute the grid
	ReSE = UnivariateSpline(En_A[int(N/2-10):int(N/2+10):1],ReSE_A[int(N/2-10):int(N/2+10):1])
	dReSEdw = ReSE.derivatives(0.0)[1]
	Z = 1.0/(1.0-dReSEdw)
	return sp.array([Z,dReSEdw])
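
# Toy check of the Z = 1/(1 - dReSigma/dw) step above, assuming QuasiPWeight
# and its module-level energy grid En_A live in the same namespace; for a
# linear self-energy ReSigma(w) = -0.5*w the weight is Z = 1/1.5.
import numpy as np
sp = np                                    # stand-in for the module's array alias

En_A = np.linspace(-1.0, 1.0, 2001)        # energy grid around the Fermi level
ReSE_A = -0.5 * En_A                       # hypothetical real part of the self-energy
print(QuasiPWeight(ReSE_A))                # approximately [0.667, -0.5]
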
Example #10
def calc_conductance_curve_full(sigma,N,V_list,R_T,C_sigma,T_p,island_volume):
    """
    Calculates full conductance curve taking into account self-heating
    """
    test_currents = []
    for V in V_list:
        test_currents.append(calc_current_full(sigma,N,V,R_T,C_sigma,T_p,island_volume))
    # SPLINE
    spline = UnivariateSpline(V_list,test_currents,s=0)
    test_conductances = []
    for v_iter in V_list:
        test_conductances.append(spline.derivatives(v_iter)[1])
    return test_conductances
def read_cfd_data(kinematics_file, cfd_data_file):
    """read cfd results force coefficients data"""
    kinematics_arr = []
    with open(kinematics_file) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter='(')
        line_count = 0

        for row in csv_reader:
            if line_count <= 1:
                line_count += 1
            elif row[0] == ')':
                line_count += 1
            else:
                t_datai = row[1]
                # print(row)
                rot_datai0 = row[4].split()[0]
                kinematics_arr.append([
                    float(t_datai),
                    float(rot_datai0) * np.pi / 180,
                ])
                line_count += 1

        print(f'Processed {line_count} lines in {kinematics_file}')

    kinematics_arr = np.array(kinematics_arr)
    spl = UnivariateSpline(kinematics_arr[:, 0], kinematics_arr[:, 1], s=0)

    cf_array = []
    with open(cfd_data_file) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter='\t')
        line_count = 0

        for row in csv_reader:
            if line_count <= 14:
                line_count += 1
            else:
                ti = float(row[0])
                phii = spl(ti)
                dphii = -1.0 * spl.derivatives(ti)[1]
                cli = float(row[3])
                cdi = np.sign(dphii) * (np.sin(phii) * float(row[2]) +
                                        np.cos(phii) * float(row[1]))
                csi = np.cos(phii) * float(row[2]) - np.sin(phii) * float(
                    row[1])
                cf_array.append([ti, cli, cdi, csi])
                line_count += 1

        print(f'Processed {line_count} lines in {cfd_data_file}')

    cf_array = np.array(cf_array)
    return cf_array
Example #12
def demo():
    x = np.arange(6)
    y = np.array([3, 1, 4, 1, 5, 9])
    spl = UnivariateSpline(x, y, s=0)
    kn = spl.get_knots()
    for i in range(len(kn)-1):
        cf = [1, 1, 1/2, 1/6] * spl.derivatives(kn[i])
        print("For {0} <= x <= {1}, p(x) = {5}*(x-{0})^3 + {4}*(x-{0})^2 + {3}*(x-{0}) + {2}".format(kn[i], kn[i+1], *cf))
        dx = np.linspace(kn[i], kn[i+1], 100)
        dy = []
        for item in dx:
            dy.append(pow((item-kn[i]),3)*cf[3]+pow((item-kn[i]),2)*cf[2]+(item-kn[i])*cf[1]+cf[0])
        plt.plot(dx, dy, "-r")
        plt.plot(x, y, "og")
        plt.plot(x, y, "-b")
def _find_peak(x, y, ye, k=3, s=None):
    spl = UnivariateSpline(x, y, ye ** -1, k=k, s=s)
    f = lambda k : -spl(k)
    fprime = np.vectorize(lambda k : - spl.derivatives(k)[1])
    xp_best = None
    yp_best = -np.inf
    bounds = [(x.min(), x.max())]
    for i in xrange(5):
        x0 = (x.ptp() * np.random.rand() + x.min(),)
        xp, nfeval, rc = fmin_tnc(f, x0, fprime=fprime, bounds=bounds,
                messages=tnc.MSG_NONE)
        xp = xp.item()
        yp = spl(xp)
        if yp >= yp_best:
            xp_best = xp
            yp_best = yp
    return xp_best, yp_best.item()
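
# A Python 3 sketch of the same weighted-spline peak search, swapping fmin_tnc
# for the derivative roots of a quartic spline (toy noisy parabola and made-up
# errors, not data from the original project).
import numpy as np
from scipy.interpolate import UnivariateSpline

rng = np.random.default_rng(0)
x = np.linspace(0.0, 10.0, 50)
ye = np.full_like(x, 0.05)                       # assumed 1-sigma errors
y = -(x - 6.0) ** 2 + 4.0 + rng.normal(0.0, 0.05, x.size)

spl = UnivariateSpline(x, y, w=ye ** -1, k=4, s=len(x))
crit = spl.derivative().roots()                  # candidate extrema (k=4 -> cubic derivative)
xp = crit[np.argmax(spl(crit))]                  # peak location, near x = 6
print(xp, spl(xp).item())
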
Example #14
    def frequency_and_derivative(self,
                                 smth_order=None,
                                 fft_order=None,
                                 spline_derivative=None,
                                 verbose=0):

        if (smth_order or fft_order):

            if (verbose):

                print(
                    'Cannot assure proper functionality of both order smoothing and low pass filtering.'
                )

        self.deriv = np.zeros_like(self.pos)
        for i in range(1, len(self.pos)):
            self.deriv[i] = (self.pos[i] - self.pos[i - 1]) / (
                self.time[i] - self.time[i - 1])

        if (smth_order):
            smth_params = np.polyfit(self.time, self.deriv, smth_order)
            pos_func = np.poly1d(smth_params)
            self.deriv = pos_func(self.time)

        if (fft_order):
            self.deriv = self.deriv

        if (spline_derivative):

            # hard set as a cubic spline,
            #    number is a smoothing factor between knots, see scipy.UnivariateSpline
            #
            #    recommended: 7 for dt=0.002 spacing

            spl = UnivariateSpline(self.time,
                                   self.pos,
                                   k=3,
                                   s=spline_derivative)
            self.deriv = (spl.derivative())(self.time)

            self.dderiv = np.zeros_like(self.deriv)
            #
            # can also do a second deriv
            for indx, timeval in enumerate(self.time):

                self.dderiv[indx] = spl.derivatives(timeval)[2]
Example #16
 def add_Nsquared(self, rhokey="rho", depthkey="z", N2key="N2", s=0.2):
     """ Calculate the squared buoyancy frequency, based on in-situ density.
     Uses a smoothing spline to compute derivatives.
     
     rhokey::string              Data key to use for in-situ density
     depthkey::string            Data key to use for depth
     N2key::string               Data key to use for N^2
     s::float                    Spline smoothing factor (smaller values
                                 give a noisier result)
     """
     if rhokey not in self.fields:
         raise FieldError("add_Nsquared requires in-situ density")
     msk = self.nanmask((rhokey, depthkey))
     rho = self[rhokey][~msk]
     z = self[depthkey][~msk]
     rhospl = UnivariateSpline(z, rho, s=s)
     drhodz = np.asarray([-rhospl.derivatives(_z)[1] for _z in z])
     N2 = np.empty(len(self), dtype=np.float64)
     N2[msk] = np.nan
     N2[~msk] = -G / rho * drhodz
     return self._addkeydata(N2key, N2)
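
# Standalone sketch of the buoyancy-frequency relation used above on a made-up
# linear density profile (G, the depth grid, and the gradient are illustrative
# values, not the class's data); with z as depth, N^2 = (G/rho)*drho/dz here.
import numpy as np
from scipy.interpolate import UnivariateSpline

G = 9.81                                   # gravitational acceleration [m/s^2]
z = np.linspace(0.0, 100.0, 101)           # depth [m]
rho = 1025.0 + 0.01 * z                    # toy in-situ density [kg/m^3]

rhospl = UnivariateSpline(z, rho, s=0.2)
drhodz = np.asarray([-rhospl.derivatives(_z)[1] for _z in z])
N2 = -G / rho * drhodz                     # ~9.6e-5 s^-2 everywhere
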
Example #17
spl = UnivariateSpline(mass, luminosity, k=4, s=0)

print(mass, luminosity)
lum_deriv = luminosity
#lum_deriv[0] = spl.derivatives(mass[0])[1]


xs = np.linspace(0.5, 100., 1000)
#xs = np.logspace(0.5, 100., 1000)

xs_deriv = np.zeros(len(mass))
#xs_deriv = np.zeros(len(xs))
print(len(xs))
for i in range(len(mass)):
#    print i, xs[i]
    xs_deriv[i] = spl.derivatives(mass[i])[1]
#    xs_deriv[i] = spl.derivatives(xs[i])[1]
    print(mass[i], luminosity[i], xs_deriv[i], xs_deriv[i] * mass[i] / luminosity[i])

#log_lum = np.zeros(len(mass))
#for i in range(len(mass)):#(0,1,2):#range(len(mass)):
#    lum_deriv[i] = spl.derivatives(mass[i])[1]
#    log_lum[i] = lum_deriv[i] / (mass[i]*mass[i])
#    print i, mass[i], luminosity[i], lum_deriv[i], log_lum[i]
#
#print lum_orig
#luminosity = lum_orig
#print mass, luminosity, lum_deriv, log_lum
#print len(xs), len(xs_deriv)

pl.clf()
Example #18
	def estimate(self, observedLC):
		"""!
		Estimate intrinsicFlux, period, eccentricity, omega, tau, & a2sini 
		"""
		## intrinsicFluxEst
		maxPeriodFactor = 10.0
		model = LombScargleFast().fit(observedLC.t, observedLC.y, observedLC.yerr)
		periods, power = model.periodogram_auto(nyquist_factor = observedLC.numCadences)
		model.optimizer.period_range = (2.0*np.mean(observedLC.t[1:] - observedLC.t[:-1]), maxPeriodFactor*observedLC.T)
		periodEst = model.best_period
		numIntrinsicFlux = 100
		lowestFlux = np.min(observedLC.y[np.where(observedLC.mask == 1.0)])
		highestFlux = np.max(observedLC.y[np.where(observedLC.mask == 1.0)])
		intrinsicFlux = np.linspace(np.min(observedLC.y[np.where(observedLC.mask == 1.0)]), np.max(observedLC.y[np.where(observedLC.mask == 1.0)]), num = numIntrinsicFlux)
		intrinsicFluxList = list()
		totalIntegralList = list()
		for f in xrange(1, numIntrinsicFlux - 1):
			beamedLC = observedLC.copy()
			beamedLC.x = np.require(np.zeros(beamedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
			for i in xrange(beamedLC.numCadences):
				beamedLC.y[i] = observedLC.y[i]/intrinsicFlux[f]
				beamedLC.yerr[i] = observedLC.yerr[i]/intrinsicFlux[f]
			dopplerLC = beamedLC.copy()
			dopplerLC.x = np.require(np.zeros(dopplerLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
			for i in xrange(observedLC.numCadences):
				dopplerLC.y[i] = math.pow(beamedLC.y[i], 1.0/3.44)
				dopplerLC.yerr[i] = (1.0/3.44)*math.fabs(dopplerLC.y[i]*(beamedLC.yerr[i]/beamedLC.y[i]))
			dzdtLC = dopplerLC.copy()
			dzdtLC.x = np.require(np.zeros(dopplerLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
			for i in xrange(observedLC.numCadences):
				dzdtLC.y[i] = 1.0 - (1.0/dopplerLC.y[i])
				dzdtLC.yerr[i] = math.fabs((-1.0*dopplerLC.yerr[i])/math.pow(dopplerLC.y[i], 2.0))
			foldedLC = dzdtLC.fold(periodEst)
			foldedLC.x = np.require(np.zeros(foldedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
			integralSpline = UnivariateSpline(foldedLC.t[np.where(foldedLC.mask == 1.0)], foldedLC.y[np.where(foldedLC.mask == 1.0)], 1.0/foldedLC.yerr[np.where(foldedLC.mask == 1.0)], k = 3, s = None, check_finite = True)
			totalIntegral = math.fabs(integralSpline.integral(foldedLC.t[0], foldedLC.t[-1]))
			intrinsicFluxList.append(intrinsicFlux[f])
			totalIntegralList.append(totalIntegral)
		intrinsicFluxEst = intrinsicFluxList[np.where(np.array(totalIntegralList) == np.min(np.array(totalIntegralList)))[0][0]]

		## periodEst
		for i in xrange(beamedLC.numCadences):
			beamedLC.y[i] = observedLC.y[i]/intrinsicFluxEst
			beamedLC.yerr[i] = observedLC.yerr[i]/intrinsicFluxEst
			dopplerLC.y[i] = math.pow(beamedLC.y[i], 1.0/3.44)
			dopplerLC.yerr[i] = (1.0/3.44)*math.fabs(dopplerLC.y[i]*(beamedLC.yerr[i]/beamedLC.y[i]))
			dzdtLC.y[i] = 1.0 - (1.0/dopplerLC.y[i])
			dzdtLC.yerr[i] = math.fabs((-1.0*dopplerLC.yerr[i])/math.pow(dopplerLC.y[i], 2.0))
		model = LombScargleFast().fit(dzdtLC.t, dzdtLC.y, dzdtLC.yerr)
		periods, power = model.periodogram_auto(nyquist_factor = dzdtLC.numCadences)
		model.optimizer.period_range = (2.0*np.mean(dzdtLC.t[1:] - dzdtLC.t[:-1]), maxPeriodFactor*dzdtLC.T)
		periodEst = model.best_period

		## eccentricityEst & omega2Est
		# First find a full period going from rising to falling. 
		risingSpline = UnivariateSpline(dzdtLC.t[np.where(dzdtLC.mask == 1.0)], dzdtLC.y[np.where(dzdtLC.mask == 1.0)], 1.0/dzdtLC.yerr[np.where(dzdtLC.mask == 1.0)], k = 3, s = None, check_finite = True)
		risingSplineRoots = risingSpline.roots()
		firstRoot = risingSplineRoots[0]
		if risingSpline.derivatives(risingSplineRoots[0])[1] > 0.0:
			tRising = risingSplineRoots[0]
		else:
			tRising = risingSplineRoots[1]
		# Now fold the LC starting at tRising and going for a full period.
		foldedLC = dzdtLC.fold(periodEst, tStart = tRising)
		foldedLC.x = np.require(np.zeros(foldedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
		# Fit the folded LC with a spline to figure out alpha and beta
		fitLC = foldedLC.copy()
		foldedSpline = UnivariateSpline(foldedLC.t[np.where(foldedLC.mask == 1.0)], foldedLC.y[np.where(foldedLC.mask == 1.0)], 1.0/foldedLC.yerr[np.where(foldedLC.mask == 1.0)], k = 3, s = 2*foldedLC.numCadences, check_finite = True)
		for i in xrange(fitLC.numCadences):
			fitLC.x[i] = foldedSpline(fitLC.t[i])
		# Now get the roots and find the falling root
		tZeros = foldedSpline.roots()
		if tZeros.shape[0] == 1: # We have found just tFalling
			tFalling = tZeros[0]
			tRising = fitLC.t[0]
			startIndex = 0
			tFull = fitLC.t[-1]
			stopIndex = fitLC.numCadences
		elif tZeros.shape[0] == 2: # We have found tFalling and one of tRising or tFull
			if foldedSpline.derivatives(tZeros[0])[1] < 0.0:
				tFalling = tZeros[0]
				tFull = tZeros[1]
				stopIndex = np.where(fitLC.t < tFull)[0][-1]
				tRising = fitLC.t[0]
				startIndex = 0
			elif foldedSpline.derivatives(tZeros[0])[1] > 0.0:
				if foldedSpline.derivatives(tZeros[1])[1] < 0.0:
					tRising = tZeros[0]
					startIndex = np.where(fitLC.t > tRising)[0][0]
					tFalling = tZeros[1]
					tFull = fitLC.t[-1]
					stopIndex = fitLC.numCadences
				else:
					raise RuntimeError('Could not determine alpha & omega correctly because the first root is rising but the second root is not falling!')
		elif tZeros.shape[0] == 3:
			tRising = tZeros[0]
			startIndex = np.where(fitLC.t > tRising)[0][0]
			tFalling = tZeros[1]
			tFull = tZeros[2]
			stopIndex = np.where(fitLC.t < tFull)[0][-1]
		else:
			raise RuntimeError('Could not determine alpha & omega correctly because tZeros has %d roots!'%(tZeros.shape[0]))
		# One full period now goes from tRising to periodEst. The maxima occurs between tRising and tFalling while the minima occurs between tFalling and tRising + periodEst  
		# Find the minima and maxima
		alpha = math.fabs(fitLC.x[np.where(np.max(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]])
		beta = math.fabs(fitLC.x[np.where(np.min(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]])
		peakLoc = fitLC.t[np.where(np.max(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]]
		troughLoc = fitLC.t[np.where(np.min(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]]
		KEst = 0.5*(alpha + beta)
		delta2 = (math.fabs(foldedSpline.integral(tRising, peakLoc)) + math.fabs(foldedSpline.integral(troughLoc, tFull)))/2.0
		delta1 = (math.fabs(foldedSpline.integral(peakLoc, tFalling)) + math.fabs(foldedSpline.integral(tFalling, troughLoc)))/2.0
		eCosOmega2 = (alpha - beta)/(alpha + beta)
		eSinOmega2 = ((2.0*math.sqrt(alpha*beta))/(alpha + beta))*((delta2 - delta1)/(delta2 + delta1))
		eccentricityEst = math.sqrt(math.pow(eCosOmega2, 2.0) + math.pow(eSinOmega2, 2.0))
		tanOmega2 = math.fabs(eSinOmega2/eCosOmega2)
		if (eCosOmega2/math.fabs(eCosOmega2) == 1.0) and (eSinOmega2/math.fabs(eSinOmega2) == 1.0):
			omega2Est = math.atan(tanOmega2)*(180.0/math.pi)
		if (eCosOmega2/math.fabs(eCosOmega2) == -1.0) and (eSinOmega2/math.fabs(eSinOmega2) == 1.0):
			omega2Est = 180.0 - math.atan(tanOmega2)*(180.0/math.pi)
		if (eCosOmega2/math.fabs(eCosOmega2) == -1.0) and (eSinOmega2/math.fabs(eSinOmega2) == -1.0):
			omega2Est = 180.0 + math.atan(tanOmega2)*(180.0/math.pi)
		if (eCosOmega2/math.fabs(eCosOmega2) == 1.0) and (eSinOmega2/math.fabs(eSinOmega2) == -1.0):
			omega2Est = 360.0 - math.atan(tanOmega2)*(180.0/math.pi)
		omega1Est = omega2Est - 180.0

		## tauEst
		zDot = KEst*(1.0 + eccentricityEst)*(eCosOmega2/eccentricityEst)
		zDotLC = dzdtLC.copy()
		for i in xrange(zDotLC.numCadences):
			zDotLC.y[i] = zDotLC.y[i] - zDot
		zDotSpline = UnivariateSpline(zDotLC.t[np.where(zDotLC.mask == 1.0)], zDotLC.y[np.where(zDotLC.mask == 1.0)], 1.0/zDotLC.yerr[np.where(zDotLC.mask == 1.0)], k = 3, s = 2*zDotLC.numCadences, check_finite = True)
		for i in xrange(zDotLC.numCadences):
			zDotLC.x[i] = zDotSpline(zDotLC.t[i])
		zDotZeros = zDotSpline.roots()
		zDotFoldedLC = dzdtLC.fold(periodEst)
		zDotFoldedSpline = UnivariateSpline(zDotFoldedLC.t[np.where(zDotFoldedLC.mask == 1.0)], zDotFoldedLC.y[np.where(zDotFoldedLC.mask == 1.0)], 1.0/zDotFoldedLC.yerr[np.where(zDotFoldedLC.mask == 1.0)], k = 3, s = 2*zDotFoldedLC.numCadences, check_finite = True)
		for i in xrange(zDotFoldedLC.numCadences):
			zDotFoldedLC.x[i] = zDotFoldedSpline(zDotFoldedLC.t[i])
		tC = zDotFoldedLC.t[np.where(np.max(zDotFoldedLC.x) == zDotFoldedLC.x)[0][0]]
		nuC = (360.0 - omega2Est)%360.0
		tE = zDotFoldedLC.t[np.where(np.min(zDotFoldedLC.x) == zDotFoldedLC.x)[0][0]]
		nuE = (180.0 - omega2Est)%360.0
		if math.fabs(360.0 - nuC) < math.fabs(360 - nuE):
			tauEst = zDotZeros[np.where(zDotZeros > tC)[0][0]]
		else:
			tauEst = zDotZeros[np.where(zDotZeros > tE)[0][0]]

		## a2sinInclinationEst
		a2sinInclinationEst = ((KEst*periodEst*self.Day*self.c*math.sqrt(1.0 - math.pow(eccentricityEst, 2.0)))/self.twoPi)/self.Parsec

		return intrinsicFluxEst, periodEst, eccentricityEst, omega1Est, tauEst, a2sinInclinationEst
Example #19
class ContNormSpline:
    def __init__(self,
                 lmbd,
                 flux,
                 ivar,
                 name=None,
                 spln_degr=1,
                 spln_smth=100,
                 num_lines=9,
                 wndw_init=100,
                 wndw_step=0,
                 crop_rand=0,
                 mean_wdth=0,
                 crop_strt=8,
                 crop_step=25,
                 spl_2=False):
        '''
		Initializes all cropping and spline properties.

		name	  - the name of this spline for the generated plots.
		spln_degr - the degree of interpolation spline polynomials. spln_degr must be between [1, 5]
		spln_smth - the smoothing factor for the interpolation spline.
		num_lines - the last Balmer line to directly crop, all data points bluewards of this line
					are ignored. num_lines must be an int >= 3.
		wndw_init - the number of indices to crop out on both sides of the first Balmer line (note
					that 2*wndw_init+1 indices will be ignored).
		wndw_step - the increment added to the crop window for each successive Balmer line; 0 keeps
					the same window width for every line.
		crop_rand - the width of the random window for the center of each cropped index.
		mean_wdth - the number of indices to collect on both sides of each selected index to take
					the mean of (note that 2*mean_wdth+1 indices will be used). mean_wdth is an int.
		crop_strt - the index in the wavelength array to start sampling at. All indices less than
					start are ignored.
		crop_step - the step size for the sampling of indices. only one index every step_size indies
					will be included in the output arrays.
		spl_2	  - whether a second spline should be run on the initially constructed spline.
		'''
        self.lmbd = lmbd
        self.flux = flux
        self.ivar = ivar
        self.__xlen = len(lmbd)
        self.name = self.__get_name(name)
        self.spln_degr = spln_degr
        self.spln_smth = spln_smth
        self.line_idxs = np.searchsorted(self.lmbd,
                                         self.__get_balmer(num_lines))
        self.crop_wdth = self.__crop_wdth(num_lines, wndw_init, wndw_step)
        self.crop_rand = crop_rand
        self.mean_wdth = mean_wdth
        self.crop_strt = crop_strt
        self.crop_step = crop_step
        self.spl_2 = spl_2

    def __get_balmer(self, max_line):
        # generate Balmer lines
        balmer = (np.arange(3, max_line + 1))**2
        balmer = 3645.07 * (balmer / (balmer - 4))
        return balmer

    def __get_name(self, name):
        # name for titles
        if name is None:
            return ''
        else:
            return ' - ' + name

    def __crop_wdth(self, num_lines, wndw_init, wndw_step):
        # convert Balmer line crop window into required format
        if wndw_step != 0:
            return np.arange(wndw_init, wndw_init + num_lines * wndw_step,
                             wndw_step)
        else:
            return np.full((num_lines, ), wndw_init)

    def crop(self):
        '''
		Given the wavelength, flux, and inverse variance arrays, crops out the Balmer lines and
		samples a certain number of random points to prepare the arrays for spline fitting.
		'''
        mask = np.zeros(self.__xlen, dtype=bool)

        # crop random
        unif = np.arange(self.crop_strt, self.__xlen, self.crop_step)
        rand = np.random.randint(-self.crop_rand,
                                 self.crop_rand + 1,
                                 size=len(unif))
        idxs = unif + rand
        if idxs[-1] >= self.__xlen:
            idxs[-1] = self.__xlen - 1
        mask[idxs] = True

        # crop lines
        mask[np.argwhere(self.ivar < 0.1)] = False
        mask[:self.line_idxs[-1]] = False
        for cdx, wdth in zip(self.line_idxs, self.crop_wdth):
            lo = cdx - wdth
            hi = cdx + wdth
            mask[lo:hi] = False

        # crop mean
        keep_idxs = np.concatenate(np.argwhere(mask))
        self.lmbd_crop = np.zeros(keep_idxs.shape)
        self.flux_crop = np.zeros(keep_idxs.shape)
        self.ivar_crop = np.zeros(keep_idxs.shape)
        for i in range(len(keep_idxs)):
            keep = keep_idxs[i]
            self.lmbd_crop[i] = np.mean(self.lmbd[keep - self.mean_wdth:keep +
                                                  self.mean_wdth + 1])
            self.flux_crop[i] = np.mean(self.flux[keep - self.mean_wdth:keep +
                                                  self.mean_wdth + 1])
            self.ivar_crop[i] = np.sqrt(
                np.sum(self.ivar[keep - self.mean_wdth:keep + self.mean_wdth +
                                 1]**2))

    def plot_crop(self, legend=True, ylim=(0, 140)):
        plt.title('Cropped Regions' + self.name)
        plt.errorbar(self.lmbd,
                     self.flux,
                     yerr=1 / self.ivar,
                     label='raw data')
        plt.errorbar(self.lmbd_crop,
                     self.flux_crop,
                     yerr=1 / self.ivar_crop,
                     label='cropped')
        plt.ylim(ylim)
        if legend:
            plt.legend()
        plt.show()

    def construct_spline(self):
        self.spline = USpline(self.lmbd_crop,
                              self.flux_crop,
                              w=self.ivar_crop,
                              k=self.spln_degr,
                              s=self.spln_smth)
        self.blue_most = self.lmbd_crop[0]

        # get blue extrapolation
        frst_derv = self.spline.derivative()
        drvs = frst_derv(self.lmbd)
        x_4k, x_5k, x_6k, x_8k, x_9k, x_Tk = np.searchsorted(
            self.lmbd, (4000, 5000, 6000, 8000, 9000, 10000))

        dr4k = np.mean(drvs[x_4k:x_5k])  # derivative centered on 4500
        dr5k = np.mean(drvs[x_5k:x_6k])  # derivative centered on 5500
        lb4k = np.mean(self.lmbd[x_4k:x_5k])  # exact lambda, roughly 4500
        lb5k = np.mean(self.lmbd[x_5k:x_6k])  # exact lambda, roughly 5500
        scnd_derv = (dr4k - dr5k) / (
            lb4k - lb5k)  # get second derivative between these two points

        dist = (self.blue_most -
                self.lmbd[0]) / 2  # distance to middle of extrapolated section
        b_fl, b_sl = self.spline.derivatives(
            self.blue_most)  # get flux, slope at blue-most kept point
        slop = b_sl - scnd_derv * dist
        intr = b_fl - slop * self.blue_most
        self.blue_slop = slop
        self.blue_intr = intr

        if self.spl_2:
            self.compute_cont_norm()
            self.spline = USpline(self.lmbd,
                                  self.cont,
                                  w=self.ivar,
                                  k=self.spln_degr,
                                  s=self.spln_smth)
            self.blue_most = 0
            self.cont = None
            self.norm = None

    def eval(self, x):
        return np.where(x < self.blue_most,
                        self.blue_intr + self.blue_slop * x, self.spline(x))

    def compute_cont_norm(self):
        self.cont = self.eval(self.lmbd)
        self.norm = self.flux / self.cont
        self.__clip_norm()

    def __clip_norm(self, b_lim=0, u_lim=2):
        # clip values beyond b_lim or u_lim to exactly 1
        self.norm[np.argwhere(self.norm > u_lim)] = 1
        self.norm[np.argwhere(self.norm < b_lim)] = 1

    def plot_cont(self, legend=True, ylim=(0, 140)):
        plt.title('Continuum' + self.name)
        plt.plot(self.lmbd, self.flux, label='data')
        plt.plot(self.lmbd, self.cont, label='continuum')
        plt.ylim(ylim)
        if legend:
            plt.legend()
        plt.show()

    def plot_norm(self, legend=True, ylim=(0, 2)):
        plt.title('Normalization' + self.name)
        plt.plot(self.lmbd, self.norm, label='normalized')
        plt.plot(self.lmbd, [1] * self.__xlen, 'k', label='1')
        plt.ylim(ylim)
        if legend:
            plt.legend()
        plt.show()

    def run_all(self, plot=True):
        # runs all of the requisite functions to compute the continuum and normalized fluxes
        self.crop()
        self.construct_spline()
        self.compute_cont_norm()

        if plot:
            self.plot_crop()
            self.plot_cont()
            self.plot_norm()
Example #20
        ####
        ydelta = Results_a[stage,1]
        threslist = Results_a[stage,0]
        ####
    
        #Intermittency profile

        Dv = np.empty((len(threslist),9),dtype=np.double)
    
        epsilon = 2**np.linspace(0,8,9)
        for ithres,thres in enumerate(threslist):
            sp = UnivariateSpline(np.log10(epsilon),
                                  np.log10(box_counting_vol[ithres,:9]),
                                  k=5)
            for j in range(len(epsilon)):
                Dv[ithres,j] = sp.derivatives(np.log10(epsilon[j]))[1]
    
        Ds = np.empty((len(threslist),9),dtype=np.double)
        epsilon = 2**np.linspace(0,8,9)
        for ithres,thres in enumerate(threslist):
            sp = UnivariateSpline(np.log10(epsilon),
                                  np.log10(box_counting_sur[ithres,:9]),
                                  k=5)
            for j in range(len(epsilon)):
                Ds[ithres,j] = sp.derivatives(np.log10(epsilon[j]))[1]
    
        gamma = np.array([r[0] for r in Results_a[stage,2:]])
        genusv = np.array([r[1] for r in Results_a[stage,2:]])
        genuss = np.array([r[3] for r in Results_a[stage,2:]])
        
        if stage==1:
Example #21
    def get_Paz(self, az_data, R_data, jns):
        """
        Computes probability of line of sight acceleration at projected R : P(az|R)
        """

        # Under construction !!!

        # Return P(az|R)

        az_data = abs(az_data) # Consider only positive values

        # Assumes units of az [m/s^2] if self.G ==0.004302, else models units
        # Conversion factor from [pc (km/s)^2/Msun] -> [m/s^2]
        az_fac = 1./3.0857e10 if (self.G==0.004302) else 1

        if (R_data < self.rt):
            nz = self.nstep                   # Number of z value equal to number of r values
            zt = sqrt(self.rt**2 - R_data**2) # maximum z value at R

            z = numpy.logspace(log10(self.r[1]), log10(zt), nz)

            spl_Mr = UnivariateSpline(self.r, self.mc, s=0, ext=1)  # Spline for enclosed mass

            r = sqrt(R_data**2 + z**2)                        # Local r array
            az = self.G*spl_Mr(r)*z/r**3                     # Acceleration along los
            az[-1] = self.G*spl_Mr(self.rt)*zt/self.rt**3    # Ensure non-zero final data point

            az *= az_fac # convert to [m/s^2]
            az_spl = UnivariateSpline(z, az, k=4, s=0, ext=1) # 4th order needed to find max (can be done easier?)

            zmax = az_spl.derivative().roots()  # z where az = max(az), can be done without 4th order spline?
            azt = az[-1]                        # acceleration at the max(z) = sqrt(r_t**2 - R**2)

            # Setup spline for rho(z)
            if jns == 0 and self.nmbin == 1:
                rho = self.rho
            else:
                rho = self.rhoj[jns]

            rho_spl = UnivariateSpline(self.r, rho, ext=1, s=0)
            rhoz = rho_spl(sqrt(z**2 + R_data**2))
            rhoz_spl = UnivariateSpline(z, rhoz, ext=1, s=0)

            # Now compute P(a_z|R)
            # There are 2 possibilities depending on R:
            #  (1) the maximum acceleration occurs within the cluster boundary, or
            #  (2) max(a_z) = a_z,t (this happens when R ~ r_t)

            nr, k = nz, 3 # bit of experimenting

            # Option (1): zmax < max(z)
            if len(zmax)>0:
                zmax = zmax[0] # Take first entry for the rare cases with multiple peaks
                # Set up 2 splines for the inverse z(a_z) for z < zmax and z > zmax
                z1 = numpy.linspace(z[0], zmax, nr)
                z2 = (numpy.linspace(zmax, z[-1], nr))[::-1] # Reverse z for ascending az

                z1_spl = UnivariateSpline(az_spl(z1), z1, k=k, s=0, ext=1)
                z2_spl = UnivariateSpline(az_spl(z2), z2, k=k, s=0, ext=1)

            # Option 2: zmax = max(z)
            else:
                zmax = z[-1]
                z1 = numpy.linspace(z[0], zmax, nr)
                z1_spl = UnivariateSpline(az_spl(z1), z1, k=k, s=0, ext=1)

            # Maximum acceleration along this los
            azmax = az_spl(zmax)

            # Now determine P(az_data|R)
            if (az_data < azmax):
                z1 = max([z1_spl(az_data), z[0]]) # first radius where az = az_data
                Paz = rhoz_spl(z1)/abs(az_spl.derivatives(z1)[1])

                if (az_data> azt):
                    # Find z where a_z = a_z,t
                    z2 = z2_spl(az_data)
                    Paz += rhoz_spl(z2)/abs(az_spl.derivatives(z2)[1])

                # Normalize to 1
                Paz /= rhoz_spl.integral(0, zt)
                self.z = z
                self.az = az
                self.Paz = Paz
                self.azmax = azmax
                self.zmax = zmax
            else:
                self.Paz = 0
        else:
            self.Paz = 0

        return
def read_cfd_data(cfd_data_file, u2, karr, dkarr):
    """read cfd results force coefficients data"""
    phi_spl = UnivariateSpline(karr[:, 0], karr[:, 4] * np.pi / 180, s=0)

    cf_array = []
    with open(cfd_data_file) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter='\t')
        line_count = 0

        for row in csv_reader:
            if line_count <= 14:
                line_count += 1
            else:
                ti = float(row[0])
                phii = phi_spl(ti)
                dphii = -1.0 * phi_spl.derivatives(ti)[1]
                cli = float(row[3])
                cdi = np.sign(dphii) * (np.sin(phii) * float(row[2]) +
                                        np.cos(phii) * float(row[1]))
                csi = np.cos(phii) * float(row[2]) - np.sin(phii) * float(
                    row[1])
                cf_array.append([
                    ti, cdi, csi, cli,
                    float(row[4]),
                    float(row[5]),
                    float(row[6])
                ])
                line_count += 1

        print(f'Processed {line_count} lines in {cfd_data_file}')

    cf_array = np.array(cf_array)
    cl_spl = UnivariateSpline(cf_array[:, 0], cf_array[:, 3], s=0)
    cd_spl = UnivariateSpline(cf_array[:, 0], cf_array[:, 1], s=0)
    cmx_spl = UnivariateSpline(cf_array[:, 0], cf_array[:, 6], s=0)
    cmy_spl = UnivariateSpline(cf_array[:, 0], -cf_array[:, 5], s=0)
    cmz_spl = UnivariateSpline(cf_array[:, 0], cf_array[:, 4], s=0)

    omegax_spl = UnivariateSpline(dkarr[:, 0], dkarr[:, 4] * np.pi / 180, s=0)
    omegay_spl = UnivariateSpline(
        dkarr[:, 0],
        np.multiply(dkarr[:, 5], np.cos(karr[:, 4] * np.pi / 180)) * np.pi /
        180,
        s=0)
    omegaz_spl = UnivariateSpline(
        dkarr[:, 0],
        np.multiply(dkarr[:, 5], np.sin(karr[:, 4] * np.pi / 180)) * np.pi /
        180,
        s=0)

    t_arr = cf_array[:, 0]
    cpx_arr = [-1 * cmx_spl(t) * omegax_spl(t) / u2 for t in t_arr]
    cpy_arr = [-1 * cmy_spl(t) * omegay_spl(t) / u2 for t in t_arr]
    cpz_arr = [-1 * cmz_spl(t) * omegaz_spl(t) / u2 for t in t_arr]

    cpx_spl = UnivariateSpline(t_arr, cpx_arr, s=0)
    cpy_spl = UnivariateSpline(t_arr, cpy_arr, s=0)
    cpz_spl = UnivariateSpline(t_arr, cpz_arr, s=0)

    #--------plot axes-------
    # plt.plot(t_arr, cpx_spl(t_arr), t_arr, cpy_spl(t_arr) + cpz_spl(t_arr))
    # plt.plot(t_arr, cpx_spl(t_arr), t_arr, cpy_spl(t_arr), t_arr,
    # cpz_spl(t_arr))
    # plt.legend(['x', 'y', 'z'], loc='best')
    # plt.xlim([4.0, 5.0])
    # plt.show()
    #------------------------
    mcl = cl_spl.integral(4.0, 5.0)
    mcd = cd_spl.integral(4.0, 5.0)
    mcp = cpx_spl.integral(4.0, 5.0) + cpy_spl.integral(
        4.0, 5.0) + cpz_spl.integral(4.0, 5.0)

    mcf_array = [mcl, mcd, mcp]

    return mcf_array
Example #23
mSI = []
mDer = []
m2Smo = []

for x in range(len(m2Raw)):
    r = m2Raw.iloc[x, :]
    rh = r.values.reshape(1, -1)
    regressor = LinearRegression()
    # plt.plot(xH, r)
    # plt.title(x)
    # plt.show()
    regressor.fit(xH, r)
    slo = regressor.coef_[0]
    intr = regressor.intercept_
    slInt = [slo, intr]
    s = UnivariateSpline(xBase, r, s=sf)
    sm = s(xBase)
    sd = s.derivatives(1)
    mSI.append(slInt)
    mDer.append(sd)
    m2Smo.append(sm)

plt.plot(xBase, r)
#regular spline - Model 2
smozip = zip(m2Smo)
df2 = pd.DataFrame(smozip)
df22 = pd.DataFrame(df2[0].values.tolist())
#Derivative spline
derzip = zip(mDer)
dfd = pd.DataFrame(derzip)
dfd2 = pd.DataFrame(dfd[0].values.tolist())
#Slope-Int Model
slzip = zip(mSI)
Example #24
    def estimate(self, observedLC):
        """!
        Estimate intrinsicFlux, period, eccentricity, omega, tau, & a2sini
        """
        # fluxEst
        if observedLC.numCadences > 50:
            model = gatspy.periodic.LombScargleFast(optimizer_kwds={"quiet": True}).fit(observedLC.t,
                                                                                        observedLC.y,
                                                                                        observedLC.yerr)
        else:
            model = gatspy.periodic.LombScargle(optimizer_kwds={"quiet": True}).fit(observedLC.t,
                                                                                    observedLC.y,
                                                                                    observedLC.yerr)
        periods, power = model.periodogram_auto(nyquist_factor=observedLC.numCadences)
        model.optimizer.period_range = (
            2.0*np.mean(observedLC.t[1:] - observedLC.t[:-1]), observedLC.T)
        periodEst = model.best_period
        numIntrinsicFlux = 100
        lowestFlux = np.min(observedLC.y[np.where(observedLC.mask == 1.0)])
        highestFlux = np.max(observedLC.y[np.where(observedLC.mask == 1.0)])
        intrinsicFlux = np.linspace(np.min(observedLC.y[np.where(observedLC.mask == 1.0)]), np.max(
            observedLC.y[np.where(observedLC.mask == 1.0)]), num=numIntrinsicFlux)
        intrinsicFluxList = list()
        totalIntegralList = list()
        for f in xrange(1, numIntrinsicFlux - 1):
            beamedLC = observedLC.copy()
            beamedLC.x = np.require(np.zeros(beamedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
            for i in xrange(beamedLC.numCadences):
                beamedLC.y[i] = observedLC.y[i]/intrinsicFlux[f]
                beamedLC.yerr[i] = observedLC.yerr[i]/intrinsicFlux[f]
            dopplerLC = beamedLC.copy()
            dopplerLC.x = np.require(np.zeros(dopplerLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
            for i in xrange(observedLC.numCadences):
                dopplerLC.y[i] = math.pow(beamedLC.y[i], 1.0/3.44)
                dopplerLC.yerr[i] = (1.0/3.44)*math.fabs(dopplerLC.y[i]*(beamedLC.yerr[i]/beamedLC.y[i]))
            dzdtLC = dopplerLC.copy()
            dzdtLC.x = np.require(np.zeros(dopplerLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
            for i in xrange(observedLC.numCadences):
                dzdtLC.y[i] = 1.0 - (1.0/dopplerLC.y[i])
                dzdtLC.yerr[i] = math.fabs((-1.0*dopplerLC.yerr[i])/math.pow(dopplerLC.y[i], 2.0))
            foldedLC = dzdtLC.fold(periodEst)
            foldedLC.x = np.require(np.zeros(foldedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
            integralSpline = UnivariateSpline(
                foldedLC.t[np.where(foldedLC.mask == 1.0)], foldedLC.y[np.where(foldedLC.mask == 1.0)],
                1.0/foldedLC.yerr[np.where(foldedLC.mask == 1.0)], k=3, s=None, check_finite=True)
            totalIntegral = math.fabs(integralSpline.integral(foldedLC.t[0], foldedLC.t[-1]))
            intrinsicFluxList.append(intrinsicFlux[f])
            totalIntegralList.append(totalIntegral)
        fluxEst = intrinsicFluxList[
            np.where(np.array(totalIntegralList) == np.min(np.array(totalIntegralList)))[0][0]]

        # periodEst
        for i in xrange(beamedLC.numCadences):
            beamedLC.y[i] = observedLC.y[i]/fluxEst
            beamedLC.yerr[i] = observedLC.yerr[i]/fluxEst
            dopplerLC.y[i] = math.pow(beamedLC.y[i], 1.0/3.44)
            dopplerLC.yerr[i] = (1.0/3.44)*math.fabs(dopplerLC.y[i]*(beamedLC.yerr[i]/beamedLC.y[i]))
            dzdtLC.y[i] = 1.0 - (1.0/dopplerLC.y[i])
            dzdtLC.yerr[i] = math.fabs((-1.0*dopplerLC.yerr[i])/math.pow(dopplerLC.y[i], 2.0))
            if observedLC.numCadences > 50:
                model = gatspy.periodic.LombScargleFast(optimizer_kwds={"quiet": True}).fit(dzdtLC.t,
                                                                                            dzdtLC.y,
                                                                                            dzdtLC.yerr)
            else:
                model = gatspy.periodic.LombScargle(optimizer_kwds={"quiet": True}).fit(dzdtLC.t,
                                                                                        dzdtLC.y,
                                                                                        dzdtLC.yerr)
        periods, power = model.periodogram_auto(nyquist_factor=dzdtLC.numCadences)
        model.optimizer.period_range = (2.0*np.mean(dzdtLC.t[1:] - dzdtLC.t[:-1]), dzdtLC.T)
        periodEst = model.best_period

        # eccentricityEst & omega2Est
        # First find a full period going from rising to falling.
        risingSpline = UnivariateSpline(
            dzdtLC.t[np.where(dzdtLC.mask == 1.0)], dzdtLC.y[np.where(dzdtLC.mask == 1.0)],
            1.0/dzdtLC.yerr[np.where(dzdtLC.mask == 1.0)], k=3, s=None, check_finite=True)
        risingSplineRoots = risingSpline.roots()
        firstRoot = risingSplineRoots[0]
        if risingSpline.derivatives(risingSplineRoots[0])[1] > 0.0:
            tRising = risingSplineRoots[0]
        else:
            tRising = risingSplineRoots[1]
        # Now fold the LC starting at tRising and going for a full period.
        foldedLC = dzdtLC.fold(periodEst, tStart=tRising)
        foldedLC.x = np.require(np.zeros(foldedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        # Fit the folded LC with a spline to figure out alpha and beta
        fitLC = foldedLC.copy()
        foldedSpline = UnivariateSpline(
            foldedLC.t[np.where(foldedLC.mask == 1.0)], foldedLC.y[np.where(foldedLC.mask == 1.0)],
            1.0/foldedLC.yerr[np.where(foldedLC.mask == 1.0)], k=3, s=2*foldedLC.numCadences,
            check_finite=True)
        for i in xrange(fitLC.numCadences):
            fitLC.x[i] = foldedSpline(fitLC.t[i])
        # Now get the roots and find the falling root
        tZeros = foldedSpline.roots()

        # Find tRising, tFalling, tFull, startIndex, & stopIndex via DBSCAN #######################
        # Find the number of clusters
        '''dbsObj = DBSCAN(eps = periodEst/10.0, min_samples = 1)
        db = dbsObj.fit(tZeros.reshape(-1,1))
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        labels = db.labels_
        unique_labels = set(labels)
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)'''

        # Find tRising, tFalling, tFull, startIndex, & stopIndex
        if tZeros.shape[0] == 1:  # We have found just tFalling
            tFalling = tZeros[0]
            tRising = fitLC.t[0]
            startIndex = 0
            tFull = fitLC.t[-1]
            stopIndex = fitLC.numCadences
        elif tZeros.shape[0] == 2:  # We have found tFalling and one of tRising or tFull
            if foldedSpline.derivatives(tZeros[0])[1] < 0.0:
                tFalling = tZeros[0]
                tFull = tZeros[1]
                stopIndex = np.where(fitLC.t < tFull)[0][-1]
                tRising = fitLC.t[0]
                startIndex = 0
            elif foldedSpline.derivatives(tZeros[0])[1] > 0.0:
                if foldedSpline.derivatives(tZeros[1])[1] < 0.0:
                    tRising = tZeros[0]
                    startIndex = np.where(fitLC.t > tRising)[0][0]
                    tFalling = tZeros[1]
                    tFull = fitLC.t[-1]
                    stopIndex = fitLC.numCadences
                else:
                    raise RuntimeError(
                        'Could not determine alpha & omega correctly because the first root is rising but \
                        the second root is not falling!')
        elif tZeros.shape[0] == 3:
            tRising = tZeros[0]
            startIndex = np.where(fitLC.t > tRising)[0][0]
            tFalling = tZeros[1]
            tFull = tZeros[2]
            stopIndex = np.where(fitLC.t < tFull)[0][-1]
        else:
            # More than 3 roots!!! Use K-Means to cluster the roots assuming we have 3 groups
            root_groups = KMeans(n_clusters=3).fit_predict(tZeros.reshape(-1, 1))
            RisingGroupNumber = root_groups[0]
            FullGroupNumber = root_groups[-1]
            RisingSet = set(root_groups[np.where(root_groups != RisingGroupNumber)[0]])
            FullSet = set(root_groups[np.where(root_groups != FullGroupNumber)[0]])
            FallingSet = RisingSet.intersection(FullSet)
            FallingGroupNumber = FallingSet.pop()
            numRisingRoots = np.where(root_groups == RisingGroupNumber)[0].shape[0]
            numFallingRoots = np.where(root_groups == FallingGroupNumber)[0].shape[0]
            numFullRoots = np.where(root_groups == FullGroupNumber)[0].shape[0]

            if numRisingRoots == 1:
                tRising = tZeros[np.where(root_groups == RisingGroupNumber)[0]][0]
            else:
                RisingRootCands = tZeros[np.where(root_groups == RisingGroupNumber)[0]]
                for i in xrange(RisingRootCands.shape[0]):
                    if foldedSpline.derivatives(RisingRootCands[i])[1] > 0.0:
                        tRising = RisingRootCands[i]
                        break

            if numFallingRoots == 1:
                tFalling = tZeros[np.where(root_groups == FallingGroupNumber)[0]][0]
            else:
                FallingRootCands = tZeros[np.where(root_groups == FallingGroupNumber)[0]]
                for i in xrange(FallingRootCands.shape[0]):
                    if foldedSpline.derivatives(FallingRootCands[i])[1] < 0.0:
                        tFalling = FallingRootCands[i]
                        break

            if numFullRoots == 1:
                tFull = tZeros[np.where(root_groups == FullGroupNumber)[0]][0]
            else:
                FullRootCands = tZeros[np.where(root_groups == FullGroupNumber)[0]]
                for i in xrange(FullRootCands.shape[0]):
                    if foldedSpline.derivatives(FullRootCands[i])[1] > 0.0:
                        tFull = FullRootCands[i]
                        break
            startIndex = np.where(fitLC.t > tRising)[0][0]
            stopIndex = np.where(fitLC.t < tFull)[0][-1]
        #

        # One full period now goes from tRising to periodEst. The maxima occurs between tRising and tFalling
        # while the minima occurs between tFalling and tRising + periodEst. Find the minima and maxima
        alpha = math.fabs(fitLC.x[np.where(np.max(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]])
        beta = math.fabs(fitLC.x[np.where(np.min(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]])
        peakLoc = fitLC.t[np.where(np.max(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]]
        troughLoc = fitLC.t[np.where(np.min(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]]
        KEst = 0.5*(alpha + beta)
        delta2 = (math.fabs(foldedSpline.integral(tRising, peakLoc)) + math.fabs(
            foldedSpline.integral(troughLoc, tFull)))/2.0
        delta1 = (math.fabs(foldedSpline.integral(peakLoc, tFalling)) + math.fabs(
            foldedSpline.integral(tFalling, troughLoc)))/2.0
        eCosOmega2 = (alpha - beta)/(alpha + beta)
        eSinOmega2 = ((2.0*math.sqrt(alpha*beta))/(alpha + beta))*((delta2 - delta1)/(delta2 + delta1))
        eccentricityEst = math.sqrt(math.pow(eCosOmega2, 2.0) + math.pow(eSinOmega2, 2.0))
        tanOmega2 = math.fabs(eSinOmega2/eCosOmega2)
        if (eCosOmega2/math.fabs(eCosOmega2) == 1.0) and (eSinOmega2/math.fabs(eSinOmega2) == 1.0):
            omega2Est = math.atan(tanOmega2)*(180.0/math.pi)
        if (eCosOmega2/math.fabs(eCosOmega2) == -1.0) and (eSinOmega2/math.fabs(eSinOmega2) == 1.0):
            omega2Est = 180.0 - math.atan(tanOmega2)*(180.0/math.pi)
        if (eCosOmega2/math.fabs(eCosOmega2) == -1.0) and (eSinOmega2/math.fabs(eSinOmega2) == -1.0):
            omega2Est = 180.0 + math.atan(tanOmega2)*(180.0/math.pi)
        if (eCosOmega2/math.fabs(eCosOmega2) == 1.0) and (eSinOmega2/math.fabs(eSinOmega2) == -1.0):
            omega2Est = 360.0 - math.atan(tanOmega2)*(180.0/math.pi)
        if omega2Est >= 180.0:
            omega1Est = omega2Est - 180.0
        if omega2Est < 180.0:
            omega1Est = omega2Est + 180.0

        # tauEst
        zDot = KEst*(1.0 + eccentricityEst)*(eCosOmega2/eccentricityEst)
        zDotLC = dzdtLC.copy()
        for i in xrange(zDotLC.numCadences):
            zDotLC.y[i] = zDotLC.y[i] - zDot
        zDotSpline = UnivariateSpline(
            zDotLC.t[np.where(zDotLC.mask == 1.0)], zDotLC.y[np.where(zDotLC.mask == 1.0)],
            1.0/zDotLC.yerr[np.where(zDotLC.mask == 1.0)], k=3, s=2*zDotLC.numCadences, check_finite=True)
        for i in xrange(zDotLC.numCadences):
            zDotLC.x[i] = zDotSpline(zDotLC.t[i])
        zDotZeros = zDotSpline.roots()
        zDotFoldedLC = dzdtLC.fold(periodEst)
        zDotFoldedSpline = UnivariateSpline(
            zDotFoldedLC.t[np.where(zDotFoldedLC.mask == 1.0)],
            zDotFoldedLC.y[np.where(zDotFoldedLC.mask == 1.0)],
            1.0/zDotFoldedLC.yerr[np.where(zDotFoldedLC.mask == 1.0)], k=3, s=2*zDotFoldedLC.numCadences,
            check_finite=True)
        for i in xrange(zDotFoldedLC.numCadences):
            zDotFoldedLC.x[i] = zDotFoldedSpline(zDotFoldedLC.t[i])
        tC = zDotFoldedLC.t[np.where(np.max(zDotFoldedLC.x) == zDotFoldedLC.x)[0][0]]
        nuC = (360.0 - omega2Est)%360.0
        tE = zDotFoldedLC.t[np.where(np.min(zDotFoldedLC.x) == zDotFoldedLC.x)[0][0]]
        nuE = (180.0 - omega2Est)%360.0
        if math.fabs(360.0 - nuC) < math.fabs(360 - nuE):
            tauEst = zDotZeros[np.where(zDotZeros > tC)[0][0]]
        else:
            tauEst = zDotZeros[np.where(zDotZeros > tE)[0][0]]
        tauEst = tauEst%periodEst

        # a2sinInclinationEst
        a2sinInclinationEst = ((KEst*periodEst*self.Day*self.c*math.sqrt(1.0 - math.pow(
            eccentricityEst, 2.0)))/self.twoPi)/self.Parsec

        return fluxEst, periodEst, eccentricityEst, omega1Est, tauEst, a2sinInclinationEst
Beispiel #25
0
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from scipy.interpolate import UnivariateSpline
from scipy.optimize import fsolve

f = interpolate.interp1d(x, y, kind="cubic")

xnew = np.arange(1, 3, 0.001)
ynew = f(xnew)

tab = []
for i in range(4):
    tab.append(0)
i = 1

plt.plot(tab)  # draw the OX axis
plt.plot(x, y, "o", xnew, ynew, '-')
plt.show()

# Compute the zeros of the function
x0 = fsolve(f, [1.15])
x1 = fsolve(f, [2.2])
x2 = fsolve(f, [2.7])
print("x0 =" + str(x0))
print("x1 =" + str(x1))
print("x2 =" + str(x2))

spl = UnivariateSpline(xnew, ynew, k=1, s=2)
# The code below plots the same function as "plt.plot(x, y, 'o', xnew, ynew, '-')"
# spl.set_smoothing_factor(0.1)
# plt.plot(xnew, spl(xnew), 'g', lw=2)
# plt.show()

# Estimate the derivative at the point 2.1
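# For a spline of order k, spl.derivatives(x0) returns the value and all derivatives up to order k
# at x0 (here k=1, so it returns [spl(2.1), spl'(2.1)]).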
print(spl.derivatives(2.1))
import numpy as np
#from sklearn import decomposition
import matplotlib

matplotlib.use("cairo")
import matplotlib.pyplot as plt

plt.style.use('ggplot')  # Use the ggplot style
from scipy.interpolate import UnivariateSpline

x = np.linspace(0.0, 1.0, 150)
y = 2 * np.sin((x + 0.5) * 5) / (x + 0.1)
s = UnivariateSpline(x, y, k=3)

print(s.get_coeffs())
derivs = s.derivatives(0.0)
print(derivs)

xs = np.linspace(0.0, 1.0, 1000)
ys = s(xs)

ys2 = derivs[0] + xs * derivs[1] + np.power(xs, 2) * derivs[2] / 2 + np.power(
    xs, 3) * derivs[3] / 6
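# ys2 above rebuilds the spline near x = 0 from its Taylor expansion: derivs[n] is the n-th
# derivative of s at 0, so ys2 = sum_n derivs[n] * x**n / n!, truncated at the cubic term.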

plt.plot(x, y, '.-', label='data')
plt.plot(xs, ys, zorder=10, label='smoothed')
plt.plot(xs, ys2, alpha=0.5, zorder=0, label='taylor')
plt.ylim([-6, 12])
plt.legend()

plt.savefig("spline_test.pdf")
Beispiel #27
0
    def miller(self, psinorm=None, rova=None, omt_factor=None):
        """
        Calculate Miller quantities.

        Args:
            psinorm (float, default 0.8):
                psi-norm for Miller quantities (overrides rova)
            rova (float, default None):
                r/a for Miller quantities (ignored if psinorm evaluates to True)
            omt_factor (float, default 0.2):
                sets omt = omp * omt_factor
                and omn = omp * (1-omt_factor)

        Returns:
            dictionary with Miller quantities and reference values
            for GENE simulation
        """

        output = {}

        if psinorm is None and rova is None:
            psinorm = 0.8

        if omt_factor is None:
            omt_factor = 0.2

        # r and psi splines
        psi_grid_spl = US(self.r_minor_fs, self.psi_grid, k=self.io, s=self.s)
        r_min_spl = US(self.psi_grid, self.r_minor_fs, k=self.io, s=self.s)

        if psinorm:
            print('using `psinorm`, ignoring `rova`')
            self.psinorm = psinorm
            psi_poi = self.psinorm * (self.psisep - self.psiax) + self.psiax
            r_poi = r_min_spl(psi_poi)[()]
            self.rova = r_poi / self.a_lcfs
        else:
            print('using `rova`, ignoring `psinorm`')
            self.rova = rova
            r_poi = self.rova * self.a_lcfs  # r = r/a * a; FS minor radius
            psi_poi = psi_grid_spl(r_poi)[()]  # psi at FS
            self.psinorm = (psi_poi - self.psiax) / (self.psisep - self.psiax
                                                     )  # psi-norm at FS

        output['gfile'] = self.gfile
        output['psinorm'] = self.psinorm
        output['rova'] = self.rova

        R0_spl = US(self.r_minor_fs, self.R_major_fs, k=self.io, s=self.s)
        R0_poi = R0_spl(r_poi)[()]  # R_maj of FS
        q_spl = US(self.r_minor_fs, self.qpsi_fs, k=self.io, s=self.s)
        #q_spl_psi = US(self.psi_grid,   self.qpsi_fs,    k=self.io, s=self.s)
        q_poi = q_spl(r_poi)[()]
        drdpsi_poi = float(r_min_spl.derivatives(psi_poi)[1])
        eps_poi = r_poi / R0_poi
        if not self.quiet:
            print('\n*** Flux surface ***')
            print('r_min/a = {:.3f}'.format(self.rova))
            print('psinorm = {:.3f}'.format(self.psinorm))
            print('r_min = {:.3f} m'.format(r_poi))
            print('R_maj = {:.3f} m'.format(R0_poi))
            print('eps = {:.3f}'.format(eps_poi))
            print('q = {:.3f}'.format(q_poi))
            print('psi = {:.3e} Wb/rad'.format(psi_poi))
            print('dr/dpsi = {:.3g} m/(Wb/rad)'.format(drdpsi_poi))

        F_spl = US(self.r_minor_fs, self.F_fs, k=self.io, s=self.s)
        F_poi = F_spl(r_poi)[()]  # F of FS
        Bref_poi = F_poi / R0_poi
        p_spl = US(self.r_minor_fs, self.p_fs, k=self.io, s=self.s)
        p_poi = p_spl(r_poi)[()]
        t_poi = self.ti
        n_poi = p_poi / (t_poi * 1e3 * 1.602e-19) / 1e19
        output['Lref'] = self.a_lcfs
        output['Bref'] = Bref_poi
        output['pref'] = p_poi
        output['Tref'] = t_poi
        output['nref'] = n_poi
        beta = 403.e-5 * n_poi * self.ti / (Bref_poi**2)
        coll = 2.3031e-5 * (24-np.log(np.sqrt(n_poi*1e13)/(1e3*t_poi))) \
                    * self.a_lcfs * n_poi / (t_poi**2)
        output['beta'] = beta
        output['coll'] = coll
        if not self.quiet:
            print('\n*** Reference values ***')
            print('Lref = {:.3g} m ! for Lref=a convention'.format(
                self.a_lcfs))
            print('Bref = {:.3g} T'.format(Bref_poi))
            print('pref = {:.3g} Pa'.format(p_poi))
            print('Tref = {:.3g} keV'.format(t_poi))
            print('nref = {:.3g} 1e19/m^3'.format(n_poi))
            print('beta = {:.3g}'.format(beta))
            print('coll = {:.3g}'.format(coll))

        pprime_spl = US(self.r_minor_fs, self.pprime_fs, k=self.io, s=1e-4)
        pprime_poi = pprime_spl(r_poi)[()]
        pm_poi = Bref_poi**2 / (2 * 4 * np.pi * 1e-7)  # magnetic pressure Bref**2 / (2*mu0)
        dpdr_poi = pprime_poi / drdpsi_poi
        dpdx_pm = -dpdr_poi / pm_poi
        omp_poi = -(self.a_lcfs / p_poi) * dpdr_poi
        output['dpdx_pm'] = dpdx_pm
        output['omp'] = omp_poi
        if not self.quiet:
            print('\n*** Pressure gradients ***')
            print('dp/dpsi      = {:.3g} Pa/(Wb/rad)'.format(pprime_poi))
            print('p_m = Bref**2/(2mu0) = {:.3g} Pa'.format(pm_poi))
            print('-(dp/dr)/p_m      = {:.3g} 1/m'.format(dpdx_pm))
            print('omp = a/p * dp/dr = {:.3g} ! with Lref=a'.format(omp_poi))

        omt = omp_poi * omt_factor
        omn = omp_poi * (1 - omt_factor)
        output['omt'] = omt
        output['omt_factor'] = omt_factor
        output['omn'] = omn
        if not self.quiet:
            print('\n*** Temp/dens gradients ***')
            print('omt_factor = {:.3g}'.format(omt_factor))
            print('omt = a/T * dT/dr = {:.3g}'.format(omt))
            print('omn = a/n * dn/dr = {:.3g}'.format(omn))

        sgstart = self.nw // 10
        subgrid = np.arange(sgstart, self.nw)
        nsg = subgrid.size

        # calc symmetric R/Z on psi/theta grid
        kappa = np.empty(nsg)
        delta = np.empty(nsg)
        zeta = np.empty(nsg)
        drR = np.empty(nsg)
        amhd = np.empty(nsg)
        bp = np.empty(nsg)
        bt = np.empty(nsg)
        b = np.empty(nsg)
        theta_tmp = np.linspace(-2. * np.pi, 2 * np.pi, 2 * self.ntheta - 1)
        stencil_width = self.ntheta // 10
        for i, isg in enumerate(subgrid):
            R_extended = np.empty(2 * self.ntheta - 1)
            Z_extended = np.empty(2 * self.ntheta - 1)
            R_extended[0:(self.ntheta - 1) //
                       2] = self.R_ftgrid[isg, (self.ntheta + 1) // 2:-1]
            R_extended[(self.ntheta - 1) // 2:(3 * self.ntheta - 3) //
                       2] = self.R_ftgrid[isg, :-1]
            R_extended[(3 * self.ntheta - 3) //
                       2:] = self.R_ftgrid[isg, 0:(self.ntheta + 3) // 2]
            Z_extended[0:(self.ntheta - 1) //
                       2] = self.Z_ftgrid[isg, (self.ntheta + 1) // 2:-1]
            Z_extended[(self.ntheta - 1) // 2:(3 * self.ntheta - 3) //
                       2] = self.Z_ftgrid[isg, :-1]
            Z_extended[(3 * self.ntheta - 3) //
                       2:] = self.Z_ftgrid[isg, 0:(self.ntheta + 3) // 2]
            theta_mod_ext = np.arctan2(Z_extended - self.Z_avg_fs[isg],
                                       R_extended - self.R_major_fs[isg])
            # introduce 2pi shifts to theta_mod_ext
            for ind in range(self.ntheta):
                if theta_mod_ext[ind + 1] < 0. \
                and theta_mod_ext[ind] > 0. \
                and abs(theta_mod_ext[ind + 1] - theta_mod_ext[ind]) > np.pi:
                    lshift_ind = ind
                if theta_mod_ext[-ind - 1] > 0. \
                and theta_mod_ext[-ind] < 0. \
                and abs(theta_mod_ext[-ind - 1] - theta_mod_ext[-ind]) > np.pi:
                    rshift_ind = ind
            theta_mod_ext[-rshift_ind:] += 2. * np.pi
            theta_mod_ext[:lshift_ind + 1] -= 2. * np.pi
            theta_int = interp1d(theta_mod_ext, theta_tmp, kind=self.io)
            R_int = interp1d(theta_mod_ext, R_extended, kind=self.io)
            Z_int = interp1d(theta_mod_ext, Z_extended, kind=self.io)
            R_tm = R_int(self.theta_grid)
            Z_tm = Z_int(self.theta_grid)

            Z_sym = 0.5 * (Z_tm[:] - Z_tm[::-1]) + self.Z_avg_fs[isg]
            R_sym = 0.5 * (R_tm[:] + R_tm[::-1])
            delta_ul = np.empty(2)
            for o in range(2):
                if o:
                    ind = np.argmax(Z_sym)
                    section = np.arange(ind + stencil_width // 2,
                                        ind - stencil_width // 2, -1)
                else:
                    ind = np.argmin(Z_sym)
                    section = np.arange(ind - stencil_width // 2,
                                        ind + stencil_width // 2)
                x = R_sym[section]
                y = Z_sym[section]
                y_int = interp1d(x, y, kind=self.io)
                x_fine = np.linspace(np.amin(x), np.amax(x),
                                     stencil_width * 100)
                y_fine = y_int(x_fine)
                if o:
                    x_at_extremum = x_fine[np.argmax(y_fine)]
                    Z_max = np.amax(y_fine)
                else:
                    x_at_extremum = x_fine[np.argmin(y_fine)]
                    Z_min = np.amin(y_fine)
                delta_ul[o] = (self.R_major_fs[isg] - x_at_extremum) \
                    / self.r_minor_fs[isg]
            kappa[i] = (Z_max - Z_min) / 2. / self.r_minor_fs[isg]
            delta[i] = delta_ul.mean()
            # calc zeta
            zeta_arr = np.empty(4)
            for o in range(4):
                if o == 0:
                    val = np.pi / 4
                    searchval = np.cos(val + np.arcsin(delta[i]) / np.sqrt(2))
                    searcharr = (R_sym -
                                 self.R_major_fs[isg]) / self.r_minor_fs[isg]
                elif o == 1:
                    val = 3 * np.pi / 4
                    searchval = np.cos(val + np.arcsin(delta[i]) / np.sqrt(2))
                    searcharr = (R_sym -
                                 self.R_major_fs[isg]) / self.r_minor_fs[isg]
                elif o == 2:
                    val = -np.pi / 4
                    searchval = np.cos(val - np.arcsin(delta[i]) / np.sqrt(2))
                    searcharr = (R_sym -
                                 self.R_major_fs[isg]) / self.r_minor_fs[isg]
                elif o == 3:
                    val = -3 * np.pi / 4
                    searchval = np.cos(val - np.arcsin(delta[i]) / np.sqrt(2))
                    searcharr = (R_sym -
                                 self.R_major_fs[isg]) / self.r_minor_fs[isg]
                else:
                    raise ValueError('out of range')
                if o in [0, 1]:
                    searcharr2 = searcharr[self.ntheta // 2:]
                    ind = self._find(searchval, searcharr2) + self.ntheta // 2
                else:
                    searcharr2 = searcharr[0:self.ntheta // 2]
                    ind = self._find(searchval, searcharr2)
                section = np.arange(ind - stencil_width // 2,
                                    ind + stencil_width // 2)
                theta_sec = self.theta_grid[section]
                if o in [0, 1]:
                    theta_int = interp1d(-searcharr[section],
                                         theta_sec,
                                         kind=self.io)
                    theta_of_interest = theta_int(-searchval)
                else:
                    theta_int = interp1d(searcharr[section],
                                         theta_sec,
                                         kind=self.io)
                    theta_of_interest = theta_int(searchval)
                Z_sec = Z_sym[section]
                Z_sec_int = interp1d(theta_sec, Z_sec, kind=self.io)
                Z_val = Z_sec_int(theta_of_interest)
                zeta_arg = (Z_val - self.Z_avg_fs[isg]
                            ) / kappa[i] / self.r_minor_fs[isg]
                if abs(zeta_arg) >= 1:
                    zeta_arg = 0.999999 * np.sign(zeta_arg)
                zeta_arr[o] = np.arcsin(zeta_arg)
            zeta_arr[1] = np.pi - zeta_arr[1]
            zeta_arr[3] = -np.pi - zeta_arr[3]
            zeta[i] = 0.25 * (np.pi + zeta_arr[0] - zeta_arr[1] - zeta_arr[2] +
                              zeta_arr[3])
            # calc dr/dR, amhd, and derivs
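            # amhd appears to be the ballooning parameter alpha = -q**2 * R0 * (2*mu0/Bref**2) * dp/dr,
            # with dp/dr obtained as pprime (= dp/dpsi) divided by dr/dpsi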
            amhd[i] = -self.qpsi_fs[isg]**2 * self.R_major_fs[isg] * self.pprime_fs[isg] * \
                8 * np.pi * 1e-7 / Bref_poi**2 / \
                r_min_spl.derivatives(self.psi_grid[isg])[1]
            drR[i] = R0_spl.derivatives(self.r_minor_fs[isg])[1]
            R = self.R_major_fs[isg] + self.r_minor_fs[isg]
            Z = self.Z_avg_fs[isg]
            Br = -self.psi_spl(Z, R, dx=1, dy=0) / R
            Bz = self.psi_spl(Z, R, dx=0, dy=1) / R
            bp[i] = np.sqrt(Br**2 + Bz**2)
            bt[i] = self.F_fs[isg] / R
            b[i] = np.sqrt(bp[i]**2 + bt[i]**2)

        amhd_spl = US(self.r_minor_fs[sgstart:], amhd, k=self.io, s=1e-3)
        drR_spl = US(self.r_minor_fs[sgstart:], drR, k=self.io, s=self.s)
        b_spl = US(self.psinorm_grid[sgstart:], b, k=self.io, s=self.s)

        # calc derivatives for kappa, delta, zeta
        s_kappa = np.empty(nsg)
        s_delta = np.empty(nsg)
        s_zeta = np.empty(nsg)
        kappa_spl = US(self.r_minor_fs[sgstart:], kappa, k=self.io, s=self.s)
        delta_spl = US(self.r_minor_fs[sgstart:], delta, k=self.io, s=self.s)
        zeta_spl = US(self.r_minor_fs[sgstart:], zeta, k=self.io, s=self.s)
        for i, isg in enumerate(subgrid):
            s_kappa[i] = kappa_spl.derivatives(self.r_minor_fs[isg])[1] \
                                 * self.r_minor_fs[isg] / kappa[i]
            s_delta[i] = delta_spl.derivatives(self.r_minor_fs[isg])[1] \
                                    * self.r_minor_fs[isg] / np.sqrt(1 - delta[i]**2)
            s_zeta[i] = zeta_spl.derivatives(self.r_minor_fs[isg])[1] \
                                * self.r_minor_fs[isg]
        output['trpeps'] = eps_poi
        output['q0'] = q_poi
        output['shat'] = (r_poi / q_poi) * q_spl.derivatives(r_poi)[1]
        output['amhd'] = amhd_spl(r_poi)[()]
        output['drR'] = drR_spl(r_poi)[()]
        output['kappa'] = kappa_spl(r_poi)[()]
        output['s_kappa'] = kappa_spl.derivatives(
            r_poi)[1] * r_poi / kappa_spl(r_poi)[()]
        output['delta'] = delta_spl(r_poi)[()]
        output['s_delta'] = delta_spl.derivatives(r_poi)[1] * r_poi \
                                        / np.sqrt(1 - delta_spl(r_poi)[()]**2)
        output['zeta'] = zeta_spl(r_poi)[()]
        output['s_zeta'] = zeta_spl.derivatives(r_poi)[1] * r_poi
        output['minor_r'] = 1.0
        output['major_R'] = R0_poi / self.a_lcfs

        if not self.quiet:
            print(
                '\n\nShaping parameters for flux surface r=%9.5g, r/a=%9.5g:' %
                (r_poi, self.rova))
            print('Copy the following block into a GENE parameters file:\n')
            print('trpeps  = %9.5g' % (eps_poi))
            print('q0      = %9.5g' % q_poi)
            print('shat    = %9.5g !(defined as r/q*dq_dr)' % (r_poi / q_poi \
                                     * q_spl.derivatives(r_poi)[1]))
            print('amhd    = %9.5g' % amhd_spl(r_poi))
            print('drR     = %9.5g' % drR_spl(r_poi))
            print('kappa   = %9.5g' % kappa_spl(r_poi))
            print('s_kappa = %9.5g' % (kappa_spl.derivatives(r_poi)[1] \
                                       * r_poi / kappa_spl(r_poi)))
            print('delta   = %9.5g' % delta_spl(r_poi))
            print('s_delta = %9.5g' % (delta_spl.derivatives(r_poi)[1] \
                                       * r_poi / np.sqrt(1 - delta_spl(r_poi)**2)))
            print('zeta    = %9.5g' % zeta_spl(r_poi))
            print('s_zeta  = %9.5g' % (zeta_spl.derivatives(r_poi)[1] * r_poi))
            print('minor_r = %9.5g' % (1.0))
            print('major_R = %9.5g' % (R0_poi / self.a_lcfs))

        if self.plot:
            plt.figure(figsize=(6, 8))
            plt.subplot(4, 2, 1)
            plt.plot(self.psinorm_grid[sgstart:], kappa)
            plt.title('Elongation')
            plt.ylabel(r'$\kappa$', fontsize=14)

            plt.subplot(4, 2, 2)
            plt.plot(self.psinorm_grid[sgstart:], s_kappa)
            plt.title(r'$r/\kappa*(d\kappa/dr)$')
            plt.ylabel(r'$s_\kappa$', fontsize=14)

            plt.subplot(4, 2, 3)
            plt.plot(self.psinorm_grid[sgstart:], delta)
            plt.title('Triangularity')
            plt.ylabel(r'$\delta$', fontsize=14)

            plt.subplot(4, 2, 4)
            plt.plot(self.psinorm_grid[sgstart:], s_delta)
            plt.title(r'$r/\delta*(d\delta/dr)$')
            plt.ylabel(r'$s_\delta$', fontsize=14)

            plt.subplot(4, 2, 5)
            plt.plot(self.psinorm_grid[sgstart:], zeta)
            plt.title('Squareness')
            plt.ylabel(r'$\zeta$', fontsize=14)

            plt.subplot(4, 2, 6)
            plt.plot(self.psinorm_grid[sgstart:], s_zeta)
            plt.title(r'$r/\zeta*(d\zeta/dr)$')
            plt.ylabel(r'$s_\zeta$', fontsize=14)

            plt.subplot(4, 2, 7)
            plt.plot(self.psinorm_grid[sgstart:], b)
            plt.plot(self.psinorm_grid[sgstart:], bp)
            plt.plot(self.psinorm_grid[sgstart:], bt)
            plt.title('|B|')
            plt.ylabel(r'|B|', fontsize=14)

            plt.subplot(4, 2, 8)
            plt.plot(self.psinorm_grid[sgstart:],
                     b_spl(self.psinorm_grid[sgstart:], nu=1))
            plt.title('|B| deriv.')
            plt.ylabel(r'$d|B|/d\Psi_N$', fontsize=14)

            for ax in plt.gcf().axes:
                ax.set_xlabel(r'$\Psi_N$', fontsize=14)
                ax.axvline(self.psinorm, 0, 1, ls='--', color='k', lw=2)
            plt.tight_layout(pad=0.3)

        return output
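
# Minimal usage sketch for the method above (assumption: 'eq' is an instance of the equilibrium
# class that defines miller(); the class itself is not shown in this example):
#   params = eq.miller(psinorm=0.8, omt_factor=0.2)
#   print(params['q0'], params['shat'], params['kappa'], params['delta'])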
Beispiel #28
0
def scale_up_function(x,
                      y,
                      method=3,
                      smoothness=1.0,
                      bound_low=None,
                      bound_up=None,
                      auto_bound=1.3,
                      intervention_end=None,
                      intervention_start_date=None):
    """
    Given a set of points defined by x and y,
    this function fits a cubic spline and returns the interpolated function

    Args:
        x: The independent variable at each observed point
        y: The dependent variable at each observed point
        method: Select an interpolation method. Methods 1, 2 and 3 use cubic interpolation defined step by step.
            1: Uses the derivative at each point as a constraint --> see description of the encapsulated function
            derivatives(x, y). At each step, the defined portion of the curve covers the interval [x_i, x_(i+1)]
            2: Less constrained interpolation, as the curve does not necessarily pass through every point.
            At each step, the defined portion of the curve covers the interval [x_i, x_(i+2)] except when the derivative
            should be 0 at x_(i+1). In this case, the covered interval is [x_i, x_(i+1)]
            3: The curve has to pass through every point. At each step, the defined portion of the curve covers
            the interval [x_i, x_(i+1)], but x_(i+2) is still used to obtain the fit on [x_i, x_(i+1)]
            4: Uses sigmoidal curves. This is a generalisation of the function make_two_step_curve
            5: Uses an adaptive algorithm producing either an interpolation or an approximation, depending on the value
            of the smoothness. This method allows for consideration of limits that are defined through bound_low and
            bound_up. See detailed description of this method in the code after the line "if method == 5:"
        smoothness, bound_up, bound_low, auto_bound are only used when method=5
        smoothness: Defines the level of smoothness of the curve. The minimum value is 0.0 and yields an interpolation.
        bound_low: Defines a potential lower bound that the curve should not go below.
        bound_up: Defines a potential upper bound that the curve should not exceed.
        auto_bound: In the absence of bound_up or bound_low, sets a strip in which the curve should be contained.
                    Its value is a multiplier that applies to the amplitude of y to determine the width of the
                    strip. Set equal to None to delete this constraint.
        intervention_end: tuple or list of two elements defining the final time and the final level corresponding to a
                    potential intervention. If it is not None, an additional portion of curve will be added to define
                    the intervention through a sinusoidal function
        intervention_start_date: If not None, defines the date at which the intervention should start (must be >= max(x)).
                    If None, the maximal value of x will be used as the start date for the intervention. If the argument
                    'intervention_end' is not defined, this argument is irrelevant and will not be used
    Returns:
        interpolation function
    """
    assert len(x) == len(y), 'x and y must have the same length'
    x = [float(i) for i in x]
    y = [float(i) for i in y]

    x = np.array(x)
    y = np.array(y)

    # Check that every x_i is unique
    assert len(x) == len(set(x)), 'There are duplicate values in x.'

    # Make sure the arrays are ordered
    order = x.argsort()
    x = x[order]
    y = y[order]

    # Define a scale-up for a potential intervention
    if intervention_end is not None:
        if intervention_start_date is not None:
            assert intervention_start_date >= max(
                x), 'The intervention start date should be >= max(x)'
            t_intervention_start = intervention_start_date
        else:
            t_intervention_start = max(x)
        curve_intervention = scale_up_function(
            x=[t_intervention_start, intervention_end[0]],
            y=[y[-1], intervention_end[1]],
            method=4)

    if (len(x) == 1) or (max(y) - min(y) == 0):

        def curve(t):
            if intervention_end is not None:
                if t >= t_intervention_start:
                    return curve_intervention(t)
                else:
                    return y[-1]
            else:
                return y[-1]

        return curve

    def derivatives(x, y):
        """
        Defines the slopes at each data point
        Should be 0 for the first and last points (x_0 and x_n)
        Should be 0 when the variation is changing (U or n shapes), i.e. when (y_i - y_(i-1))*(y_i - y_(i+1)) > 0

        If the variation is not changing, the slope is defined by a linear combination of the two slopes measured
        between the point and its two neighbours. This combination is weighted according to the distance with the
        neighbours.
        This is only relevant when method=1
        """
        v = np.zeros(len(x))  # Initialises all zeros
        for i in range(len(x))[1:-1]:  # For each interior point
            if (y[i] - y[i - 1]) * (y[i] -
                                    y[i + 1]) < 0:  # Not changing variation
                slope_left = (y[i] - y[i - 1]) / (x[i] - x[i - 1])
                slope_right = (y[i + 1] - y[i]) / (x[i + 1] - x[i])
                w = (x[i] - x[i - 1]) / (x[i + 1] - x[i - 1])
                v[i] = w * slope_right + (1 - w) * slope_left
        return v

    if method in (1, 2, 3):
        # Normalise x to avoid too big or too small numbers when taking x**3
        coef = np.exp(np.log10(max(x)))
        x = x / coef

    vel = derivatives(x, y)  # Obtain derivatives conditions
    m = np.zeros(
        (len(x) - 1, 4)
    )  # To store the polynomial coefficients for each section [x_i, x_(i+1)]

    if method == 1:
        for i in range(1, len(x)):
            x0, x1 = x[i - 1], x[i]
            g = np.array([
                [x0**3, x0**2, x0, 1],
                [x1**3, x1**2, x1, 1],
                [3 * x0**2, 2 * x0, 1, 0],
                [3 * x1**2, 2 * x1, 1, 0],
            ])
            # Bound conditions:  f(x0) = y0   f(x1) = y1  f'(x0) = v0  f'(x1) = v1
            d = np.array([y[i - 1], y[i], vel[i - 1], vel[i]])
            m[i - 1, :] = np.linalg.solve(g, d)
    elif method == 2:
        pass_next = 0
        for i in range(len(x))[0:-1]:
            if pass_next == 1:  # When = 1, the next section [x_(i+1), x_(i+2)] is already defined
                pass_next = 0
            else:
                x0 = x[i]
                y0 = y[i]

                # Define left velocity condition
                if vel[i] == 0:
                    v = 0
                else:
                    # Get former polynomial to get left velocity condition. Derivative has to be continuous
                    p = m[i - 1, :]
                    v = 3 * p[0] * x0**2 + 2 * p[1] * x0 + p[2]

                if vel[i + 1] == 0:  # Define only one section
                    x1 = x[i + 1]
                    y1 = y[i + 1]
                    g = np.array([
                        [x0**3, x0**2, x0, 1],
                        [x1**3, x1**2, x1, 1],
                        [3 * x0**2, 2 * x0, 1, 0],
                        [3 * x1**2, 2 * x1, 1, 0],
                    ])
                    # Bound conditions: f(x0) = y0  f(x1) = y1   f'(x0) = v0  f'(x1) = 0
                    d = np.array([y0, y1, v, 0])
                    m[i, :] = np.linalg.solve(g, d)
                elif vel[i + 2] == 0:  # defines two sections
                    x1, x2 = x[i + 1], x[i + 2]
                    y1, y2 = y[i + 1], y[i + 2]
                    g = np.array([
                        [x0**3, x0**2, x0, 1],
                        [x2**3, x2**2, x2, 1],
                        [3 * x0**2, 2 * x0, 1, 0],
                        [3 * x2**2, 2 * x2, 1, 0],
                    ])
                    # Bound conditions: f(x0) = y0  f(x2) = y2   f'(x0) = v0  f'(x2) = 0
                    d = np.array([y0, y2, v, 0])
                    sol = np.linalg.solve(g, d)
                    m[i, :] = sol
                    m[i + 1, :] = sol
                    pass_next = 1
                else:  # v1 and v2 are not null. We define two sections
                    x1, x2 = x[i + 1], x[i + 2]
                    y1, y2 = y[i + 1], y[i + 2]
                    g = np.array([
                        [x0**3, x0**2, x0, 1],
                        [x1**3, x1**2, x1, 1],
                        [x2**3, x2**2, x2, 1],
                        [3 * x0**2, 2 * x0, 1, 0],
                    ])
                    # Bound conditions: f(x0) = y0  f(x1) = y1 f(x2) = y2   f'(x0) = v0
                    d = np.array([y0, y1, y2, v])
                    sol = np.linalg.solve(g, d)
                    m[i, :] = sol
                    m[i + 1, :] = sol
                    pass_next = 1

    elif method == 3:
        pass_next = 0
        for i in range(len(x))[0:-1]:
            if pass_next == 1:  # When = 1, the next section [x_(i+1), x_(i+2)] is already defined
                pass_next = 0
            else:
                x0 = x[i]
                y0 = y[i]

                # Define left velocity condition
                if vel[i] == 0:
                    v = 0
                else:
                    # Get former polynomial to get left velocity condition
                    p = m[i - 1, :]
                    v = 3 * p[0] * x0**2 + 2 * p[1] * x0 + p[2]

                if vel[i + 1] == 0:
                    x1 = x[i + 1]
                    y1 = y[i + 1]
                    g = np.array([
                        [x0**3, x0**2, x0, 1],
                        [x1**3, x1**2, x1, 1],
                        [3 * x0**2, 2 * x0, 1, 0],
                        [3 * x1**2, 2 * x1, 1, 0],
                    ])

                    # Bound conditions: f(x0) = y0  f(x1) = y1   f'(x0) = v0  f'(x1) = 0
                    d = np.array([y0, y1, v, 0])
                    m[i, :] = np.linalg.solve(g, d)
                else:
                    x1, x2 = x[i + 1], x[i + 2]
                    y1, y2 = y[i + 1], y[i + 2]
                    g = np.array([
                        [x0**3, x0**2, x0, 1],
                        [x1**3, x1**2, x1, 1],
                        [x2**3, x2**2, x2, 1],
                        [3 * x0**2, 2 * x0, 1, 0],
                    ])
                    # Bound conditions: f(x0) = y0  f(x1) = y1  f(x2) = y2  f'(x0) = v0
                    d = np.array([y0, y1, y2, v])
                    m[i, :] = np.linalg.solve(g, d)

    elif method == 4:
        functions = [[] for j in range(len(x))[0:-1]
                     ]  # Initialises an empty list to store functions
        for i in range(len(x))[0:-1]:
            y_high = y[i + 1]
            y_low = y[i]
            x_start = x[i]
            x_inflect = 0.5 * (x[i] + x[i + 1])
            func = make_sigmoidal_curve(y_high=y_high,
                                        y_low=y_low,
                                        x_start=x_start,
                                        x_inflect=x_inflect,
                                        multiplier=4)
            functions[i] = func

        def curve(t):
            if t <= min(x):  # t is before the range defined by x -> takes the initial value
                return y[0]
            elif t >= max(x):  # t is after the range defined by x -> takes the last value
                if intervention_end is not None:
                    if t >= t_intervention_start:
                        return curve_intervention(t)
                    else:
                        return y[-1]
                else:
                    return y[-1]
            else:  # t is in the range defined by x
                index_low = len(x[x <= t]) - 1
                func = functions[index_low]
                return func(t)

        return curve

    elif method == 5:
        """
        This method produces an approximation (or interpolation when smoothness=0.0) using cubic splines. When the
        arguments 'bound_low' or 'bound_up' are not None, the approximation is constrained so that the curve does not
        cross the bounds.
        A low smoothness value will provide a curve that passes close to every point but leads to more variation changes
        and a greater curve energy. Set this argument equal to 0.0 to obtain an interpolation.
        A high smoothness value will provide a very smooth curve but its distance to certain points may be large.
        We use the following approach:
        1. We create a curve (f) made of cubic spline portions that approximates/interpolates the data, without any
            constraints
        2. We modify the initial and final sections of the curve in order to have:
            a. Perfect hits at the extreme points
            b. Null gradients at the extreme points
        3. If bounds are defined, we detect potential sections where they are violated and we use the following method
            to adjust the fit:
            3.1. We detect the narrowest interval [x_i, x_j] which contains the violating section
            3.2. We fit a cubic spline (g) verifying the following conditions:
                a. g(x_i) = f(x_i)  and  g(x_j) = f(x_j)
                b. g'(x_i) = f'(x_i)  and  g'(x_j) = f'(x_j)
            3.3. If g still presents irregularities, we repeat the same process from 3.1. on the interval [x_(i-1), x_j]
                or [x_i, x_(j+1)], depending on which side of the interval led to an issue
        """

        # Calculate an appropriate smoothness (adjusted by smoothness)
        rmserror = 0.05
        s = smoothness * len(x) * (rmserror * np.fabs(y).max())**2
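        # e.g. (assumed illustration): with 10 data points, max |y| = 1 and smoothness = 1.0,
        # s = 1.0 * 10 * (0.05 * 1)**2 = 0.025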

        # Get rid of first elements when they have same y (same for last elements) as there is no need for fitting
        ind_start = 0
        while y[ind_start] == y[ind_start + 1]:
            ind_start += 1

        ind_end = len(x) - 1
        while y[ind_end] == y[ind_end - 1]:
            ind_end -= 1

        x = x[ind_start:(ind_end + 1)]
        y = y[ind_start:(ind_end + 1)]

        k = min(3, len(x) - 1)

        w = np.ones(len(x))
        w[0] = 5.
        w[-1] = 5.

        f = UnivariateSpline(x, y, k=k, s=s, ext=3,
                             w=w)  # Create a first raw approximation

        # Shape the initial and final parts of the curve in order to get null gradients and to hit the external points
        x0 = x[0]
        x1 = x[1]
        x_f = x[-1]
        x_a = x[-2]

        v1 = f.derivatives(x1)[1]  # slope of the raw spline at the second knot
        v_a = f.derivatives(x_a)[1]  # slope of the raw spline at the second-to-last knot

        g = np.array([[x0**3, x0**2, x0, 1], [x1**3, x1**2, x1, 1],
                      [3 * x0**2, 2 * x0, 1, 0], [3 * x1**2, 2 * x1, 1, 0]])
        d_init = np.array([y[0], f(x1), 0, v1])
        a_init = np.linalg.solve(g, d_init)
        a_init = a_init[::-1]  # Reverse
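        # (after reversing, a_init[0] is the constant term, matching the
        #  a[0] + a[1]*t + a[2]*t**2 + a[3]*t**3 evaluation used further below)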

        h = np.array([[x_a**3, x_a**2, x_a, 1], [x_f**3, x_f**2, x_f, 1],
                      [3 * x_a**2, 2 * x_a, 1, 0], [3 * x_f**2, 2 * x_f, 1,
                                                    0]])
        d_f = np.array([f(x_a), y[-1], v_a, 0])
        a_f = np.linalg.solve(h, d_f)
        a_f = a_f[::-1]  # Reverse

        # We have to make sure that the obtained fits do not go over/under the bounds
        cut_off_dict = {}

        amplitude = auto_bound * (max(y) - min(y))
        if bound_low is None:
            if auto_bound is not None:
                bound_low = 0.5 * (max(y) + min(y)) - 0.5 * amplitude
        if bound_up is None:
            if auto_bound is not None:
                bound_up = 0.5 * (max(y) + min(y)) + 0.5 * amplitude

        if (bound_low is not None) or (bound_up is not None):
            # We adjust the data so that no values go over/under the bounds
            if bound_low is not None:
                for i in range(len(x) - 1):
                    if y[i] < bound_low:
                        y[i] = bound_low

            if bound_up is not None:
                for i in range(len(x)):
                    if y[i] > bound_up:
                        y[i] = bound_up

            # Check bounds
            def cut_off(index, bound_low, bound_up, sign):
                if sign == -1:
                    bound = bound_low
                else:
                    bound = bound_up

                x0 = x[index]
                if index == 0:
                    y0 = y[0]
                else:
                    y0 = f(x0)

                # Look for the next knot at which the spline is inside of the bounds
                go = 1
                k = 0
                while go == 1:
                    k += 1

                    if (index + k) == 0:
                        y_k = y[0]
                    elif (index + k) == len(x) - 2:
                        y_k = a_f[0] \
                              + a_f[1] * x[index + k] + a_f[2] * x[index + k] ** 2 + a_f[3] * x[index + k] ** 3
                    elif (index + k) == len(x) - 1:
                        y_k = y[-1]
                    else:
                        y_k = f(x[index + k])

                    if (y_k <= bound_up) and (y_k >= bound_low):
                        go = 0

                next_index = index + k

                x1 = x[next_index]
                y1 = f(x1)

                if y0 == y1:
                    x_peak = 0.5 * (x0 + x1)
                else:
                    if y0 == bound:
                        x_peak = x0
                    elif y1 == bound:
                        x_peak = x1
                    else:
                        # Weighted positioning of the contact with bound
                        x_peak = x0 + (abs(y0 - bound) /
                                       (abs(y0 - bound) + abs(y1 - bound))) * (
                                           x1 - x0)

                if index == 0:
                    v0 = 0
                else:
                    v0 = f.derivatives(x0)[1]

                if index == (len(x) - 2):
                    v1 = 0
                else:
                    v1 = f.derivatives(x1)[1]

                if x0 != x_peak:
                    g = np.array([[x0**3, x0**2, x0, 1],
                                  [x_peak**3, x_peak**2, x_peak, 1],
                                  [3. * x0**2, 2. * x0, 1, 0],
                                  [3. * x_peak**2, 2. * x_peak, 1, 0]])
                    d = np.array([y0, bound, v0, 0.0])
                    a1 = np.linalg.solve(g, d)
                    a1 = a1[::-1]  # Reverse

                if x1 != x_peak:
                    g = np.array([[x_peak**3, x_peak**2, x_peak, 1],
                                  [x1**3, x1**2, x1, 1],
                                  [3. * x_peak**2, 2. * x_peak, 1, 0],
                                  [3. * x1**2, 2. * x1, 1, 0]])
                    d = np.array([bound, y1, 0.0, v1])
                    a2 = np.linalg.solve(g, d)
                    a2 = a2[::-1]  # Reverse

                if x0 == x_peak:
                    a1 = a2
                if x1 == x_peak:
                    a2 = a1

                indice_first = index
                t1 = test_a(a1, x0, x_peak, x_peak, bound_low, bound_up)
                if t1 == 0:  # There is something wrong here
                    spare = get_spare_fit(index, x_peak, bound, 'left', f,
                                          cut_off_dict, bound_low, bound_up,
                                          a_init, a_f, x, y)
                    a1 = spare['a']
                    indice_first = index - spare['cpt']
                t2 = test_a(a2, x_peak, x1, x_peak, bound_low, bound_up)
                if t2 == 0:  # There is something wrong here
                    spare = get_spare_fit(index, x_peak, bound, 'right', f,
                                          cut_off_dict, bound_low, bound_up,
                                          a_init, a_f, x, y)
                    a2 = spare['a']
                    next_index = index + 1 + spare['cpt']

                out = {
                    'a1': a1,
                    'a2': a2,
                    'x_peak': x_peak,
                    'indice_first': indice_first,
                    'indice_next': next_index
                }
                return out

            t = x[0]

            while t < x[-1]:
                ok = 1
                if t == x[0]:
                    y_t = y[0]
                elif t < x[1]:
                    y_t = a_init[0] + a_init[1] * t + a_init[
                        2] * t**2 + a_init[3] * t**3
                elif t < x[-2]:
                    y_t = f(t)
                elif t == x[-1]:
                    y_t = y[-1]
                else:
                    y_t = a_f[0] + a_f[1] * t + a_f[2] * t**2 + a_f[3] * t**3

                if bound_low is not None:
                    if y_t < bound_low:
                        ok = 0
                        sign = -1.
                if bound_up is not None:
                    if y_t > bound_up:
                        ok = 0
                        sign = 1.

                if ok == 0:
                    indice = len(x[x < t]) - 1
                    out = cut_off(indice, bound_low, bound_up, sign)

                    for k in range(out['indice_first'], out['indice_next']):
                        cut_off_dict[k] = out
                    t = x[out['indice_next']]
                t += (x[-1] - x[0]) / 1000.

        def curve(t):
            t = float(t)
            y_t = 0
            if t <= x[0]:
                y_t = y[0]
            elif t > x[-1]:
                if intervention_end is not None:
                    if t >= t_intervention_start:
                        y_t = curve_intervention(t)
                    else:
                        y_t = y[-1]
                else:
                    y_t = y[-1]
            elif x[0] < t < x[1]:
                y_t = a_init[
                    0] + a_init[1] * t + a_init[2] * t**2 + a_init[3] * t**3
            elif x[-2] < t < x[-1]:
                y_t = a_f[0] + a_f[1] * t + a_f[2] * t**2 + a_f[3] * t**3
            else:
                y_t = f(t)

            if x[0] < t < x[-1]:
                indice = len(x[x < t]) - 1
                if indice in cut_off_dict.keys():
                    out = cut_off_dict[indice]
                    if t < out['x_peak']:
                        a = out['a1']
                    else:
                        a = out['a2']
                    y_t = a[0] + a[1] * t + a[2] * t**2 + a[3] * t**3

            if (bound_low is not None) and (t <= x[-1]):
                y_t = max(
                    (y_t, bound_low))  # Security check. Normally not needed
            if (bound_up is not None) and (t <= x[-1]):
                y_t = min(
                    (y_t, bound_up))  # Security check. Normally not needed

            return y_t

        return curve

    else:
        raise Exception('method ' + str(method) + ' does not exist.')

    def curve(t):
        t = t / coef
        if t <= x[0]:  # Constant before x[0]
            return y[0]
        elif t >= x[-1]:  # Constant after x[-1]
            if intervention_end is not None:
                if t >= t_intervention_start / coef:
                    return curve_intervention(t * coef)
                else:
                    return y[-1]
            else:
                return y[-1]
        else:
            index = len(x[x <= t]) - 1
            p = m[index, :]  # Corresponding coefficients
            y_t = p[0] * t**3 + p[1] * t**2 + p[2] * t + p[3]
            return y_t

    return curve
Beispiel #29
0
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline
from scipy.optimize import curve_fit


def Erep(E_el, E_DFT, x1, Rc):  # E_el : DFTB electronic energy
    # Scan candidate cutoffs; keep those where the DFT-DFTB difference is stationary and ~0
    liste_Rc = []
    Rcuts = np.linspace(x1, x2, 100000)
    for Rc in Rcuts:
        spX = UnivariateSpline(Rcuts, (E_DFT(Rcuts) - E_DFT(Rc)) -
                               (E_el(Rcuts) - E_el(Rc)),
                               k=4,
                               s=0)
        roots = spX.derivative(n=1).roots()
        if abs(spX(roots[0])) < 1E-7:
            liste_Rc.append(roots[0])
    Rc = np.median(liste_Rc)
    Rcmin = Rc * np.sqrt(3.0 / 2) / 2
    X = np.linspace(Rcmin, Rc, 200)  # x1 and x2: limits of the a0 range used in DFT
    fsp = UnivariateSpline(
        X, (E_DFT(X) - E_DFT(Rc)) - (E_el(X) - E_el(Rc)), k=5,
        s=0)  # DFT-DFTB difference (the energy used to compute Erep)
    plt.plot(X, E_DFT(X) - E_DFT(Rc), '--', label='DFT')
    plt.plot(X, E_el(X) - E_el(Rc), label='DFTB')
    plt.legend()
    plt.savefig('Repulsion1.png')
    plt.clf()
    a = np.sqrt(3.0) / 2  # rcut*a: cutoff radius of the pair function
    x = []
    fx = []

    def frep(n, frep_prev, rho_0):  # Erep for the 1st and 2nd neighbours
        return 1 / 8 * (fsp(a**n * rho_0) - 6 * frep_prev)
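    # The recursion above presumably assumes a bcc-like shell structure (8 first neighbours at a*r
    # and 6 second neighbours at r), i.e. fsp(r) = 8*frep(a*r) + 6*frep(r), solved for frep(a*r).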

    for rho_0 in np.linspace(a * Rc, Rc - 1e-10, 50):
        # Initialize
        if a * rho_0 > Rcmin:
            x.append(a * rho_0)
            fx.append(1 / 8 * fsp(rho_0))
        for n in np.arange(1, 10, 1):
            if a**(n + 1) * rho_0 > x1:
                x.append(rho_0 * a**(n + 1))
                fx.append(frep(n, fx[-1], rho_0))
            else:
                break
    plt.clf()
    plt.plot(x, fx, '-o', color='C3')
    plt.show()
    x_EOS = np.array(sorted(x))
    fx_EOS = np.array(sorted(fx, reverse=True))
    x = np.array(sorted(x)) / 0.5291777249  # Bohr
    fx = np.array(sorted(fx, reverse=True)) / 13.605698066  # Ry
    NPoints = 120
    sp1 = UnivariateSpline(x, fx, k=4, s=0)
    X = np.linspace(x[0], x[-1], NPoints)
    interval = (x[-1] - x[0]) / (NPoints - 1)
    sp2 = UnivariateSpline(X, sp1(X), k=4, s=0)  # To obtain the derivatives...
    # Reconstruction EOS
    for val in np.linspace(np.sqrt(3) / 2 * Rc, Rc, 20):
        x_EOS = np.append(x_EOS, val)
        fx_EOS = np.append(fx_EOS, 0)
    spx_eos = UnivariateSpline(x_EOS, fx_EOS, k=5, s=0)

    # 2 - Repulsive equation for x < x1 (exp(-a1 x + a2)+a3)
    def func(x, alpha, beta, gamma):
        return np.exp(-alpha * x + beta) + gamma

    coeff = (sp2(X[1]) - sp2(X[0])) / (X[1] - X[0])
    res, cov = curve_fit(func,
                         X[0:5],
                         sp2(X[0:5]), [1 / X[0], X[0], -X[0] / sp2(X[0])],
                         maxfev=1000000)
    # 3 - Write in the proper format
    Repulsion = ""
    Repulsion = Repulsion + ("{} {}\n".format(len(X), X[-1]))
    Repulsion = Repulsion + "{} {} {}\n".format(*res)
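    # Each data line holds r, r + interval and the Taylor coefficients f, f', f''/2, f'''/6 of the
    # repulsive spline on that interval (presumably the spline block of a DFTB .skf file)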
    for i, vec in enumerate(X):
        if i < len(X) - 1:
            Repulsion = Repulsion + "{0:.8f} {1:.8f} {2:.10f} {3:.10f} {4:.10f} {5:.10f}\n".format(
                vec, vec + interval,
                sp2.derivatives(vec)[0],
                sp2.derivatives(vec)[1],
                sp2.derivatives(vec)[2] / 2,
                sp2.derivatives(vec)[3] / 6)
        else:
            Repulsion = Repulsion + "{0:.8f} {1:.8f} {2:.10f} {3:.10f} {4:.10f} {5:.10f} {6} {7} \n".format(
                vec, vec,
                sp2.derivatives(vec)[0],
                sp2.derivatives(vec)[1],
                sp2.derivatives(vec)[2] / 2,
                sp2.derivatives(vec)[3] / 6, 0, 0)
    # Final verification
    plt.clf()
    plt.plot(X, sp2(X), 'o')
    Xe = np.linspace(X[0] - 0.2, X[0] + 0.2, 10)
    plt.plot(Xe, func(Xe, *res))
    plt.savefig('Erep_skf.png')
    plt.clf()
    return (Repulsion)
Beispiel #30
0
                             k=interpol_order,
                             s=1e-5)
zeta_spl = UnivariateSpline(r_avg[psi_stencil], zeta, k=interpol_order, s=1e-5)
amhd = np.empty(pw, dtype=float)
amhd_Miller = np.empty(pw, dtype=float)
Vprime = np.empty(pw, dtype=float)
dV_dr = np.empty(pw, dtype=float)
V = np.empty(pw, dtype=float)
V_manual = np.empty(pw, dtype=float)
r_FS = np.empty(pw, dtype=float)
for i in psi_stencil:
    imod = i - psi_stencil[0]
    Vprime[imod] = np.abs(
        np.sum(qpsi[i] * R_sym[imod]**2 / F[i]) * 4 * np.pi**2 / ntheta)
    dV_dr[imod] = np.abs(np.sum(qpsi[i]*R_sym[imod] ** 2/F[i])*4*np.pi ** 2/ntheta)/ \
                  ravg_spl.derivatives(linpsi[i])[1]
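    # dV/dr is obtained from dV/dpsi (Vprime) via the chain rule: dV/dr = (dV/dpsi) / (dr/dpsi)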
    #    V[imod]=trapz(Vprime[:imod+1],linpsi[psi_stencil])
    r_FS[imod] = np.average(np.sqrt((R_sym[imod] - R0[i])**2 +
                                    (Z_sym[imod] - Z_avg[i])**2),
                            weights=qpsi[i] * R_sym[imod]**2 / F[i])
    amhd[imod] = -qpsi[i] ** 2*R0[i]*pprime[i]*8*np.pi*1e-7/Bref_miller ** 2/ \
                 ravg_spl.derivatives(linpsi[i])[1]
    #    amhd_Miller[imod]=-2*Vprime[imod]/(2*pi)**2*(V[imod]/2/pi**2/R0[i])**0.5*4e-7*pi*pprime[i]
    dq_dr_avg[imod] = q_spl.derivatives(r_avg[i])[1]
    dq_dpsi[imod] = q_spl_psi.derivatives(linpsi[i])[1]
    drR[imod] = R0_spl.derivatives(r_avg[i])[1]
    drZ[imod] = Z0_spl.derivatives(r_avg[i])[1]
    s_kappa[imod] = kappa_spl.derivatives(r_avg[i])[1] * r_avg[i] / kappa[imod]
    s_delta[imod] = delta_spl.derivatives(
        r_avg[i])[1] * r_avg[i] / np.sqrt(1 - delta[imod]**2)
    s_zeta[imod] = zeta_spl.derivatives(r_avg[i])[1] * r_avg[i]
Beispiel #31
0
    plt.ylabel('y')
    plt.grid()
    plt.legend()
#Cubic spline
print('Cubic Spline')
SP_coeffs = UnivariateSpline(t, y, s=0.0001)
plt.figure()
plt.plot(t, y, 'ok', label='Data Points')
plt.plot(T, SP_coeffs(T), label='Cubic Spline')
plt.title('Cubic Spline')
plt.xlabel('t')
plt.ylabel('y')
plt.grid()
plt.legend()
for z in range(len(t)):
    dx_val[z] = SP_coeffs.derivatives(t[z])[1]
print('dx_val =', dx_val)
#smoothing spline
print('Smoothing Spline')
K = np.array([1, 2, 4, 5])
for k1 in K:
    print('k =', k1)
    SP_coeffs = interpolate.UnivariateSpline(t, y, k=k1, s=0.0001)
    for z in range(len(t)):
        dx_val[z] = SP_coeffs.derivatives(t[z])[1]
    print('dx_val =', dx_val)
    plt.figure()
    plt.plot(t, y, 'ok', label='Data Points')
    plt.plot(T, SP_coeffs(T), label='Smoothing Spline, k = %s' % k1)
    plt.title('Smoothing Spline, k = %s' % k1)
    plt.xlabel('t')