Example #1
    def setup_spline(self, spline_options=None):
        """
        Setup of the options for the spline interpolation

        Args:
            spline_options (dict): Options for cubic spline. For example,
                {"saddle_point": "zero_slope"} forces the slope at the saddle to
                be zero.
        """
        self.spline_options = spline_options
        relative_energies = self.energies - self.energies[0]
        if scipy_old_piecewisepolynomial:
            if self.spline_options:
                raise RuntimeError('Option for saddle point not available with '
                                   'old scipy implementation')
            self.spline = PiecewisePolynomial(
                self.r, np.array([relative_energies, -self.forces]).T,
                orders=3)
        else:
            # New scipy implementation for scipy > 0.18.0
            if self.spline_options.get('saddle_point', '') == 'zero_slope':
                imax = np.argmax(relative_energies)
                self.spline = CubicSpline(x=self.r[:imax + 1],
                                          y=relative_energies[:imax + 1],
                                          bc_type=((1, 0.0), (1, 0.0)))
                cspline2 = CubicSpline(x=self.r[imax:], y=relative_energies[imax:],
                                       bc_type=((1, 0.0), (1, 0.0)))
                self.spline.extend(c=cspline2.c, x=cspline2.x[1:])
            else:
                self.spline = CubicSpline(x=self.r, y=relative_energies,
                                          bc_type=((1, 0.0), (1, 0.0)))
Example #2
def spline(x, y):
    return CubicSpline(x, y, bc_type='periodic', extrapolate='periodic')
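Note that bc_type='periodic' requires the first and last y-values to match, and extrapolate='periodic' makes evaluation outside the data range wrap around; a minimal standalone check:

import numpy as np
from scipy.interpolate import CubicSpline

x = np.linspace(0, 2 * np.pi, 9)
y = np.cos(x)  # first and last values coincide, as 'periodic' requires
f = CubicSpline(x, y, bc_type='periodic', extrapolate='periodic')
print(np.allclose(f(0.5), f(0.5 + 2 * np.pi)))  # True: evaluation wraps around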
Example #3
# Draw a prism
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline
from matplotlib import cm

#Create the profile
Radii = [1, 1.5, 1, 0.8, 1.3, 0.6, 0.5]
Zradii = [0, 1, 5, 10, 12, 14, 16]

radius = CubicSpline(Zradii, Radii, bc_type=((1, 0.5), (1, 0.0)))

# Make data
thetarange = np.linspace(0, 2 * np.pi, 100)
zrange = np.linspace(min(Zradii), max(Zradii), 100)
X = [radius(z) * np.cos(thetarange) for z in zrange]
Y = [radius(z) * np.sin(thetarange) for z in zrange]
Z = np.array([[z] for z in zrange])

# Plot the surface
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-2, 2)
ax.set_ylim3d(-2, 2)
ax.set_zlim3d(0, 20)
ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)

#Plot the circles
for zz in Zradii:
    XX = radius(zz) * np.cos(thetarange)
Example #4
desired_list = [1, 3, 5]

# alpha = [0.5,0.75,1.0,1.0,0.75,0.5]
# color = ['r','r','r','b','b','b']
color = ['r', 'r', 'r']
alpha = [1.0, 0.66, 0.33]

shape = np.shape(data)
plt.close('all')
plt.figure()
count = 0
for i in range(1, shape[1]):
    if i in desired_list:

        #print(i)
        cs = CubicSpline(data[:, 0] / np.max(data[:, 0]), data[:, i])
        integral = cs.integrate(0, 1)

        if i == 1:
            oreloc = np.where(data[:, i] == np.max(data[:, i]))[0][0]

        x = np.linspace(data[oreloc, 0] / np.max(data[:, 0]) - 0.5,
                        data[oreloc, 0] / np.max(data[:, 0]) + 0.5, 1000)
        #print(x)
        xplot = np.linspace(0, 1, 1000)
        y = []
        for xi in x:
            if xi > 1:
                xi = xi - 1
            # #print(xi)
            y.append(cs(xi) / integral)
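The integrate call above returns the definite integral of the fitted spline, so dividing by it normalizes each curve to unit area; a small sketch of that method on synthetic data:

import numpy as np
from scipy.interpolate import CubicSpline

x = np.linspace(0.0, 1.0, 11)
cs = CubicSpline(x, x**2)
area = cs.integrate(0.0, 1.0)  # ~1/3, the exact integral of x^2 on [0, 1]
print(cs(0.5) / area)          # value of the area-normalized curve at x = 0.5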
Example #5
 def test_periodic_eval(self):
     x = np.linspace(0, 2 * np.pi, 10)
     y = np.cos(x)
     S = CubicSpline(x, y, bc_type='periodic')
     assert_almost_equal(S(1), S(1 + 2 * np.pi), decimal=15)
Example #6
def crank_nicholson_value(right: OptionRight, ex_style: ExerciseStyle,
                          strike: float, fwd_price: float, sigma: float,
                          r: float, time_to_expiry: float, n: int,
                          n_times: int, std_devs: float) -> float:
    dz = 2.0 * std_devs / (n - 1.0)
    dt = time_to_expiry / (n_times - 1.0)

    def diffusion_matrices():
        a = 1.0 / (dz * dz)

        m1 = np.zeros((n, n), float)
        np.fill_diagonal(m1[1:n - 1], a / 2)
        np.fill_diagonal(m1[1:n - 1, 1:], 2.0 / dt - a)
        np.fill_diagonal(m1[1:, 2:], a / 2)
        m1[0, 0] = 1.0
        m1[n - 1, n - 1] = 1.0

        m2 = np.zeros((n, n), float)
        np.fill_diagonal(m2[1:n - 1], -a / 2)
        np.fill_diagonal(m2[1:n - 1, 1:], 2.0 / dt + a)
        np.fill_diagonal(m2[1:, 2:], -a / 2)
        m2[0, 0] = 1.0
        m2[n - 1, n - 1] = 1.0

        return m1, m2

    zs = np.arange(-std_devs, std_devs + dz / 2.0, dz)
    z0 = zs[0]
    zn = zs[n - 1]

    (m1, m2) = diffusion_matrices()

    def diffuse(vec, next_low_value, next_high_value):
        v1 = np.matmul(m1, vec)
        v2 = np.linalg.solve(m2, v1) * exp(-r * dt)
        v2[0] = next_low_value
        v2[n - 1] = next_high_value
        return v2

    def price(z, t):
        p = strike * exp(z * sigma - 0.5 * sigma * sigma * t)
        return p

    def undiscounted_bs(z, t):
        return black_scholes(right, strike, price(z, t), sigma,
                             time_to_expiry - t)

    def intrinsic(z, t):
        return intrinsic_value(right, strike, price(z, t))

    def lower_bound(t):
        return intrinsic(z0, t)

    def upper_bound(t):
        return intrinsic(zn, t)

    # This is the vector at time n_times - 2 - can use european values here
    def penultimate_value(z):
        return undiscounted_bs(z, time_to_expiry - dt)

    vec = penultimate_value(zs)

    # Now diffuse the remaining n_times - 2 steps
    for i_near_time in range(n_times - 3, -1, -1):
        t_near = i_near_time * dt
        vec = diffuse(vec, lower_bound(t_near), upper_bound(t_near))

        if ex_style == ExerciseStyle.AMERICAN:
            intrinsics = list(map(lambda z: intrinsic(z, t_near), zs))
            vec = np.maximum(vec, intrinsics)

    prices = list(map(lambda z: price(z, 0), zs))
    cs = CubicSpline(prices, vec)
    return float(cs(fwd_price))  # np.asscalar is removed in newer NumPy versions
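The diffuse step above solves a dense linear system at every time step even though m2 is tridiagonal; a banded solver is an equivalent, cheaper alternative. A hedged sketch on a small synthetic tridiagonal system (not the option-pricing code itself):

import numpy as np
from scipy.linalg import solve_banded

n, a = 5, 2.0

# The same tridiagonal matrix in banded form (rows: super-, main, sub-diagonal)
ab = np.zeros((3, n))
ab[0, 1:] = -a / 2
ab[1, :] = 1.0 + a
ab[2, :-1] = -a / 2
rhs = np.ones(n)
x_banded = solve_banded((1, 1), ab, rhs)

# Dense equivalent, solved the way diffuse() does it
m = (np.diag(np.full(n, 1.0 + a))
     + np.diag(np.full(n - 1, -a / 2), 1)
     + np.diag(np.full(n - 1, -a / 2), -1))
print(np.allclose(x_banded, np.linalg.solve(m, rhs)))  # True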
Example #7
def evalCubicSpline(t, x, cpts):
    Cubic = CubicSpline(x, cpts)
    return Cubic(t)
Example #8
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline

sly=np.genfromtxt("SLy.txt",delimiter="  ")
fps=np.genfromtxt("FPS.txt",delimiter="    ")

nbs=sly[:,1]
rhos=sly[:,2]
Ps=sly[:,3]

nbf=fps[:,1]
rhof=fps[:,2]
Pf=fps[:,3]

cPs=CubicSpline(rhos,Ps)
rmax=rhos[len(rhos)-1]
rmin=rhos[0]
rPs=np.linspace(rmin,rmax,1000)

fig=plt.figure()
ax=plt.gca()
ax.set_title(r"SLy Equation of State [$P(\rho)$]")
ax.set_xlabel(r"$\rho\: (g/cm^3)$")
ax.set_ylabel(r"$P\: (dyn/cm^2)$")
plt.plot(rhos,Ps,'k.',label="Data Points")
plt.plot(rPs,cPs(rPs),'k-',label="Cubic Spline Interpolation")
plt.legend(loc="best")
plt.savefig("SLy_P.png")
plt.close()
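CubicSpline objects also expose derivatives of the fit, which is convenient for quantities like dP/drho; a short sketch with stand-in data (not the SLy table):

import numpy as np
from scipy.interpolate import CubicSpline

rho = np.linspace(1.0, 10.0, 20)
P = rho**2                   # stand-in pressure data
cP = CubicSpline(rho, P)
dPdrho = cP.derivative()     # new spline representing dP/drho
print(dPdrho(5.0))           # ~10.0 for P = rho^2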
Example #9
    def interpolate(self, t_new):
        self.cs = CubicSpline(self.t, self.y)

        return self.cs(t_new)
Example #10
mint = {}
maxt = {}
for r in rotrates:
	mint[r] = rhot[r][18]
	maxt[r] = rhot[r][-1]

#Get even time sequences. They will be the same for both nu and gw data, and are thus named appropriately
t = {}
for r in rotrates:
	t[r] = arange(mint[r],maxt[r],dt[r])

#Now define splines
rhospline   = {}
Rpnsspline  = {}
Tpnsspline  = {}
for r in rotrates:
	rhospline[r]   = CubicSpline(  rhot[r][18:],  rho[r][18:])
	Rpnsspline[r]  = CubicSpline( Rpnst[r][18:], Rpns[r][18:])
	Tpnsspline[r]  = CubicSpline( Tpnst[r][18:], Tpns[r][18:])

#Get even signal sequences using the splines and the previously defined even time sequences. Append "c" to the name, meaning "clean".
rhoc   = {}
Rpnsc  = {}
Tpnsc  = {}
for r in rotrates:
	rhoc[r]   =   rhospline[r](t[r])
	Rpnsc[r]  =  Rpnsspline[r](t[r])
	Tpnsc[r]  =  Tpnsspline[r](t[r])

#Remove first part of the signal, up to WindowWidth prior to the bounce time. This keeps the data and spectrogram filesizes more manageable.
ww=WindowWidth*1e-3
for r in rotrates:
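The pattern above (spline each raw time series, then evaluate it on an evenly spaced time grid) can be sketched on its own with hypothetical data:

import numpy as np
from scipy.interpolate import CubicSpline

t_raw = np.sort(np.random.uniform(0.0, 1.0, 50))  # unevenly sampled times
sig_raw = np.sin(2 * np.pi * 5 * t_raw)

t_even = np.arange(t_raw[0], t_raw[-1], 0.01)     # even time sequence
sig_even = CubicSpline(t_raw, sig_raw)(t_even)    # "clean" resampled signal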
Example #11
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 14:21:23 2020

@author: Marius
"""

import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import CubicSpline

yfast = [0.293, 0.24, 0.216, 0.159, 0.082, 0.053, 0.097, 0.136]
xfast = [0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4]

# y(x)
cs = CubicSpline(xfast, yfast, bc_type="natural")
xmin = 0.000
xmax = 1.401
dx = 0.001
x = np.arange(xmin, xmax, dx)
Nx = len(x)
y = cs(x)
dy = cs(x, 1)
d2y = cs(x, 2)

# Plotting

# baneform = plt.figure('y(x)',figsize=(12,3))
# plt.plot(x,y,xfast,yfast,'*')
# plt.title('Banens form')
# plt.xlabel('$x$ (m)',fontsize=20)
Example #12
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline
import pandas as pd
plt.rcParams.update({'font.size': 14})

lattice40=pd.read_csv("data/lattice40", index_col=False, sep=r"\s+",names=["T","E","Cv","M","Chi","|M|"])
lattice60=pd.read_csv("data/lattice60", index_col=False, sep=r"\s+",names=["T","E","Cv","M","Chi","|M|"])
lattice80=pd.read_csv("data/lattice80", index_col=False, sep=r"\s+",names=["T","E","Cv","M","Chi","|M|"])
lattice100=pd.read_csv("data/lattice100", index_col=False, sep=r"\s+",names=["T","E","Cv","M","Chi","|M|"])

f_chi40 = CubicSpline(lattice40['T'], lattice40['Chi'])
f_chi60 = CubicSpline(lattice60['T'], lattice60['Chi'])
f_chi80 = CubicSpline(lattice80['T'], lattice80['Chi'])
f_chi100 = CubicSpline(lattice100['T'], lattice100['Chi'])

Tc40 = lattice40['T'].iloc[np.argmax(f_chi40(lattice40['T']))]
Tc60 = lattice60['T'].iloc[np.argmax(f_chi60(lattice60['T']))]
Tc80 = lattice80['T'].iloc[np.argmax(f_chi80(lattice80['T']))]
Tc100 = lattice100['T'].iloc[np.argmax(f_chi100(lattice100['T']))]

invL = np.linspace(0, 1./30, 100)
Tc = np.array([Tc100, Tc80, Tc60, Tc40])
invLc = np.array([1./100, 1./80, 1./60, 1./40])

# Make a linear fit using the critical temperatures
poly = np.polyfit(invLc, Tc, deg=1, full=True)

plt.figure()
plt.plot(lattice40['T'], f_chi40(lattice40['T']))
plt.plot(lattice60['T'], f_chi60(lattice60['T']))
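Note that f_chi40(lattice40['T']) only evaluates each spline at the original temperatures, so the argmax cannot fall between grid points; evaluating on a denser grid is the usual refinement. A hedged sketch with synthetic data:

import numpy as np
from scipy.interpolate import CubicSpline

T = np.linspace(2.0, 2.6, 13)
chi = np.exp(-(T - 2.27)**2 / 0.01)        # synthetic susceptibility peak
f = CubicSpline(T, chi)

T_fine = np.linspace(T[0], T[-1], 2001)
Tc = T_fine[np.argmax(f(T_fine))]          # peak can now land between data points
print(Tc)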
Example #13
def makeEmpHSS(t, rs, noHSS):
    # takes in t in days

    # fake v profile ---------------------------------------------------------------
    v_xc1 = 0.0613 * t**2 - 0.213 * t + 0.279
    v_xc2 = 0.279 * t - 0.298
    v_xb = 0.230 * t - 0.659
    v_xf = 0.277 * t - 0.147  # n_xp
    v_yp = 725 * 1e5
    v_yl = 385 * 1e5
    v = np.ones(len(rs)) * v_yl

    # identify zones
    idx1 = np.where((rs >= v_xb) & (rs <= v_xc1))
    idx2 = np.where((rs >= v_xc1) & (rs <= v_xc2))
    idx3 = np.where((rs >= v_xc2) & (rs <= v_xf))

    # zone 1 linear
    m1 = (v_yp - v_yl) / (v_xc1 - v_xb)
    v1 = v_yl + m1 * (rs - v_xb)
    v[idx1] = v1[idx1]
    # zone 2 flat
    v[idx2] = v_yp
    # zone 3 linear
    m3 = (v_yl - v_yp) / (v_xf - v_xc2)
    v3 = v_yp + m3 * (rs - v_xc2)
    v[idx3] = v3[idx3]

    # fake n profile ---------------------------------------------------------------
    n_xb = 0.143 * t - 0.371
    n_xp = 0.277 * t - 0.147
    n_yl = -0.0062 * t**2 + 0.0239 * t + 0.201
    n_yp = 0.461 * t + 0.158
    n_df = 2.676 * t**2 - 10.30 * t + 13.68
    scale_it = np.ones(len(rs))

    # identify zones
    idx1 = np.where((rs >= n_xb) & (rs <= (v_xc1)))[0]
    idx2 = np.where((rs >= (v_xc1)) & (rs <= (v_xc2)))[0]
    idx3 = np.where((rs >= (v_xc2)) & (rs <= (n_xp - 0.045)))[0]
    idx4 = np.where((rs >= (n_xp - 0.045)) & (rs <= n_xp))[0]
    idx5 = np.where(rs >= n_xp)[0]

    # zone 1 linear
    m1 = (1. - n_yl) / (v_xc1 - n_xb)
    scl1 = 1 - m1 * (rs - n_xb)
    scale_it[idx1] = scl1[idx1]
    # zone 2 linear
    m2 = n_yl / (v_xc2 - v_xc1)
    scl2 = n_yl + m2 * (rs - (v_xc1))
    scale_it[idx2] = scl2[idx2]
    # zone 3 linear
    m3 = (0.9 * (n_yp) - 2 * n_yl) / (n_xp - 0.045 - (v_xc2))
    scl3 = 2 * n_yl + m3 * (rs - (v_xc2))
    scale_it[idx3] = scl3[idx3]
    # zone 4 linear
    m4 = 0.1 * n_yp / 0.045
    scl4 = 0.9 * n_yp + m4 * (rs - n_xp + 0.045)
    scale_it[idx4] = scl4[idx4]
    # zone 5 exponential
    scl5 = 1 + (n_yp - 1) * np.exp(-n_df * (rs - n_xp))
    scale_it[idx5] = scl5[idx5]

    # scale the HSS free case
    n = scale_it * noHSS[0]

    # fake Br profile ---------------------------------------------------------------
    xp = 0.283 * t - 0.213
    yp = 0.0850 * t**2 + 0.0852 * t + 0.620
    xl = 0.307 * t - 0.862
    yl = -0.224 * t + 1.679
    xm = 0.342 * t - 0.757
    scale_it = np.ones(len(rs))

    # identify zones
    idx1 = np.where((rs >= (xl - (xm - xl))) & (rs <= xm))[0]
    idx2 = np.where(rs >= xm)[0]

    # zone 1 - 2nd order polynomial
    a2 = (1 - yl) / (xl - xm)**2
    a1 = -2 * a2 * xl
    a0 = yl + a2 * xl**2
    scl1 = a2 * rs**2 + a1 * rs + a0
    scale_it[idx1] = scl1[idx1]
    # zone 2 - normal dist
    scl2 = 1 + (yp - 1) * np.exp(-(rs - xp)**2 / ((xp - xm) / 2.5)**2)
    scale_it[idx2] = scl2[idx2]

    Br = scale_it * noHSS[2]

    # fake Bp profile ---------------------------------------------------------------
    xp = 0.253 * t - 0.0711
    yp = 0.0916 * t**2 - 0.185 * t + 1.054
    xm = 0.241 * t - 0.151
    xl = 0.0609 * t**2 - 0.281 * t + 0.581
    yl = -0.155 * t + 1.154
    scale_it = np.ones(len(rs))

    # identify zones
    idx1 = np.where((rs >= (xl - (xm - xl))) & (rs <= xm))[0]
    idx2 = np.where(rs >= xm)[0]

    # zone 1 - 2nd order polynomial
    a2 = (1 - yl) / (xl - xm)**2
    a1 = -2 * a2 * xl
    a0 = yl + a2 * xl**2
    scl1 = a2 * rs**2 + a1 * rs + a0
    scale_it[idx1] = scl1[idx1]
    # zone 2 - normal dist
    scl2 = 1 + (yp - 1) * np.exp(-(rs - xp)**2 / ((xp - xm) / 1.5)**2)
    scale_it[idx2] = scl2[idx2]

    Blon = scale_it * noHSS[3]

    # fake T profile ---------------------------------------------------------------
    T_x1 = 0.286 * t - 0.894
    T_x2 = 0.318 * t - 0.860
    T_x4 = 0.303 * t - 0.362
    T_x5 = 0.302 * t - 0.248
    T_x3 = T_x4 - (T_x5 - T_x4)
    T_y1 = -0.0787 * t**2 + 0.454 * t + 2.813
    T_y2 = 0.0824 * t**2 + 0.631 * t + 2.032
    scale_it = np.ones(len(rs))

    # identify zones
    idx1 = np.where((rs >= T_x1) & (rs <= T_x2))[0]
    idx2 = np.where((rs >= T_x2) & (rs <= T_x3))[0]
    idx3 = np.where((rs >= T_x3) & (rs <= T_x4))[0]
    idx4 = np.where((rs >= T_x4) & (rs <= T_x5))[0]

    # zone 1 linear
    m1 = (T_y1 - 1.) / (T_x2 - T_x1)
    scl1 = 1 + m1 * (rs - T_x1)
    scale_it[idx1] = scl1[idx1]

    # zone 2 flat
    scale_it[idx2] = T_y1

    # zone 3 linear
    m3 = (T_y2 - T_y1) / (T_x4 - T_x3)
    scl3 = T_y1 + m3 * (rs - T_x3)
    scale_it[idx3] = scl3[idx3]

    # zone 4 linear
    m4 = (1 - T_y2) / (T_x5 - T_x4)
    scl4 = T_y2 + m4 * (rs - T_x4)
    scale_it[idx4] = scl4[idx4]

    T = scale_it * noHSS[4]

    frho = CubicSpline(rs * 1.5e13, n, bc_type='natural')
    fv = CubicSpline(rs * 1.5e13, v, bc_type='natural')
    fBr = CubicSpline(rs * 1.5e13, Br, bc_type='natural')
    fBlon = CubicSpline(rs * 1.5e13, Blon, bc_type='natural')
    fT = CubicSpline(rs * 1.5e13, T, bc_type='natural')

    return [frho, fv, fBr, fBlon, fT]
Example #14
def makeSW(fname, time=None, doAll=False):
    SWfs = []
    # assume n in cm^-3, v in km/s, B in nT, T in K
    # rs in AU whether in text file or passed
    # the output functions are in g, cm/s, G, K and take r in cm

    # check whether fname is a file or the 1 AU sw params
    if isinstance(fname, str):
        ext = fname[-3:]
        if ext != 'npy':
            data = np.genfromtxt(fname, dtype=float)
            rs = data[:, 0] * 1.5e13
            ns = data[:, 1]
            vs = data[:, 2]
            Brs = data[:, 3]
            Blons = data[:, 4]
            Ts = data[:, 5]
        else:
            # dict keys
            #'Blon[nT]', 'vclt[km/s]', 'T[K]', 'n[1/cm^3]', 'vr[km/s]', 'Br[nT]', 'vlon[km/s]', 'Bclt[nT]', 'r[AU]'
            idx = 0
            if time is not None: idx = time
            data = np.atleast_1d(
                np.load(fname, allow_pickle=True, encoding='latin1'))[idx]
            rs = np.array(data['r[AU]']) * 1.5e13
            ns = np.array(data['n[1/cm^3]'])
            vs = np.array(data['vr[km/s]'])
            Brs = np.array(data['Br[nT]'])
            Blons = np.array(data['Blon[nT]'])
            Ts = np.array(data['T[K]'])
            if doAll:
                vclts = np.array(data['vclt[km/s]'])
                vlons = np.array(data['vlon[km/s]'])
                Bclts = np.array(data['Bclt[nT]'])
                fvlon = CubicSpline(rs, vlons * 1e5, bc_type='natural')
                fvclt = CubicSpline(rs, vclts * 1e5, bc_type='natural')
                fBclt = CubicSpline(rs, Bclts / 1e5, bc_type='natural')

        # make functions relating r to the 5 parameters
        frho = CubicSpline(rs, 1.67e-24 * ns, bc_type='natural')
        fv = CubicSpline(rs, vs * 1e5, bc_type='natural')
        fBr = CubicSpline(rs, Brs / 1e5, bc_type='natural')
        fBlon = CubicSpline(rs, Blons / 1e5, bc_type='natural')
        fT = CubicSpline(rs, Ts, bc_type='natural')

    else:
        nSW = fname[0]
        vSW = fname[1] * 1e5
        BSW = fname[2]
        TSW = fname[3]
        rSW = fname[4] * 1.5e13
        # n profile
        frho = lambda x: 1.67e-24 * nSW * (rSW / x)**2
        # assume constant v
        fv = lambda x: vSW
        # B profile
        BphiBr = 2.7e-6 * rSW / vSW
        Br_rSW = BSW / np.sqrt(1 + BphiBr**2)
        # scale Br based on distance
        fBr = lambda x: Br_rSW * (rSW / x)**2 / 1e5
        # lon = Br * parker factor
        fBlon = lambda x: Br_rSW * (rSW / x)**2 * 2.7e-6 * x / vSW / 1e5
        # T profile
        fT = lambda x: TSW * np.power(x / rSW, -0.58)

    if doAll:
        return [frho, fv, fBr, fBlon, fT, fvlon, fvclt, fBclt]
    else:
        return [frho, fv, fBr, fBlon, fT]
Example #15
class NEBAnalysis(MSONable):
    """
    An NEBAnalysis class.
    """

    def __init__(self, r, energies, forces, structures, spline_options=None):
        """
        Initializes an NEBAnalysis from the cumulative root mean squared distances
        between structures, the energies, the forces, the structures and the
        interpolation_order for the analysis.

        Args:
            r: Root mean square distances between structures
            energies: Energies of each structure along reaction coordinate
            forces: Tangent forces along the reaction coordinate.
            structures ([Structure]): List of Structures along reaction
                coordinate.
            spline_options (dict): Options for cubic spline. For example,
                {"saddle_point": "zero_slope"} forces the slope at the saddle to
                be zero.
        """
        self.r = np.array(r)
        self.energies = np.array(energies)
        self.forces = np.array(forces)
        self.structures = structures
        self.spline_options = spline_options if spline_options is not None \
            else {}

        # We do a piecewise interpolation between the points. Each spline (
        # cubic by default) is constrained by the boundary conditions of the
        # energies and the tangent force, i.e., the derivative of
        # the energy at each pair of points.

        self.setup_spline(spline_options=self.spline_options)

    def setup_spline(self, spline_options=None):
        """
        Setup of the options for the spline interpolation

        Args:
            spline_options (dict): Options for cubic spline. For example,
                {"saddle_point": "zero_slope"} forces the slope at the saddle to
                be zero.
        """
        self.spline_options = spline_options
        relative_energies = self.energies - self.energies[0]
        if scipy_old_piecewisepolynomial:
            if self.spline_options:
                raise RuntimeError('Option for saddle point not available with '
                                   'old scipy implementation')
            self.spline = PiecewisePolynomial(
                self.r, np.array([relative_energies, -self.forces]).T,
                orders=3)
        else:
            # New scipy implementation for scipy > 0.18.0
            if self.spline_options.get('saddle_point', '') == 'zero_slope':
                imax = np.argmax(relative_energies)
                self.spline = CubicSpline(x=self.r[:imax + 1],
                                          y=relative_energies[:imax + 1],
                                          bc_type=((1, 0.0), (1, 0.0)))
                cspline2 = CubicSpline(x=self.r[imax:], y=relative_energies[imax:],
                                       bc_type=((1, 0.0), (1, 0.0)))
                self.spline.extend(c=cspline2.c, x=cspline2.x[1:])
            else:
                self.spline = CubicSpline(x=self.r, y=relative_energies,
                                          bc_type=((1, 0.0), (1, 0.0)))

    @classmethod
    def from_outcars(cls, outcars, structures, **kwargs):
        """
        Initializes an NEBAnalysis from Outcar and Structure objects. Use
        the static constructors, e.g., :class:`from_dir` instead if you
        prefer to have these automatically generated from a directory of NEB
        calculations.

        Args:
            outcars ([Outcar]): List of Outcar objects. Note that these have
                to be ordered from start to end along reaction coordinates.
            structures ([Structure]): List of Structures along reaction
                coordinate. Must be same length as outcar.
            interpolation_order (int): Order of polynomial to use to
                interpolate between images. Same format as order parameter in
                scipy.interpolate.PiecewisePolynomial.
        """
        if len(outcars) != len(structures):
            raise ValueError("# of Outcars must be same as # of Structures")

        # Calculate cumulative root mean square distance between structures,
        # which serves as the reaction coordinate. Note that these are
        # calculated from the final relaxed structures as the coordinates may
        # have changed from the initial interpolation.
        r = [0]
        prev = structures[0]
        for st in structures[1:]:
            dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st)])
            r.append(np.sqrt(np.sum(dists ** 2)))
            prev = st
        r = np.cumsum(r)

        energies = []
        forces = []
        for i, o in enumerate(outcars):
            o.read_neb()
            energies.append(o.data["energy"])
            if i in [0, len(outcars) - 1]:
                forces.append(0)
            else:
                forces.append(o.data["tangent_force"]) 
        forces = np.array(forces)
        r = np.array(r)
        return cls(r=r, energies=energies, forces=forces,
                   structures=structures, **kwargs)

    def get_extrema(self, normalize_rxn_coordinate=True):
        """
        Returns the positions of the extrema along the MEP. Both local
        minimums and maximums are returned.

        Args:
            normalize_rxn_coordinate (bool): Whether to normalize the
                reaction coordinate to between 0 and 1. Defaults to True.

        Returns:
            (min_extrema, max_extrema), where the extrema are given as
            [(x1, y1), (x2, y2), ...].
        """
        x = np.arange(0, np.max(self.r), 0.01)
        y = self.spline(x) * 1000

        scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
        min_extrema = []
        max_extrema = []
        for i in range(1, len(x) - 1):
            if y[i] < y[i-1] and y[i] < y[i+1]:
                min_extrema.append((x[i] * scale, y[i]))
            elif y[i] > y[i-1] and y[i] > y[i+1]:
                max_extrema.append((x[i] * scale, y[i]))
        return min_extrema, max_extrema

    def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):
        """
        Returns the NEB plot. Uses Henkelman's approach of spline fitting
        each section of the reaction path based on tangent force and energies.

        Args:
            normalize_rxn_coordinate (bool): Whether to normalize the
                reaction coordinate to between 0 and 1. Defaults to True.
            label_barrier (bool): Whether to label the maximum barrier.

        Returns:
            matplotlib.pyplot object.
        """
        plt = pretty_plot(12, 8)
        scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
        x = np.arange(0, np.max(self.r), 0.01)
        y = self.spline(x) * 1000
        relative_energies = self.energies - self.energies[0]
        plt.plot(self.r * scale, relative_energies * 1000, 'ro',
                 x * scale, y, 'k-', linewidth=2, markersize=10)
        plt.xlabel("Reaction coordinate")
        plt.ylabel("Energy (meV)")
        plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20))
        if label_barrier:
            data = zip(x * scale, y)
            barrier = max(data, key=lambda d: d[1])
            plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'k--')
            plt.annotate('%.0f meV' % barrier[1],
                         xy=(barrier[0] / 2, barrier[1] * 1.02),
                         xytext=(barrier[0] / 2, barrier[1] * 1.02),
                         horizontalalignment='center')
        plt.tight_layout()
        return plt

    @classmethod
    def from_dir(cls, root_dir, relaxation_dirs=None, **kwargs):
        """
        Initializes a NEBAnalysis object from a directory of a NEB run.
        Note that OUTCARs must be present in all image directories. For the
        terminal OUTCARs from relaxation calculations, you can specify the
        locations using relaxation_dir. If these are not specified, the code
        will attempt to look for the OUTCARs in 00 and 0n directories,
        followed by subdirs "start", "end" or "initial", "final" in the
        root_dir. These are just some typical conventions used
        preferentially in Shyue Ping's MAVRL research group. For the
        non-terminal points, the CONTCAR is read to obtain structures. For
        terminal points, the POSCAR is used. The image directories are
        assumed to be the only directories that can be resolved to integers.
        E.g., "00", "01", "02", "03", "04", "05", "06". The minimum
        sub-directory structure that can be parsed is of the following form (
        a 5-image example is shown):

        00:
        - POSCAR
        - OUTCAR
        01, 02, 03, 04, 05:
        - CONTCAR
        - OUTCAR
        06:
        - POSCAR
        - OUTCAR

        Args:
            root_dir (str): Path to the root directory of the NEB calculation.
            relaxation_dirs (tuple): This specifies the starting and ending
                relaxation directories from which the OUTCARs are read for the
                terminal points for the energies.

        Returns:
            NEBAnalysis object.
        """
        neb_dirs = []

        for d in os.listdir(root_dir):
            pth = os.path.join(root_dir, d)
            if os.path.isdir(pth) and d.isdigit():
                i = int(d)
                neb_dirs.append((i, pth))
        neb_dirs = sorted(neb_dirs, key=lambda d: d[0])
        outcars = []
        structures = []

        # Setup the search sequence for the OUTCARs for the terminal
        # directories.
        terminal_dirs = []
        if relaxation_dirs is not None:
            terminal_dirs.append(relaxation_dirs)
        terminal_dirs.append((neb_dirs[0][1], neb_dirs[-1][1]))
        terminal_dirs.append([os.path.join(root_dir, d)
                              for d in ["start", "end"]])
        terminal_dirs.append([os.path.join(root_dir, d)
                              for d in ["initial", "final"]])

        for i, d in neb_dirs:
            outcar = glob.glob(os.path.join(d, "OUTCAR*"))
            contcar = glob.glob(os.path.join(d, "CONTCAR*"))
            poscar = glob.glob(os.path.join(d, "POSCAR*"))
            terminal = i == 0 or i == neb_dirs[-1][0]
            if terminal:
                for ds in terminal_dirs:
                    od = ds[0] if i == 0 else ds[1]
                    outcar = glob.glob(os.path.join(od, "OUTCAR*"))
                    if outcar:
                        outcar = sorted(outcar)
                        outcars.append(Outcar(outcar[-1]))
                        break
                else:
                    raise ValueError("OUTCAR cannot be found for terminal "
                                     "point %s" % d)
                structures.append(Poscar.from_file(poscar[0]).structure)
            else:
                outcars.append(Outcar(outcar[0]))
                structures.append(Poscar.from_file(contcar[0]).structure)
        return NEBAnalysis.from_outcars(outcars, structures, **kwargs)

    def as_dict(self):
        """
        Dict representation of NEBAnalysis.

        Returns:
            JSON serializable dict representation.
        """
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                'r': jsanitize(self.r),
                'energies': jsanitize(self.energies),
                'forces': jsanitize(self.forces),
                'structures': [s.as_dict() for s in self.structures]}
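A hedged usage sketch for the class above, assuming a pymatgen-style NEB directory layout as described in from_dir (the directory name is hypothetical):

# Hypothetical NEB run directory containing 00/, 01/, ..., 0n/ image folders
neb = NEBAnalysis.from_dir("path/to/neb_run")
minima, maxima = neb.get_extrema(normalize_rxn_coordinate=True)
plt = neb.get_plot()
plt.savefig("neb_profile.png")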
Example #16
            print(line)
    elif mode == 1:
        target.append(float(line))

x = [x for x in range(len(reference))]

fig, axs = plt.subplots(1, 2)

x = np.array(x, dtype=float)
reference = np.array(reference, dtype=float)
target = np.array(target, dtype=float)

offset = (max(reference) * 0.0)
target = [x + offset for x in target]

p1 = CubicSpline(x, reference)
p2 = CubicSpline(x, target)

# p1 = np.polynomial.chebyshev.chebfit(x, reference, 3)
# p2 = np.polynomial.chebyshev.chebfit(x, target, 3)

axs[0].set_title("Evaluate FFT", fontsize=16)
axs[0].set_xlabel('bin', fontsize=18)
axs[0].set_ylabel('amplitude', fontsize=16)
axs[0].plot(x, reference, 'ro', label="reference", color='red')
axs[0].plot(x, target, 'ro', label="target", color='blue')
axs[0].plot(x, p2(x), color='cyan')  # plot cubic spline through target
axs[0].plot(x, p1(x), color='pink')  # plot cubic spline through reference
axs[0].legend(loc='upper left')
# axs[0].set_xscale('log')
#axs[0].set_yscale('log')
Example #17
    def pdf(self, x, use_splines = False):
        r"""
        Approximate the posterior probability density function (PDF) for given
        inputs `x`.

        By default, the PDF is approximated by computing the derivative of the
        piece-wise linear approximation of the CDF as computed by the :code:`cdf`
        function.

        If :code:`use_splines` is set to :code:`True`, the PDF is computed from
        a spline fit to the approximate CDF.

        Arguments:

            x(np.array): Array of shape `(n, m)` containing `n` inputs for which
                         to predict the conditional quantiles.

            use_splines(bool): Whether or not to use a spline fit to the CDF to
            approximate the PDF.

        Returns:

            Tuple (xs, fs) containing the :math: `x`-values in `xs` and corresponding
            values of the approximate posterior PDF :math: `F(x)` in `fs`.

        """

        y_pred = np.zeros(self.quantiles.size)
        y_pred = self.predict(x).ravel()

        y = np.zeros(y_pred.size + 1)
        y[1:-1] = 0.5 * (y_pred[1:] + y_pred[:-1])
        y[0] = 2 * y_pred[0] - y_pred[1]
        y[-1] = 2 * y_pred[-1] - y_pred[-2]

        if not use_splines:

            p = np.zeros(y.size)
            p[1:-1] = np.diff(self.quantiles) / np.diff(y_pred)
        else:

            y = np.zeros(y_pred.size + 2)
            y[1:-1] = y_pred
            y[0] = 3 * y_pred[0] - 2 * y_pred[1]
            y[-1] = 3 * y_pred[-1] - 2 * y_pred[-2]
            q = np.zeros(self.quantiles.size + 2)
            q[1:-1] = np.array(self.quantiles)
            q[0] = 0.0
            q[-1] = 1.0

            sr = CubicSpline(y, q, bc_type = "clamped")
            y = np.linspace(y[0], y[-1], 101)
            p = sr(y, nu = 1)

        return y, p


        y_pred = np.zeros(self.quantiles.size + 2)
        y_pred[1:-1] = self.predict(x)
        y_pred[0] = 2.0 * y_pred[1] - y_pred[2]
        y_pred[-1] = 2.0 * y_pred[-2] - y_pred[-3]

        if use_splines:
            x_t = np.zeros(x.size + 2)
            x_t[1:-1] = x
            x_t[0] = 2 * x[0] - x[1]
            x_t[-1] = 2 * x[-1] - x[-2]
            y_t = np.zeros(y.size + 2)
            y_t[1:-1] = y
            y_t[-1] = 1.0

        else:
            logger.info(y)
            x_new = np.zeros(x.size - 1)
            x_new[2:-2] = 0.5 * (x[2:-3] + x[3:-2])
            x_new[0:2] = x[0:2]
            x_new[-2:] = x[-2:]
            y_new = np.zeros(y.size - 1)
            y_new[1:-1] = np.diff(y[1:-1]) / np.diff(x[1:-1])
        return x_new, y_new
Example #18
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 29 15:17:36 2020

@author: nikon
"""

from scipy.interpolate import CubicSpline
import numpy as np

# import data
data = np.loadtxt("0_foreground_1w_sim.txt", delimiter=",")
freq = data[0]
signal = data[1]

cs = CubicSpline(freq, signal,
                 bc_type='natural')  # interpolate using a cubic spline

new_freq = np.arange(50, 99, 0.25)
new_sig = np.zeros_like(new_freq)

for i in range(len(new_freq)):
    new_sig[i] = cs(new_freq[i])

data = np.array([new_freq, new_sig])

np.savetxt("0_foreground_1w_sim.txt", data, delimiter=",")
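CubicSpline objects accept array arguments, so the element-wise loop above can be replaced by a single vectorized call over the same new_freq grid:

# Equivalent vectorized evaluation (no explicit loop needed)
new_sig = cs(new_freq)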
Example #19
# I assume the isoelectric level lies 70 ms before the beat
# Build a matrix with the isoelectric levels; instead of taking a single point,
# I average a certain number L of points lying within the PQ interval
L = 30
dt = 70

nIso = []
for i in posQRS:
    nIso.append(ecg_one_lead[i - dt - (L // 2):i - dt + (L // 2)])

nIso = np.array(nIso)

# Average the segments
nIso = np.transpose(np.mean(nIso, axis=1))

# Interpolate the points to obtain the estimator of the interfering signal
cs = CubicSpline(posQRS - dt, nIso)
estBni = cs(np.arange(N))

# Obtain the estimator x of the ECG
estXni = ecg_one_lead - estBni

plt.figure(2)
plt.plot(np.arange(N), ecg_one_lead, label="Señal de ECG original")
plt.plot(np.arange(N), estBni, label="Estimador B de la interferente")
plt.plot(np.arange(N), estXni, label="Estimador x del ECG")
plt.plot(posQRS, qrs, 'o', label="Posicion de los latidos")
plt.legend(bbox_to_anchor=(0.85, 0.98), loc=2, borderaxespad=0.)
plt.grid()
Example #20
class NEBAnalysis(MSONable):
    """
    An NEBAnalysis class.
    """
    def __init__(self, r, energies, forces, structures, spline_options=None):
        """
        Initializes an NEBAnalysis from the cumulative root mean squared distances
        between structures, the energies, the forces, the structures and the
        interpolation_order for the analysis.

        Args:
            r: Root mean square distances between structures
            energies: Energies of each structure along reaction coordinate
            forces: Tangent forces along the reaction coordinate.
            structures ([Structure]): List of Structures along reaction
                coordinate.
            spline_options (dict): Options for cubic spline. For example,
                {"saddle_point": "zero_slope"} forces the slope at the saddle to
                be zero.
        """
        self.r = np.array(r)
        self.energies = np.array(energies)
        self.forces = np.array(forces)
        self.structures = structures
        self.spline_options = spline_options if spline_options is not None \
            else {}

        # We do a piecewise interpolation between the points. Each spline (
        # cubic by default) is constrained by the boundary conditions of the
        # energies and the tangent force, i.e., the derivative of
        # the energy at each pair of points.

        self.setup_spline(spline_options=self.spline_options)

    def setup_spline(self, spline_options=None):
        """
        Setup of the options for the spline interpolation

        Args:
            spline_options (dict): Options for cubic spline. For example,
                {"saddle_point": "zero_slope"} forces the slope at the saddle to
                be zero.
        """
        self.spline_options = spline_options
        relative_energies = self.energies - self.energies[0]
        if scipy_old_piecewisepolynomial:
            if self.spline_options:
                raise RuntimeError('Option for saddle point not available with '
                                   'old scipy implementation')
            self.spline = PiecewisePolynomial(
                self.r,
                np.array([relative_energies, -self.forces]).T,
                orders=3)
        else:
            # New scipy implementation for scipy > 0.18.0
            if self.spline_options.get('saddle_point', '') == 'zero_slope':
                imax = np.argmax(relative_energies)
                self.spline = CubicSpline(x=self.r[:imax + 1],
                                          y=relative_energies[:imax + 1],
                                          bc_type=((1, 0.0), (1, 0.0)))
                cspline2 = CubicSpline(x=self.r[imax:],
                                       y=relative_energies[imax:],
                                       bc_type=((1, 0.0), (1, 0.0)))
                self.spline.extend(c=cspline2.c, x=cspline2.x[1:])
            else:
                self.spline = CubicSpline(x=self.r,
                                          y=relative_energies,
                                          bc_type=((1, 0.0), (1, 0.0)))

    @classmethod
    def from_outcars(cls, outcars, structures, **kwargs):
        """
        Initializes an NEBAnalysis from Outcar and Structure objects. Use
        the static constructors, e.g., :class:`from_dir` instead if you
        prefer to have these automatically generated from a directory of NEB
        calculations.

        Args:
            outcars ([Outcar]): List of Outcar objects. Note that these have
                to be ordered from start to end along reaction coordinates.
            structures ([Structure]): List of Structures along reaction
                coordinate. Must be same length as outcar.
            interpolation_order (int): Order of polynomial to use to
                interpolate between images. Same format as order parameter in
                scipy.interpolate.PiecewisePolynomial.
        """
        if len(outcars) != len(structures):
            raise ValueError("# of Outcars must be same as # of Structures")

        # Calculate cumulative root mean square distance between structures,
        # which serves as the reaction coordinate. Note that these are
        # calculated from the final relaxed structures as the coordinates may
        # have changed from the initial interpolation.
        r = [0]
        prev = structures[0]
        for st in structures[1:]:
            dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st)])
            r.append(np.sqrt(np.sum(dists**2)))
            prev = st
        r = np.cumsum(r)

        energies = []
        forces = []
        for i, o in enumerate(outcars):
            o.read_neb()
            energies.append(o.data["energy"])
            if i in [0, len(outcars) - 1]:
                forces.append(0)
            else:
                forces.append(o.data["tangent_force"])
        forces = np.array(forces)
        r = np.array(r)
        return cls(r=r,
                   energies=energies,
                   forces=forces,
                   structures=structures,
                   **kwargs)

    def get_extrema(self, normalize_rxn_coordinate=True):
        """
        Returns the positions of the extrema along the MEP. Both local
        minimums and maximums are returned.

        Args:
            normalize_rxn_coordinate (bool): Whether to normalize the
                reaction coordinate to between 0 and 1. Defaults to True.

        Returns:
            (min_extrema, max_extrema), where the extrema are given as
            [(x1, y1), (x2, y2), ...].
        """
        x = np.arange(0, np.max(self.r), 0.01)
        y = self.spline(x) * 1000

        scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
        min_extrema = []
        max_extrema = []
        for i in range(1, len(x) - 1):
            if y[i] < y[i - 1] and y[i] < y[i + 1]:
                min_extrema.append((x[i] * scale, y[i]))
            elif y[i] > y[i - 1] and y[i] > y[i + 1]:
                max_extrema.append((x[i] * scale, y[i]))
        return min_extrema, max_extrema

    def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):
        """
        Returns the NEB plot. Uses Henkelman's approach of spline fitting
        each section of the reaction path based on tangent force and energies.

        Args:
            normalize_rxn_coordinate (bool): Whether to normalize the
                reaction coordinate to between 0 and 1. Defaults to True.
            label_barrier (bool): Whether to label the maximum barrier.

        Returns:
            matplotlib.pyplot object.
        """
        plt = pretty_plot(12, 8)
        scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
        x = np.arange(0, np.max(self.r), 0.01)
        y = self.spline(x) * 1000
        relative_energies = self.energies - self.energies[0]
        plt.plot(self.r * scale,
                 relative_energies * 1000,
                 'ro',
                 x * scale,
                 y,
                 'k-',
                 linewidth=2,
                 markersize=10)
        plt.xlabel("Reaction coordinate")
        plt.ylabel("Energy (meV)")
        plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20))
        if label_barrier:
            data = zip(x * scale, y)
            barrier = max(data, key=lambda d: d[1])
            plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'k--')
            plt.annotate('%.0f meV' % (np.max(y) - np.min(y)),
                         xy=(barrier[0] / 2, barrier[1] * 1.02),
                         xytext=(barrier[0] / 2, barrier[1] * 1.02),
                         horizontalalignment='center')
        plt.tight_layout()
        return plt

    @classmethod
    def from_dir(cls, root_dir, relaxation_dirs=None, **kwargs):
        """
        Initializes a NEBAnalysis object from a directory of a NEB run.
        Note that OUTCARs must be present in all image directories. For the
        terminal OUTCARs from relaxation calculations, you can specify the
        locations using relaxation_dir. If these are not specified, the code
        will attempt to look for the OUTCARs in 00 and 0n directories,
        followed by subdirs "start", "end" or "initial", "final" in the
        root_dir. These are just some typical conventions used
        preferentially in Shyue Ping's MAVRL research group. For the
        non-terminal points, the CONTCAR is read to obtain structures. For
        terminal points, the POSCAR is used. The image directories are
        assumed to be the only directories that can be resolved to integers.
        E.g., "00", "01", "02", "03", "04", "05", "06". The minimum
        sub-directory structure that can be parsed is of the following form (
        a 5-image example is shown):

        00:
        - POSCAR
        - OUTCAR
        01, 02, 03, 04, 05:
        - CONTCAR
        - OUTCAR
        06:
        - POSCAR
        - OUTCAR

        Args:
            root_dir (str): Path to the root directory of the NEB calculation.
            relaxation_dirs (tuple): This specifies the starting and ending
                relaxation directories from which the OUTCARs are read for the
                terminal points for the energies.

        Returns:
            NEBAnalysis object.
        """
        neb_dirs = []

        for d in os.listdir(root_dir):
            pth = os.path.join(root_dir, d)
            if os.path.isdir(pth) and d.isdigit():
                i = int(d)
                neb_dirs.append((i, pth))
        neb_dirs = sorted(neb_dirs, key=lambda d: d[0])
        outcars = []
        structures = []

        # Setup the search sequence for the OUTCARs for the terminal
        # directories.
        terminal_dirs = []
        if relaxation_dirs is not None:
            terminal_dirs.append(relaxation_dirs)
        terminal_dirs.append((neb_dirs[0][1], neb_dirs[-1][1]))
        terminal_dirs.append(
            [os.path.join(root_dir, d) for d in ["start", "end"]])
        terminal_dirs.append(
            [os.path.join(root_dir, d) for d in ["initial", "final"]])

        for i, d in neb_dirs:
            outcar = glob.glob(os.path.join(d, "OUTCAR*"))
            contcar = glob.glob(os.path.join(d, "CONTCAR*"))
            poscar = glob.glob(os.path.join(d, "POSCAR*"))
            terminal = i == 0 or i == neb_dirs[-1][0]
            if terminal:
                for ds in terminal_dirs:
                    od = ds[0] if i == 0 else ds[1]
                    outcar = glob.glob(os.path.join(od, "OUTCAR*"))
                    if outcar:
                        outcar = sorted(outcar)
                        outcars.append(Outcar(outcar[-1]))
                        break
                else:
                    raise ValueError("OUTCAR cannot be found for terminal "
                                     "point %s" % d)
                structures.append(Poscar.from_file(poscar[0]).structure)
            else:
                outcars.append(Outcar(outcar[0]))
                structures.append(Poscar.from_file(contcar[0]).structure)
        return NEBAnalysis.from_outcars(outcars, structures, **kwargs)

    def as_dict(self):
        """
        Dict representation of NEBAnalysis.

        Returns:
            JSON serializable dict representation.
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            'r': jsanitize(self.r),
            'energies': jsanitize(self.energies),
            'forces': jsanitize(self.forces),
            'structures': [s.as_dict() for s in self.structures]
        }
Example #21
for i in range(numele):  # arrange each element into columns
    x = list()
    y = list()
    for j in range(tamxele):
        x.append(float(L[elem[i] + j].split()[0]))
        y.append(float(L[elem[i] + j].split()[1]))
    X.append(x)
    Y.append(y)
    del x
    del y

for i in range(numele):  # plot the mesh
    for j in range(nn):
        x = X[i][j * nn:j * nn + nn]
        y = Y[i][j * nn:j * nn + nn]
        cs = CubicSpline(y, x)
        ys = np.linspace(np.min(y), np.max(y), res)
        plt.plot(cs(ys), ys, 'k', lw=0.1)

    for j in range(nn):
        Yh = list()
        Xh = list()
        for k in range(nn):
            Xh.append(X[i][k * nn + j])
            Yh.append(Y[i][k * nn + j])
        cs = CubicSpline(Xh, Yh)
        xs = np.linspace(np.min(Xh), np.max(Xh), res)
        del Yh
        del Xh
        plt.plot(xs, cs(xs), 'k', lw=0.1)
Example #22
 def cspline_antideriv(x, y, axis=0):
     return CubicSpline(x, y, axis=axis).antiderivative()
Example #23
 def spline(self,y):   
     cs = CubicSpline(self.x,y)
     y_ = cs(self.x_)
     return y_
Example #24
 def test_small_dx(self):
     rng = np.random.RandomState(0)
     x = np.sort(rng.uniform(size=100))
     y = 1e4 + rng.uniform(size=100)
     S = CubicSpline(x, y)
     self.check_correctness(S, tol=1e-13)
Example #25
import numpy as np
import Beregning_av_hastigheter as funk
from scipy.interpolate import CubicSpline
import math

#Horizontal distance between the attachment points is 0.200 m
h = 0.200
xfast = np.asarray([0, h, 2 * h, 3 * h, 4 * h, 5 * h, 6 * h, 7 * h])
#Enter the y-values of the track's 8 attachment points in the array yfast.
#An arbitrary example:
yfast = np.asarray([0.399, 0.310, 0.261, 0.254, 0.248, 0.211, 0.165, 0.105])
#Replace with your own values read off in Tracker.
#The program computes the 7 cubic polynomials, one
#for each interval between two attachment points,
#using the CubicSpline function:
cs = CubicSpline(xfast, yfast, bc_type='natural')
#The function cs can now be used to compute y(x), y'(x) and y''(x)
#for an arbitrary horizontal position x, or for many horizontal
#positions stored in an array:
#cs(x)   corresponds to y(x)
#cs(x,1) corresponds to y'(x)
#cs(x,2) corresponds to y''(x)
#Here we create an array of x-values between 0 and 1.4 m
xmin = 0.000
xmax = 1.401
dx = 0.001
x = np.arange(xmin, xmax, dx)
#The arange function returns values on the "half-open" interval
#[xmin,xmax), i.e. xmin is included while xmax is not. Here this gives
#x[0]=xmin=0.000, x[1]=xmin+1*dx=0.001, ..., x[1400]=xmax-dx=1.400,
#i.e. x becomes an array with 1401 elements
Example #26
def update(i):
    global position,velocity, dx, mass, smooth # Get positions and velocities and bin width
    N = position.shape[1]
    year = i*dt  # How many Myr have passed since the initial condition.
    scat.set_label('%lf Myrs'%year)
    ax1.legend(loc='upper right')  # Display the elapsed time in the upper right corner.
    accel = np.empty(shape = position.shape)
    for i in range(N):
        accel[0, i], accel[1, i], accel[2, i] = acceleration(position, i, Np, mass, smooth)
    velocity += accel
    
    position += (velocity+recession_vel(position,H_0))*dt # Increment positions according to their velocities
    position = apply_boundary(position, Nd, Np) # Apply boundary conditions
    ax1.set_xlim(-x_bound-x_bound*Hubble_convert(H_0)*year,x_bound+x_bound*Hubble_convert(H_0)*year)  # Set x-axis limits
    ax1.set_ylim(-y_bound-y_bound*Hubble_convert(H_0)*year,y_bound+y_bound*Hubble_convert(H_0)*year)  # Set y-axis limits
    #points.set_data(position[0,:], position[1,:]) # Show 2D projection of first 2 position coordinates
    scat.set_offsets(np.reshape(np.concatenate((position[0,:], position[1,:])), (2, Np)).T)  # Update the scatter points with the new 2D positions

    # if project_3d:
    #     points.set_3d_properties(position[2,:])  ## For 3D projection
    
    DD = np.ravel(tril(separation(position)))
    
    factor = Np**2/((2*x_bound)**3) #This is the number density of pair of particles in the simulation. Since we use
    #periodic boundary condition, we can also consider our simulation in a sphere. 
    
    
    h_DD, x_DD = histogram(DD,bins=xb)
    
    h = np.zeros(len(h_DD))
    x_max = 0.0
    h_max = 0.0
    for i in range(len(h_DD)):
        h[i] = h_DD[i]/((4.0/3.0*np.pi*(xb[i+1]**3-xb[i]**3)*factor))-1.0
        if (h[i] > 0):
            x_max = x_DD[i]
    
    line.set_data(x_DD[:-1],h) # Set the new data for the line in the 2nd panel
    ax2.set_xlim(0, x_max)
    ax2.set_ylim(-1, np.amax(h)+5)
    
    k = 2.0*np.pi/x_DD[:-1]
    
    
    #print(h_DD)
    variable_x = x_DD[:-1]
    #print(variable_x)
    cs = CubicSpline(variable_x, h)
    x = np.linspace(xb[0], np.sqrt(3.0)*2.0*x_bound, num=1000)
    smooth_plot = cs(x)
    smooth_line.set_data(x, smooth_plot)
    PS = np.zeros(len(k))
    PS_spline = []
    k_eff = []
    for i in range(len(k)):
        PS[i] = np.trapz(variable_x**2*h*np.sin(k[i]*variable_x)/(k[i]*variable_x)*2.0*np.pi)
        y, err = integrate.quad(integrand, 0, separation_max, args = (k[i], cs))
        if (abs(err/y) < 0.05):
            PS_spline.append(y)
            k_eff.append(k[i])
        
    PS_spline = np.array(PS_spline)
    k_eff = np.array(k_eff)
    PS_spline = PS_spline/np.amax(PS_spline)
    
    k_log = np.log10(k_eff)
    k_0 = []
    k_1 = []
    PS_spline_0 = []
    PS_spline_1 = []
    for i in range(len(k_log)):
        if k_log[i] < 0.0:
            k_0.append(k_log[i])
            PS_spline_0.append(PS_spline[i])
        else:
            k_1.append(k_log[i])
            PS_spline_1.append(PS_spline[i])
    
    
    k_0 = np.flip(np.array(k_0))
    k_1 = np.flip(np.array(k_1))
    PS_spline_0 = np.flip(np.array(PS_spline_0))
    PS_spline_1 = np.flip(np.array(PS_spline_1))
    #print(k_log[20])
    
    cs_ps_0 = InterpolatedUnivariateSpline(k_0, PS_spline_0, k = 1)
    #cs_ps_1 = CubicSpline(k_1, PS_spline_1)
    cs_ps_1 = InterpolatedUnivariateSpline(k_1, PS_spline_1, k = 2)
    #cs_ps = InterpolatedUnivariateSpline(np.flip(k_log), np.flip(PS_spline/np.amax(PS_spline)),k=5)
    
    k_plot_0 = np.log10(np.linspace(k[len(k)-1], 1, 1001))
    k_plot_1 = np.log10(np.linspace(1,20,1001))
    
    ps_0 = cs_ps_0(k_plot_0)
    ps_1 = cs_ps_1(k_plot_1)
    k_plot = np.concatenate((k_plot_0, k_plot_1))
    ps = np.concatenate((ps_0, ps_1))
    #ps = cs_ps(np.log10(k_plot))
    #smooth_plane.set_data(np.log(k_plot), cs_ps(k_plot))
    smooth_plane.set_data(10**(k_plot), ps)
    #print(np.trapz(integrand, x=variable_x))
    #print(variable_x)
    #print(power_spectrum(h, x[:-1], 10**(-6), dx)
    PS = PS/np.amax(PS)
    plane.set_data(k,PS)
    return scat, plane, smooth_line, smooth_plane, line, # Plot the points and the line
Example #27
    def render(self, mode='human'):
        from gym.envs.classic_control import rendering
        from pyglet.gl import glRotatef, glPushMatrix


        def draw_lasting_circle(Viewer, radius=10, res=30, filled=True, **attrs):
            geom = rendering.make_circle(radius=radius, res=res, filled=filled)
            rendering._add_attrs(geom, attrs)
            Viewer.add_geom(geom)
            return geom
        
        def draw_lasting_line(Viewer, start, end, **attrs):
            geom = rendering.Line(start, end)
            rendering._add_attrs(geom, attrs)
            Viewer.add_geom(geom)
            return geom

        def make_ellipse(major=10, minor=5, res=30, filled=True):
            points = []
            for i in range(res):
                ang = 2*np.pi*i / res
                points.append((np.cos(ang)*major, np.sin(ang)*minor))
            if filled:
                return rendering.FilledPolygon(points)
            else:
                return rendering.PolyLine(points, True)


        def draw_ellipse(Viewer, major=10, minor=5, res=30, **attrs):
            geom = make_ellipse(major=major, minor=minor, res=res, filled=True)
            rendering._add_attrs(geom, attrs)
            Viewer.add_onetime(geom)
            return geom
        
        # ------------------------------------------------------------------------
        # size and position of the fish
        a,b,c = self.a,self.b,self.c
        x,y,theta = self.pos
        alpha_1, alpha_2 = self.shape
        theta = np.array([0, -alpha_1, alpha_2]) + theta
        x1 = x - np.cos(theta[0])*a[0] - np.cos(theta[1])*a[1]
        y1 = y - np.sin(theta[0])*a[0] - np.sin(theta[1])*a[1]
        x2 = x + np.cos(theta[0])*a[0] + np.cos(theta[2])*a[2]
        y2 = y + np.sin(theta[0])*a[0] + np.sin(theta[2])*a[2]
        x = np.array([x,x1,x2])
        y = np.array([y,y1,y2])
        # ------------------------------------------------------------------------
        from gym.envs.classic_control import rendering
        
        # create the image if it has not been done
        if self.viewer is None:
            self.viewer = rendering.Viewer(1000,200)
            background = draw_lasting_circle(self.viewer,radius=100, res=10)
            background.set_color(1.,1.,1.)
        
        # set viewer size
        bound = 4
        self.viewer.set_bounds(-bound+2,bound+4,-bound/4,bound/4)
        
        """draw two axes"""
        axisX = self.viewer.draw_line((-1000., 0), (1000., 0))
        axisY = self.viewer.draw_line((0,-1000.), (0,1000.))
        axisX.set_color(.5,.5,.5)
        axisY.set_color(.5,.5,.5)

        """draw a fish"""
        for i in range(3):
            link = draw_ellipse(self.viewer,major=self.a[i], minor=self.b[i], res=30, filled=True)
            lkTrans = rendering.Transform(rotation=theta[i],translation=(x[i],y[i]))
            link.add_attr(lkTrans)
            if i%3 == 0: link.set_color(.7, .1, .1)
            if i%3 == 1: link.set_color(.1, .7, .1)
            if i%3 == 2: link.set_color(.1, .1, .7)
        for i in range(2):
            eye = draw_ellipse(self.viewer,major=self.a[2]/8, minor=self.a[2]/8, res=30, filled=True)
            eyngle = theta[2]+np.pi/5.5*(i-.5)*2;
            eyeTrans = rendering.Transform(translation=(x[2]+np.cos(eyngle)*self.a[2]/2,y[2]+np.sin(eyngle)*self.a[2]/2))
            eye.add_attr(eyeTrans)
            eye.set_color(.6,.3,.4)
            
        # ------------------------------------------------------------------------
        # interpolate a smooth shape of the fish
        Npts = 7*2
        headl = 1.3
        facel = .6
        facew = 2.4
        headw = 2.6
        neckw = 2.3
        bodyw = 2.2
        waist = 2
        tailw = 1.7
        taill = 2.2
        
        referenceX = np.zeros((Npts,))
        referenceY = np.zeros((Npts,))
        
        referenceX[7], referenceY[7] = x2 + np.cos(theta[2])*a[2]*headl, y2 + np.sin(theta[2])*a[2]*headl
        
        referenceX[6], referenceY[6] = x2 + np.cos(theta[2])*a[2]*facel - np.sin(theta[2])*b[2]*facew, y2 + np.sin(theta[2])*a[2]*facel + np.cos(theta[2])*b[2]*facew
        referenceX[-6], referenceY[-6] = x2 + np.cos(theta[2])*a[2]*facel + np.sin(theta[2])*b[2]*facew, y2 + np.sin(theta[2])*a[2]*facel - np.cos(theta[2])*b[2]*facew
        
        referenceX[5], referenceY[5] = x2 - np.sin(theta[2])*b[2]*headw, y2 + np.cos(theta[2])*b[2]*headw
        referenceX[-5], referenceY[-5] = x2 + np.sin(theta[2])*b[2]*headw, y2 - np.cos(theta[2])*b[2]*headw
        
        referenceX[4], referenceY[4] = x[0] + np.cos(theta[0])*a[0] - np.sin((theta[0]+theta[2])/2)*(b[2]+b[0])*neckw/2, y[0] + np.sin(theta[0])*a[0] + np.cos((theta[0]+theta[2])/2)*(b[2]+b[0])*neckw/2
        referenceX[-4], referenceY[-4] = x[0] + np.cos(theta[0])*a[0] + np.sin((theta[0]+theta[2])/2)*(b[2]+b[0])*neckw/2, y[0] + np.sin(theta[0])*a[0] - np.cos((theta[0]+theta[2])/2)*(b[2]+b[0])*neckw/2
        
        referenceX[3], referenceY[3] = x[0] - np.sin(theta[0])*b[0]*bodyw, y[0] + np.cos(theta[0])*b[0]*bodyw
        referenceX[-3], referenceY[-3] = x[0] + np.sin(theta[0])*b[0]*bodyw, y[0] - np.cos(theta[0])*b[0]*bodyw
        
        referenceX[2], referenceY[2] = x[0] - np.cos(theta[0])*a[0] - np.sin((theta[0]+theta[1])/2)*(b[1]+b[0])*waist/2, y[0] - np.sin(theta[0])*a[0] + np.cos((theta[0]+theta[1])/2)*(b[1]+b[0])*waist/2
        referenceX[-2], referenceY[-2] = x[0] - np.cos(theta[0])*a[0] + np.sin((theta[0]+theta[1])/2)*(b[1]+b[0])*waist/2, y[0] - np.sin(theta[0])*a[0] - np.cos((theta[0]+theta[1])/2)*(b[1]+b[0])*waist/2
        
        referenceX[1], referenceY[1] = x1 - np.sin(theta[1])*b[1]*tailw, y1 + np.cos(theta[1])*b[1]*tailw
        referenceX[-1], referenceY[-1] = x1 + np.sin(theta[1])*b[1]*tailw, y1 - np.cos(theta[1])*b[1]*tailw
        
        referenceX[0], referenceY[0] = x1 - np.cos(theta[1])*a[1]*taill, y1 - np.sin(theta[1])*a[1]*taill
        
        referenceX = np.append(referenceX,referenceX[0])
        referenceY = np.append(referenceY,referenceY[0])
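        # close the outline by repeating the first point: CubicSpline with
        # bc_type='periodic' requires the first and last values to coincide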
        
        from scipy.interpolate import CubicSpline
        p = np.linspace(0, 1, num=Npts + 1)
        cs = CubicSpline(p, np.stack([referenceX, referenceY]).T, bc_type='periodic')
        pnew = np.linspace(0, 1, num=200)
        outline = cs(pnew)
        reference = [(outline[i, 0], outline[i, 1]) for i in range(outline.shape[0])]
        # ------------------------------------------------------------------------
        
        fish = self.viewer.draw_polygon(reference, filled=False)
        fish.set_linewidth(2)
        fish.set_color(.5,.5,.5)
        
        """draw a trail behind the fish"""
        if self.oldpos is not None:
            trail = draw_lasting_circle(self.viewer, radius=0.015, res = 5)
            trTrans = rendering.Transform(translation=(np.sum(x)/3,np.sum(y)/3))
            trail.add_attr(trTrans)
            dx = self.pos[0]-self.oldpos[0]
            dy = self.pos[1]-self.oldpos[1]
            trail.set_color(0.3,0.3,0.65)
        return self.viewer.render(return_rgb_array = mode=='rgb_array')
Ejemplo n.º 28
0
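The snippet below starts after its sampling setup, which is not shown here. A minimal sketch of the assumed definitions follows; the variable names are taken from the snippet itself, but the concrete values (sampling rate, test frequency, the linear-interpolation step) are guesses rather than the original code:

import numpy as np
import matplotlib.pyplot as plt

f_smp = 10                                       # assumed sampling rate of the original signal
omega_sin = 2. * np.pi * 3.                      # assumed angular frequency of the test sine
t_smp = np.linspace(0., 1., f_smp + 1)           # original sample times
y_smp = np.sin(t_smp * omega_sin)                # sampled signal
t_interp = np.linspace(0., 1., 2 * f_smp + 1)    # doubled number of samples ("2x")
y_interp = np.interp(t_interp, t_smp, y_smp)     # linear interpolation onto the finer grid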
plt.figure(1)
plt.plot(t_interp, y_interp, label="linear")
plt.legend(loc='lower right', ncol=2)

# FFT of the linear interpolation
Y_interp = np.fft.rfft(y_interp)
plt.figure(2)
plt.plot(np.abs(Y_interp) / 2., label="linear"
         )  # /2 for the normalization due to the doubled number of samples
plt.legend(loc='upper right', ncol=2)

#####################################################################
# spline interpolation

from scipy.interpolate import CubicSpline
cs = CubicSpline(t_smp, y_smp, bc_type='natural')  # cubic spline interpolation
# FFT of the spline-interpolated signal with doubled number of samples
Y_interp_cs = np.fft.rfft(cs(t_interp))

t_interp_100 = np.linspace(0., 1., 100 * f_smp + 1)  # increase the sampling rate by a factor of 100

# displaying the interpolated signals
plt.figure(num=3, figsize=(6.5, 4))
plt.plot(t_smp, y_smp, 'o', label='samples')
plt.plot(t_interp_100, np.sin(t_interp_100 * omega_sin), label='exact')
plt.plot(t_interp, y_interp, label="linear")
plt.plot(t_interp, cs(t_interp), label="spline (2x)")
plt.plot(t_interp_100, cs(t_interp_100), label="spline (100x)")
plt.legend(loc='lower right', ncol=2)
Ejemplo n.º 29
0
    def on_message_element(self, bus, msg):
        plugin = msg.src.get_name()

        if plugin == 'limiter_input_level':
            peak = msg.get_structure().get_value('peak')

            self.limiter.ui_update_limiter_input_level(peak)
        elif plugin == 'limiter_output_level':
            peak = msg.get_structure().get_value('peak')

            self.limiter.ui_update_limiter_output_level(peak)
        elif plugin == 'autovolume':
            if self.limiter.autovolume_enabled:
                peak = msg.get_structure().get_value('peak')

                max_value = max(peak)

                if max_value > self.limiter.autovolume_threshold:
                    self.limiter.auto_gain(max_value)
        elif plugin == 'compressor_input_level':
            peak = msg.get_structure().get_value('peak')

            self.compressor.ui_update_compressor_input_level(peak)
        elif plugin == 'compressor_output_level':
            peak = msg.get_structure().get_value('peak')

            self.compressor.ui_update_compressor_output_level(peak)
        elif plugin == 'reverb_input_level':
            peak = msg.get_structure().get_value('peak')

            self.reverb.ui_update_reverb_input_level(peak)
        elif plugin == 'reverb_output_level':
            peak = msg.get_structure().get_value('peak')

            self.reverb.ui_update_reverb_output_level(peak)
        elif plugin == 'highpass_input_level':
            peak = msg.get_structure().get_value('peak')

            self.highpass.ui_update_highpass_input_level(peak)
        elif plugin == 'highpass_output_level':
            peak = msg.get_structure().get_value('peak')

            self.highpass.ui_update_highpass_output_level(peak)
        elif plugin == 'lowpass_input_level':
            peak = msg.get_structure().get_value('peak')

            self.lowpass.ui_update_lowpass_input_level(peak)
        elif plugin == 'lowpass_output_level':
            peak = msg.get_structure().get_value('peak')

            self.lowpass.ui_update_lowpass_output_level(peak)
        elif plugin == 'equalizer_input_level':
            peak = msg.get_structure().get_value('peak')

            self.equalizer.ui_update_equalizer_input_level(peak)
        elif plugin == 'equalizer_output_level':
            peak = msg.get_structure().get_value('peak')

            self.equalizer.ui_update_equalizer_output_level(peak)
        elif plugin == 'spectrum':
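            # the 'spectrum' element reports one magnitude (in dB) per analysis band;
            # interpolate them onto the denser x axis used by the UI plot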
            magnitudes = msg.get_structure().get_value('magnitude')

            cs = CubicSpline(self.spectrum_freqs,
                             magnitudes[:self.spectrum_nfreqs])

            magnitudes = cs(self.spectrum_x_axis)

            max_mag = np.amax(magnitudes)
            min_mag = self.spectrum_threshold

            if max_mag > min_mag:
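                # rescale magnitudes from [threshold, 0] dB onto [0, 1] before emitting to the UI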
                magnitudes = (min_mag - magnitudes) / min_mag

                self.emit('new_spectrum', magnitudes)

        return True
Ejemplo n.º 30
0
import numpy as np
from scipy.interpolate import CubicSpline
import matplotlib.pyplot as plt

x = np.arange(10)
y = np.sin(x)
cs = CubicSpline(x, y)
xs = np.arange(-0.5, 9.6, 0.1)  # dense evaluation grid (assumed; not defined in the original snippet)
plt.figure(figsize=(6.5, 4))
plt.plot(x, y, 'o', label='data')
plt.plot(xs, np.sin(xs), label='true')
plt.plot(xs, cs(xs), label="S")
plt.plot(xs, cs(xs, 1), label="S'")
plt.plot(xs, cs(xs, 2), label="S''")
plt.plot(xs, cs(xs, 3), label="S'''")
plt.xlim(-0.5, 9.5)
plt.legend(loc='lower left', ncol=2)
plt.show()
Ejemplo n.º 31
0
def scale_spectra(common_text, list_photfiles, prefix_str='f', plot=False):
    """
    Scales spectra according to the ratio of broadband photometric flux to spectroscopic flux.
    In effect, this step applies flux calibration to the spectra.
    Args:
        common_text     : Common text of 1-D Spectra files whose spectroscopic fluxes are to be scaled
        list_photfiles  : Text list of files containing different broadband photometric magnitudes
        prefix_str      : Prefix to distinguish the scaled 1-D spectra from the original
        plot            : Boolean describing whether the scaled spectra are to be plotted
    Returns:
        None
    """
    list_files = group_similar_files('', common_text=common_text)

    for file_name in list_files:
        dict_spec = read_specflux(file_name)
        dict_phot = read_photflux(list_photfiles=list_photfiles,
                                  julian_day=read_jd(file_name))
        dict_phot = get_zflux(dict_phot, cntrl_wav=7500)
        dict_scale = dict(
            (key, str(float(dict_phot[key]) / float(dict_spec[key])))
            for key in dict_phot.keys() if key in dict_spec.keys())

        #         if len(dict_scale.keys()) > 3:
        #             del dict_scale[7500]

        # if len(dict_scale.keys()) > 4:
        #     order = 4
        # else:
        #     order = len(dict_scale.keys()) - 1

        series = pd.Series(dict_scale, dtype='float64').dropna()
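        # interpolate the per-band scale factors (photometric / spectroscopic flux) across
        # wavelength with a natural cubic spline; the Rbf fit is only drawn for comparison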
        spline = CubicSpline(series.index.values,
                             series.values,
                             bc_type='natural',
                             extrapolate=True)
        spline2 = Rbf(series.index.values, series.values)

        wave_data, flux_data = read_1dspec(file_name)
        scale_data = spline(wave_data)
        scale_data[scale_data < 0] = 0

        flux_moddata = np.multiply(np.asarray(flux_data), scale_data)
        write_1dspec(ref_filename=file_name,
                     flux_array=flux_moddata,
                     prefix_str=prefix_str)

        if plot:
            fig = plt.figure(figsize=(8, 6))
            ax = fig.add_subplot(111)

            wavenew = np.linspace(float(wave_data[0]), float(wave_data[-1]),
                                  10000)
            ax.plot(series.index.values,
                    series.values,
                    'o',
                    label='Data Points')
            ax.plot(wavenew, spline(wavenew), 'k', label='CubicSpline')
            ax.plot(wavenew, spline2(wavenew), 'r', label='Rbf')

            ax.legend()
            ax.grid()
            plt.show()
            plt.close(fig)
Ejemplo n.º 32
0
flux_df.index = [
    filter_df.loc[band, 'CentreWave'] for band in flux_df.index.values
]
flux_df = flux_df.sort_index()

dict_flux = {}
for caljd in flux_df.columns.values:
    series = flux_df[caljd].dropna().apply(lambda x: float(x))

    if caljd > 110:
        wave_data = np.linspace(4000, 9200, 1000)
    else:
        wave_data = np.linspace(3100, 9200, 1000)

    spline = CubicSpline(series.index.values.tolist(),
                         series.values.tolist(),
                         bc_type='natural',
                         extrapolate=True)
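    # evaluate the spline on the wavelength grid, clip unphysical negative fluxes,
    # and integrate with the trapezoidal rule to obtain the total flux over the covered band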

    flux_data = spline(wave_data)
    flux_data[flux_data < 0] = 0
    netflux = np.trapz(flux_data, wave_data)

    dict_flux[caljd] = {}
    dict_flux[caljd]['Flux'] = netflux
    dict_flux[caljd]['Lum'] = calc_lum(netflux)[0]
    dict_flux[caljd]['LumErr'] = calc_lum(netflux)[1]

#     print caljd
#     print caljd - date_explosion
#     fig_temp = plt.figure(figsize=(10, 8))
#     ax = fig_temp.add_subplot(111)
Ejemplo n.º 33
0
def plot_mep(atom_pos, mep_energies, image_name=None, show=None):
    """
    Plot the minimum-energy path from an NEB calculation.
    atom_pos (list) - xcart positions of the diffusing atom along the path
    mep_energies (list) - full energies of the system corresponding to atom_pos
    """

    # fit a parametric spline through the atom positions to obtain a smooth path
    atom_pos = np.array(atom_pos)
    data = atom_pos.T
    tck, u = interpolate.splprep(data)  # knots and parameter values of the interpolating spline
    path = interpolate.splev(np.linspace(0, 1, 500), tck)  # resample the spline at 500 points for resolution
    path = np.array(path)


    diffs = np.diff(path.T, axis=0)
    path_length = np.linalg.norm(diffs, axis=1).sum()
    mep_pos = np.array([p * path_length for p in u])
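    # u holds the spline parameter of each image; scaling by the total path length
    # turns it into a reaction coordinate in the same length units as atom_pos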


    if 0: #plot the path in 3d
        fig = plt.figure()
        ax = Axes3D(fig)
        ax.plot(data[0], data[1], data[2], label='original points', lw=2, c='Dodgerblue')
        ax.plot(path[0], path[1], path[2], label='fit', lw=2, c='red')
        ax.legend()
        plt.show()

    mine = min(mep_energies)
    eners = np.array(mep_energies) - mine

    xnew = np.linspace(0, path_length)

    # ynew = spline(mep_pos, eners, xnew)
    # spl = CubicSpline(mep_pos, eners, bc_type='natural')  # natural: zero second derivative at the ends
    spl = CubicSpline(mep_pos, eners, bc_type='clamped')  # clamped: zero first derivative at the ends
    ynew = spl(xnew)

    # the minimum is now always zero; find stationary points from the spline derivative
    spl_der = spl.derivative()

    mi = min(xnew)
    ma = max(xnew)
    r = spl_der.roots()

    print(r)

    r = r[np.logical_and(mi < r, r < ma)]  # only roots inside the interval are of interest


    diff_barrier = max(spl(r))  # the barrier is the largest spline value among the interior stationary points
    print_and_log('plot_mep(): Diffusion barrier =', round(diff_barrier, 2), ' eV', imp='y')
    # sys.exit()


    path2saved = fit_and_plot(orig=(mep_pos, eners, 'ro'), spline=(xnew, ynew, 'b-'), xlim=(-0.05, None),
                              xlabel=r'Reaction coordinate ($\AA$)', ylabel='Energy (eV)',
                              image_name=image_name, show=show)


    return path2saved, diff_barrier