Example #1
    def execute(self):

        c12axis = self.calculate_c12axis()

        if self.interp_from_htc:
            cni = self.c12axis_init.shape[0]
            if self.c12axis_init[-1, 2] > 1.:
                self.c12axis_init /= self.blade_length

            # interpolate blade_ae distribution onto c12 distribution
            self.blade_ae.c12axis = np.zeros((cni, 4))
            for i in range(4):
                tck = pchip(c12axis[:, 2], c12axis[:,i])
                self.blade_ae.c12axis[:, i] = tck(self.c12axis_init[:, 2])
        else:
            ds_root = 1. / self.blade_ni_span
            ds_tip = 1. / self.blade_ni_span / 3.
            dist = np.array([[0., ds_root, 1],
                             [1., ds_tip, self.blade_ni_span]])
            x = distfunc(dist)
            self.blade_ae.c12axis = np.zeros((x.shape[0], 4))
            for i in range(4):
                tck = pchip(c12axis[:, 2], c12axis[:,i])
                self.blade_ae.c12axis[:, i] = tck(x)

        # scale main axis according to radius
        self.blade_ae.c12axis[:,:3] *= self.blade_length

        self.blade_ae.radius = self.blade_length + self.hub_radius
        self.blade_ae.s = self.bladegeom.smax * self.bladegeom.s * self.blade_length
        self.blade_ae.rthick = self.bladegeom.rthick * 100.
        self.blade_ae.chord = self.bladegeom.chord * self.blade_length
        self.blade_ae.aeset = np.ones(len(self.blade_ae.s))
Example #2
 def test_endslopes(self):
     # this is a smoke test for gh-3453: PCHIP interpolator should not
     # set edge slopes to zero if the data do not suggest zero edge derivatives
     x = np.array([0.0, 0.1, 0.25, 0.35])
     y1 = np.array([279.35, 0.5e3, 1.0e3, 2.5e3])
     y2 = np.array([279.35, 2.5e3, 1.50e3, 1.0e3])
     for pp in (pchip(x, y1), pchip(x, y2)):
         for t in (x[0], x[-1]):
             assert_(pp(t, 1) != 0)
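In the assert above, the second argument of `pp(t, 1)` is the derivative order, so the test checks that the first derivative at the endpoints is nonzero. A minimal sketch of that calling convention with made-up data, assuming `pchip` is scipy's `PchipInterpolator` as in the rest of these examples:

import numpy as np
from scipy.interpolate import PchipInterpolator as pchip

x = np.array([0.0, 0.1, 0.25, 0.35])
y = np.array([1.0, 3.0, 4.0, 9.0])   # made-up monotone samples
pp = pchip(x, y)
print(pp(0.2))      # interpolated value at 0.2
print(pp(0.2, 1))   # first derivative at 0.2, same call form as the assert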
Example #3
    def test_cast(self):
        # regression test for integer input data, see gh-3453
        data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100], [-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
        xx = np.arange(100)
        curve = pchip(data[0], data[1])(xx)

        data1 = data * 1.0
        curve1 = pchip(data1[0], data1[1])(xx)

        assert_allclose(curve, curve1, atol=1e-14, rtol=1e-14)
Example #4
def bdrate(file1, file2, anchorfile):
    if anchorfile:
        anchor = flipud(loadtxt(anchorfile));
    a = flipud(loadtxt(file1));
    b = flipud(loadtxt(file2));
    rates = [0.06,0.2];
    qa = a[:,0]
    qb = b[:,0]
    ra = a[:,2]*8./a[:,1]
    rb = b[:,2]*8./b[:,1]
    bdr = zeros((4,4))
    ret = {}
    for m in range(0,len(met_index)):
        try:
            ya = a[:,3+m];
            yb = b[:,3+m];
            if anchorfile:
                yr = anchor[:,3+m];
            #p0 = interp1d(ra, ya, interp_type)(rates[0]);
            #p1 = interp1d(ra, ya, interp_type)(rates[1]);
            if anchorfile:
                p0 = yr[0]
                p1 = yr[-1]
            else:
                minq = 20
                maxq = 55
                try:
                    minqa_index = qa.tolist().index(minq)
                    maxqa_index = qa.tolist().index(maxq)
                    minqb_index = qb.tolist().index(minq)
                    maxqb_index = qb.tolist().index(maxq)
                except ValueError:
                    q_not_found = True
                    minqa_index = -1
                    maxqa_index = 0
                    minqb_index = -1
                    maxqb_index = 0
                p0 = max(ya[maxqa_index],yb[maxqb_index])
                p1 = min(ya[minqa_index],yb[minqb_index])
            a_rate = pchip(ya, log(ra))(arange(p0,p1,abs(p1-p0)/5000.0));
            b_rate = pchip(yb, log(rb))(arange(p0,p1,abs(p1-p0)/5000.0));
            if not len(a_rate) or not len(b_rate):
                bdr = NaN;
            else:
                bdr=100 * (exp(mean(b_rate-a_rate))-1);
        except ValueError:
            bdr = NaN
        except linalg.linalg.LinAlgError:
            bdr = NaN
        except IndexError:
            bdr = NaN
        if abs(bdr) > 1000:
            bdr = NaN
        ret[m] = bdr
    return ret
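The core of the loop above fits PCHIP curves of log-rate versus quality for the two files and converts the mean log-rate gap into a percentage, i.e. a BD-rate style metric. A self-contained sketch of just that step with made-up quality/rate points (the variable names below are illustrative, not from the script):

import numpy as np
from scipy.interpolate import pchip

qual_a = np.array([30.0, 34.0, 38.0, 42.0])        # codec A quality points
rate_a = np.array([100.0, 220.0, 480.0, 1000.0])   # codec A rates
qual_b = np.array([30.5, 34.5, 38.5, 42.5])        # codec B quality points
rate_b = np.array([90.0, 200.0, 430.0, 900.0])     # codec B rates

p0, p1 = 31.0, 42.0                                 # shared quality range
grid = np.arange(p0, p1, abs(p1 - p0) / 5000.0)
a_rate = pchip(qual_a, np.log(rate_a))(grid)
b_rate = pchip(qual_b, np.log(rate_b))(grid)
bdr = 100 * (np.exp(np.mean(b_rate - a_rate)) - 1)  # % rate change of B vs A
print(bdr)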
Example #5
    def execute(self):

        if self.interp_from_htc:
            c12axis = self.c12axis_init.copy()
        else:
            c12axis = self.calculate_c12axis()

        ds_root = 1. / self.blade_ni_span
        ds_tip = 1. / self.blade_ni_span / 3.
        dist = np.array([[0., ds_root, 1], [1., ds_tip, self.blade_ni_span]])

        x = distfunc(dist)
        self.c12axis = np.zeros((x.shape[0], 4))
        for i in range(4):
            tck = pchip(c12axis[:, 2], c12axis[:, i])
            self.c12axis[:, i] = tck(x)

        # scale main axis according to radius
        self.c12axis[:, :3] *= self.blade_length

        l = ((self.c12axis[1:, 0]-self.c12axis[:-1, 0])**2 +
             (self.c12axis[1:, 1]-self.c12axis[:-1, 1])**2 +
             (self.c12axis[1:, 2]-self.c12axis[:-1, 2])**2)**.5

        self.blade_ae.s = self.bladegeom.s * sum(l) / self.bladegeom.s[-1]
        self.blade_ae.rthick = self.bladegeom.rthick * 100.
        self.blade_ae.chord = self.bladegeom.chord * self.blade_length
        self.blade_ae.aeset = np.ones(len(self.blade_ae.s))
Example #6
    def interpolate(self):
        pitch = np.zeros((self.nframes))
        pitch[:] = self.samp_values
        pitch2 = medfilt(self.samp_values, self.SMOOTH_FACTOR)

        # This part in the original code is kind of confusing and caused
        # some problems with the extrapolated points before the first
        # voiced frame and after the last voiced frame. So, I made some
        # small modifications in order to make it work better.
        edges = self.edges_finder(pitch)
        first_sample = pitch[0]
        last_sample = pitch[-1]

        if len(np.nonzero(pitch2)[0]) < 2:
            pitch[pitch == 0] = self.PTCH_TYP
        else:
            nz_pitch = pitch2[pitch2 > 0]
            pitch2 = scipy_interp.pchip(np.nonzero(pitch2)[0],
                                        nz_pitch)(range(self.nframes))
            pitch[pitch == 0] = pitch2[pitch == 0]
        if self.SMOOTH > 0:
            pitch = medfilt(pitch, self.SMOOTH_FACTOR)
        try:
            if first_sample == 0:
                pitch[:edges[0]-1] = pitch[edges[0]]
            if last_sample == 0:
                pitch[edges[-1]+1:] = pitch[edges[-1]]
        except:
            pass
        self.samp_interp = pitch
Example #7
    def test_nag(self):
        # Example from NAG C implementation,
        # http://nag.com/numeric/cl/nagdoc_cl25/html/e01/e01bec.html
        # suggested in gh-5326 as a smoke test for the way the derivatives
        # are computed (see also gh-3453)
        from scipy._lib.six import StringIO
        dataStr = '''
          7.99   0.00000E+0
          8.09   0.27643E-4
          8.19   0.43750E-1
          8.70   0.16918E+0
          9.20   0.46943E+0
         10.00   0.94374E+0
         12.00   0.99864E+0
         15.00   0.99992E+0
         20.00   0.99999E+0
        '''
        data = np.loadtxt(StringIO(dataStr))
        pch = pchip(data[:,0], data[:,1])

        resultStr = '''
           7.9900       0.0000
           9.1910       0.4640
          10.3920       0.9645
          11.5930       0.9965
          12.7940       0.9992
          13.9950       0.9998
          15.1960       0.9999
          16.3970       1.0000
          17.5980       1.0000
          18.7990       1.0000
          20.0000       1.0000
        '''
        result = np.loadtxt(StringIO(resultStr))
        assert_allclose(result[:,1], pch(result[:,0]), rtol=0., atol=5e-5)
Example #8
def pchipPlot(X, Y, x_new, **args):
    title = 'pchip_plot'
    if(args.has_key('wn')):
        if(type(args['wn']) == int):
            title = title+str(args['wn'])
            plt.figure(title)
    else:
        plt.figure(title)
    pc = pchip(X,Y)
    plt.plot(X,Y, linestyle = '-')
    plt.plot(x_new, pc(x_new), linestyle = '--')
    plt.legend(['original', 'Spline pchip '], loc='best')
    plt.ylabel(r'epsilon', size = 12)
    a=plt.gca()
    a.set_yscale('log')
    a.set_xscale('log')
    # Here I'm going to save the file on disk if the
    # user wants to, and show the plot on the screen
    if(args.has_key('save')):
        if(type(args['save']) == bool and args['save'] ==  True):
            plt.savefig(title.strip()+'.png')
            plt.draw()
    if(args.has_key('view')):
        if(type(args['view']) == bool and args['view'] == True):
            plt.show()
Example #9
    def solve_nonlinear(self, params, unknowns, resids):

        # we need to dig into the _ByObjWrapper val to get the array
        # values out
        # pf_in = {name: val['val'].val for name, val in params.iteritems()}
        pf_in = {}
        pf_in['s'] = params['s']
        pf_in['x'] = params['x']
        pf_in['y'] = params['y']
        pf_in['z'] = params['z']
        pf_in['rot_x'] = params['rot_x']
        pf_in['rot_y'] = params['rot_y']
        pf_in['rot_z'] = params['rot_z']
        pf_in['chord'] = params['chord']
        pf_in['rthick'] = params['rthick']
        pf_in['p_le'] = params['p_le']

        if _PGL_installed:
            pf = redistribute_planform(pf_in, s=self.s_new, spline_type=self.spline_type)
        else:
            pf = {}
            for k, v in pf_in.iteritems():
                spl = pchip(pf_in['s'], v)
                pf[k] = spl(self.s_new)

        for k, v in pf.iteritems():
            unknowns[k+self._suffix] = v
        unknowns['athick'+self._suffix] = pf['chord'] * pf['rthick']
Example #10
    def solve_nonlinear(self, params, unknowns, resids):

        # we need to dig into the _ByObjWrapper val to get the array
        # values out
        # pf_in = {name: val['val'].val for name, val in params.iteritems()}
        pf_in = {}
        pf_in["s"] = params["s"]
        pf_in["x"] = params["x"]
        pf_in["y"] = params["y"]
        pf_in["z"] = params["z"]
        pf_in["rot_x"] = params["rot_x"]
        pf_in["rot_y"] = params["rot_y"]
        pf_in["rot_z"] = params["rot_z"]
        pf_in["chord"] = params["chord"]
        pf_in["rthick"] = params["rthick"]
        pf_in["p_le"] = params["p_le"]

        if _PGL_installed:
            pf = redistribute_planform(pf_in, s=self.s_new, spline_type=self.spline_type)
        else:
            pf = {}
            for k, v in pf_in.iteritems():
                spl = pchip(pf_in["s"], v)
                pf[k] = spl(self.s_new)

        for k, v in pf.iteritems():
            unknowns[k + self._suffix] = v
        unknowns["athick" + self._suffix] = pf["chord"] * pf["rthick"]
Example #11
 def __init__(self, dz, w1, w2, w3, a, b):
     # INFO 1kcal = 4184e20 kg A^2 s^-2
     # TODO check bounds of a, b
     self.dz = dz
     self.x = np.array([-dz, -dz+1, -a, -b, 0, b, a, dz-1, dz])
     self.y = np.array([0, 0, w1, w2, w3, w2, w1, 0, 0])
     self.pmf = pchip(self.x, self.y)
     self.derivative = self.pmf.derivative(1)
Example #12
def splinePlot(p, X, Y, cl, l):
  p.plot(X, Y, 'o', mfc=cl, c=cl)
  #f = interp1d(X, Y, kind='pchip')
  #f = splrep(X, Y, s=0)
  f = pchip(X, Y)
  nx = np.linspace(X[0], X[-1], 100)
  ny = [f(x) for x in nx]
  p.plot(nx, ny, '-', color=cl, label=l)
Example #13
 def test_two_points(self):
     # regression test for gh-6222: pchip([0, 1], [0, 1]) fails because
     # it tries to use a three-point scheme to estimate edge derivatives,
     # while there are only two points available.
     # Instead, it should construct a linear interpolator.
     x = np.linspace(0, 1, 11)
     p = pchip([0, 1], [0, 2])
     assert_allclose(p(x), 2*x, atol=1e-15)
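With only two points the interpolator above degenerates to the straight line through them, so both the values and the (constant) slope can be checked. A small sketch of that, assuming a scipy version that includes the gh-6222 fix:

import numpy as np
from scipy.interpolate import pchip

p = pchip([0.0, 1.0], [0.0, 2.0])
print(p(0.25))      # 0.5, on the straight line
print(p(0.25, 1))   # slope is 2.0 everywhere on [0, 1]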
Example #14
 def __call__(self, z):
     """Parameters: z is a number, sequence or array.
     This method makes an instance f of LinInterp callable,
     so f(z) returns the interpolation value(s) at z.
     """
     if self.kind == 'pchip':
         return pchip(self.X, self.Y)(z)
     else:
         return interp1d(self.X, self.Y, kind=self.kind,
                         bounds_error=False)(z)
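This `__call__` assumes the instance carries `X`, `Y` and `kind` attributes (the docstring names the class `LinInterp`). A self-contained sketch of such a wrapper, with the constructor filled in as an assumption:

import numpy as np
from scipy.interpolate import pchip, interp1d

class LinInterp:
    # hypothetical constructor; only X, Y and kind are used by __call__
    def __init__(self, X, Y, kind='pchip'):
        self.X, self.Y, self.kind = np.asarray(X), np.asarray(Y), kind

    def __call__(self, z):
        if self.kind == 'pchip':
            return pchip(self.X, self.Y)(z)
        return interp1d(self.X, self.Y, kind=self.kind, bounds_error=False)(z)

f = LinInterp([0.0, 1.0, 2.0, 3.0], [0.0, 1.0, 4.0, 9.0])
print(f(1.5))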
Example #15
    def execute(self):

        self.pfOut.s = self.x.copy()
        self.pfOut.blade_length = self.pfIn.blade_length
        self.pfIn._compute_s()
        for name in self.pfIn.list_vars():
            var = getattr(self.pfIn, name)
            if not isinstance(var, np.ndarray): continue
            tck = pchip(self.pfIn.s, var)
            newvar = tck(self.x) 
            setattr(self.pfOut, name, newvar)
Example #16
    def test_all_zeros(self):
        x = np.arange(10)
        y = np.zeros_like(x)

        # this should work and not generate any warnings
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            pch = pchip(x, y)

        xx = np.linspace(0, 9, 101)
        assert_equal(pch(xx), 0.)
Example #17
def itpl(PTS,numPTS,degree,sID,eID):
    t=range(len(PTS))
    ipl_t=np.linspace(sID,eID,numPTS*(eID-sID)/(len(PTS)-1)+1)
    newX=pchip(t,[row[0] for row in PTS])(ipl_t)
    newY=pchip(t,[row[1] for row in PTS])(ipl_t)
    newZ=pchip(t,[row[2] for row in PTS])(ipl_t)

    if degree==1:
        newX=interp1d(t,[row[0] for row in PTS],kind='slinear')(ipl_t)
        newY=interp1d(t,[row[1] for row in PTS],kind='slinear')(ipl_t)
        newZ=interp1d(t,[row[2] for row in PTS],kind='slinear')(ipl_t)
    elif degree==2:
        newX=interp1d(t,[row[0] for row in PTS],kind='quadratic')(ipl_t)
        newY=interp1d(t,[row[1] for row in PTS],kind='quadratic')(ipl_t)
        newZ=interp1d(t,[row[2] for row in PTS],kind='quadratic')(ipl_t)
    elif degree==3:
        newX=interp1d(t,[row[0] for row in PTS],kind='cubic')(ipl_t)
        newY=interp1d(t,[row[1] for row in PTS],kind='cubic')(ipl_t)
        newZ=interp1d(t,[row[2] for row in PTS],kind='cubic')(ipl_t)
    elif degree==4:
        newX=pchip(t,[row[0] for row in PTS])(ipl_t)
        newY=pchip(t,[row[1] for row in PTS])(ipl_t)
        newZ=pchip(t,[row[2] for row in PTS])(ipl_t)
    else:
        newX=interp1d(t,[row[0] for row in PTS],kind='quadratic')(ipl_t)
        newY=interp1d(t,[row[1] for row in PTS],kind='quadratic')(ipl_t)
        newZ=interp1d(t,[row[2] for row in PTS],kind='quadratic')(ipl_t)

    outPTS=[]
    for i in range(0,len(newX)):
        outPTS.append([newX[i],newY[i],newZ[i]])

    return outPTS
Example #18
    def __call__(self, x, Cx, C):
        """
        params:
        ----------
        x: array
            array with new x-distribution
        Cx: array
            array with x-coordinates of spline control points
        C: array
            array with y-coordinates of spline control points

        returns
        ---------
        ynew: array
            resampled points
        """
        spl = pchip(Cx, C)
        return spl(x)
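A usage sketch of the resampling this `__call__` performs, with made-up control points; `pchip` builds the interpolant through (Cx, C) and evaluates it on the new x-distribution:

import numpy as np
from scipy.interpolate import pchip

Cx = np.array([0.0, 0.3, 0.7, 1.0])   # made-up control point x-coordinates
C = np.array([0.0, 0.2, 0.8, 1.0])    # made-up control point y-coordinates
x = np.linspace(0.0, 1.0, 21)         # new x-distribution

ynew = pchip(Cx, C)(x)                # same operation as spl = pchip(Cx, C); spl(x)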
Example #19
    def plot(self, full=True, dots=False, average=0, interpolated=0):
        results = gym.monitoring.monitor.load_results(self.outdir)
        data = results[self.data_key]
        avg_data = []

        if full:
            plt.plot(data, color="blue")
        if dots:
            plt.plot(data, ".", color="black")
        if average > 0:
            average = int(average)
            for i, val in enumerate(data):
                if i % average == 0:
                    if (i + average) < len(data) + average:
                        avg = sum(data[i : i + average]) / average
                        avg_data.append(avg)
            new_data = expand(avg_data, average)
            plt.plot(new_data, color="red", linewidth=2.5)
        if interpolated > 0:
            avg_data = []
            avg_data_points = []
            n = len(data) / interpolated
            if n == 0:
                n = 1
            data_fix = 0
            for i, val in enumerate(data):
                if i % n == 0:
                    if (i + n) <= len(data) + n:
                        avg = sum(data[i : i + n]) / n
                        avg_data.append(avg)
                        avg_data_points.append(i)
                if (i + n) == len(data):
                    data_fix = n

            x = np.arange(len(avg_data))
            y = np.array(avg_data)

            interp = pchip(avg_data_points, avg_data)
            xx = np.linspace(0, len(data) - data_fix, 1000)
            plt.plot(xx, interp(xx), color="green", linewidth=3.5)

        # pause so matplotlib will display
        # may want to figure out matplotlib animation or use a different library in the future
        plt.pause(0.000001)
Example #20
def calc_spec(cube, nparams):

    # add on H2 and He to VMRs

    vmr = cube[:nvmr]
    vmrsum = np.sum(10**vmr)
    h2 = h2p * (1 - vmrsum)
    he = hep * (1 - vmrsum)
    vmr = np.append(vmr, [np.log10(h2), np.log10(he)])
    vmr = np.tile(vmr, (len(pres), 1))

    # calculate temperature and height profiles

    mass, rad, T0, alpha, Teff, tau0, n, Pbase, FSH, opac = cube[nvmr:nparams]
    tau0 = 10**tau0
    FSH = 10**FSH
    # vmr, mass, rad, T0, alpha, Teff, tau0, n, Pbase, FSH, opac = pickle.load(open('prob.pic','rb'))
    # pdb.set_trace()
    (H, temp) = robcat.tempprof(T0, alpha, Teff, tau0, n, vmr, mass, rad)
    if all(H == np.zeros(len(temp))):
        return np.zeros(len(spec))

    # Find height of cloud

    Hbase = pchip(10**pres[::-1], H[::-1])
    Hb = np.asscalar(Hbase(10**Pbase))

    # find folder to calculate in

    ith = 999

    # run model
    # pickle.dump( (vmr, mass, rad, T0, alpha, Teff, tau0, n, Pbase, FSH, opac) , open('dump.pic', 'wb'))
    # pdb.set_trace()

    model = nemesisPyMult.nemesispymult(runname, len(spec), len(temp),
                                        vmr.shape[1], ith, temp, vmr, mass,
                                        rad, H, Hb, opac, FSH, prad, pvar,
                                        pimag, preal, Hb2, opac2, FSH2, nav,
                                        flat, flon, solzen, emzen, azi, wt)
    # loglikelihood= -0.5*( np.sum( (spec-model)**2/yerr**2 ) )
    # pdb.set_trace()
    return model
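The cloud-height lookup above reverses `pres` and `H` because PCHIP needs strictly increasing x, and pressure grids are usually stored top-down. A small sketch of that reversal trick with made-up values (the real `pres`, `H` and `Pbase` come from the retrieval code):

import numpy as np
from scipy.interpolate import pchip

pres = np.array([2.0, 1.0, 0.0, -1.0])   # log10(pressure), decreasing with height
H = np.array([0.0, 15.0, 32.0, 51.0])    # made-up heights
Pbase = 0.5

Hbase = pchip(10**pres[::-1], H[::-1])   # reversed so x is strictly increasing
Hb = float(Hbase(10**Pbase))             # height of the cloud base
print(Hb)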
Example #21
def toSky(cs):
    c1 = cs.T[0]
    c2 = cs.T[1]
    c3 = cs.T[2]
    r = np.sqrt(c1**2. + c2**2. + c3**2.)
    dec = np.arcsin(c3 / r) / D2R
    ra = np.arccos(c1 / np.sqrt(c1**2. + c2**2.)) / D2R
    zmn = z_at_value(Kos.comoving_distance, np.amin(r) * u.Mpc)
    zmx = z_at_value(Kos.comoving_distance, np.amax(r) * u.Mpc)
    zmn = zmn - (zstp + zmn % zstp)
    zmx = zmx + (2 * zstp - zmx % zstp)
    ct = np.array([
        np.linspace(zmn, zmx, int(np.ceil(zmn / zstp))),
        Kos.comoving_distance(np.linspace(zmn, zmx,
                                          int(np.ceil(zmn / zstp)))).value
    ]).T
    r2z = interpolate.pchip(*ct[:, ::-1].T)
    z = r2z(r)
    return z, ra, dec
Example #22
    def plot(self, full=True, dots=False, average=0, interpolated=0):
        results = gym.monitoring.load_results(self.outdir)
        data = results[self.data_key]
        avg_data = []

        if full:
            plt.plot(data, color='blue')
        if dots:
            plt.plot(data, '.', color='black')
        if average > 0:
            average = int(average)
            for i, val in enumerate(data):
                if i % average == 0:
                    if (i + average) < len(data) + average:
                        avg = sum(data[i:i + average]) / average
                        avg_data.append(avg)
            new_data = expand(avg_data, average)
            plt.plot(new_data, color='red', linewidth=2.5)
        if interpolated > 0:
            avg_data = []
            avg_data_points = []
            n = len(data) / interpolated
            if n == 0:
                n = 1
            data_fix = 0
            for i, val in enumerate(data):
                if i % n == 0:
                    if (i + n) <= len(data) + n:
                        avg = sum(data[i:i + n]) / n
                        avg_data.append(avg)
                        avg_data_points.append(i)
                if (i + n) == len(data):
                    data_fix = n

            x = np.arange(len(avg_data))
            y = np.array(avg_data)
            #print x
            #print y
            #print str(len(avg_data)*n)
            #print data_fix
            interp = pchip(avg_data_points, avg_data)
            xx = np.linspace(0, len(data) - data_fix, 1000)
            plt.plot(xx, interp(xx), color='green', linewidth=3.5)
Example #23
def geta_zju_pchip(x,
                   alpha, beta, delta,
                   kgrid,
                   ppgrid=None,
                   is_opt=True):
    m = kgrid.shape[0]
    if ppgrid is None:
        ppgrid = kgrid
    pp = spi.pchip(ppgrid, x)
    kp = pp(kgrid)
    c = np.power(kgrid, alpha) + (1 - delta) * kgrid - kp
    kpp = pp(kp)
    cp = np.power(kp, alpha) + (1 - delta) * kp - kpp
    r = np.power(c, -1) - beta * np.power(cp, -1) * \
        (alpha * np.power(kp, alpha - 1) + 1 - delta)
    if is_opt:
        return r
    else:
        return pp, kp, kpp, c, cp, r
Example #24
def toSky(cs, H0, Om_m, zstep):
    """Convert redshift, RA, and Dec to comoving coordinates.

    Parameters
    ----------
    cs : ndarray
        Comoving xyz-coordinates table [x,y,z], assuming input cosmology.
    H0 : float
        Hubble's constant in km/s/Mpc.
    Om_m : float
        Value of matter density.
    zstep : float
        Redshift step size for converting distance to redshift.

    Returns
    -------
    z : float
        Object redshift.
    ra : float
        Object right ascension, in decimal degrees.
    dec : float
        Object declination, in decimal degrees.
    """
    Kos = FlatLambdaCDM(H0, Om_m)
    c1 = cs.T[0]
    c2 = cs.T[1]
    c3 = cs.T[2]
    r = np.sqrt(c1**2. + c2**2. + c3**2.)
    dec = np.arcsin(c3 / r) / D2R
    ra = (np.arccos(c1 / np.sqrt(c1**2. + c2**2.)) * np.sign(c2) / D2R) % 360
    zmn = z_at_value(Kos.comoving_distance, np.amin(r) * u.Mpc)
    zmx = z_at_value(Kos.comoving_distance, np.amax(r) * u.Mpc)
    zmn = zmn - (zstep + zmn % zstep)
    zmx = zmx + (2 * zstep - zmx % zstep)
    ct = np.array([
        np.linspace(zmn, zmx, int(np.ceil(zmn / zstep))),
        Kos.comoving_distance(np.linspace(zmn, zmx,
                                          int(np.ceil(zmn / zstep)))).value
    ]).T
    r2z = interpolate.pchip(*ct[:, ::-1].T)
    z = r2z(r)
    #z = H0*r/c
    return z, ra, dec
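The `r2z` line above builds the inverse lookup: `ct` is an N×2 table of (redshift, comoving distance), and `ct[:, ::-1].T` unpacks as (distance row, redshift row), so `pchip` interpolates redshift as a function of distance. A sketch with a made-up monotone table:

import numpy as np
from scipy import interpolate

ct = np.array([[0.00,   0.0],    # made-up (z, comoving distance) pairs
               [0.05, 210.0],
               [0.10, 420.0],
               [0.15, 630.0]])

r2z = interpolate.pchip(*ct[:, ::-1].T)  # distance -> redshift interpolator
print(r2z(300.0))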
Example #25
def interpolate_zeros(params, method='pchip', min_val=0):
    """
    Interpolate 0 values
    :param params: 1D data vector
    :param method:
    :param factor: factor for interpolation (must be integer)
    :return: interpolated 1D vector by a given factor
    """

    voiced = np.array(params, float)
    for i in range(0, len(voiced)):
        if voiced[i] == min_val:
            voiced[i] = np.nan

    # last_voiced = len(params) - np.nanargmax(params[::-1] > 0)

    if np.isnan(voiced[-1]):
        voiced[-1] = np.nanmin(voiced)
    if np.isnan(voiced[0]):
        voiced[0] = np.nanmean(voiced)

    not_nan = np.logical_not(np.isnan(voiced))

    indices = np.arange(len(voiced))
    if method == 'spline':
        interp = interpolate.UnivariateSpline(indices[not_nan],
                                              voiced[not_nan],
                                              k=2,
                                              s=0)
        # return voiced parts intact
        smoothed = interp(indices)
        for i in range(0, len(smoothed)):
            if not np.isnan(voiced[i]):
                smoothed[i] = params[i]

        return smoothed

    elif method == 'pchip':
        interp = interpolate.pchip(indices[not_nan], voiced[not_nan])
    else:
        interp = interpolate.interp1d(indices[not_nan], voiced[not_nan],
                                      method)
    return interp(indices)
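A usage sketch, assuming the function above and its `interpolate` import are in scope; the zero frames in a made-up f0 track are filled in by PCHIP:

import numpy as np

f0 = np.array([0.0, 0.0, 120.0, 0.0, 118.0, 115.0, 0.0, 110.0])  # made-up track
f0_filled = interpolate_zeros(f0, method='pchip')
print(f0_filled)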
Example #26
    def __call__(self, x, Cx, C):
        """
        params:
        ----------
        x: array
            array with new x-distribution
        Cx: array
            array with x-coordinates of spline control points
        C: array
            array with y-coordinates of spline control points

        returns
        ---------
        ynew: array
            resampled points
        """

        spl = pchip(Cx, C)
        return spl(x)
Example #27
File: path.py Project: halvorot/gym-auv
    def __init__(self, waypoints:list) -> None:
        """Initializes path based on specified waypoints."""

        self.init_waypoints = waypoints.copy()

        for _ in range(3):
            self._arclengths = _arc_len(waypoints)
            path_coords = interpolate.pchip(x=self._arclengths, y=waypoints, axis=1)
            path_derivatives = path_coords.derivative()
            path_dderivatives = path_derivatives.derivative()
            waypoints = path_coords(np.linspace(self._arclengths[0], self._arclengths[-1], 1000))

        self._waypoints = waypoints.copy()
        self._path_coords = path_coords
        self._path_derivatives = path_derivatives
        self._path_dderivatives = path_dderivatives

        S = np.linspace(0, self.length, 10*self.length)
        self._points = np.transpose(self._path_coords(S))
        self._linestring = shapely.geometry.LineString(self._points)
Example #28
    def plot(self, env, full=True, dots=False, average=0, interpolated=0):
        if self.data_key is rewards_key:
            data = gym.wrappers.Monitor.get_episode_rewards(env)
        else:
            data = gym.wrappers.Monitor.get_episode_lengths(env)

        avg_data = []
        plt.clf()
        if full:
            plt.plot(data, color=self.line_color)
        if dots:
            plt.plot(data, '.', color='black')
        if average > 0:
            average = int(average)
            for i, val in enumerate(data):
                if i % average == 0:
                    if (i + average) <= len(data):
                        avg = sum(data[i:i + average]) / average
                        avg_data.append(avg)
            new_data = self.expand(avg_data, average)
            plt.plot(new_data, color='red', linewidth=2.5)
        if interpolated > 0:
            avg_data = []
            avg_data_points = []
            n = len(data) / interpolated
            if n == 0:
                n = 1
            for i, val in enumerate(data):
                if i % n == 0:
                    if (i + n) <= len(data) + n:
                        avg = sum(data[i:i + n]) / n
                        avg_data.append(avg)
                        avg_data_points.append(i)

            interp = pchip(np.array(avg_data_points), np.array(avg_data))
            xx = np.linspace(0, len(data) - 1, 1000)
            plt.plot(xx, interp(xx), color='green', linewidth=3.5)

        # pause so matplotlib will display
        # may want to figure out matplotlib animation or use a different library in the future
        plt.pause(0.000001)
Example #29
def get_cluster_interp(depth_values, x_values, bandwidth=10):
    # Clustering
    depth_values_v = np.vstack(depth_values)
    clustering = MeanShift(bandwidth=bandwidth).fit(depth_values_v)
    cluster_labels = clustering.labels_

    # Average by cluster
    depth_clusters = clustering.cluster_centers_.ravel()
    x_clusters = np.full_like(depth_clusters, np.nan)
    for i in range(len(depth_clusters)):
        x_clusters[i] = np.mean(x_values[cluster_labels == i])

    # Sort arrays by depth
    depth_index = np.argsort(depth_clusters)
    x_clusters = x_clusters[depth_index]
    depth_clusters = depth_clusters[depth_index]

    # Do a PCHIP interpolation
    interpolator = interpolate.pchip(depth_clusters, x_clusters)
    
    return depth_clusters, x_clusters, interpolator
Example #30
    def __init__(self, data, Length, D1):
        self._coilable = None
        self._maxStrain = None
        
        self._stress_values_trimmed = None
        self._strain_values_trimmed = None
        self._max_location_stress = None
        self._max_location_strain = None
        
        self._vector = None
        self._curve = None
        self._energy_abs = None
        self._buckling_load = None
        
        
        self._coilable = data['coilable'][0] 
        self._maxStrain = Length/Length
        temp_U = data['riks_RP_Zplus_U'][:-2,2] 
        temp_F = data['riks_RP_Zplus_RF'][:-2,2] 
        stress_values = 1000*(temp_F/(np.pi*0.25*(D1)**2))
        strain_values = temp_U/Length   
        vec_indices = np.where(stress_values <= 0)

        self._stress_values_trimmed = abs(stress_values[vec_indices])
        self._strain_values_trimmed = abs(strain_values[vec_indices])
        self._max_location_stress = np.argmax(self._stress_values_trimmed)
        self._max_location_strain = np.argmax(self._strain_values_trimmed)
        #if np.max(strain_values_trimmed[max_location_strain])> np.min([0.8,maxStrain]):                        

        if self._strain_values_trimmed[-1]<1.0:       
            self._strain_values_trimmed = np.append(self._strain_values_trimmed,1.0)
            self._stress_values_trimmed = np.append(self._stress_values_trimmed,0.0)
        vector = np.linspace(np.min(self._strain_values_trimmed),np.max(self._maxStrain),10000)
        interp = interpolate.pchip(self._strain_values_trimmed,self._stress_values_trimmed)                                                             
        curve = interp(vector)
        self._vector = vector[:-1]
        self._curve = curve[:-1]

        self._energy_abs = simps(self._curve,x=self._vector)
        self._buckling_load = data['P_p3_crit'][0]/(np.pi*0.25*(D1)**2)*1000 
Example #31
def z_pinterp(var, pres, std_z):
    """ deals with masked ones
    """
    if np.count_nonzero(~var.mask) < 2:
        bad = np.empty((len(std_z), ))
        bad.fill(np.nan)
        return bad
    else:
        if any(pres[1:] < pres[:-1]):
            sidx = np.argsort(pres)
            pres, var = pres[sidx], var[sidx]
        if np.ma.isMaskedArray(pres):
            if len(pres.compressed()) == len(pres):
                pres = pres.compressed()
            else:
                # make sure pres mask and var mask are same
                newmask = np.logical_or(pres.mask, var.mask)
                pres.mask = newmask
                var.mask = newmask
                pres = pres.filled()
        fp = pchip(pres[~var.mask], var.compressed(), extrapolate=False)
        return fp(std_z)
Example #32
def getcanonicalhrf(duration, tr):
    # inputs
    if duration == 0:
        duration = 0.1

    # obtain canonical response to a 0.1-s stimulus
    hrf = basichrf()

    # convolve to get the predicted response to the desired stimulus duration
    trold = 0.1
    hrf = np.convolve(hrf, np.ones(int(np.max([1,
                                               np.round(duration / trold)]))))

    sampler = np.asarray(np.arange(0, int((hrf.shape[0] - 1) * trold), tr))

    # resample to desired TR
    hrf = pchip(np.asarray(range(hrf.shape[0])) * trold, hrf)(sampler)

    # make the peak equal to one
    hrf = hrf / np.max(hrf)

    return hrf
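The last step above resamples the convolved response from its native 0.1 s grid onto the scan TR with PCHIP. A stripped-down sketch of that resampling step with a made-up signal:

import numpy as np
from scipy.interpolate import pchip

trold, tr = 0.1, 2.0
sig = np.sin(np.linspace(0.0, np.pi, 100))              # made-up response
t_old = np.arange(sig.shape[0]) * trold                 # native 0.1 s time axis
sampler = np.arange(0, int((sig.shape[0] - 1) * trold), tr)
resampled = pchip(t_old, sig)(sampler)                  # values on the TR grid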
Example #33
def env(dur, f, n, file, pd):
    sampFreq, sndd = wavfile.read(file, 'rb')

    snd = sndd[:, 0]

    # espectrograma

    freq, times, sx = signal.spectrogram(snd,
                                         fs=sampFreq,
                                         window='hanning',
                                         nperseg=int(round(sampFreq /
                                                           (1 + f))),
                                         detrend=False,
                                         scaling='spectrum')

    # calculo envolvente para cada armónico

    s = sx[int(n), :]

    index, value = max(enumerate(s), key=operator.itemgetter(1))

    s = s / s[index]

    peaks, dk = find_peaks(s, distance=int(np.round(len(snd) / pd)))

    si = np.concatenate(([0], s[peaks], [0]))
    t = np.concatenate(([0], times[peaks], [len(snd) / sampFreq]))

    pk = pchip(t, si)
    if (dur == 0):
        return [0]

    xnew = np.arange(0, t[len(t) - 1], t[len(t) - 1] / (dur * sampFreq))

    pnew = pk(xnew)

    pnew = (abs(pnew) + pnew) / 2

    return pnew
Example #34
    def execute(self):
        self.dict = HAWC2InputDict()
        self.dict.read(self.htc_master_file)
        self.htc = self.dict.htc
        self.vartrees.body_order = self.dict.body_order

        for section in self.htc:
            if section.name == 'simulation':
                self.add_simulation(section)
            elif section.name == 'wind':
                self.add_wind(section)
            elif section.name == 'aero':
                self.add_aero(section)
            elif section.name == 'aerodrag':
                self.add_aerodrag(section)
            elif section.name == 'new_htc_structure':
                self.add_structure(section)
            elif section.name == 'output':
                self.add_output(section)
            elif section.name == 'dll':
                self.add_dlls(section)
            elif section.name == 'hawcstab2':
                self.add_hawcstab2(section)

        # count number of blades
        for iblade in range(1, 10):
            if 'blade' + str(iblade) not in self.vartrees.body_order:
                self.vartrees.rotor.nblades = iblade - 1
                break

        # copy blade twist from c2_def to blade_ae vartree
        if hasattr(self.vartrees.main_bodies, 'blade1'):
            from scipy.interpolate import pchip
            c12 = self.vartrees.main_bodies.blade1.c12axis
            tck = pchip(c12[:, 2], c12[:, 3])
            twist = tck(self.vartrees.blade_ae.s)
            self.vartrees.blade_ae.twist = twist
            self.vartrees.blade_ae.c12axis = c12.copy()
            self.vartrees.blade_structure = self.vartrees.main_bodies.blade1.beam_structure
Example #35
    def execute(self):
        self.dict = HAWC2InputDict()
        self.dict.read(self.htc_master_file)
        self.htc = self.dict.htc
        self.vartrees.body_order = self.dict.body_order

        for section in self.htc:
            if section.name == 'simulation':
                self.add_simulation(section)
            elif section.name == 'wind':
                self.add_wind(section)
            elif section.name == 'aero':
                self.add_aero(section)
            elif section.name == 'aerodrag':
                self.add_aerodrag(section)
            elif section.name == 'new_htc_structure':
                self.add_structure(section)
            elif section.name == 'output':
                self.add_output(section)
            elif section.name == 'dll':
                self.add_dlls(section)
            elif section.name == 'hawcstab2':
                self.add_hawcstab2(section)

        # count number of blades
        for iblade in range(1, 10):
            if 'blade'+str(iblade) not in self.vartrees.body_order:
                self.vartrees.rotor.nblades = iblade-1
                break

        # copy blade twist from c2_def to blade_ae vartree
        if hasattr(self.vartrees.main_bodies, 'blade1'):
            from scipy.interpolate import pchip
            c12 = self.vartrees.main_bodies.blade1.c12axis
            tck = pchip(c12[:, 2], c12[:, 3])
            twist = tck(self.vartrees.blade_ae.s)
            self.vartrees.blade_ae.twist = twist
            self.vartrees.blade_ae.c12axis = c12.copy()
            self.vartrees.blade_structure = self.vartrees.main_bodies.blade1.beam_structure
Example #36
def fitter(data, **kwargs):
    def listify_dicts(dicts):
        if len(dicts) == 0:
            return {}
        ld = {}
        for d in dicts:
            for k in d.keys():
                if k not in ld.keys():
                    ld[k] = [d[k]]
                else:
                    ld[k].append(d[k])
        return ld

    if dw.zoo.is_multiindex_dataframe(data):
        return listify_dicts([fitter(d, **kwargs) for d in dw.unstack(data)])
    elif type(data) is list:
        return listify_dicts([fitter(d, **kwargs) for d in data])

    transpose = kwargs.pop('transpose', False)
    assert 'axis' in kwargs.keys(), ValueError('Must specify axis')

    if kwargs['axis'] == 1:
        return fitter(data.T, **dw.core.update_dict(kwargs, {'axis': int(not kwargs['axis']), 'transpose': True}))

    assert kwargs['axis'] == 0, ValueError('invalid transformation')

    if dw.zoo.is_multiindex_dataframe(data):
        x = np.array(data.index.levels[-1])
    else:
        x = data.index.values

    resampled_x = np.linspace(np.min(x), np.max(x), num=kwargs['n_samples'])
    pchip = pd.Series(index=data.columns)
    for c in data.columns:
        pchip[c] = interpolate.pchip(x, data[c].values)

    return {'x': x, 'resampled_x': resampled_x, 'pchip': pchip, 'transpose': transpose, 'axis': kwargs['axis'],
            'n_samples': kwargs['n_samples']}
Example #37
    def get_dNdEE(self, mother, daughter):
        """Differential parent-->neutrino (mother--daughter) yield"""
        ihijo = 20
        e_grid = self.mceq.e_grid
        delta = self.mceq.e_widths
        x_range = e_grid[ihijo] / e_grid
        rr = ParticleProperties.rr(mother, daughter)
        dNdEE_edge = ParticleProperties.br_2body(mother, daughter) / (1 - rr)
        dN_mat = self.mceq._decays.get_matrix(
            (ParticleProperties.pdg_id[mother], 0),
            (ParticleProperties.pdg_id[daughter], 0))
        dNdEE = dN_mat[ihijo] * e_grid / delta
        logx = np.log10(x_range)
        logx_width = -np.diff(logx)[0]
        good = (logx + logx_width / 2 < np.log10(1 - rr)) & (x_range >= 5.e-2)

        x_low = x_range[x_range < 5e-2]
        dNdEE_low = np.array([dNdEE[good][-1]] * x_low.size)
        dNdEE_interp = lambda x_: interpolate.pchip(
            np.concatenate([[1 - rr], x_range[good], x_low])[::-1],
            np.concatenate([[dNdEE_edge], dNdEE[good], dNdEE_low])[::-1],
            extrapolate=True)(x_) * np.heaviside(1 - rr - x_, 1)
        return x_range, dNdEE, dNdEE_interp
Example #38
    def pchip6h_1h(time_index, windpredict1h):
        # got data6h data
        data6h = pchip6h(time_index, windpredict1h)
        # convert 6hourly to hourly
        # prepare data
        time_index_6h = data6h.iloc[:, 0]
        winddata_6h = data6h.iloc[:, 1]
        pchip_obj_6h = pchip(time_index_6h, winddata_6h)
        # convert to a 15-minute time_index
        time_index_1h = pd.date_range(time_index_6h.iloc[0],
                                      time_index_6h.iloc[-1],
                                      freq="15min")
        # test & predict
        windpredict1h = pchip_obj_6h(time_index_1h)

        print("MMMiiiiiiiiiiiiiii of windpredict15m:")
        print(windpredict1h.min())
        # windpredict1h = pchip_obj_after1h(time_index_1h)
        time_index_1h = pd.Series(time_index_1h)
        windpredict1h = pd.Series(windpredict1h)
        framelist = [time_index_1h, windpredict1h]
        data1h = pd.concat(framelist, axis=1)
        return data1h
Example #39
File: a4.py Project: Sheldonsu28/CSC336
def Q5():
    f_cubic = interpolate.CubicSpline(years, pops, bc_type='natural')
    xs = np.linspace(1900, 1990, 1000)
    ys_cubic = f_cubic(xs)

    matrix_4 = np.vander((years - 1940) / 40)
    coeffs = np.linalg.solve(matrix_4, pops)
    ys_poly = np.polyval(coeffs, (xs - 1940) / 40)

    f_hermit = interpolate.pchip(years, pops)
    ys_hermit = f_hermit(xs)

    plt.figure()
    plt.plot(xs, ys_hermit, label="Interpolation with PCHIP")
    plt.plot(xs, ys_cubic, label="Interpolation with CubicSpline")
    print(ys_hermit[-1], ys_cubic[-1])
    plt.plot(xs, ys_poly, label="Interpolation with Poly fit")
    plt.plot(1990, 248709873, "ro", label="Real data at 1990")
    plt.title("Interpolate population up to 1990")
    plt.xlabel("Years")
    plt.ylabel("Population")
    plt.legend()
    plt.savefig("Q5.png")
    plt.show()
Example #40
    def pchip6h(time_index, windpredict1h):
        pchip_obj_after1h = pchip(time_index, windpredict1h)
        # convert to a 6-hourly time_index
        time_index_2 = pd.date_range(time_index.iloc[0],
                                     time_index.iloc[-1],
                                     freq="360min")
        # test & predict, dyp
        windpredict6h = pchip_obj_after1h(
            time_index_2)  #* random.uniform(0.9,1.1) - 2 # if < 0 : set to 0.
        # windpredict6h = pchip_obj_after1h(time_index_2) * random.uniform(0.9,1.1) *0.8 # degrades the correlation and affects the max/min values

        # print(windpredict6h.min())

        time_index_2 = pd.Series(time_index_2)
        windpredict6h = pd.Series(windpredict6h)

        framelist = [time_index_2, windpredict6h]
        data6h = pd.concat(framelist, axis=1)
        #print(data6h.iloc[:,0])
        print("average of data6h after mos:")
        print("should be the same as above.")
        print(data6h.mean())
        # data6h = data6h[data6h.windpredict6h > 0]
        return data6h
Example #41
 def test_roots(self):
     # regression test for gh-6357: .roots method should work
     p = pchip([0, 1], [-1, 1])
     r = p.roots()
     assert_allclose(r, 0.5)
Example #42
 def _make_random(self, npts=20):
     np.random.seed(1234)
     xi = np.sort(np.random.random(npts))
     yi = np.random.random(npts)
     return pchip(xi, yi), xi, yi
Example #43
 def pchip_antideriv2(x, y, axis=0):
     return pchip(x, y, axis).derivative(2)
Example #44
 def pchip_deriv(x, y, axis=0):
     return pchip(x, y, axis).derivative()
Example #45
def get_interpolated(x, y):
    # dense x and interpolator for the smooth curve for plotting
    xx = np.linspace(x[0], x[-1], len(x) * 10)
    interp = pchip(x, y)
    return xx, interp(xx)
Example #46
def bdrate(file1, file2, anchorfile):
    if anchorfile:
        anchor = flipud(loadtxt(anchorfile))
    a = loadtxt(file1)
    b = loadtxt(file2)
    a = a[a[:, 0].argsort()]
    b = b[b[:, 0].argsort()]
    a = flipud(a)
    b = flipud(b)
    rates = [0.06, 0.2]
    qa = a[:, 0]
    qb = b[:, 0]
    ra = a[:, 2] * 8. / a[:, 1]
    rb = b[:, 2] * 8. / b[:, 1]
    bdr = zeros((4, 4))
    ret = {}
    for m in range(0, len(met_index)):
        try:
            ya = a[:, 3 + m]
            yb = b[:, 3 + m]
            if anchorfile:
                yr = anchor[:, 3 + m]
            #p0 = interp1d(ra, ya, interp_type)(rates[0]);
            #p1 = interp1d(ra, ya, interp_type)(rates[1]);
            if anchorfile:
                p0 = yr[0]
                p1 = yr[-1]
                yya = ya
                yyb = yb
                rra = ra
                rrb = rb
            else:
                minq = 20
                maxq = 55
                try:
                    # path if quantizers 20 and 55 are in set
                    minqa_index = qa.tolist().index(minq)
                    maxqa_index = qa.tolist().index(maxq)
                    minqb_index = qb.tolist().index(minq)
                    maxqb_index = qb.tolist().index(maxq)
                    yya = ya[maxqa_index:minqa_index + 1]
                    yyb = yb[maxqb_index:minqb_index + 1]
                    rra = ra[maxqa_index:minqa_index + 1]
                    rrb = rb[maxqb_index:minqb_index + 1]
                except ValueError:
                    # path if quantizers 20 and 55 are not found - use
                    # entire range of quantizers found, and fit curve
                    # on all the points, and set q_not_found to print
                    # a warning
                    q_not_found = True
                    minqa_index = -1
                    maxqa_index = 0
                    minqb_index = -1
                    maxqb_index = 0
                    yya = ya
                    yyb = yb
                    rra = ra
                    rrb = rb
                p0 = max(ya[maxqa_index], yb[maxqb_index])
                p1 = min(ya[minqa_index], yb[minqb_index])
            a_rate = pchip(yya, log(rra))(arange(p0, p1,
                                                 abs(p1 - p0) / 5000.0))
            b_rate = pchip(yyb, log(rrb))(arange(p0, p1,
                                                 abs(p1 - p0) / 5000.0))
            if not len(a_rate) or not len(b_rate):
                bdr = NaN
            else:
                bdr = 100 * (exp(mean(b_rate - a_rate)) - 1)
        except ValueError:
            bdr = NaN
        except linalg.linalg.LinAlgError:
            bdr = NaN
        except IndexError:
            bdr = NaN
        if abs(bdr) > 1000:
            bdr = NaN
        ret[m] = bdr
    return ret
Example #47
 def pchip_antideriv(x, y, axis=0):
     return pchip(x, y, axis).derivative()
Example #48
def interpolate_bladestructure(st3d, s_new):
    """
    interpolate a blade structure definition onto
    a new spanwise distribution using pchip

    parameters
    ----------
    st3d: dict
        dictionary with blade structural definition
    s_new: array
        1-d array with new spanwise distribution

    returns
    -------
    st3dn: dict
        blade structural definition interpolated onto s_new distribution
    """

    st3dn = {}
    sorg = st3d['s']
    st3dn['s'] = s_new
    st3dn['version'] = st3d['version']
    st3dn['materials'] = st3d['materials']
    st3dn['matprops'] = st3d['matprops']
    st3dn['failmat'] = st3d['failmat']
    st3dn['failcrit'] = st3d['failcrit']
    st3dn['web_def'] = st3d['web_def']
    st3dn['regions'] = []
    st3dn['webs'] = []

    DPs = np.zeros((s_new.shape[0], st3d['DPs'].shape[1]))
    for i in range(st3d['DPs'].shape[1]):
        tck = pchip(sorg, st3d['DPs'][:, i])
        DPs[:, i] = tck(s_new)
    st3dn['DPs'] = DPs

    for r in st3d['regions']:
        rnew = {}
        rnew['layers'] = r['layers']
        Ts = r['thicknesses']
        As = r['angles']
        tnew = np.zeros((s_new.shape[0], Ts.shape[1]))
        anew = np.zeros((s_new.shape[0], As.shape[1]))
        for i in range(Ts.shape[1]):
            tck = pchip(sorg, Ts[:, i])
            tnew[:, i] = tck(s_new)
            tck = pchip(sorg, As[:, i])
            anew[:, i] = tck(s_new)
        rnew['thicknesses'] = tnew.copy()
        rnew['angles'] = anew.copy()
        st3dn['regions'].append(rnew)
    for r in st3d['webs']:
        rnew = {}
        rnew['layers'] = r['layers']
        Ts = r['thicknesses']
        As = r['angles']
        tnew = np.zeros((s_new.shape[0], Ts.shape[1]))
        anew = np.zeros((s_new.shape[0], As.shape[1]))
        for i in range(Ts.shape[1]):
            tck = pchip(sorg, Ts[:, i])
            tnew[:, i] = tck(s_new)
            tck = pchip(sorg, As[:, i])
            anew[:, i] = tck(s_new)
        rnew['thicknesses'] = tnew.copy()
        rnew['angles'] = anew.copy()
        st3dn['webs'].append(rnew)

    return st3dn
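A minimal usage sketch for the function above, assuming it and its `pchip`/`numpy` imports are in scope. The dictionary below is made up and only carries the keys the function touches, with a single region and no webs:

import numpy as np

s = np.linspace(0.0, 1.0, 5)
region = {'layers': ['uniax'],
          'thicknesses': np.linspace(0.01, 0.002, 5).reshape(-1, 1),
          'angles': np.zeros((5, 1))}
st3d = {'s': s, 'version': 1, 'materials': {}, 'matprops': None,
        'failmat': None, 'failcrit': None, 'web_def': [],
        'DPs': np.linspace(-1.0, 1.0, 5).reshape(-1, 1),
        'regions': [region], 'webs': []}

st3dn = interpolate_bladestructure(st3d, np.linspace(0.0, 1.0, 11))
print(st3dn['DPs'].shape)   # (11, 1)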
Example #49
Lvals=[]


for row in table[:]:
    
    colours.append(abL_2_sRGB(a_val=row['LAB_A'],b_val=row['LAB_B'],L_val=row['LAB_L']))
    tvals.append(row[tvals_in]/100)
    avals.append(row['LAB_A'])
    bvals.append(row['LAB_B'])
    Lvals.append(row['LAB_L'])

h5file.close()


# Create Interpolation functions for a,b,L
a_pchip=interpolate.pchip(tvals,avals)
b_pchip=interpolate.pchip(tvals,bvals)
L_pchip=interpolate.pchip(tvals,Lvals)

pch_Inter=[a_pchip,b_pchip,L_pchip]




# Start plotting
# Disable depth shading
plt.ion()
art3d.zalpha = lambda *args:args[0]


# Setup canvas
Example #50
nc.close()

phi = phi*np.pi/180  # angle between major axis and east [rad] (beware sign)

# ------ extract density profile, compute N2 ------------------
if clim == "lucky":
    nc = Dataset(cname,'r')
    T = nc.variables['temp_roms_avg'][:]
    S = nc.variables['salt_roms_avg'][:]
    zz = nc.variables['depth'][:]
    nz = zz.size
    
rho = np.sort(rhop(T,S)) #SW_Density(T,S) # sorting is cheating here
rho0 = rho.mean()
frho = itp.pchip(zz[::-1],rho[::-1],extrapolate=True)
N2_tmp = -(g/rho0)*(2*np.pi)**2*frho.derivative()(zz)    # # has to be in [(rad s-1)^2]
# temporary fixing:
if N2_tmp[-1]==0: N2_tmp[-1] = 1e-8
indneg, = np.where(N2_tmp<=0.)
for ii in indneg:
    N2_tmp[ii] = (N2_tmp[ii-1] + N2_tmp[ii+1])/2
fN2 = itp.pchip(zz[::-1],N2_tmp[::-1],extrapolate=True)    

# fit exponential profile
slope,intercept,r_val,p_val,std_err = stats.linregress(zz,np.log(N2_tmp**0.5))
N0  = np.exp(intercept)/(2*np.pi)
b   = 1./slope
N2b = fN2(hgrid)
        
Example #51
#!/usr/bin/env python3

from numpy import *
from scipy import *
from scipy.interpolate import interp1d
from scipy.interpolate import pchip
import sys
import os
import argparse
import json

a = flipud(loadtxt(sys.argv[1]));
b = flipud(loadtxt(sys.argv[2]));

for m in range(0,11):
    try:
        ya = a[:,3+m]
        yb = b[:,3+m]
        ra = a[:,2]*8./a[:,1]
        rb = b[:,2]*8./b[:,1]
        a_rate = pchip(ya, log(ra))(float(sys.argv[3]))
        b_rate = pchip(yb, log(rb))(float(sys.argv[3]))
        print(exp(b_rate - a_rate) - 1)
    except IndexError:
        print('NaN')
    except ValueError:
        print('NaN')
Example #52
def calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, NX,rsearch=5.0,\
            lth = None, LRfrac=0.2,lrmax=None,\
            magname = 'imag_psf',xerrname='xposerr',
            xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',
            opticalid = 'hscid',opterr = 0.1,pdf='Rayleigh',first=False):
    '''
    input variables:
    xdf, xcat, optdf,catopt,optdf,nm, qm, Q, rmag, rsearch=5.0,\
    magname = 'rmag_psf',xerrname='xposerr',
    xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',
    opticalid = 'hscid'
    For computing LR for every optical source within rsearch:
    '''
    if first:
        print('first calc_LR')
    idxmm, idhsc, d2d, d3d = catopt.search_around_sky(xcat, rsearch * u.arcsec)
    match = pd.DataFrame({'xid':idxmm,'optid':idhsc,'dist':d2d.arcsec,\
    'rmag':optdf.loc[idhsc,magname].values,'xposerr':xdf.loc[idxmm,xerrname],\
    'raopt':optdf.loc[idhsc,ora].values,'decopt':optdf.loc[idhsc,odec].values,\
    'rax':xdf.loc[idxmm,xra].values,'decx':xdf.loc[idxmm,xdec].values,\
    'optname':optdf.loc[idhsc,opticalid].values})

    #print('match len = ',len(match), 'xid nunique = ', match.xid.nunique())
    fr = pdf_sep_gen(match.dist.values, match.xposerr.values, opterr, pdf=pdf)
    n_m = pchip(rmag, nm)  #, bounds_error=False,fill_value='extrapolate')
    q_m = pchip(rmag, qm)  #, bounds_error=False,fill_value='extrapolate')
    fnm = n_m(match.rmag.values)
    fqm = q_m(match.rmag.values)
    fqm[np.where(fqm < 0.)] = 1e-8
    fnm[np.where(fnm < 0.)] = 1e-8
    LR = fr * fqm / fnm
    match['LR'] = pd.Series(LR, index=match.index)
    match['matchid'] = pd.Series(range(len(match)), index=match.index)
    match['raoff'] = pd.Series((match.rax - match.raopt) * 3600.,
                               index=match.index)
    match['decoff'] = pd.Series((match.decx - match.decopt) * 3600.,
                                index=match.index)
    #several situations :
    #1. all matches are unique, no further action is required.
    if match.xid.nunique() - len(match) == 0:
        return match, match, 1.0, 1.0, match.LR.min()
    else:
        if lth is None:
            #If the array of lth values is not provided,
            #guess it by assuming that only NX sources would be reliable,
            #so loop through the LR values around that LR quantile
            #qcenter = match.LR.quantile(float(NX)/len(match))
            qcenter = 1. - 1.5 * float(NX) / len(match)
            if qcenter < 0.:
                qcenter = 0.1
            lth = np.linspace(0.5 * qcenter, min([2.0 * qcenter, 0.95]), 30.)
            #print(lth)
        if lrmax is None:
            #first
            R, C, LRth = calc_RC(match, lth, Q, NX, LRfrac=LRfrac, first=first)
            lthmax = LRth[np.argmax((R + C))]
            if not np.isscalar(lthmax):
                if len(lthmax) >= 1:
                    lthmax = lthmax[0]
            goodmatch, R, C, LRth = calc_RCMAX(match,
                                               lthmax,
                                               Q,
                                               len(xcat),
                                               LRfrac=LRfrac)
            return match, goodmatch, R, C, lthmax, LRth
        else:
            goodmatch, R, C, LRth = calc_RCMAX(match,
                                               lrmax,
                                               Q,
                                               len(xcat),
                                               LRfrac=LRfrac)
            return match, goodmatch, R, C, lrmax, LRth
Example #53
def display (all_latencies, directory): # all_latencies : solver -> #tm(0-23) -> path_len -> frac_traffic
    scheme_latency_dist = OrderedDict()  # solver -> path_length -> (frac_tput_mean, frac_tput_std)
    for solver in all_latencies.keys():
        lat_percentile = get_latency_percentile(all_latencies, solver)
        scheme_latency_dist[solver] = lat_percentile
    CommonConf.setupMPPDefaults()
    fmts = CommonConf.getLineFormatsDict()
    mrkrs = CommonConf.getLineMarkersDict()
    mrkrsize = CommonConf.getLineMarkersSizeDict()
    mrkrlw = CommonConf.getLineMarkersLWDict()
    colors = CommonConf.getLineColorsDict()
    fig = pp.figure(figsize=(6,5))
    ax = fig.add_subplot(111)
    max_lat = 0
    for solver, latencies in scheme_latency_dist.iteritems():
        max_lat = max(max_lat, max(latencies.keys()))

    gap=1
    for solver, latencies in scheme_latency_dist.iteritems():
        xs = []
        xsall = sorted(latencies.keys())
        for i in range(0,len(xsall)):
          if (i%gap==0):
            xs.append(xsall[i])
        if (len(xsall)-1)%gap!=0:
          xs.append(xsall[-1])

        ys = [latencies[lat][0] for lat in xs]
        ydevs = [latencies[lat][1] for lat in xs]
        ax.plot((xs[-1], max_lat), (ys[-1], ys[-1]), linestyle=':',
                marker=mrkrs[solver],
                color=colors[solver],
                markersize=mrkrsize[solver],
                markerfacecolor='none',
                markeredgecolor=colors[solver],
                markeredgewidth=mrkrsize[solver]/4,
                linewidth=mrkrlw[solver])

        new_xs = np.linspace(min(xs), max(xs), (max(xs)-min(xs))*3+1)
        print new_xs
        yinterp = pchip(xs, ys)
        ydevsinterp = pchip(xs, ydevs)
        new_ys = yinterp(new_xs)
        new_ydevs = ydevsinterp(new_xs)
        ax.errorbar(new_xs, new_ys, yerr=new_ydevs,
                label=CommonConf.gen_label(solver), marker=mrkrs[solver],
                linestyle=fmts[solver],
                color=colors[solver],
                markersize=mrkrsize[solver],
                markerfacecolor='none',
                markeredgecolor=colors[solver],
                markeredgewidth=mrkrsize[solver]/4,
                markevery=len(new_xs)/4,
                errorevery=len(new_xs)/4,
                linewidth=mrkrlw[solver])
    ax.set_xlabel(X_LABEL)
    ax.set_ylabel(Y_LABEL)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    ax.legend(loc='best', borderaxespad=1., fancybox=True)
    pp.subplots_adjust(left=0.1, right=0.8, top=0.9, bottom=0.1)
    pp.ylim(0.0,1.05)
    xmin,xmax = ax.get_xlim()
    xmax = (xmax + (xmax%2))
    pp.xlim(3, xmax)
    pp.tight_layout()
    pp.savefig(directory+"/LatencyCDF.svg")
Example #54
 def _make_random(self, npts=20):
     np.random.seed(1234)
     xi = np.sort(np.random.random(npts))
     yi = np.random.random(npts)
     return pchip(xi, yi), xi, yi
Example #55
 def pchip_deriv2(x, y, axis=0):
     return pchip(x, y, axis).derivative(2)
Example #56
    for i in range(len(series)):
        [
            time_used_glpk, timeslot_used_glpk, time_used_graph,
            timeslot_used_graph
        ] = get_info(i, len(count), first_char)
        fig, axs = plt.subplots(2, 1)
        axs[0].set_title(
            str(series[i][0]) + ' teachers, ' + str(series[i][1]) + ' rooms')
        axs[0].set_xlabel('Courses')
        axs[0].set_ylabel('Time (second)')

        time_used_glpk = np.array(time_used_glpk)
        time_used_graph = np.array(time_used_graph)

        smooth_count = np.linspace(arr_count.min(), arr_count.max(), 240)
        pch = pchip(arr_count, time_used_glpk)
        smooth_time_used_glpk = pch(smooth_count)
        pch = pchip(arr_count, time_used_graph)
        smooth_time_used_graph = pch(smooth_count)
        axs[0].plot(smooth_count,
                    smooth_time_used_glpk,
                    marker='.',
                    markevery=10)
        axs[0].plot(smooth_count,
                    smooth_time_used_graph,
                    marker='.',
                    markevery=10)

        axs[1].axis('tight')
        axs[1].axis('off')
        columns = tuple(count)
Example #57
# List of Earth masses converted to M_Jup
x_new = toJupMass(MEarth)

# np.linspace(min,max,n) creates a list of n values between min and max
jupX = np.linspace(min(MJup),max(MJup),500)
earthX = np.linspace(min(x_new),max(x_new),500)

# convert radius and masses to a density
IceD = toDensity(MEarth, IceR)
RockD = toDensity(MEarth, RockR)
IronD = toDensity(MEarth, IronR)

lineColor = 'LightGray'

# Interpolate a line between the data points
interp = pchip(np.array(x_new),np.array(IceD))
ax.loglog(earthX, interp(earthX), c=lineColor, lw=4.0)
interp = pchip(np.array(x_new),np.array(RockD))
ax.loglog(earthX, interp(earthX), c=lineColor, lw=4.0)
interp = pchip(np.array(x_new),np.array(IronD))
ax.loglog(earthX, interp(earthX), c=lineColor, lw=4.0)

# Convert Jupiter radii and masses to density
H_HE_D = []
for idx in range(0,len(MJup)):
   g_mass = MJup[idx] * 1.898e30
   cm_rad = H_HE[idx] * 6.9911e9
   density = g_mass/( (4.0/3.0) * constants.pi * (cm_rad**3))
   H_HE_D.append(density)

interp = pchip(np.array(MJup),np.array(H_HE_D))
Example #58
def interpolate_bladestructure(st3d, s_new):
    """
    interpolate a blade structure definition onto
    a new spanwise distribution using pchip

    parameters
    ----------
    st3d: dict
        dictionary with blade structural definition
    s_new: array
        1-d array with new spanwise distribution

    returns
    -------
    st3dn: dict
        blade structural definition interpolated onto s_new distribution
    """

    st3dn = {}
    sorg = st3d['s']
    st3dn['s'] = s_new.copy()
    st3dn['version'] = st3d['version']
    st3dn['materials'] = st3d['materials']
    st3dn['matprops'] = st3d['matprops']
    st3dn['failmat'] = st3d['failmat']
    st3dn['failcrit'] = st3d['failcrit']
    st3dn['web_def'] = st3d['web_def']
    st3dn['dominant_regions'] = st3d['dominant_regions']
    st3dn['cap_DPs'] = st3d['cap_DPs']
    st3dn['le_DPs'] = st3d['le_DPs']
    st3dn['te_DPs'] = st3d['te_DPs']
    try:
        st3dn['struct_angle'] = st3d['struct_angle']
        st3dn['cap_DPs'] = st3d['cap_DPs']
        st3dn['te_DPs'] = st3d['te_DPs']
        st3dn['le_DPs'] = st3d['le_DPs']
        names = ['cap_center_ps',
                 'cap_center_ss',
                 'cap_width_ps',
                 'cap_width_ss',
                 'te_width',
                 'le_width']
        names.extend(['w%02dpos' % i for i in range(1, len(st3d['web_def']))])
        for name in names:
            tck = pchip(sorg, st3d[name])
            st3dn[name] = tck(s_new)
    except:
        print('no geo3d data')

    st3dn['regions'] = []
    st3dn['webs'] = []
    if 'bonds' in st3d:
        st3dn['bonds'] = []
        st3dn['bond_def'] = st3d['bond_def']

    DPs = np.zeros((s_new.shape[0], st3d['DPs'].shape[1]))
    for i in range(st3d['DPs'].shape[1]):
        tck = pchip(sorg, st3d['DPs'][:, i])
        DPs[:, i] = tck(s_new)
    st3dn['DPs'] = DPs

    for r in st3d['regions']:
        rnew = {}
        rnew['layers'] = r['layers']
        Ts = r['thicknesses']
        As = r['angles']
        tnew = np.zeros((s_new.shape[0], Ts.shape[1]))
        anew = np.zeros((s_new.shape[0], As.shape[1]))
        for i in range(Ts.shape[1]):
            tck = pchip(sorg, Ts[:, i])
            tnew[:, i] = tck(s_new)
            tck = pchip(sorg, As[:, i])
            anew[:, i] = tck(s_new)
        rnew['thicknesses'] = tnew.copy()
        rnew['angles'] = anew.copy()
        st3dn['regions'].append(rnew)
    for r in st3d['webs']:
        rnew = {}
        rnew['layers'] = r['layers']
        Ts = r['thicknesses']
        As = r['angles']
        tnew = np.zeros((s_new.shape[0], Ts.shape[1]))
        anew = np.zeros((s_new.shape[0], As.shape[1]))
        for i in range(Ts.shape[1]):
            tck = pchip(sorg, Ts[:, i])
            tnew[:, i] = tck(s_new)
            tck = pchip(sorg, As[:, i])
            anew[:, i] = tck(s_new)
        rnew['thicknesses'] = tnew.copy()
        rnew['angles'] = anew.copy()
        st3dn['webs'].append(rnew)
    if 'bonds' in st3d:
        for r in st3d['bonds']:
            rnew = {}
            rnew['layers'] = r['layers']
            Ts = r['thicknesses']
            As = r['angles']
            tnew = np.zeros((s_new.shape[0], Ts.shape[1]))
            anew = np.zeros((s_new.shape[0], As.shape[1]))
            for i in range(Ts.shape[1]):
                tck = pchip(sorg, Ts[:, i])
                tnew[:, i] = tck(s_new)
                tck = pchip(sorg, As[:, i])
                anew[:, i] = tck(s_new)
            rnew['thicknesses'] = tnew.copy()
            rnew['angles'] = anew.copy()
            st3dn['bonds'].append(rnew)

    return st3dn
Example #59
            module_name = moduleid_to_string(module)
            fname = f'direction_tau_concepts_' \
                    f'{module_name.lower()}_{measure.strip().lower()}_' \
                    f'{gap}_{samples}.png'
            fname = os.path.join(out_folder, fname)

            # IoU
            x_a = tau_series.copy()
            y_a = results[module][measure]
            x_a, y_a = clean_arrays(x_a, y_a)
            plt.plot(x_a, y_a, 'o', label='F1', color='tab:blue')

            # Smoothen IoU
            if len(x_a) > 2:
                x_smooth_a = np.linspace(gap, max(x_a), 100)
                f_a = pchip(x_a, y_a)
                plt.plot(x_smooth_a, f_a(x_smooth_a), '--', color='tab:blue')

            # Half ratio
            if measure == 'propagation':
                plt.axhline(y=ratio_lowest, color='gray', linestyle='dashed')

            # Legend
            plt.legend(loc='best')

            # log scale
            if measure == 'total':
                plt.yscale('log')

            # axis info
            xlab, ylab = lab
Example #60
def interpolarize(ynL):
    #converts an array of the type [x,y(x)] into a continuous interpolated function f(x) between the points xmin and xmax of the array
    print('the min value of x is ' + repr(ynL[0][0]))
    print('the max value of x is ' + repr(ynL[-1][0]))
    return pchip(ynL[:,0],ynL[:,1])
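A usage sketch for `interpolarize`, assuming the function above and its `pchip` import are in scope; the [x, y(x)] table is made up and its x column must be increasing for `pchip`:

import numpy as np

ynL = np.array([[0.0,  1.0],
                [1.0,  2.7],
                [2.0,  7.4],
                [3.0, 20.1]])
f = interpolarize(ynL)     # prints the x range, returns a callable interpolant
print(f(1.5))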