def set_irf( self, irf=None, wraptime=None, dispersion=None ):
        """
        The detector response isn't a delta-function, meaning that what
        you measure isn't the true time-dependence of the system you are
        measuring. It's the time dependence of the system convolved with
        the response of the detector. This method sets the measured
        trace of the detector response that will be used to convolve
        the mult-exponential model before fitting (thereby taking this
        convolution into account without doing nearly-impossible
        numerical deconvolution).
        """
        if isinstance( irf, Trace ):
            self.irf = irf
        elif isinstance(irf, str):
            self.irf = Trace( irf )
            if wraptime is not None:
                self.irf.wrapcurves( wraptime )
            elif self.wraptime is not None:
                self.irf.wrapcurves( self.wraptime )
        
        self.irf_dispersion = dispersion
        if dispersion is not None:
            # this is meant to address chromatic dispersion within the setup
            # (e.g. optical fiber)
            # don't bother with normalization b/c it gets normalized to unit area below anyway.
            #original = self.irf.curves[0].copy()
            #self.irf.curves[0][dispersion:] += original[:-dispersion]
            #self.irf.curves[0][:-dispersion] += original[dispersion:]
            len1 = len(self.irf.curves[0])
            chain_of_three = pylab.zeros( 3*len1 ) # stack three curves end-to-end so cspline1d_eval doesn't have to extrapolate beyond data
            chain_of_three[:len1] = self.irf.curves[0][:]
            chain_of_three[len1:2*len1] = self.irf.curves[0][:]
            chain_of_three[-len1:] = self.irf.curves[0][:]
            g = cspline1d(chain_of_three)
            smoothed = pylab.zeros( len1 )
            std_dev = dispersion/1000.0
            for t0 in pylab.linspace(-2*std_dev, 2*std_dev, 50):
                weight = pylab.exp( -t0**2/2.0/std_dev**2 )
                smoothed += weight * cspline1d_eval( g, self.irf.t[0]-t0, dx=self.irf.t[0][1], x0=-self.irf.t[0][-1] )
            self.irf.curves[0] = smoothed
            
        normalized = self.irf.curves[0].astype(float)/float(sum(self.irf.curves[0])) # normalize integral to 1, just like a delta function!!!
        self.irf.curves[0] = normalized.copy()

        self.irf_generator = cspline1d(self.irf.curves[0])
        self.irf_dt = self.irf.t[0][1]-self.irf.t[0][0]
        self.irf_t0 = self.irf.t[0][0]
        
        if False:
            """not sure this matters if we do interpolation
            """
            # difference in degree of binning (e.g. 8ps vs. 4ps is bin difference of 2)
            bin_difference = int( self.resolution / self.irf.resolution )
            if bin_difference != 1:
                raise ValueError("Have not yet tested deconvolution with different resolution than detector trace!!!")
                d = self.irf.curves[0]
                detector_binned = pylab.zeros( len(d)//bin_difference )
                for i in range( len(detector_binned) ):
                    detector_binned[i] = sum( d[i*bin_difference : i*bin_difference+bin_difference] )
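
For intuition, a minimal standalone sketch of the convolution this method sets up (all names below are illustrative, not part of this class): once the IRF has unit area, convolving the multi-exponential model with it gives the trace the detector would actually record.

import numpy as np

t = np.arange(0.0, 50.0, 0.008)                            # uniform time bins
decay_model = np.exp(-t / 2.5) + 0.3 * np.exp(-t / 0.4)    # toy multi-exponential
irf_curve = np.exp(-0.5 * ((t - 1.0) / 0.05) ** 2)         # toy detector response
irf_curve /= irf_curve.sum()                               # unit area, like a delta function
measured_model = np.convolve(decay_model, irf_curve)[:len(t)]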
Example #2
def TransformSpectrum(args):
    flux, invvar, c0, c1, newwave = args
    smoother = 3.0

    fitc = cspline1d(flux, lamb=smoother)
    fitc_iv = cspline1d(invvar, lamb=smoother)
    newf = cspline1d_eval(fitc, newwave, dx=c1, x0=c0)
    newiv = cspline1d_eval(fitc_iv, newwave, dx=c1, x0=c0)

    return (newf, newiv)
Example #4
def splineIntBig0LittleLog(y,x,xNew,splinecoeff = 0.):
    """
    Use the scipy spline interpolation, but linearly extrapolate at the edges,
    since scipy.signal.cspline1d assumes periodic boundary conditions
    """
    if len(x) < 4:
        return interpolateLin(y,x,xNew)

    if isinstance(xNew,float):
        wasfloat = 1
        xNew = N.array([xNew])
    else:
        wasfloat = 0

    whereSpline = N.where((xNew >= x[0]) * (xNew <= x[-1]))[0]
    whereLittle = N.where(xNew < x[0])[0]
    whereBig = N.where(xNew >= x[-1])[0]
    
    ans = xNew * 0.
    if len(whereSpline) > 0:
        if isinstance(splinecoeff,float): # not pre-calculated.
            splinecoeff = SS.cspline1d(y)
        ans[whereSpline] = SS.cspline1d_eval(splinecoeff, xNew[whereSpline], dx=x[1]-x[0], x0 = x[0])

    if len(whereLittle) > 0:
        xw = xNew[whereLittle]
        logx,logy = N.log(x[:2]),N.log(y[:2])
        ans[whereLittle] = N.exp(logy[0] + (N.log(xw)-logx[0])/(logx[1]-logx[0])*(logy[1]-logy[0]))


    if wasfloat:
        return ans[0]
    else:
        return ans
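
A short usage sketch (hypothetical values; assumes this function's module aliases N = numpy and SS = scipy.signal, as the code above implies): points below the sampled range are extrapolated linearly in log-log space, points above it come back as 0.

import numpy as N

x = N.array([1., 2., 3., 4., 5.])
y = x ** 2
# expect roughly [0.25, 6.25, 0.0]: log-log extrapolation below the range,
# spline interpolation inside it, and 0 above it
print(splineIntBig0LittleLog(y, x, N.array([0.5, 2.5, 10.])))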
Example #5
def splineIntLinExt(y,x,xNew,splinecoeff = 0.):
    """
    Use the scipy spline interpolation, but linearly extrapolate at the edges,
    since scipy.signal.cspline1d assumes periodic boundary conditions
    """    
    if len(x) < 4:
        return interpolateLin(y,x,xNew)

    if isinstance(xNew,float):
        wasfloat = 1
        xNew = M.array([xNew])
    else:
        wasfloat = 0

    whereSpline = N.where((xNew > x[0]) * (xNew < x[-1]))[0]
    whereLin = N.where((xNew <= x[0]) + (xNew >= x[-1]))[0]
    
    ans = xNew * 0.
    if len(whereSpline) > 0:
        if isinstance(splinecoeff,float): # not pre-calculated.
            splinecoeff = SS.cspline1d(y)
        ans[whereSpline] = SS.cspline1d_eval(splinecoeff, xNew[whereSpline], dx=x[1]-x[0], x0 = x[0])

    if len(whereLin) > 0:
        ans[whereLin] = interpolateLin(y,x,xNew[whereLin])

    if wasfloat:
        return ans[0]
    else:
        return ans
Example #6
def interp_T(T, Ts, PPs):
    xvals = zeros(1)
    xvals[0] = T
    Ps = list()
    for i in PPs:
        cj = cspline1d(i)
        Ps.append(cspline1d_eval(cj, xvals, dx=500.0, x0=Ts[0]))
    return Ps
Example #8
def wiggle(x, origin=0, posFill='black', negFill=None, lineColor='black', 
        resampleRatio=10, rescale=False, ymin=0, ymax=None, ax=None):
    """Plots a "wiggle" trace
    Input:
        x: input data (1D numpy array)
        origin: (default, 0) value to fill above or below (float)
        posFill: (default, black) color to fill positive wiggles with (string 
            or None)
        negFill: (default, None) color to fill negative wiggles with (string 
            or None)
        lineColor: (default, black) color of wiggle trace (string or None)
        resampleRatio: (default, 10) factor to resample traces by before 
            plotting (1 = raw data) (float)
        rescale: (default, False) If True, rescale "x" to be between -1 and 1
        ymin: (default, 0) The minimum y to use for plotting
        ymax: (default, len(x)) The maximum y to use for plotting
        ax: (default, current axis) The matplotlib axis to plot onto
    Output:
        a matplotlib plot on the current axes
    """
    from matplotlib import pyplot as plt
    from scipy.signal import cspline1d, cspline1d_eval

    if ymax is None:
        ymax = x.size

    # Rescale so that x ranges from -1 to 1
    if rescale:
        x = x.astype(float)
        x -= x.min()
        x /= x.ptp()
        x *= 2
        x -= 1

    # Interpolate at resampleRatio x the previous density
    y = np.linspace(0, x.size, x.size)
    interp_y = np.linspace(0, x.size, x.size * resampleRatio)
    cj = cspline1d(x)
    interpX = cspline1d_eval(cj,interp_y) #,dx=1,x0=0
    newy = np.linspace(ymax, ymin, interp_y.size)
    if origin is None:
        origin = interpX.mean()

    # Plot
    if ax is None:
        ax = plt.gca()
    if posFill is not None: 
        ax.fill_betweenx(newy, interpX, origin,
                where=interpX > origin,
                facecolor=posFill)
    if negFill is not None:
        ax.fill_betweenx(newy, interpX, origin,
                where=interpX < origin,
                facecolor=negFill)
    if lineColor is not None:
        ax.plot(interpX, newy, color=lineColor)
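
A hypothetical call (synthetic data; np and matplotlib are assumed available at module level, as the function body implies):

import numpy as np

trace = np.cumsum(np.random.randn(200))  # synthetic seismic-like trace
wiggle(trace, rescale=True, posFill='black', negFill=None)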
Example #9
File: med.py Project: ttrainor/_tdl
def spline_interpolate(oldx, oldy, newx, smoothing=0.001, **kw):
    """
    newy = spline_interpolate(oldx, oldy, newx)
    1-dimensional cubic spline, for cases where oldx and newx are on a uniform grid.
    """
    return cspline1d_eval(cspline1d(oldy),
                          newx,
                          dx=oldx[1] - oldx[0],
                          x0=oldx[0])
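
A minimal usage sketch (hypothetical arrays; cspline1d and cspline1d_eval must be importable in the module, and oldx must be uniformly spaced):

import numpy as np

oldx = np.linspace(0.0, 1.0, 11)   # must be a uniform grid
oldy = np.sin(2 * np.pi * oldx)
newx = np.linspace(0.0, 1.0, 101)
newy = spline_interpolate(oldx, oldy, newx)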
Example #11
    def test_basic(self):
        y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])
        x = arange(len(y))
        dx = x[1] - x[0]
        cj = signal.cspline1d(y)

        x2 = arange(len(y) * 10.0) / 10.0
        y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0])

        # make sure interpolated values are on knot points
        assert_array_almost_equal(y2[::10], y, decimal=5)
Example #13
File: pt.py Project: jizhi/project_TL
    def run(self):
        """
        create an inifile from the parameters and run camb on it
        and store the results in k,pk
        """
        self.printIniFile()
        os.system(self.cambPath + "/camb " + self.iniName)
        self.k, self.pk = utils.readColumns(self.cp.output_root + "_matterpower.dat")

        self.logk, self.logpk = M.log(self.k), M.log(self.pk)
        self.pkSplineCoeff = SS.cspline1d(self.logpk)
Example #14
File: spidr.py Project: abalckin/tesla
 def run(self):
     _, fileExtension = os.path.splitext(self.fileName)
     if fileExtension == '.gmv':
         print('Geomagnetic variation')
         with open(self.fileName, 'rt') as csvdata:
             date = []
             value = []
             for row in csv.reader(csvdata):
                 if ('#' in row[0]):
                     self.header.append(row)
                 else:
                     date.append(row[0])
                     value.append(row[1])
         self.notifyProgress.emit(20)
     elif fileExtension == '.ske':
         print('Kp estimation')
         with open(self.fileName, 'rt') as csvdata:
             date = []
             value = []
             for row in csv.reader(csvdata, delimiter=' '):
                 if ('#' in row[0]):
                     self.header.append(row)
                 else:
                     print(row)
                     if int(row[7]) < 2:
                         date.append(
                             dt.datetime.strptime(
                                 ''.join((row[0], row[1], row[2], row[4])),
                                 '%Y%m%d%H%M'))
                         value.append(float(row[-1]) -
                                      float(row[-14]))  # 4h
                         # value.append(float(row[-1])-float(row[19]))  # 1h
         self.notifyProgress.emit(20)
     signal_src = np.array((date, value), dtype=np.dtype('a25'))
     signal = signal_src[:,
                         np.logical_not(
                             np.isnan(signal_src[1, :].astype(float)))]
     self.notifyProgress.emit(60)
     if self.interpolate:
         self.time = signal_src[0, :].astype(np.datetime64).astype(
             dt.datetime)
         dx = dates.date2num(self.time[1]) - dates.date2num(self.time[0])
         cj = cspline1d(signal[1, :].astype(float))
         self.value = cspline1d_eval(cj,
                                     dates.date2num(self.time),
                                     dx=dx,
                                     x0=dates.date2num(self.time[0]))
     else:
         self.time = signal[0, :].astype(np.datetime64).astype(
             dt.datetime)
         self.value = signal[1, :].astype(float)
     self.notifyProgress.emit(80)
     self.loaded.emit()
Example #15
File: pt.py Project: JohanComparat/pyLPT
    def runEHuShiftBAO(self, sig8, shiftfact=1.):
        #Output EHu file
        f = open('ehu.in','w')

        f.write((str(self.cp.omega_baryon + self.cp.omega_cdm))+', '+str(self.cp.omega_lambda)+', '+\
                str(self.cp.omega_neutrino)+', '+str(self.cp.omega_baryon)+'\n')
        f.write(str(self.cp.hubble/100.)+', '+str(self.cp.temp_cmb)+', '+str(self.cp.massless_neutrinos)+'\n')
        f.write(str(self.cp.transfer_redshift[0])+'\n')
        f.write(str(self.cp.transfer_kmax)+', '+str(self.cp.transfer_k_per_logint)+'\n')
        f.write('1\n')
        tilt = self.cp.scalar_spectral_index[0]
        f.write(str(tilt)+'\n')
        f.write('0\n')

        f.close()

        # run EHu code
        os.system('~/ehu/power < ehu.in')

        # read into c.k, c.pk
        eh = N.loadtxt('trans.dat')
        self.k = eh[:,0]*1.
        self.logk = M.log(self.k)

        if getnowig:  # flag expected to be defined in the enclosing scope
            self.trans = eh[:,3]*1.
        else:
            self.trans = eh[:,1]*1.

        #M.loglog(self.k,self.trans)
        #M.show()
        if tilt == 1.:
            delH =  1.94e-5*(self.cp.omega_cdm + self.cp.omega_baryon)**(-0.785)
            delta = delH**2*(3000.0*self.k/(self.cp.hubble/100.))**4*self.trans**2
        else:
            delH =  1.94e-5*self.cp.omega_cdm**(-0.785 - 0.05*M.log(tilt))\
                   * M.exp(-0.95*(tilt - 1.) - 0.169*(tilt - 1)**2)
            delta = delH**2*(3000.0*self.k/(self.cp.hubble/100.))**(3 + tilt)*self.trans**2

        # Just an approximate normalization; really need sig8.
            
        self.pk = (2.*M.pi**2 * delta/self.k**3)*(self.cp.hubble/100.)**3

        if self.cp.transfer_redshift[0] > 0.:
            ps = PowerSpectrum(self.cp)
            sig8use = sig8*ps.d1(self.cp.transfer_redshift[0])/ps.d1(0.)
        else:
            sig8use = sig8
        normalizePk(self,sig8use) # sets c.logpk, too
        self.pkSplineCoeff = SS.cspline1d(self.logpk)

        return
Example #16
def doresample(orig_x, orig_y, new_x, method='cubic', padlen=0, antialias=False):
    """
    Resample data from one spacing to another.  By default, does not apply any antialiasing filter.

    Parameters
    ----------
    orig_x : original, uniformly spaced x axis
    orig_y : data sampled on orig_x
    new_x : x axis to resample onto
    method : 'cubic', 'quadratic', or 'univariate' spline interpolation
    padlen : number of points of padding applied to each end before resampling

    Returns
    -------
    The resampled data on new_x, or None for an invalid method.
    """
    pad_y = tide_filt.padvec(orig_y, padlen=padlen)
    tstep = orig_x[1] - orig_x[0]
    if padlen > 0:
        pad_x = np.concatenate((np.arange(orig_x[0] - padlen * tstep, orig_x[0], tstep),
                                orig_x,
                                np.arange(orig_x[-1] + tstep, orig_x[-1] + tstep * (padlen + 1), tstep)))
    else:
        pad_x = orig_x
    if padlen > 0:
        print('padlen=', padlen)
        print('tstep=', tstep)
        print(pad_x)

    # antialias and ringstop filter
    init_freq = len(pad_x) / (pad_x[-1] - pad_x[0])
    final_freq = len(new_x) / (new_x[-1] - new_x[0])
    if antialias and (init_freq > final_freq):
        aafilterfreq = final_freq / 2.0
        aafilter = tide_filt.noncausalfilter(filtertype='arb', usebutterworth=False)
        aafilter.setarb(0.0, 0.0, 0.95 * aafilterfreq, aafilterfreq)
        pad_y = aafilter.apply(init_freq, pad_y)

    if method == 'cubic':
        cj = signal.cspline1d(pad_y)
        return tide_filt.unpadvec(
            np.float64(signal.cspline1d_eval(cj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])), padlen=padlen)
    elif method == 'quadratic':
        qj = signal.qspline1d(pad_y)
        return tide_filt.unpadvec(
            np.float64(signal.qspline1d_eval(qj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])), padlen=padlen)
    elif method == 'univariate':
        interpolator = sp.interpolate.UnivariateSpline(pad_x, pad_y, k=3, s=0)  # s=0 interpolates
        return tide_filt.unpadvec(np.float64(interpolator(new_x)), padlen=padlen)
    else:
        print('invalid interpolation method')
        return None
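
A hypothetical call (assumes the rapidtide-style tide_filt helpers used above are importable):

import numpy as np

orig_x = np.arange(0.0, 10.0, 0.5)   # uniform original axis
orig_y = np.sin(orig_x)
new_x = np.arange(0.0, 9.51, 0.1)    # denser axis inside the original range
new_y = doresample(orig_x, orig_y, new_x, method='cubic')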
Example #17
File: pt.py Project: JohanComparat/pyLPT
    def pkInterp(self, knew, calcsplinecoeff = False):
        """
        P(knew) interpolated in log space for arbitrary k.
        Call with calcsplinecoeff = True if the power spectrum (self.pk) might have changed recently.
        """
        if (calcsplinecoeff):
            self.pkSplineCoeff = SS.cspline1d(self.logpk)

        w0 = N.where(knew > 0.)[0]
        newy = 0.*knew
        # will handle the case k = 0
        newy[w0] = M.exp(utils.splineIntLinExt(self.logpk, self.logk, M.log(knew[w0]),splinecoeff = self.pkSplineCoeff))
        return newy
Example #19
def interpol(x1, y1, x_out, plot=False):
    from scipy.signal import cspline1d, cspline1d_eval
    #assumes that data points are evenly spaced

    dx = x1[1] - x1[0]
    cj = cspline1d(y1)
    y_out = cspline1d_eval(cj, x_out, dx=dx, x0=x1[0])
    if plot:
        from pylab import plot, show, legend
        plot(x_out, y_out, 'ob', x1, y1, 'xg', ms=2)
        legend(['interpolated', 'original'])
        show()

    return y_out
Example #20
def interp_around(X_sc,s_fracpeak,s_before,s_after,kind='cubic'):
    n_c = X_sc.shape[1]
    n_s = s_before+s_after
    Out_sc = np.empty((n_s,n_c),dtype=np.float32)
    for i_c in range(n_c):
        if kind == 'cubic':
            coeffs = cspline1d(X_sc[:,i_c])
            Out_sc[:,i_c] = cspline1d_eval(coeffs,
                                       newx=np.arange(s_fracpeak - s_before,s_fracpeak+s_after,dtype=np.float32))
        elif kind == "linear":
            Out_sc[:,i_c] = interp1d(np.arange(X_sc.shape[0]),X_sc[:,i_c],
                                     bounds_error=True,kind=kind)(np.arange(s_fracpeak - s_before,s_fracpeak+s_after,dtype=np.float32))
        else: raise Exception("kind must be 'linear' or 'cubic'")
    return Out_sc
Example #22
File: utils.py Project: excelly/xpy-ml
def SplineResample(y, n_x, x_range = None, smoother = 3):
    ''' Resample a signal y using spline.

    y: the signal must be sampled on uniform x
    n_x: number of points for the new signal
    x_range: tuple (start x, end x) of the signal
    smoother: spline smoothing strength
    '''

    if x_range is None: x_range = (0., 1.)
    spcoeffs = cspline1d(y, lamb = smoother)
    return cspline1d_eval(
        spcoeffs, linspace(x_range[0], x_range[1], n_x), 
        dx = (x_range[1]-x_range[0])/(len(y)-1.0), x0 = x_range[0])
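
A short usage sketch (hypothetical signal; assumes the module's numpy-style imports seen above):

from numpy import linspace, pi, sin

y = sin(2 * pi * linspace(0., 1., 50))  # signal on a uniform grid over (0, 1)
y_fine = SplineResample(y, 200)         # resample to 200 points over the same range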
Example #24
def evalBeams(arrBeams,guessShearLoad,spc,debugFlag = False):
    
    lastBeam = arrBeams[-1]
    arrStrainEnergy = np.zeros(np.size(arrBeams))
    arrSurfEnergy = np.zeros(np.size(arrBeams))
    lastIndex = np.size(arrBeams)
    maxSurfEnergy = -gamma * lastBeam.w*(lastBeam.Lt - lastBeam.L)
    arrResults = np.array([])
    
    for j,beam in enumerate(arrBeams):
        if debugFlag:
            print('Beam Length: %f of %f'%(beam.L*scale,beam.Lt*scale))
        
        dispSearch(beam=beam,initLoad = guessShearLoad,goal=spc/2/scale,tol=1e-7,right=0,left=0)
        guessShearLoad = beam.shearLoad
        
        if debugFlag:
            print('Solved Beam -- TipDisp: %s Goal: %s Force: %s' % (beam.yTipDisplacement()*scale,spc/2,beam.shearLoad))
        
        arrStrainEnergy[j] = beam.calculateStrainEnergy()
        arrSurfEnergy[j] = -gamma * beam.w *(beam.Lt - beam.L)
        if arrStrainEnergy[j] >= np.abs(maxSurfEnergy): 
            # since there is more bending energy than surface energy stop computing 
            print('Super stiff beam')
            lastIndex = j
            break
    
    if lastIndex > 0:   # This ensures that we have more than one data point before trying to interpolate
        interpLens = np.linspace(arrBeamLens[0],arrBeamLens[lastIndex-1],num=100,endpoint=True) # Generate x values for which to interpolate
        csFit = cspline1d((arrStrainEnergy[0:lastIndex]+arrSurfEnergy[0:lastIndex]))    # Generate cubic spline fit to the sub dataset
        interpTotalEnergy = cspline1d_eval(csFit,interpLens,dx=(arrBeamLens[1]-arrBeamLens[0]), x0 = arrBeamLens[0])    # Generate the interpolated values from the fit and x points
        finalLen = interpLens[interpTotalEnergy.argmin()]   # find the minimum of the energy balance and grab index to choose the appropriate length
        
        if debugFlag:
            print('beamLens shape: %s arrStrain: %s'%(arrBeamLens[0:lastIndex].shape,arrStrainEnergy[0:lastIndex].shape))
            mpl.figure()
            mpl.plot(arrBeamLens[0:lastIndex]*scale,arrStrainEnergy[0:lastIndex]*scale)
            mpl.plot(arrBeamLens[0:lastIndex]*scale,arrSurfEnergy[0:lastIndex]*scale)
            mpl.plot(interpLens*scale,interpTotalEnergy*scale,arrBeamLens[0:lastIndex]*scale,(arrStrainEnergy+arrSurfEnergy)[0:lastIndex]*scale,'o')
        arrResults = np.array([arrBeamLens[0:lastIndex],arrStrainEnergy[0:lastIndex]])
    else:   # since there is only one datapoint then use that as the value
        finalLen = arrBeamLens[lastIndex]
        arrResults = np.array([arrBeamLens[lastIndex],arrStrainEnergy[lastIndex]])
    
    
    
    return (finalLen,arrResults)
Example #25
File: pt.py Project: jizhi/project_TL
def normalizePk(c, sig8):
    """
    renormalize the power spectrum to the given sigma_8

    Note: this doesn't take into account the redshift; it just blindly sets c.pk to have
    sigma_8 = sig8.
    """
    sig2now = sigma2fromPk(c, 8.0)
    # print 'sig2now=',sig2now
    c.pk *= sig8 ** 2 / sig2now
    c.logpk = M.log(c.pk)

    c.cp.scalar_amp[0] = c.cp.scalar_amp[0] * sig8 ** 2 / sig2now[0]  # inelegant tuple change

    # for scipy splines
    c.pkSplineCoeff = SS.cspline1d(c.logpk)
    return sig2now
Example #26
def peakdetect_spline(y_axis, x_axis, pad_len=20):
    """
    Performs a b-spline interpolation on the data to increase resolution and
    send the data to the 'peakdetect_zero_crossing' function for peak
    detection.

    Omitting the x_axis is forbidden, as the returned peak positions would
    otherwise be meaningless fractional indices such as 50.234.

    Finds the same number of peaks as the 'peakdetect_zero_crossing'
    function, but may locate each peak more precisely.

    keyword arguments:
    y_axis -- A list containing the signal over which to find peaks

    x_axis -- An x-axis whose values correspond to the y_axis list and is
        used in the return to specify the position of the peaks.
        x-axis must be equally spaced.

    pad_len -- factor by which to increase the time resolution,
        e.g. 1 doubles the resolution.
        (default: 20)


    return: two lists [max_peaks, min_peaks] containing the positive and
        negative peaks respectively. Each cell of the lists contains a tuple
        of: (position, peak_value)
        to get the average peak value do: np.mean(max_peaks, 0)[1] on the
        results to unpack one of the lists into x, y coordinates do:
        x, y = zip(*max_peaks)
    """
    # check input data
    x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)
    # could perform a check if x_axis is equally spaced
    #if np.std(np.diff(x_axis)) > 1e-15: raise ValueError
    # perform spline interpolations
    dx = x_axis[1] - x_axis[0]
    x_interpolated = np.linspace(x_axis.min(), x_axis.max(),
                                 len(x_axis) * (pad_len + 1))
    cj = cspline1d(y_axis)
    y_interpolated = cspline1d_eval(cj, x_interpolated, dx=dx, x0=x_axis[0])
    # get peaks
    max_peaks, min_peaks = peakdetect_zero_crossing(y_interpolated,
                                                    x_interpolated)

    return [max_peaks, min_peaks]
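
A hypothetical call (synthetic signal; _datacheck_peakdetect and peakdetect_zero_crossing must come from the same module):

import numpy as np

x = np.linspace(0, 10, 200)                  # must be equally spaced
y = np.sin(x) + 0.05 * np.random.randn(200)
max_peaks, min_peaks = peakdetect_spline(y, x, pad_len=20)
peak_x, peak_y = zip(*max_peaks)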
Example #27
File: pt.py Project: jizhi/project_TL
    def pkInterp(self, knew, calcsplinecoeff=False):
        """
        P(knew) interpolated in log space for arbitrary k.
        Call with calcsplinecoeff = True if the power spectrum (self.pk) might have changed recently.
        """
        if calcsplinecoeff:
            self.pkSplineCoeff = SS.cspline1d(self.logpk)

        # newy = M.exp(utils.splineIntLinExt(self.logpk, self.logk, M.log(knew),splinecoeff = self.pkSplineCoeff))
        w0 = N.where(knew <= 0.0)
        wn0 = N.where(knew > 0.0)
        newy = 0.0 * knew
        newy[wn0] = M.exp(
            utils.splineIntLinExt(self.logpk, self.logk, M.log(knew[wn0]), splinecoeff=self.pkSplineCoeff)
        )
        # do 0
        newy[w0] = 0.0
        return newy
Example #28
def sr_interpol2(x,y,ytarget,doplot=0,factor=10):
    dx = x[1]-x[0]
    newx = linspace(min(x),max(x),factor*len(x))

    cj = cspline1d(y)
    newy = cspline1d_eval(cj, newx, dx=dx,x0=x[0])

    ysq = (ytarget-newy)**2
    index = where(ysq == min(ysq))

    if doplot:
        clf()
        plot(x,y,'o')
        plot(newx,newy)
        plot(newx[index],newy[index],'o')
        show()

    return newx[index[0][0]]
Example #29
    def fitSpline(self, degree=2):
        """
        **SUMMARY**

        A function to generate a spline curve fitting over the points in LineScan with
        order of precision given by the parameter degree

        **PARAMETERS**

        * *degree* - the precision of the generated spline 

        **RETURNS**

        The spline as a LineScan fitting over the initial values of LineScan

        **EXAMPLE**

        >>> import matplotlib.pyplot as plt
        >>> img = Image("lenna")
        >>> ls = img.getLineScan(pt1=(10,10), pt2=(20,20)).normalize()
        >>> spline = ls.fitSpline()
        >>> plt.plot(ls)
        >>> plt.show()
        >>> plt.plot(spline)
        >>> plt.show()
        
        **NOTES**

        Implementation taken from http://www.scipy.org/Cookbook/Interpolation  

        """
        if degree > 4:
            degree = 4  # No significant improvement with respect to time usage
        if degree < 1:
            warnings.warn('LineScan.fitSpline - degree needs to be >= 1')
            return None
        retVal = None
        y = np.array(self)
        x = np.arange(0, len(y), 1)
        dx = 1
        newx = np.arange(0, len(y) - 1, pow(0.1, degree))
        cj = sps.cspline1d(y)
        retVal = sps.cspline1d_eval(cj, newx, dx=dx, x0=x[0])
        return retVal
Example #32
def _spline_interpolate(oldx, oldy, newx, smoothing=0.001,fast=True, **kw):
    """
    cubic splines for axis alignment using
    scipy.signal and/or scipy.interpolate

    newy = _spline_interpolate(oldx, oldy, newx, fast=True)
    if fast = True
       1-dimensional cubic spline for cases where
       oldx and newx are on a uniform grid.
    else
       handles multi-dimensional data, non-uniform x-grids, but is
       much slower for 1d cubic splines
    """
    from scipy.interpolate import splrep, splev
    from scipy.signal import cspline1d, cspline1d_eval
    if fast:
        return cspline1d_eval(cspline1d(oldy), newx, dx=oldx[1]-oldx[0],x0=oldx[0])
    else:
        rep = splrep(oldx,oldy,s=smoothing,full_output=False,**kw)
        return splev(newx, rep)
Example #33
    def wiggle(self, values):
        """
        Plot a trace in VAWT(Variable Area Wiggle Trace)
        """
        if self.zmax is None:
            self.zmax = values.size

        # Rescale so that values ranges from -1 to 1
        if self.rescale:
            values = values.astype(float)
            values -= values.min()
            values /= values.ptp()
            values *= 2
            values -= 1

        # Interpolate at resampleRatio x the previous density
        resample_z = np.linspace(0, values.size,
                                 values.size * self.resampleRatio)
        # cubic spline interpolation
        cj = cspline1d(values)
        resample_v = cspline1d_eval(cj, resample_z)
        print(resample_v)
        newz = resample_z
        if self.origin is None:
            self.origin = resample_v.mean()

        # Plot
        if self.posFill is not None:
            self.ax.fill_betweenx(newz,
                                  resample_v,
                                  self.origin,
                                  where=resample_v > self.origin,
                                  facecolor=self.posFill)
        if self.negFill is not None:
            self.ax.fill_betweenx(newz,
                                  resample_v,
                                  self.origin,
                                  where=resample_v < self.origin,
                                  facecolor=self.negFill)
        if self.lineColor is not None:
            self.ax.plot(resample_v, newz, color=self.lineColor, linewidth=.1)
Example #34
File: pt.py Project: jizhi/project_TL
    def kextend(self, mini, maxi, logkoverk3extrap=0, calcsplinecoeff=False):
        """
        Extend range of pk over which k is known, from 10^mini to 10^maxi.
        Keeps c.k the same in middle, but extrapolates on both sides.

        The logk/k**3 fit (used if last argument = 1) only works reliably if the highest k is around 100
        """

        log10interval = M.log10(self.k[-1] / self.k[0]) / (len(self.k) - 1.0)
        self.numtomini = M.floor((M.log10(self.k[0]) - mini) / log10interval)
        if self.numtomini < 0:
            self.numtomini = 0
        realmini = M.log10(self.k[0]) - log10interval * self.numtomini

        kx = 10.0 ** M.arange(realmini, maxi + log10interval, log10interval)
        px = self.pkInterp(kx, calcsplinecoeff=calcsplinecoeff)
        self.k = kx
        self.logk = M.log(kx)
        self.pk = px

        kmax = self.k[-1]

        # fit logk/k**3 at high end; this is presumably more accurate, but may crash
        if logkoverk3extrap == 1:
            p0 = (self.pk[-1] * self.k[-1] ** 3 - self.pk[-2] * self.k[-2] ** 3) / (
                M.log(self.k[-1]) - M.log(self.k[-2])
            )

            const = (
                self.pk[-1] * self.k[-1] ** 3 * M.log(self.k[-2]) - self.pk[-2] * self.k[-2] ** 3 * M.log(self.k[-1])
            ) / (self.pk[-1] * self.k[-1] ** 3 - self.pk[-2] * self.k[-2] ** 3)

            if (p0 > 0.0) * (maxi > kmax):  # if it worked okayly
                w = M.where(self.k > kmax)[0][0]
                self.pk[w:] = p0 * (self.logk[w:] - const) / self.k[w:] ** 3

        self.logpk = M.log(px)
        self.pkSplineCoeff = SS.cspline1d(self.logpk)
Example #36
def sr_interpol3(x,y,ytarget,doplot=0,factor=10):
    s = numpy.sign(numpy.diff(y)[0])
    if s==1:
        y[numpy.argmax(y)+1:] = 2*abs(max(y))
    else:
        y[numpy.argmin(y)+1:] = -2*abs(max(y))
    dx = x[1]-x[0]
    newx = linspace(min(x),max(x),factor*len(x))

    cj = cspline1d(y)
    newy = cspline1d_eval(cj, newx, dx=dx,x0=x[0])

    ysq = (ytarget-newy)**2
    index = where(ysq == min(ysq))

    if doplot:
        clf()
        plot(x,y,'o')
        plot(newx,newy)
        plot(newx[index],newy[index],'o')
        show()

    return newx[index[0][0]]
Example #37
    def fit_spline(self, degree=2):
        """
        Generates a spline _curve fitting over the points in LineScan with
        order of precision given by the parameter degree.
        :param degree: the precision of the generated spline.
        :return: the spline as a LineScan fitting over the initial values of
                  LineScan
        Notes:
        Implementation taken from http://www.scipy.org/Cookbook/Interpolation
        """
        if degree > 4:
            degree = 4  # No significant improvement with respect to time usage
        if degree < 1:
            warnings.warn("LineScan.fit_spline - degree needs to be >= 1.")
            return None

        y = np.array(self)
        x = np.arange(0, len(y), 1)
        dx = 1
        newx = np.arange(0, len(y) - 1, pow(0.1, degree))
        cj = signal.cspline1d(y)
        ret = signal.cspline1d_eval(cj, newx, dx=dx, x0=x[0])

        return ret
Example #39
File: olpchi.py Project: akoufos/matcalc
def compute(chifile, Tfil, pressure, lamda):
    #####################
    #Get lamda and T from the temperature file
    try:
        Tfil = float(Tfil)
    except ValueError:
        fndT = False
        tlist = open(Tfil, "r")
        for line in tlist:
            if line[0] == '#':
                continue
            line = line.rstrip()
            vals = line.split(None)
            if str(vals[0]) == chifile:
                Tfil = int(vals[1])
                lamda = float(vals[2])
                fndT = True
                break
        tlist.close()
        if not fndT:
            print("Error: Unable to find chi file " + chifile + " in temperature file " + Tfil)
            print("Please manually enter the temperature or enter the temperature into the temperature list file " + Tfil)
            exit(0)
    else:
        if lamda == -1.0:
            parser.print_help()
            print("\nError: When temperature is given manually, must use -l argument to set lambda.\n")
            exit()
        if Tfil != 0 and Tfil != 300 and (Tfil < 500 or Tfil > 3000):
            parser.print_help()
            print "\nError: Need a valid sample temperature value for analysis"
            exit()

    #####################
    #Read in the chi file
    f = open(chifile, 'r')
    i = 0
    for line in f:
        if i < 3:
            i += 1
            continue
        line = line.lstrip().rstrip()
        if i == 3:
            i += 1
            length = int(line)
            break
    xxs = zeros(length)
    yys = zeros(length)
    i = 0
    for line in f:
        line = line.lstrip().rstrip()
        [xxs[i], NULL, yys[i]] = line.split(' ')
        i += 1

    #####################
    #Load in the MgO data
    mgo_a0 = 4.213
    (ARs, Ts, PPs) = loadMgO('mgo_eos.dat')
    As = [i * mgo_a0 for i in ARs]
    if Tfil == 0.0:
        Ps = PPs[:, 0]
    elif Tfil == 300.0:
        As.insert(0, mgo_a0)
        Ps = PPs[:, 1].tolist()
        Ps.insert(0, 1.0)
    else:
        Ps = interp_T(Tfil, Ts[2:], PPs[:, 2:])

    #Get the MgO lattice constant for the desired pressure
    mgo_s = UnivariateSpline(Ps, As)
    mgo_sps = linspace(min(Ps), max(Ps), len(Ps))
    mgo_sas = mgo_s(mgo_sps)
    coefs = cspline1d(mgo_sas)
    latconst = cspline1d_eval(coefs, [pressure],
                              dx=mgo_sps[1] - mgo_sps[0],
                              x0=mgo_sps[0])

    #Get the intensity factors
    mgo_ks = [3, 4, 8, 11, 12]  #Valid K vectors
    mgo_mp = [8.0, 6.0, 12.0, 24.0, 8.0]  #Multiplicity
    mgo_sf = [1.0, 3.0, 3.0, 1.0, 3.0]  #Structure Factor
    mgo_Im = [(mgo_sf[i] * mgo_sf[i]) / mgo_mp[i]
              for i in range(len(mgo_sf))]  #Intensity multiplicand

    #Find the MgO peaks at this pressure
    mgo_2th = list()
    mgo_I = list()
    for (i, K) in enumerate(mgo_ks):
        print(lamda, K, latconst)
        theta = asin(lamda * sqrt(K) * 0.5 / latconst)
        mgo_2th.append(degrees(theta) * 2)
        mgo_I.append(mgo_Im[i] * (1 + cos(theta * 2) * cos(theta * 2)) /
                     (sin(theta) * sin(theta) * cos(theta)))
    mxI = max(mgo_I)
    mxA = sum(xxs) / len(xxs)
    mgo_I = [(i / mxI + 1) * mxA * mxA for i in mgo_I]
    pl.figure()
    pl.plot(xxs, yys)
    [pl.plot([x, x], [0, y], color='red') for (x, y) in zip(mgo_2th, mgo_I)]
    [
        pl.text(x, 5, str(z), size='x-small')
        for (x, y, z) in zip(mgo_2th, mgo_I, mgo_ks)
    ]
    pl.title(chifile + ", T=" + str(int(Tfil)) + ", P~" + str(pressure))
    pl.show()
Example #40
finalpixel = np.log10(maxwavelength)
deltapix = 1e-4  #10.**(np.min(deredloglambda0))*(10.**1e-4 - 1.)
npix = int((finalpixel - initialpixel) / deltapix + 1.)  # integer length for the arrays below
newwave = initialpixel + deltapix * np.arange(npix)

newflux = np.zeros((len(flux), npix + 1))
chisq = np.zeros(len(flux))

#resample spectra at single wavelength spectrum defined above
smoothing_parameter = 3.0
spline_order = 3
number_of_knots = -1

for p in range(len(flux)):
    nonzero = np.where(wavevector[p, :] != 0.)
    fitcoeff = cspline1d(flux[p], lamb=smoothing_parameter)
    newflux[p, :] = cspline1d_eval(fitcoeff,
                                   newwave,
                                   dx=wave1[p],
                                   x0=wavevector[p, 0])
    oldfit = cspline1d_eval(fitcoeff,
                            wavevector[p, nonzero][0],
                            dx=wave1[p],
                            x0=wavevector[p, 0])
    chisq[p] = np.sum(np.sqrt(
        (oldfit - flux[p])**2. * invvar[p])) / np.shape(flux[p])[0]

filename = 'pcaspectra_rest.fits'
pf.writeto(filename, newflux, clobber=True)
pf.append(filename, newwave)
pf.append(filename, z)
Example #41
# 
# Example showing how to use B-splines in scipy.signal to do
# interpolation. The input points must be equally spaced to use these
# routine.
# 
# <codecell>


from numpy import r_, sin
from scipy.signal import cspline1d, cspline1d_eval

x = r_[0:10]
dx = x[1]-x[0]
newx = r_[-3:13:0.1]  # notice outside the original domain 
y = sin(x) 
cj = cspline1d(y)
newy = cspline1d_eval(cj, newx, dx=dx,x0=x[0]) 
from pylab import plot, show
plot(newx, newy, x, y, 'o') 
show()

# <markdowncell>

# ![](files/Interpolation_attachments/interpolate_figure1.png)
# 
# N-D interpolation for equally-spaced data
# =========================================
# 
# The scipy.ndimage package also contains spline\_filter and
# map\_coordinates which can be used to perform N-dimensional
# interpolation for equally-spaced data. A two-dimensional example is
# sketched below.
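# 
# <codecell>

# A two-dimensional sketch (reconstructed here, since the original cookbook
# cell is cut off above): physical coordinates are converted by hand into the
# fractional index coordinates that map_coordinates expects.
from numpy import ogrid, mgrid, sin, array
from scipy import ndimage

x, y = ogrid[-1:1:5j, -1:1:5j]            # coarse 5x5 grid of samples
fvals = sin(x) * sin(y)
newx, newy = mgrid[-1:1:100j, -1:1:100j]  # fine 100x100 evaluation grid
x0, y0 = x[0, 0], y[0, 0]
dx, dy = x[1, 0] - x0, y[0, 1] - y0
ivals = (newx - x0) / dx                  # fractional row indices
jvals = (newy - y0) / dy                  # fractional column indices
newf = ndimage.map_coordinates(fvals, array([ivals, jvals]), order=3)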
Example #42
def generate(c=None,d=None,dk=None,ng=64,boxsize=128.,sigma8=0.829, sigmaalpt = 10.,scheme='muscle',largescale=None,smallscale=None,exactpk=False, seed = 42389,returnfft = False, dopbc=True, fileroot=None, returnpos=True, returndisp = False, plottest=False, returninitdens=False, justreturnc = False, returnvel=False,deltaz4vel=1./128., hubble=67.77, ombh2 = 0.048252*0.6777**2, omch2 = (0.30712-0.048252)*0.6777**2, redshift = 0.,kmax=30.,omk=0.):
    """ 
    possible inputs:
    c = Camb instance; contains cosmological parameters, power spectrum
    d = configuration-space density field
    dk = FFT of a density field

    parameters:
    sigmaalpt = Gaussian k-space interpolation smoothing scale, as in ALPT
    scheme: can be 'zeld'ovich, '2lpt', 'sc' (single-scale spherical collapse), 'muscle' (multiscale)
    largescale/smallscale: use for scale interpolation
    dopbc: False to preserve distortion of particle lattice, not enforcing periodic boundary conditions
    returnpos, returndisp, returnvel: return position, displacement field at particles, velocities [velocities only work for Zeld, 2LPT currently!]
    plottest: show a slice of the particles
    exactpk: Set each Fourier amplitude exactly to the linear power spectrum, 
             suppressing fluctuations in Fourier amplitudes
    """
    
    if (returnpos & returndisp):
        print('Choose either position or displacement field to return')
        return
    if returnvel:
        omegam = (ombh2+omch2)/(hubble/100.)**2
        omegal = 1.-omk-omegam
        hubble_z = 100.*N.sqrt(omegam*(1.+redshift)**3 + omk*(1+redshift)**2 + omegal)
        # Valid only for simple dark energy; doesn't allow w != -1

    if ((((c is None) | (returnvel & (smallscale is not None))) &
        ((d is None) & (dk is None))) | (justreturnc == True)):
        print('c is None:', (c is None))
        print('returnvel = ', returnvel)
        c = pt.Camb(hubble=hubble, ombh2 = ombh2, omch2 = omch2,omk=omk,
                    transfer_kmax=kmax,transfer_redshift=[0.])
        if dk is None:
            c.run()
            sigma82default_z0 = pt.normalizePk(c,sigma8)
            if redshift != 0.:
                c = pt.Camb(hubble=hubble, ombh2 = ombh2, omch2 = omch2,omk=omk,
                            transfer_kmax=kmax,transfer_redshift=[redshift])
                c.run()
                c.pk *= sigma8**2/sigma82default_z0
                c.logpk = N.log(c.pk)
                c.pkSplineCoeff = SS.cspline1d(c.logpk)
        if justreturnc:
            return c

        if (returnvel & (smallscale is not None)):
            print('Numerically computing velocity')
            cplusdeltaz = pt.Camb(hubble=hubble, ombh2 = ombh2, omch2 = omch2, omk=omk,
                                  transfer_kmax=kmax,transfer_redshift=[redshift+deltaz4vel])
            cplusdeltaz.run()

            cplusdeltaz.pk *= sigma8**2/sigma82default_z0
            cplusdeltaz.logpk = N.log(cplusdeltaz.pk)
            growthindeltaz=N.mean(c.logpk-cplusdeltaz.logpk)

            print('camb:', growthindeltaz)
            print('naive:', 2.*N.log(getgrowth(c,z=redshift)/getgrowth(c,z=redshift+deltaz4vel)))
        
            noiselevel = N.std(c.logpk-cplusdeltaz.logpk)
            print('std/mean(growth from camb)=', noiselevel/growthindeltaz)
            if (noiselevel/growthindeltaz > 1./8.):
                print("Warning! deltaz so small that it's giving lots of noise.")
            cplusdeltaz.pk = c.pk * N.exp(growthindeltaz)
            cplusdeltaz.logpk = c.logpk + growthindeltaz
            cplusdeltaz.pkSplineCoeff = SS.cspline1d(cplusdeltaz.logpk)
            
            #The Hubble parameter at z=redshift
            dispplusdeltaz = generate(c=cplusdeltaz,ng=ng,boxsize=boxsize,sigmaalpt = sigmaalpt,
                                     largescale=largescale,smallscale=smallscale, seed = seed,
                                     dopbc=dopbc, fileroot=fileroot, returnpos=False, returndisp=True,
                                     plottest=False, returnvel=False,exactpk=exactpk)

    # the Zel'dovich displacement-divergence in Fourier space
    if d is None:
        if dk is None:
            kgrid = getkgrid(ng=ng,boxsize=boxsize,whattoget='k')
            dk=makegauss(c,kgrid,boxsize=boxsize,exactpk=exactpk,returnfft=True,seed=seed)
            if returnfft:
                return dk
        else:
            #(over)write ng
            ng=dk.shape[0]
        d = N.fft.irfftn(dk)

    else: #shouldn't have both d and dk non-None
        print('d supplied')
        ng = d.shape[0]
        kgrid = getkgrid(ng=ng,boxsize=boxsize,whattoget='k')
        dk = N.fft.rfftn(d)

    if ((scheme is not None) & ((smallscale is not None) | (largescale is not None))):
        print("Please specify only 'scheme' or ('smallscale' and 'largescale')")
    
    if (smallscale is None) & (largescale is None):
        if scheme is not None:
            largescale = scheme
        else:
            print("Please specify either a full 'scheme' or")
            print("a 'smallscale' and 'largescale' displacement field scheme.")
            return

    print('largescale=', largescale)

    if smallscale == 'sc':
        psismall = N.fft.rfftn(sc(-d))
    elif smallscale == 'muscle':
        psismall = N.fft.rfftn(muscle(-d))
    elif smallscale == 'zeld':
        psismall = -dk

    if largescale == 'zeld':
        psilarge = -dk
    elif largescale == '2lpt':
        psiquadratic = N.fft.rfftn(twolpt(-dk,boxsize=boxsize))
        psilarge = psiquadratic - dk
        # dk because we didn't add it in twolpt for efficiency reasons
    elif largescale == 'sc':
        psilarge = N.fft.rfftn(sc(-d))
    elif largescale == 'muscle':
        psilarge = N.fft.rfftn(muscle(-d))

    if (smallscale is not None) & (largescale is not None):
        psik = scalegaussinterp(psilarge,psismall,kgrid,sigma=sigmaalpt)
    elif smallscale is not None:
        psik = psismall
    elif largescale is not None:
        psik = psilarge

    disp = invdiv(dk=psik,boxsize=boxsize,dopsi2pos=False).reshape(ng,ng,ng,3)
    pos = psi2pos(disp,boxsize=boxsize,dopbc=dopbc)

    if returnvel: # only works for Zeld, 2LPT
        time = 1./(1.+redshift)
        print('time, hubble_z, f_omega = ', time, hubble_z, f_omega(c,time))
        vel = disp * time * hubble_z * f_omega(c,time)
        print('total factor = ', time*hubble_z*f_omega(c,time))
        print('factor in gadget = ', N.sqrt(time)*hubble_z*f_omega(c,time))
        if scheme == '2lpt':
            vel += 3./7. * time * hubble_z * f2_omega(c,time)* \
                invdiv(dk=psiquadratic,boxsize=boxsize,dopsi2pos=False)#.swapaxes(0,2)

    if plottest:
        if fileroot is None:
            fileroot='plottest'
        plotslice(pos,filename=fileroot+'.png',boxsize=boxsize)

    if returndisp:
        return disp
    if (returninitdens & (returnpos == False) & (returnvel == False)):
        return d
    if (returnpos & returnvel & (returninitdens == False)):
        return pos,vel
    if (returnpos & returninitdens & (returnvel == False)):
        return pos,d
    if (returnpos & returnvel & returninitdens):
        return pos,vel,d
    if returnpos:
        return pos
    if returnvel:
        return vel
    return
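
A hypothetical call (illustrative parameters; assumes the module's pt CAMB wrapper and FFT helpers are importable):

# 64^3 particles in a 128 Mpc/h box, 2LPT displacements, fixed seed
pos = generate(ng=64, boxsize=128., scheme='2lpt', seed=12345)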
Example #43
def splinefit(xdata, ydata):
    xs = arange(min(xdata), max(xdata), 0.1 / float(len(xdata)))
    cj = cspline1d(array(ydata))
    return (xs, cspline1d_eval(cj, xs, dx=xdata[1] - xdata[0], x0=xdata[0]))
Example #44
def doresample(orig_x, orig_y, new_x, method="cubic", padlen=0, antialias=False, debug=False):
    """
    Resample data from one spacing to another.  By default, does not apply any antialiasing filter.

    Parameters
    ----------
    orig_x : original, uniformly spaced x axis
    orig_y : data sampled on orig_x
    new_x : x axis to resample onto
    method : 'cubic', 'quadratic', or 'univariate' spline interpolation
    padlen : number of points of padding applied to each end before resampling

    Returns
    -------
    The resampled data on new_x, or None for an invalid method.
    """
    tstep = orig_x[1] - orig_x[0]
    if padlen > 0:
        rawxpad = np.linspace(0.0, padlen * tstep, num=padlen, endpoint=False)
        frontpad = rawxpad + orig_x[0] - padlen * tstep
        backpad = rawxpad + orig_x[-1] + tstep
        pad_x = np.concatenate((frontpad, orig_x, backpad))
        pad_y = tide_filt.padvec(orig_y, padlen=padlen)
    else:
        pad_x = orig_x
        pad_y = orig_y

    if debug:
        print("padlen=", padlen)
        print("tstep=", tstep)
        print("lens:", len(pad_x), len(pad_y))
        print(pad_x)
        print(pad_y)
        fig = pl.figure()
        ax = fig.add_subplot(111)
        ax.set_title("Original and padded vector")
        pl.plot(orig_x, orig_y + 1.0, pad_x, pad_y)
        pl.show()

    # antialias and ringstop filter
    init_freq = len(pad_x) / (pad_x[-1] - pad_x[0])
    final_freq = len(new_x) / (new_x[-1] - new_x[0])
    if antialias and (init_freq > final_freq):
        aafilterfreq = final_freq / 2.0
        aafilter = tide_filt.NoncausalFilter(filtertype="arb", transferfunc="trapezoidal")
        aafilter.setfreqs(0.0, 0.0, 0.95 * aafilterfreq, aafilterfreq)
        pad_y = aafilter.apply(init_freq, pad_y)

    if method == "cubic":
        cj = signal.cspline1d(pad_y)
        # return tide_filt.unpadvec(
        #   np.float64(signal.cspline1d_eval(cj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])), padlen=padlen)
        return signal.cspline1d_eval(cj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])
    elif method == "quadratic":
        qj = signal.qspline1d(pad_y)
        # return tide_filt.unpadvec(
        #    np.float64(signal.qspline1d_eval(qj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])), padlen=padlen)
        return signal.qspline1d_eval(qj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])
    elif method == "univariate":
        interpolator = sp.interpolate.UnivariateSpline(pad_x, pad_y, k=3, s=0)  # s=0 interpolates
        # return tide_filt.unpadvec(np.float64(interpolator(new_x)), padlen=padlen)
        return np.float64(interpolator(new_x))
    else:
        print("invalid interpolation method")
        return None
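A minimal sketch of calling doresample on synthetic data (hypothetical values; relies on the same numpy import the function itself uses):

orig_x = np.linspace(0.0, 10.0, 101)           # 0.1 s spacing
orig_y = np.sin(2.0 * np.pi * 0.5 * orig_x)    # 0.5 Hz sine
new_x = np.linspace(0.0, 10.0, 401)            # 4x denser grid
resampled = doresample(orig_x, orig_y, new_x, method="cubic")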
Example #45
def splinefit(xdata, ydata, nx):
    xs = arange(min(xdata), max(xdata), 1.0 / float(nx) / len(xdata))
    cj = cspline1d(ydata)
    return (xs, cspline1d_eval(cj, xs, dx=xdata[1] - xdata[0], x0=xdata[0]))
Example #46
newwave = initialpixel + deltapix * np.arange(npix)

newflux = np.zeros((len(flux), npix))  # newwave has npix points, so npix columns
newand = np.zeros((len(flux), npix))
newor = np.zeros((len(flux), npix))
chisq = np.zeros(len(flux))
newinvvar = np.zeros((len(flux), npix))

#resample spectra at single wavelength spectrum defined above
smoothing_parameter = 3.0
spline_order = 3
number_of_knots = -1

for p in range(len(flux)):
    nonzero = np.where(wavevector[p, :] != 0.)
    fitcoeff = cspline1d(flux[p], lamb=smoothing_parameter)
    fitcoeff_inv = cspline1d(invvar[p], lamb=smoothing_parameter)
    newflux[p, :] = cspline1d_eval(fitcoeff,
                                   newwave,
                                   dx=wave1[p],
                                   x0=wavevector[p, 0])
    newinvvar[p, :] = cspline1d_eval(fitcoeff_inv,
                                     newwave,
                                     dx=wave1[p],
                                     x0=wavevector[p, 0])
    oldfit = cspline1d_eval(fitcoeff,
                            wavevector[p, nonzero][0],
                            dx=wave1[p],
                            x0=wavevector[p, 0])
    chisq[p] = np.sum(np.sqrt(
        (oldfit - flux[p])**2. * invvar[p])) / np.shape(flux[p])[0]  # inverse-variance-weighted residual
Example #47
def compute():
    print ""

    #####################
    #Read in the chi file
    xxs, yys = readchi(chifilename)
    s2ti = int(sum([1 for i in xxs
                    if i < s2t]))  #index of the starting location

    #####################
    #Load in the MgO data
    mgo_a0 = 4.213
    import os
    (ARs, Ts,
     PPs) = loadMgO(os.getenv("HOME") + '/Documents/ascwork/EOS/mgo_eos.dat')
    As = [i * mgo_a0 for i in ARs]
    if Tval == 0.0:
        Ps = PPs[:, 0]
    elif Tval == 300.0:
        As.insert(0, mgo_a0)
        Ps = PPs[:, 1].tolist()
        Ps.insert(0, 1.0)
    else:
        Ps = interp_T(Tval, Ts[2:], PPs[:, 2:])

    #####################
    #Analyze the spectrum: find the peaks
    ind = s2ti
    inpleft = list()
    inpright = list()
    while True:
        ind = localMax(yys, ind, delta)
        if ind == -1:
            break
        else:
            inpleft.append(ind)

    rind = s2ti
    yreverse = yys[::-1]
    while True:
        rind = localMax(yreverse, rind, delta)

        if rind == -1 or rind < s2ti:
            break
        else:
            ind = len(yys) - rind - 1
            inpright.append(ind)

    ysmth = windowavg(yys, 15)
    ind = s2ti
    while True:
        ind = localMax(ysmth, ind, delta)
        if ind == -1:
            break
        else:
            inpleft.append(ind)

    inp = list()
    for i in inpright:
        possibles = [j for j in inpleft if abs(i - j) < 5]
        if len(possibles) > 0:
            inp.append(sum(possibles) / len(possibles))

    inp = sort(list(set(inp)))

    #Write the current peak locations to a file, to be altered by user
    towrite = "peak   leftbnd   rightbnd\n"
    towrite += "".join([str(xxs[i]) + " \n" for i in inp])
    fname = "peaks_" + chifilename.split("_")[2].split(".")[0] + ".dat"

    #Check if the file already exists, if so don't overwrite it!
    exists = False
    try:
        open(fname, "r")
        [centerxsold, startxsold, endxsold] = readpeaks(fname)
        centers2 = theta2index(xxs, centerxsold)
        starts2 = theta2index(xxs, startxsold)
        ends2 = theta2index(xxs, endxsold)
        exists = True
    except IOError as e:
        print "%s File doesn't exist creating new one." % fname
        open(fname, "w").write(towrite)
    except ValueError as e:
        pass

    #Begin interactive portion
    print "List the starting and ending points of the peaks in the file %s." % fname
    print "You can add or delete peaks as needed from here."
    print "Close the graph when done."
    p = Process(target=openemacs, args=(fname,))
    p.start()

    #Open up a plot so the user can select the peaks
    pl.plot(xxs, yys)
    if exists:
        for i in centers2:
            pl.scatter(xxs[i], yys[i], marker="o")
        for i in starts2:
            pl.text(xxs[i], yys[i], "S")
        for i in ends2:
            pl.text(xxs[i], yys[i], "E")
        pl.legend(["diff pattern", "center", "start", "end"])
    else:
        for i in inp:
            pl.scatter(xxs[i], yys[i])
    pl.show()
    p.join()
    print "Thanks, got it."

    #Read in the newly user generated peaks file
    [centerxs, startxs, endxs] = readpeaks(fname)
    pcenters = theta2index(xxs, centerxs)
    pstarts = theta2index(xxs, startxs)
    pends = theta2index(xxs, endxs)
    peaks = [[pcenters[i], list(),
              list(), list(), list(), 0] for i in range(len(pcenters))]

    #Check if there is enough space between the two points.
    badgap = False
    for i in range(len(pcenters)):
        if pends[i] - pstarts[i] < 8:
            badgap = True
            print ""
            print "Error not enough space between start and end points on peak #%d at %g." % (
                i + 1, xxs[pcenters[i]])
            print "Gap must be at least 7 points wide otherwise fit will be inaccurate."
    if badgap: exit()
    """
    #####################
    #Windowed average to smooth out the plot, useful for multi-peak analysis
    yysmth=windowavg(yys,11)

    #####################
    #Find the groups of peaks from the smooth plot
        (pstarts,pends,pgroups)=findclusterbounds(yysmth,inp)
        ingroup=list()
        for i in inp:
            found=False
            for group in pgroups:
                for j in group:
                    if i==j:
                        found=True
                        break
                if found:
                    break
            if not(found):
                ingroup.append(False)
            else:
                ingroup.append(True)

        ####################
        #Transfer the group peaks location from the smooth plot to the original
        for g in range(len(pgroups)):
            for s in range(len(pgroups[g])):
                smthpeak=xxs[pgroups[g][s]]
                for i in range(len(inp)):
                    if abs(smthpeak-xxs[inp[i]]) < 0.05:
                        pgroups[g][s]=i
                        break
        """
    mpeaks = list()
    for i in range(len(peaks)):
        ind = peaks[i][0]
        print "Fitting peak at %g.\n" % xxs[ind]
        start = pstarts[i]
        end = pends[i]
        [xdata, ydata, coefs] = glinitguess(xxs[start:end], yys[start:end])
        peaks[i][4] = coefs
        [peaks[i][2], peaks[i][3]] = glfit(xdata, ydata, coefs)
        peaks[i][1] = xdata
        peaks[i][5] = localMax(peaks[i][2], 0, 1)  #new peak
        """
        #####################
        #Further Refine peaks by seperating out convoluted peaks

        for i in range(len(pgroups)):
            mpeaks.append(list())
            (start,end,group)=(pstarts[i],pends[i],pgroups[i])
            groupcoefs=list()
            for p in group:
                groupcoefs.append(peaks[p][4])
            [xdata,ydata,initlsq]=mglinitguess(xxs[start:end],yys[start:end],groupcoefs)
            [mgly,lsq]=mglfit(xdata,ydata,initlsq,len(groupcoefs))
            mglx=xdata
            ilsq=extractpks(lsq,len(group))

            for j in range(len(group)):
                peaks[group[j]][3]=ilsq[j]
                [peaks[group[j]][1],peaks[group[j]][2]]=[xdata,glval(xdata,ilsq[j])]
                refpk=localMax(peaks[group[j]][2],0,1)
                peaks[group[j]][5]=refpk
            mpeaks[i].append(mglx)
            mpeaks[i].append(mgly)
        """

    #####################
    #Plot normal peaks
    #Draw the blue peaks first to be overwritten by red peaks
    pl.figure()
    pl.plot(xxs, yys, ls='dotted')
    xx = [xxs[i[0]] for i in peaks]
    yy = [yys[i[0]] for i in peaks]
    [pl.scatter(xx[i], yy[i], label=str(xx[i])) for i in range(len(peaks))]
    #[pl.text(xx[i],yy[i],str(round(xx[i],2)),position=(xx[i],yy[i]),size='x-small') for i in range(len(peaks))]
    pl.title(chifilename + ", T=" + str(Tval) + ", Kmag=sqrt(" + str(ksq) +
             ")")
    #Plot fitted peaks
    for i in range(len(peaks)):
        pl.plot(peaks[i][1], peaks[i][2])
    for i in range(len(mpeaks)):
        pl.plot(mpeaks[i][0], mpeaks[i][1], ls='dashed')

    #Plot initial guesses
    #for i in range(len(peaks)):
    #    pl.plot(peaks[i][1],glval(peaks[i][1],peaks[i][4]))
    #    print peaks[i][4]
    #for i in range(len(mpeaks)):
    #    pl.plot(mpeaks[i][0],mpeaks[i][1],ls='dashed')

    #####################
    #Find possible peak matches
    s = UnivariateSpline(As, Ps)
    sas = linspace(min(As), max(As), len(As))
    sps = s(sas)
    coefs = cspline1d(sps)
    pressure = list()
    ks = list()
    theas = list()
    thetas = list()
    i = 0
    for (ind, xgf, ygf, lsq, initlsq, npk) in peaks:  #for each peak found
        theta2 = xgf[npk]
        intens = ygf[npk]

        i += 1

        d = lamda / (2.0 * sin(radians(theta2) / 2.0))

        for j in ksq:  #for each k value given
            a = d * sqrt(float(j))
            if (a > min(As) - tol and a < max(As) + tol):
                #######################
                #Found an MgO peak
                ks.append(j)
                theas.append(a)
                thetas.append(theta2)

                pressure.append(
                    cspline1d_eval(coefs, [a], dx=sas[1] - sas[0], x0=sas[0]))
                pl.scatter(theta2, intens, s=30, c='red', marker='o')
                pl.text(theta2,
                        intens - 0.5,
                        str(round(theta2, 5)),
                        position=(theta2, intens),
                        size='x-small')

                #ksq.remove(j)
                #break

    #####################
    #Print Results

    print "Possible Matches:"
    print "kmag\t| theta2(deg)\t| pressure(GPa)\t| a (Angstroms"
    for i in range(len(ks)):
        print str(ks[i]) + "\t| " + str(round(thetas[i], 5)) + "  \t| " + str(
            round(pressure[i][0], 2)) + "   \t| " + str(round(theas[i], 5))
    avgp = sum(pressure) / len(pressure)
    print "At temperature " + str(Tval) + "K"
    err = max([fabs(i - avgp) for i in pressure])
    print "Average Pressure: " + str(round(avgp, 2)) + "GPa, Error: " + str(
        round(err, 3)) + "GPa, StdDev: " + str(round(std(pressure), 3))

    pl.show()
Example #48
def compute(chifile, peaksfile, Tfil, delta, ksq, writefile, minp, maxp,
            lamda):
    xxs, yys = readchi(chifile)

    #####################
    #Read in the peaks file
    found = False
    peaksx = list()
    peaksy = list()
    approx_p = 0.0
    if not (peaksfile is None):
        f = open(peaksfile, 'r')
        for line in f:
            if line[0] == '#':
                continue
            line = line.lstrip().rstrip()
            [fname, Ttemp, lamda, approx_p, line] = line.split(None, 4)
            if fname == chifile:
                lamda = float(lamda)
                approx_p = float(approx_p)
                if Tfil is None:
                    Tfil = float(Ttemp)
                found = True
                break
        if found:
            val = line.split(None, 2)
            if val[0] == '0' or val[0] == '-1':  #No information on peaks
                startloc = 0
            else:
                startloc = float(val[0])  #get the new peak info
                [peaksx.append(float(i)) for i in val[1].split(',')]
                peaksx.sort()
                i = 0
                px = peaksx[i]
                for (j, xx) in enumerate(xxs):
                    if px < xx:
                        peaksy.append(((xx - px) * yys[j] +
                                       (px - xxs[j - 1]) * yys[j - 1]) /
                                      (xxs[j] - xxs[j - 1]))
                        i += 1
                        if i == len(peaksx):
                            break
                        px = peaksx[i]
        else:
            print "\nError: Unable to find chi file " + chifile + " in peaks file " + peaksfile
            print "\nTry calling again without the peaks file arguement."
            exit(0)
    else:
        startloc = xxs[0]

    #####################
    #Load in the MgO data
    mgo_a0 = 4.213
    (ARs, Ts, PPs) = loadMgO('/home/acadien/Documents/ascwork/EOS/mgo_eos.dat')
    As = [i * mgo_a0 for i in ARs]
    if Tfil == 0.0:
        Ps = PPs[:, 0]
    elif Tfil == 300.0:
        As.insert(0, mgo_a0)
        Ps = PPs[:, 1].tolist()
        Ps.insert(0, 1.0)
    else:
        Ps = interp_T(Tfil, Ts[2:], PPs[:, 2:])

    #####################
    #Analyze the spectrum: find the peaks
    inp = list()

    startloc = max(startloc, 5.0)
    for (ind, xx) in enumerate(xxs):
        if xx >= startloc:
            break
    strtind = ind

    while True:
        ind = localMax(yys, ind, delta)
        if ind == -1:
            break
        else:
            mxslp = max(
                [fabs(yys[j] - yys[j + 1]) for j in range(ind - 5, ind + 5)])
            if mxslp > 0.1:
                inp.append(ind)

    rind = 50
    yreverse = yys[::-1]
    while True:
        rind = localMax(yreverse, rind, delta)
        if len(xxs) - rind >= strtind:
            break
        if rind == -1:
            break
        else:
            ind = len(yys) - rind
            found = False
            for pk in inp:
                if abs(pk - ind) < 3:
                    found = True
                    break
            if found == False:
                if ind < 50:
                    continue
                mxslp = max([
                    fabs(yys[j] - yys[j + 1]) for j in range(ind - 5, ind + 5)
                ])
                if mxslp > 0.1:
                    inp.append(ind)

    inp.sort()

    #####################
    #Plot normal peaks
    #Draw the blue peaks first to be overwritten by red peaks
    pl.figure()
    pl.plot(xxs, yys, ls='dotted')
    xx = [xxs[i] for i in inp]
    yy = [yys[i] for i in inp]
    [pl.scatter(a, b) for (a, b) in zip(xx, yy)]
    [pl.scatter(a, b) for (a, b) in zip(peaksx, peaksy)]
    pl.title(chifile + ", T=" + str(Tfil) + ", Kmag=sqrt(" + str(ksq) + ")")

    #####################
    #Calculate the MgO Peaks at this pressure
    #Get the MgO lattice constant for the desired pressure
    mgo_s = UnivariateSpline(Ps, As)
    print Ps
    mgo_sps = linspace(min(Ps), max(Ps), len(Ps))
    mgo_sas = mgo_s(mgo_sps)
    coefs = cspline1d(mgo_sas)
    latconst = cspline1d_eval(coefs, [approx_p],
                              dx=mgo_sps[1] - mgo_sps[0],
                              x0=mgo_sps[0])[0]

    #Get the intensity factors
    mgo_ks = [3, 4, 8, 9, 11, 12]  #Valid K vectors for MgO
    mgo_mp = [8.0, 6.0, 12.0, 6.0, 24.0, 8.0]  #Multiplicity
    mgo_sf = [1.0, 3.0, 3.0, 1.0, 1.0, 3.0]  #Structure Factor
    mgo_Im = [(mgo_sf[i] * mgo_sf[i]) / mgo_mp[i]
              for i in range(len(mgo_sf))]  #Intensity multiplier

    #Find the MgO peaks at this pressure
    mgo_2th = list()
    mgo_I = list()
    for (i, K) in enumerate(mgo_ks):
        theta = asin(lamda * sqrt(float(K)) * 0.5 / latconst)
        mgo_2th.append(degrees(theta) * 2)
        mgo_I.append(mgo_Im[i] * (1 + cos(theta * 2) * cos(theta * 2)) /
                     (sin(theta) * sin(theta) * cos(theta)))
    mxI = max(mgo_I)
    mxA = sum(xxs) / len(xxs)
    mgo_I = [(i / mxI + 1) * mxA * mxA for i in mgo_I]

    #####################
    #Find possible peak matches
    s = UnivariateSpline(As, Ps)
    sas = linspace(min(As), max(As), len(As))
    sps = s(sas)
    coefs = cspline1d(sps)
    pressure = list()
    ks = list()
    theas = list()
    thetas = list()

    peaksx.extend([xxs[i] for i in inp])
    peaksy.extend([yys[i] for i in inp])

    for j in ksq:  #for each k value given

        thepeak = mgo_2th[mgo_ks.index(next(p for p in mgo_ks if int(j) == p))]
        i = findnearest(peaksx, thepeak)

        #####################
        #Refine the peak using a Spline fit
        ind = findnearest(xxs, peaksx[i])
        lowbnd = ind - 10
        upbnd = ind + 10
        #[strt,end]=findpeakbounds(yys,findnearest(xxs,peaksx[i]),lowbnd,upbnd)
        #[xd,yd]=splinefit(xxs[strt:end],yys[strt:end],10*(end-strt))

        #pl.plot(xd,yd)
        pl.plot([thepeak, thepeak], [0, peaksy[i]], color='red')
        pl.text(thepeak, 5, str(j))

        #pkind=posmax(yd)
        #(theta2,intens)=(xd[pkind],yd[pkind])
        (theta2, intens) = (xxs[ind], yys[ind])
        """if round(theta2,2)==8.84: theta2=8.73"""

        d = lamda / (2.0 * sin(radians(theta2) / 2.0))

        a = d * sqrt(float(j))
        if (a > min(As) - tol and a < max(As) + tol):
            #Found a valid MgO peak
            pres = cspline1d_eval(coefs, [a], dx=sas[1] - sas[0], x0=sas[0])[0]
            if pres < minp or pres > maxp:
                continue
            ks.append(j)
            theas.append(a)
            thetas.append(theta2)

            pressure.append(pres)
            pl.scatter(theta2, intens, s=30, c='red', marker='o')
            pl.text(theta2,
                    intens,
                    str(round(theta2, 2)),
                    position=(theta2, intens),
                    size='x-small')

    #####################
    #Print Results

    print "Possible Matches near approximated pressure " + str(approx_p)
    print "kmag\t| theta2(deg)\t| pressure(GPa)\t| a (Angstroms"
    for i in range(len(ks)):
        print str(ks[i]) + "\t| " + str(round(thetas[i], 5)) + "  \t| " + str(
            round(pressure[i], 2)) + "   \t| " + str(round(theas[i], 5))
    avgp = sum(pressure) / len(pressure)
    print "At temperature " + str(Tfil) + "K"
    err = max([fabs(i - avgp) for i in pressure])
    print "Average Pressure: " + str(round(avgp, 2)) + "GPa, Error: " + str(
        round(err, 2)) + "GPa, StdDev: " + str(round(std(pressure), 2))

    pl.show()

    ######################
    #Write results to file
    if writefile is not None:
        print "Appending results to file " + writefile + "."
        file = open(writefile, 'a')
        file.write(chifile + "\t" + str(Tfil) + "\t" + str(round(avgp, 2)) +
                   "\t" + str(round(err, 2)) + "\t" + str(ks) + "\t" +
                   str([str(round(i, 2)) for i in pressure]) + "\n")
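Example #49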
    def getVelocity(self, samplesData:DataFrame, smooth:str, convertToDeg:bool) -> DataFrame:
        """Method for calculating eye velocity, normally pixels converted to degrees first.

        :param samplesData: dataframe to operate on, containing appropriate eyetracker columns (Time, X, Y, etc.).
        :param smooth: algo to use, normally passed by command line argument.
        :param convertToDeg: whether data is passed in raw pixel values or visual angle degrees.
        :return: data with added *Velocity columns (and smoothed position columns).
        """
        #TODO data column names hard-coded, need refactor to global name dictionary mapper (SMI, Tobii variants)
        #  mapping goes to multiData metadata property
        #TODO B side (binocular) variant not implemented (applicable for SMI ETG)
        if all(samplesData['L POR X [px]'] == samplesData['R POR X [px]']) and all(samplesData['L POR Y [px]'] == samplesData['R POR Y [px]']):
            self.main.printToOut('Left and right channels detected equivalent. Working with one channel only.')
            samplesData.metadata['equivalent'] = True

        metadata = samplesData.metadata
        self.main.printToOut('WARNING: Dimensions metadata from samples file is considered correct and precise, and used in pixel-to-degree conversions.')
        self.main.printToOut('Now calculating velocity, be patient.')

        if metadata.get('equivalent', False):
            # the dominant eye should be used here
            sides = ['R']
        else:
            sides = ['L', 'R']

        #TODO skipping one channel if same
        for side in sides:
            for dim in ['X', 'Y']:
                # smoothing
                dataToSmooth = samplesData['{0} POR {1} [px]'.format(side, dim)]
                if smooth == 'savgol':
                    samplesData['{0}POR{1}PxSmoothed'.format(side, dim)] = savgol_filter(dataToSmooth, 15, 2)
                elif smooth == 'spline':
                    #scipy.interpolate.UnivariateSpline(x,y, k=1).get_coeffs()
                    samplesData['{0}POR{1}PxSmoothed'.format(side, dim)] = cspline1d(np.array(dataToSmooth), lamb=3)
                elif smooth == 'conv':
                    #width and shape of convolution, equivalent to moving average if all 1
                    win = np.array([1,1,1,1,1,1])
                    samplesData['{0}POR{1}PxSmoothed'.format(side, dim)] = convolve(np.array(dataToSmooth), in2=win, mode='same') / win.sum()
                else:
                    self.main.printToOut('ERROR: Invalid smoothing function specified.')



                if dim == 'X':
                    screenDim = metadata['screenWidthPx']
                    screenRes = metadata['screenHResMm']
                    multiplier = 1
                elif dim == 'Y':
                    screenDim = metadata['screenHeightPx']
                    screenRes = metadata['screenVResMm']
                    multiplier = -1


                if not convertToDeg:
                    self.main.printToOut('ERROR: Using raw pixel data without degree conversion is not implemented; column names are hard-coded.')
                    raise NotImplementedError
                else:
                    #converting to DEGREES
                    samplesData['{0}POR{1}Mm'.format(side, dim)]  = multiplier * (samplesData['{0} POR {1} [px]'.format(side, dim)] - screenDim / 2) * screenRes
                    coordsMm = samplesData['{0}POR{1}Mm'.format(side, dim)]
                    samplesData['{0}POR{1}Deg'.format(side, dim)] = np.sign(coordsMm) * coordsMm.apply(lambda x: Utils.getSeparation(x,0, 0,0,  z=metadata['headDistanceMm'],  mode='fromCartesian'))
                    #----
                    samplesData['{0}POR{1}MmSmoothed'.format(side, dim)] = multiplier * (samplesData['{0}POR{1}PxSmoothed'.format(side, dim)] - screenDim / 2) * screenRes
                    coordsMm = samplesData['{0}POR{1}MmSmoothed'.format(side, dim)]
                    samplesData['{0}POR{1}DegSmoothed'.format(side, dim)] = np.sign(coordsMm) * coordsMm.apply(lambda x: Utils.getSeparation(x,0, 0,0,  z=metadata['headDistanceMm'],  mode='fromCartesian'))



            #VELOCITY calculation
            x = samplesData['{0}PORXDeg'.format(side)]
            y = samplesData['{0}PORYDeg'.format(side)]
            row = DataFrame({'x1':x[1:].reset_index(drop=True), 'y1':y[1:].reset_index(drop=True),  'x0':x[:(len(x) - 1)].reset_index(drop=True), 'y0':y[:(len(y) - 1)].reset_index(drop=True)})
            seps = row.apply(lambda rowApply: Utils.getSeparation(x1=rowApply['x1'], y1=rowApply['y1'],  x2=rowApply['x0'], y2=rowApply['y0'],  z=metadata['headDistanceMm'],  mode='fromPolar'), axis=1)
            separation = np.hstack((1, seps))
            timelag = np.hstack((1, np.diff(samplesData['Time'])))
            samplesData['{0}Velocity'.format(side)] = separation / timelag

            #----
            x = samplesData['{0}PORXDegSmoothed'.format(side)]
            y = samplesData['{0}PORYDegSmoothed'.format(side)]
            row = DataFrame({'x1': x[1:].reset_index(drop=True), 'y1': y[1:].reset_index(drop=True), 'x0': x[:(len(x) - 1)].reset_index(drop=True), 'y0': y[:(len(y) - 1)].reset_index(drop=True)})
            seps = row.apply(lambda rowApply: Utils.getSeparation(x1=rowApply['x1'], y1=rowApply['y1'], x2=rowApply['x0'], y2=rowApply['y0'], z=metadata['headDistanceMm'], mode='fromPolar'), axis=1)
            separation = np.hstack((1, seps))
            timelag = np.hstack(( 1, np.diff(samplesData['Time']) ))
            samplesData['{0}VelocitySmoothed'.format(side)] = separation / timelag


        self.main.printToOut('Done.', status='ok')
        return samplesData
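For intuition, the pixel-to-degree conversion above is flat-screen trigonometry. A minimal sketch with hypothetical screen numbers (Utils.getSeparation is project-specific, so plain numpy trigonometry stands in for it):

import numpy as np

px = 1200.0                 # gaze x in pixels (hypothetical)
screen_width_px = 1920.0
mm_per_px = 0.277           # horizontal resolution in mm per pixel
head_distance_mm = 600.0

offset_mm = (px - screen_width_px / 2) * mm_per_px               # offset from screen center
angle_deg = np.degrees(np.arctan2(offset_mm, head_distance_mm))  # ~6.3 deg of visual angle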
Example #50
# (setup assumed; the source truncates this example before these lines)
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate, signal

t = np.arange(10)             # coarse, unit-spaced sample points
tt = np.linspace(0., 9., 91)  # dense evaluation points
y = np.sin(t)

# Object-oriented wrapper around FITPACK
spl1 = interpolate.UnivariateSpline(t, y, s=0)
y1 = spl1(tt)

# Same as UnivariateSpline with s=0
spl2 = interpolate.InterpolatedUnivariateSpline(t, y)
y2 = spl2(tt)

# Procedural (non-object-oriented) wrapper around FITPACK
c1 = interpolate.splrep(t, y)
y3 = interpolate.splev(tt, c1)

# Cubic spline
c2 = signal.cspline1d(y)
y4 = signal.cspline1d_eval(c2, tt)

# Quadratic spline
c3 = signal.qspline1d(y)
y5 = signal.qspline1d_eval(c3, tt)

plt.figure()
plt.plot(t, y, "o")
plt.plot(tt, y1)
plt.plot(tt, y2)
plt.plot(tt, y3)
plt.plot(tt, y4)
plt.plot(tt, y5)
plt.show()
Example #51
n_samples = tkinter.simpledialog.askinteger('Number of samples',
        'Type in the number of samples you want to be displayed (example: 3000, 6000, 10000 etc.)')

#Define ecg
record = wfdb.rdrecord(file, sampto=n_samples)
ann = wfdb.rdann(file, 'dat', sampto=n_samples)

#Filerecord
file_record = record.__dict__
#print(file_record)

wfdb.plot_items(signal=record.p_signal, title='ECG Signal',ann_samp=[ann.sample, ann.sample], time_units='samples', figsize=(10,4))

#Detect R-Peaks
signal_slice = np.ndarray.flatten(record.p_signal[0:n_samples])
smooth_signal = signal.cspline1d(signal_slice, lamb=1000)                   #smoothing the signal (filtering)
#r_peak_index = peakutils.indexes(smooth_signal, thres = 0.45, min_dist = 0.1)    # first peak detection option
peak_index = signal.find_peaks_cwt(smooth_signal, widths= np.arange(60,80))     # second peak detection option

fig, ax = plt.subplots()

ax.set_title('Detect R peak')
ax.plot(signal_slice)

p_min_distance = -20  # marking for p-wave example
p_max_distance = -60

t_min_distance = 20  # marking for t-wave example
t_max_distance = 100

for peak in peak_index:
    # (truncated in the source; presumably each R peak is annotated using the
    # p-wave and t-wave windows defined above)
    pass
Example #52
def interp(dx, knots, xis):
    cspl = cspline1d(knots)  # cubic spline coefficients
    return cspline1d_eval(cspl, xis, dx=dx, x0=0.0)
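A hypothetical call (knots sampled every dx=0.5 starting at the function's hard-coded x0=0.0, evaluated on a finer grid; assumes numpy and the scipy.signal imports the function needs):

import numpy as np

knots = np.cos(np.arange(0.0, 10.0, 0.5))  # uniformly spaced samples
fine_x = np.linspace(0.0, 9.5, 200)
fine_y = interp(0.5, knots, fine_x)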
Example #53
File: pt.py  Project: JohanComparat/pyLPT
    def runCosmicEmu(self, sig8):
        """
        use the CosmicEmu (Lawrence, Heitmann, Habib et al.) nonlinear power spectrum emulator.

        Note that littleh will be (re)set, according to CMB constraint
        """
        # first run in camb

        #c_lin = copy.copy(self)
        #c_lin.

        a = 1./(self.cp.transfer_redshift[0] + 1)
        if (a < 0.5)|(a > 1.):
            print 'Warning! outside range of z accuracy (z = 1 to 0).'
        
        scalefax=N.arange(0.5,1.001,0.1)
        coltoreturn = N.where(N.abs(a - scalefax) == min(N.abs(a - scalefax)))[0][0]
        print 'returning results at a=',scalefax[coltoreturn]
        f = file('emu.in','w')
        f.write('emu.out\n')
        f.write(str(self.cp.omch2+self.cp.ombh2)+'\n')
        f.write(str(self.cp.ombh2)+'\n')
        f.write(str(self.cp.scalar_spectral_index[0])+'\n')
        f.write(str(sig8)+'\n')
        f.write(str(self.cp.w)+'\n')
        f.write('2\n')
        f.close()
        
        os.system('/Users/neyrinck/CosmicEmu_v1.1/emu.exe < emu.in > emu.err')

        # read little h
        f = open('emu.out','r')
        for i in range(6):
            dumb=f.readline()
        littleh = float(f.readline().split()[-1])
        self.cp.hubble = 100.*littleh
        print 'littleh changed to ',littleh

        f.close()
        
        emu = N.loadtxt('emu.out')
        kemu = emu[:,0]/littleh # should now be in h/Mpc
        pemu = emu[:,coltoreturn+1]*littleh**3 # should now be in (Mpc/h)^3
        if self.cp.transfer_k_per_logint == 0:
            self.cp.transfer_k_per_logint = 512.
        
        #need to get into log-scale k
        self.k = kemu[0]*10.**N.arange(0.,N.log10(kemu[-1]/kemu[0]),1./self.cp.transfer_k_per_logint)

        interpemu = SI.interp1d(N.log(kemu),N.log(pemu))#,kind='cubic')
        #self.pk = interpemu(self.k)
        self.pk = N.exp(interpemu(N.log(self.k)))
        #self.pk = utils.splineIntLinExt(pemu, kemu, self.k)

        self.logk = 1.*N.log(self.k)
        self.logpk = 1.*N.log(self.pk)

        #self.kextend(-5,3,calcsplinecoeff=True)

        self.pkSplineCoeff = SS.cspline1d(self.logpk)

        return
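The log-log resampling step above is the reusable piece. A minimal standalone sketch with a synthetic power law (not actual CosmicEmu output): interpolate log P against log k, then exponentiate back.

import numpy as N
import scipy.interpolate as SI

kemu = N.logspace(-2, 1, 50)
pemu = 1e4 * kemu**-1.5                # fake power spectrum in place of emu.out
k = kemu[0] * 10.**N.arange(0., N.log10(kemu[-1] / kemu[0]), 1. / 512.)
interpemu = SI.interp1d(N.log(kemu), N.log(pemu))
pk = N.exp(interpemu(N.log(k)))        # smooth in log-log space, as above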
Example #54
def splinefit(xdata, ydata):
    xs = arange(min(xdata), max(xdata), 0.1 / float(len(xdata)))
    cj = cspline1d(array(ydata))
    return (xs, cspline1d_eval(cj, xs, dx=xdata[1] - xdata[0], x0=xdata[0]))
Example #55
#!/usr/bin/python

#coding: utf-8

from numpy import arange, cos, sin

# two signal-processing functions from scipy
from scipy.signal import cspline1d, cspline1d_eval

# two Matplotlib functions for plotting
from pylab import plot, show

x0 = arange(20)
y0 = cos(x0) * sin(x0 / 2)  # y as a function of x
dx = x0[1] - x0[0]  # original sample spacing
x1 = arange(-1, 21, 0.1)

# spline coefficients for a one-dimensional array
cj = cspline1d(y0)

# evaluate the spline at a new set of points
y1 = cspline1d_eval(cj, x1, dx=dx, x0=x0[0])

plot(x1, y1, '-g', x0, y0, '^y')  # draw both curves
show()  # display the plot
Example #56
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import cspline1d, cspline1d_eval


def wiggle(values, origin=0, posFill='black', negFill=None, lineColor='black',
           resampleRatio=10, rescale=False, zmin=0, zmax=None, ax=None):
    """
    Plot a trace in VAWT(Variable Area Wiggle Trace)

    Parameters
    ----------
    values: input data (1D numpy array)

    origin: (default, 0) value to fill above or below (float)

    posFill: (default, black)
        color to fill positive wiggles with (string or None)

    negFill: (default, None)
        color to fill negative wiggles with (string or None)

    lineColor: (default, black)
        color of wiggle trace (string or None)

    resampleRatio: (default, 10)
        factor to resample traces by before plotting (1 = raw data) (float)

    rescale: (default, False)
        If True, rescale "x" to be between -1 and 1

    zmin: (default, 0)
        The minimum z to use for plotting

    zmax: (default, len(values))
        The maximum z to use for plotting

    ax: (default, current axis)
        The matplotlib axis to plot onto

    Returns
    -------
    Plot
    """
    if zmax is None:
        zmax = values.size

    # Rescale so that values ranges from -1 to 1
    if rescale:
        values = values.astype(float)
        values -= values.min()
        values /= values.ptp()
        values *= 2
        values -= 1

    # Interpolate at resampleRatio x the previous density
    resample_z = np.linspace(0, values.size, values.size * resampleRatio)
    # cubic spline interpolation
    cj = cspline1d(values)
    resample_v = cspline1d_eval(cj, resample_z)

    # newz = np.linspace(zmax, zmin, resample_z.size)
    # newz = np.linspace(zmin, zmax, resample_z.size)
    newz = resample_z
    if origin is None:
        origin = resample_v.mean()

    # Plot
    if ax is None:
        ax = plt.gca()
    if posFill is not None:
        ax.fill_betweenx(newz, resample_v, origin,
                         where=resample_v > origin,
                         facecolor=posFill)
    if negFill is not None:
        ax.fill_betweenx(newz, resample_v, origin,
                         where=resample_v < origin,
                         facecolor=negFill)
    if lineColor is not None:
        ax.plot(resample_v, newz, color=lineColor, linewidth=.1)
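A minimal usage sketch of wiggle (synthetic trace; assumes the imports above):

trace = np.sin(np.linspace(0, 4 * np.pi, 200)) * np.linspace(1.0, 0.2, 200)
fig, ax = plt.subplots()
wiggle(trace, rescale=True, ax=ax)
plt.show()

Example #57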
def get_errorbar(fv,
                 fname=None,
                 trace=None,
                 frac=0.1,
                 additional_fixed=None,
                 plotresult=False,
                 guess=dict()):
    """ Use Lakowicz's f-statistic method to find the 95% confidence interval on a fitting parameter.
        fv -- a string/dictionary key identifying the parameter to find a confidence interval on.
        fname -- (optional, can use trace instead) fname of data file to fit.
        trace -- can use trace instead of fname to pass in info on wraptime and irf fname, etc.
        frac -- the fractional change in the parameter to use for the initial 5-point mesh
        additional_fixed -- any parameters you want to hold fixed during the fitting
        plotresult -- plot the f-statistic curve?
        guess -- dictionary of initial parameters for the fit (by default, best-fit values are used)
    """
    #bestfit, bestacorr = load_wire( fname )
    if fname is not None:
        raise ValueError(
            'have modified this to prefer working with actual traces to forward wraptime, etc., to do_fit'
        )
        bestfit = load_wire(fname)
    if trace is not None:
        assert fname == None
        bestfit = trace.fitresults.copy()
    bestChi2 = bestfit['ReducedChi2']
    irf = bestfit['irf_dispersion']
    lkeys = ['l0']
    akeys = ['a0']
    if bestfit.has_key('l1'):
        lkeys.append('l1')
        akeys.append('a1')
    if bestfit.has_key('l2'):
        lkeys.append('l2')
        akeys.append('a2')
    if bestfit.has_key('l3'):
        lkeys.append('l3')
        akeys.append('a3')
    fixedparams = [fv]
    if additional_fixed is not None: fixedparams += additional_fixed
    fv_best = bestfit[fv]

    # If holding l1-l3 lifetimes fixed, as we did
    # for the actual fit, then Fx should equal 1.0 at the 'bestfit' value of the parameter.
    # But if l1-l3 are allowed to vary (making a more conservative error bar), then
    # Fx may be lower than 1.0, and the minimum may not be at the "bestfit" value.
    # This is to say that the bestfit of the constrained fit may not be the bestfit of the unconstrained fit.

    #Fx = 1.0038 # Threshold found from F-statistic 1600 deg. freedom (~half trace points, following Fundamentals Of Fluorescence Spectroscopy)
    Fx = 1.001858  # Threshold found from F-statistic on all 3282 points. Seems easier to justify
    Fx_list = []  # later we'll sort these in order of increasing parameter value to enable interpolation
    val_step = frac * fv_best / 2
    argvals = arange(fv_best * (1.0 - frac),
                     fv_best * (1.0 + frac) + val_step, val_step)

    # do a coarse (5-pt) run across the data
    for val in argvals:
        bestfit[fv] = val
        l = [bestfit[key] for key in lkeys]
        a = [bestfit[key] for key in akeys]
        params = do_fit(trace, l, a, fixedparams, irf, guess)
        Fx_list.append([val, params['ReducedChi2'] / bestChi2])

    if all(array(Fx_list)[:, 1] > Fx):
        Fx_list = []
        for val in argvals:
            bestfit[fv] = val
            cpy = bestfit.copy()
            for key in lkeys:
                if key not in fixedparams: cpy[key] = 1.25 * bestfit[key]
            for key in akeys:
                if key not in fixedparams: cpy[key] = 1.5 * bestfit[key]
            l = [cpy[key] for key in lkeys]
            a = [cpy[key] for key in akeys]
            params = do_fit(trace, l, a, fixedparams, irf, guess)
            Fx_list.append([val, params['ReducedChi2'] / bestChi2])

    if all(array(Fx_list)[:, 1] > Fx):
        raise ValueError("Problem fitting: always above Fx. Min: %f" %
                         (array(Fx_list)[:, 1].min()))

    # if the left side (low param value) didn't exceed Fx threshold, extend
    val = argvals[0]
    while Fx_list[0][1] < Fx:
        val -= val_step
        if val < 0:
            if fv in ['l1', 'l2', 'l3']:
                raise ValueError("long-time component just went negative...")
            else:
                break
        bestfit[fv] = val
        l = [bestfit[key] for key in lkeys]
        a = [bestfit[key] for key in akeys]
        params = do_fit(trace, l, a, fixedparams, irf, guess)
        Fx_list.append([val, params['ReducedChi2'] / bestChi2])
        Fx_list.sort(key=lambda x: x[0])  # sort by first element (parameter value)

    # if the right side (high param value) didn't exceed Fx threshold, extend
    val = argvals[-1]
    while Fx_list[-1][1] < Fx:
        val += val_step
        bestfit[fv] = val
        l = [bestfit[key] for key in lkeys]
        a = [bestfit[key] for key in akeys]
        params = do_fit(trace, l, a, fixedparams, irf, guess)
        Fx_list.append([val, params['ReducedChi2'] / bestChi2])
        Fx_list.sort(key=lambda x: x[0])  # sort by first element (parameter value)

    # interpolate to find values at threshold
    Fx_array = array(Fx_list)
    splines = cspline1d(Fx_array[:, 1])
    interp_val = linspace(Fx_array[0, 0], Fx_array[-1, 0], 500)
    interp_Fx = cspline1d_eval(splines,
                               interp_val,
                               dx=val_step,
                               x0=Fx_array[:, 0].min())
    error_bar = [
        interp_val[find(interp_Fx < Fx)[0]],
        interp_val[find(interp_Fx < Fx)[-1]]
    ]

    if plotresult:
        fig = figure(1)
        #fig.clf()
        ax_chi = fig.add_subplot(111)
        ax_chi.cla()
        ax_chi.plot(interp_val, interp_Fx, '-k')
        ax_chi.plot(interp_val, [Fx] * len(interp_val), '--k')
        ax_chi.plot(Fx_array[:, 0], Fx_array[:, 1], 'sg')
        ax_chi.plot(error_bar, [Fx] * 2, '-b', lw=3.0)

        ax_chi.set_ylim([0.99, 1.01])
        fig.show()
        fig.canvas.draw()

    return error_bar
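The threshold-crossing interpolation at the end of get_errorbar can be tested in isolation. A self-contained sketch with made-up chi-square ratios (not from any real fit):

import numpy as np
from scipy.signal import cspline1d, cspline1d_eval

vals = np.linspace(0.9, 1.1, 5)                               # trial parameter values
ratios = np.array([1.0045, 1.0012, 1.0000, 1.0008, 1.0052])   # ReducedChi2 / bestChi2
Fx = 1.001858                                                 # 95% F-statistic threshold

coeffs = cspline1d(ratios)
fine = np.linspace(vals[0], vals[-1], 500)
curve = cspline1d_eval(coeffs, fine, dx=vals[1] - vals[0], x0=vals[0])
inside = fine[curve < Fx]                    # parameter values whose ratio is below threshold
error_bar = [inside[0], inside[-1]]          # the 95% confidence interval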