Example #1
    def function(self, E):
        """Calculate the number of counts in barns."""
        if self.delta.value != self._previous_delta:
            self._previous_delta = copy.copy(self.delta.value)
            self.integrategos(self.delta.value)
            self.calculate_knots()

        if self._previous_effective_angle != self.effective_angle.value:
            self.integrategos()

        # Convert to m**2/bin
        factor = 4.0 * np.pi * a0 ** 2.0 * R ** 2 / E / self.T
        # Maximum tabulated energy
        Emax = self.energyaxis[-1] + self.edgeenergy + self.delta.value
        cts = np.zeros(len(E))

        if self.fs_state is True:
            if self.__knots[-1] > Emax:
                Emax = self.__knots[-1]
            fine_structure_indices = np.logical_and(
                np.greater_equal(E, self.edgeenergy + self.delta.value),
                np.less(E, self.edgeenergy + self.delta.value + self.fs_emax))
            tabulated_indices = np.logical_and(
                np.greater_equal(E, self.edgeenergy + self.delta.value +
                                 self.fs_emax),
                np.less(E, Emax))
            if self.fs_mode == "new_spline":
                cts = np.where(
                    fine_structure_indices,
                    1e-25 * splev(E, (self.__knots, self.fslist.value, 3), 0),
                    cts)
            elif self.fs_mode == "spline":
                cts = np.where(
                    fine_structure_indices,
                    cspline1d_eval(self.fslist.value, E,
                                   dx=self.energy_scale / self.knots_factor,
                                   x0=self.edgeenergy + self.delta.value),
                    cts)
            elif self.fs_mode == "spline_times_edge":
                cts = np.where(
                    fine_structure_indices,
                    factor * splev(E - self.edgeenergy - self.delta.value,
                                   self.__goscoeff) *
                    cspline1d_eval(self.fslist.value, E,
                                   dx=self.energy_scale / self.knots_factor,
                                   x0=self.edgeenergy + self.delta.value),
                    cts)
        else:
            tabulated_indices = np.logical_and(
                np.greater_equal(E, self.edgeenergy + self.delta.value),
                np.less(E, Emax))
        powerlaw_indices = np.greater_equal(E, Emax)
        cts = np.where(
            tabulated_indices,
            factor * splev(E - self.edgeenergy - self.delta.value,
                           self.__goscoeff),
            cts)

        # Convert to barns/dispersion. Note: the R factor is introduced in
        # order to give the same value as DM, although it is not in the
        # equations.
        cts = np.where(powerlaw_indices, self.A * E ** -self.r, cts)
        return (self.__subshell_factor * self.intensity.value *
                self.energy_scale * 1.0e28 / R) * cts
Example #2
def TransformSpectrum(args):
    flux, invvar, c0, c1, newwave = args
    smoother = 3.0

    fitc = cspline1d(flux, lamb=smoother)
    fitc_iv = cspline1d(invvar, lamb=smoother)
    newf = cspline1d_eval(fitc, newwave, dx=c1, x0=c0)
    newiv = cspline1d_eval(fitc_iv, newwave, dx=c1, x0=c0)

    return (newf, newiv)
Example #3
def TransformSpectrum(args):
    flux, invvar, c0, c1, newwave = args
    smoother = 3.0

    fitc = cspline1d(flux, lamb=smoother)
    fitc_iv = cspline1d(invvar, lamb=smoother)
    newf = cspline1d_eval(fitc, newwave, dx=c1, x0=c0)
    newiv = cspline1d_eval(fitc_iv, newwave, dx=c1, x0=c0)

    return (newf, newiv)
Example #4
def splineIntLinExt(y,x,xNew,splinecoeff = 0.):
    """
    Use the scipy spline interpolation, but linearly extrapolate at the edges,
    since scipy.signal.cspline1d assumes mirror-symmetric boundary conditions
    """    
    if len(x) < 4:
        return interpolateLin(y,x,xNew)

    if isinstance(xNew,float):
        wasfloat = 1
        xNew = N.array([xNew])
    else:
        wasfloat = 0

    whereSpline = N.where((xNew > x[0]) * (xNew < x[-1]))[0]
    whereLin = N.where((xNew <= x[0]) + (xNew >= x[-1]))[0]
    
    ans = xNew * 0.
    if len(whereSpline) > 0:
        if isinstance(splinecoeff,float): # not pre-calculated.
            splinecoeff = SS.cspline1d(y)
        ans[whereSpline] = SS.cspline1d_eval(splinecoeff, xNew[whereSpline], dx=x[1]-x[0], x0 = x[0])

    if len(whereLin) > 0:
        ans[whereLin] = interpolateLin(y,x,xNew[whereLin])

    if wasfloat:
        return ans[0]
    else:
        return ans
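A self-contained sketch (made-up data, not part of the original source) of why the linear fallback at the edges is needed:

import numpy as np
from scipy.signal import cspline1d, cspline1d_eval

x = np.arange(10.0)      # uniform grid, as cspline1d requires
y = 2.0 * x + 1.0        # a straight line, trivially extrapolated by eye
cj = cspline1d(y)
print(cspline1d_eval(cj, np.array([4.5]), dx=1.0, x0=x[0]))   # ~10.0, inside the data
print(cspline1d_eval(cj, np.array([12.0]), dx=1.0, x0=x[0]))  # not ~25.0: the mirror
# boundary condition folds the evaluation back into the tabulated range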
Example #5
def splineIntBig0LittleLog(y,x,xNew,splinecoeff = 0.):
    """
    Interpolate with the scipy spline inside the tabulated range; below x[0],
    extrapolate along a log-log line through the first two points; above x[-1],
    return 0 (scipy.signal.cspline1d assumes mirror-symmetric boundary conditions)
    """
    if len(x) < 4:
        return interpolateLin(y,x,xNew)

    if isinstance(xNew,float):
        wasfloat = 1
        xNew = N.array([xNew])
    else:
        wasfloat = 0

    whereSpline = N.where((xNew >= x[0]) * (xNew <= x[-1]))[0]
    whereLittle = N.where(xNew < x[0])[0]
    whereBig = N.where(xNew >= x[-1])[0]
    
    ans = xNew * 0.
    if len(whereSpline) > 0:
        if isinstance(splinecoeff,float): # not pre-calculated.
            splinecoeff = SS.cspline1d(y)
        ans[whereSpline] = SS.cspline1d_eval(splinecoeff, xNew[whereSpline], dx=x[1]-x[0], x0 = x[0])

    if len(whereLittle) > 0:
        xw = xNew[whereLittle]
        logx,logy = N.log(x[:2]),N.log(y[:2])
        ans[whereLittle] = N.exp(logy[0] + (N.log(xw)-logx[0])/(logx[1]-logx[0])*(logy[1]-logy[0]))

    if wasfloat:
        return ans[0]
    else:
        return ans
Example #6
    def f(t, *args):
        for i, arg in enumerate(args):
            params[free_params[i]] = arg
        tshift = params[-1]
        ideal = fmodel(t, *args)
        irf = cspline1d_eval(self.irf_generator, t - tshift,
                             dx=self.irf_dt, x0=self.irf_t0)
        # the imaginary part is negligible, so keep only the real part
        convoluted = pylab.real(pylab.ifft(pylab.fft(ideal) * pylab.fft(irf)))
        return convoluted
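A minimal self-contained sketch (toy data, not the original IRF or model) of the FFT-based convolution used in f above:

import numpy as np

t = np.arange(256)
ideal = np.exp(-t / 30.0)                # toy decay model (one exponential term)
irf = np.exp(-(t - 20.0) ** 2 / 8.0)     # toy instrument response
irf /= irf.sum()                         # unit area, like a delta function
convoluted = np.real(np.fft.ifft(np.fft.fft(ideal) * np.fft.fft(irf)))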
Example #7
    def set_irf(self, irf=None, wraptime=None, dispersion=None):
        """
        The detector response isn't a delta-function, meaning that what
        you measure isn't the true time-dependence of the system you are
        measuring. It's the time dependence of the system convolved with
        the response of the detector. This method sets the measured
        trace of the detector response that will be used to convolve
        the multi-exponential model before fitting (thereby taking this
        convolution into account without doing nearly-impossible
        numerical deconvolution).
        """
        if isinstance( irf, Trace ):
            self.irf = irf
        elif type(irf) == str:
            self.irf = Trace( irf )
            if wraptime is not None:
                self.irf.wrapcurves( wraptime )
            elif self.wraptime is not None:
                self.irf.wrapcurves( self.wraptime )
        
        self.irf_dispersion = dispersion
        if dispersion is not None:
            # this is meant to address chromatic dispersion within the setup
            # (e.g. optical fiber)
            # don't bother with normalization b/c it gets normalized to unit area below anyway.
            #original = self.irf.curves[0].copy()
            #self.irf.curves[0][dispersion:] += original[:-dispersion]
            #self.irf.curves[0][:-dispersion] += original[dispersion:]
            len1 = len(self.irf.curves[0])
            chain_of_three = pylab.zeros( 3*len1 ) # stack three curves end-to-end so cspline1d_eval doesn't have to extrapolate beyond data
            chain_of_three[:len1] = self.irf.curves[0][:]
            chain_of_three[len1:2*len1] = self.irf.curves[0][:]
            chain_of_three[-len1:] = self.irf.curves[0][:]
            g = cspline1d(chain_of_three)
            smoothed = pylab.zeros( len1 )
            std_dev = dispersion/1000.0
            for t0 in pylab.linspace(-2*std_dev, 2*std_dev, 50):
                weight = pylab.exp( -t0**2/2.0/std_dev**2 )
                smoothed += weight * cspline1d_eval( g, self.irf.t[0]-t0, dx=self.irf.t[0][1], x0=-self.irf.t[0][-1] )
            self.irf.curves[0] = smoothed
            
        normalized = self.irf.curves[0].astype(float) / float(sum(self.irf.curves[0]))  # normalize integral to 1, just like a delta function
        self.irf.curves[0] = normalized.copy()

        self.irf_generator = cspline1d(self.irf.curves[0])
        self.irf_dt = self.irf.t[0][1]-self.irf.t[0][0]
        self.irf_t0 = self.irf.t[0][0]
        
        if False:
            """not sure this matters if we do interpolation
            """
            # difference in degree of binning (e.g. 8ps vs. 4ps is bin difference of 2)
            bin_difference = int(self.resolution / self.irf.resolution)
            if bin_difference != 1:
                raise ValueError("Have not yet tested deconvolution with different resolution than detector trace!!!")
                d = self.irf.curves[0]
                detector_binned = pylab.zeros( len(d)/bin_difference )
                for i in range( len(detector_binned ) ):
                    detector_binned[i] = sum( d[i*bin_difference : i*bin_difference+bin_difference] )
Example #8
def interp_T(T, Ts, PPs):
    xvals = zeros(1)
    xvals[0] = T
    Ps = list()
    for i in PPs:
        cj = cspline1d(i)
        Ps.append(cspline1d_eval(cj, xvals, dx=500.0, x0=Ts[0]))
    return Ps
Example #9
def interp_T(T, Ts, PPs):
    xvals = zeros(1)
    xvals[0] = T
    Ps = list()
    for i in PPs:
        cj = cspline1d(i)
        Ps.append(cspline1d_eval(cj, xvals, dx=500.0, x0=Ts[0]))
    return Ps
Example #10
def wiggle(x, origin=0, posFill='black', negFill=None, lineColor='black', 
        resampleRatio=10, rescale=False, ymin=0, ymax=None, ax=None):
    """Plots a "wiggle" trace
    Input:
        x: input data (1D numpy array)
        origin: (default, 0) value to fill above or below (float)
        posFill: (default, black) color to fill positive wiggles with (string 
            or None)
        negFill: (default, None) color to fill negative wiggles with (string 
            or None)
        lineColor: (default, black) color of wiggle trace (string or None)
        resampleRatio: (default, 10) factor to resample traces by before 
            plotting (1 = raw data) (float)
        rescale: (default, False) If True, rescale "x" to be between -1 and 1
        ymin: (default, 0) The minimum y to use for plotting
        ymax: (default, len(x)) The maximum y to use for plotting
        ax: (default, current axis) The matplotlib axis to plot onto
    Output:
        a matplotlib plot on the current axes
    """
    from matplotlib import pyplot as plt
    from scipy.signal import cspline1d, cspline1d_eval

    if ymax is None:
        ymax = x.size

    # Rescale so that x ranges from -1 to 1
    if rescale:
        x = x.astype(float)
        x -= x.min()
        x /= x.ptp()
        x *= 2
        x -= 1

    # Interpolate at resampleRatio x the previous density
    y = np.linspace(0, x.size, x.size)
    interp_y = np.linspace(0, x.size, x.size * resampleRatio)
    cj = cspline1d(x)
    interpX = cspline1d_eval(cj, interp_y)  # dx=1, x0=0 are the defaults
    newy = np.linspace(ymax, ymin, interp_y.size)
    if origin is None:
        origin = interpX.mean()

    # Plot
    if ax is None:
        ax = plt.gca()
    if posFill is not None: 
        ax.fill_betweenx(newy, interpX, origin,
                where=interpX > origin,
                facecolor=posFill)
    if negFill is not None:
        ax.fill_betweenx(newy, interpX, origin,
                where=interpX < origin,
                facecolor=negFill)
    if lineColor is not None:
        ax.plot(interpX, newy, color=lineColor)
Example #11
def spline_interpolate(oldx, oldy, newx, smoothing=0.001, **kw):
    """
    newy = spline_interpolate(oldx, oldy, newx)
    1-dimensional cubic spline, for cases where oldx and newx are on a uniform grid.
    """
    return cspline1d_eval(cspline1d(oldy),
                          newx,
                          dx=oldx[1] - oldx[0],
                          x0=oldx[0])
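A hypothetical call to the helper above (data and grids assumed):

import numpy as np
from scipy.signal import cspline1d, cspline1d_eval

oldx = np.linspace(0.0, 2.0 * np.pi, 20)   # uniform source grid
oldy = np.sin(oldx)
newx = np.linspace(0.0, 2.0 * np.pi, 200)  # denser target grid
newy = spline_interpolate(oldx, oldy, newx)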
Example #12
def wiggle(x, origin=0, posFill='black', negFill=None, lineColor='black',
        resampleRatio=10, rescale=False, ymin=0, ymax=None, ax=None):
    """Plots a "wiggle" trace
    Input:
        x: input data (1D numpy array)
        origin: (default, 0) value to fill above or below (float)
        posFill: (default, black) color to fill positive wiggles with (string
            or None)
        negFill: (default, None) color to fill negative wiggles with (string
            or None)
        lineColor: (default, black) color of wiggle trace (string or None)
        resampleRatio: (default, 10) factor to resample traces by before
            plotting (1 = raw data) (float)
        rescale: (default, False) If True, rescale "x" to be between -1 and 1
        ymin: (default, 0) The minimum y to use for plotting
        ymax: (default, len(x)) The maximum y to use for plotting
        ax: (default, current axis) The matplotlib axis to plot onto
    Output:
        a matplotlib plot on the current axes
    """
    from matplotlib import pyplot as plt
    from scipy.signal import cspline1d, cspline1d_eval

    if ymax is None:
        ymax = x.size

    # Rescale so that x ranges from -1 to 1
    if rescale:
        x = x.astype(float)
        x -= x.min()
        x /= x.ptp()
        x *= 2
        x -= 1

    # Interpolate at resampleRatio x the previous density
    y = np.linspace(0, x.size, x.size)
    interp_y = np.linspace(0, x.size, x.size * resampleRatio)
    cj = cspline1d(x)
    interpX = cspline1d_eval(cj, interp_y)  # dx=1, x0=0 are the defaults
    newy = np.linspace(ymax, ymin, interp_y.size)
    if origin is None:
        origin = interpX.mean()

    # Plot
    if ax is None:
        ax = plt.gca()
    if posFill is not None:
        ax.fill_betweenx(newy, interpX, origin,
                where=interpX > origin,
                facecolor=posFill)
    if negFill is not None:
        ax.fill_betweenx(newy, interpX, origin,
                where=interpX < origin,
                facecolor=negFill)
    if lineColor is not None:
        ax.plot(interpX, newy, color=lineColor)
Example #13
    def test_basic(self):
        y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])
        x = arange(len(y))
        dx = x[1] - x[0]
        cj = signal.cspline1d(y)

        x2 = arange(len(y) * 10.0) / 10.0
        y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0])

        # make sure interpolated values are on knot points
        assert_array_almost_equal(y2[::10], y, decimal=5)
Example #14
    def test_basic(self):
        y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])
        x = arange(len(y))
        dx = x[1] - x[0]
        cj = signal.cspline1d(y)

        x2 = arange(len(y) * 10.0) / 10.0
        y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0])

        # make sure interpolated values are on knot points
        assert_array_almost_equal(y2[::10], y, decimal=5)
Example #15
 def run(self):
     _, fileExtension = os.path.splitext(self.fileName)
     if fileExtension == '.gmv':
         print('Geomagnetic variation')
         with open(self.fileName, 'rt') as csvdata:
             date = []
             value = []
             for row in csv.reader(csvdata):
                 if ('#' in row[0]):
                     self.header.append(row)
                 else:
                     date.append(row[0])
                     value.append(row[1])
         self.notifyProgress.emit(20)
     elif fileExtension == '.ske':
         print('Kp estimation')
         with open(self.fileName, 'rt') as csvdata:
             date = []
             value = []
             for row in csv.reader(csvdata, delimiter=' '):
                 if ('#' in row[0]):
                     self.header.append(row)
                 else:
                     print(row)
                     if int(row[7]) < 2:
                         date.append(
                             dt.datetime.strptime(
                                 ''.join((row[0], row[1], row[2], row[4])),
                                 '%Y%m%d%H%M'))
                         value.append(float(row[-1]) -
                                      float(row[-14]))  # 4h
                         # value.append(float(row[-1])-float(row[19]))  # 1h
         self.notifyProgress.emit(20)
     signal_src = np.array((date, value), dtype=np.dtype('a25'))
     signal = signal_src[:, np.logical_not(
         np.isnan(signal_src[1, :].astype(float)))]
     self.notifyProgress.emit(60)
     if self.interpolate:
         self.time = signal_src[0, :].astype(np.datetime64).astype(
             dt.datetime)
         dx = dates.date2num(self.time[1]) - dates.date2num(self.time[0])
         cj = cspline1d(signal[1, :].astype(float))
         self.value = cspline1d_eval(cj,
                                     dates.date2num(self.time),
                                     dx=dx,
                                     x0=dates.date2num(self.time[0]))
     else:
         self.time = signal[0, :].astype(np.datetime64).astype(
             dt.datetime)
         self.value = signal[1, :].astype(float)
     self.notifyProgress.emit(80)
     self.loaded.emit()
Example #16
def doresample(orig_x, orig_y, new_x, method='cubic', padlen=0, antialias=False):
    """
    Resample data from one spacing to another.  By default, does not apply any antialiasing filter.

    Parameters
    ----------
    orig_x
    orig_y
    new_x
    method
    padlen

    Returns
    -------

    """
    pad_y = tide_filt.padvec(orig_y, padlen=padlen)
    tstep = orig_x[1] - orig_x[0]
    if padlen > 0:
        pad_x = np.concatenate((np.arange(orig_x[0] - padlen * tstep, orig_x[0], tstep),
                                orig_x,
                                np.arange(orig_x[-1] + tstep, orig_x[-1] + tstep * (padlen + 1), tstep)))
    else:
        pad_x = orig_x
    if padlen > 0:
        print('padlen=', padlen)
        print('tstep=', tstep)
        print(pad_x)

    # antialias and ringstop filter
    init_freq = len(pad_x) / (pad_x[-1] - pad_x[0])
    final_freq = len(new_x) / (new_x[-1] - new_x[0])
    if antialias and (init_freq > final_freq):
        aafilterfreq = final_freq / 2.0
        aafilter = tide_filt.noncausalfilter(filtertype='arb', usebutterworth=False)
        aafilter.setarb(0.0, 0.0, 0.95 * aafilterfreq, aafilterfreq)
        pad_y = aafilter.apply(init_freq, pad_y)

    if method == 'cubic':
        cj = signal.cspline1d(pad_y)
        return tide_filt.unpadvec(
            np.float64(signal.cspline1d_eval(cj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])), padlen=padlen)
    elif method == 'quadratic':
        qj = signal.qspline1d(pad_y)
        return tide_filt.unpadvec(
            np.float64(signal.qspline1d_eval(qj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])), padlen=padlen)
    elif method == 'univariate':
        interpolator = sp.interpolate.UnivariateSpline(pad_x, pad_y, k=3, s=0)  # s=0 interpolates
        return tide_filt.unpadvec(np.float64(interpolator(new_x)), padlen=padlen)
    else:
        print('invalid interpolation method')
        return None
Example #17
 def run(self):
     _, fileExtension = os.path.splitext(self.fileName)
     if fileExtension == '.gmv':
         print('Geomagnetic variation')
         with open(self.fileName, 'rt') as csvdata:
             date = []
             value = []
             for row in csv.reader(csvdata):
                 if ('#' in row[0]):
                     self.header.append(row)
                 else:
                     date.append(row[0])
                     value.append(row[1])
         self.notifyProgress.emit(20)
     elif fileExtension == '.ske':
         print('Kp estimation')
         with open(self.fileName, 'rt') as csvdata:
             date = []
             value = []
             for row in csv.reader(csvdata, delimiter=' '):
                 if ('#' in row[0]):
                     self.header.append(row)
                 else:
                     print(row)
                     if int(row[7]) < 2:
                         date.append(
                             dt.datetime.strptime(
                                 ''.join((row[0], row[1], row[2],
                                         row[4])),
                                 '%Y%m%d%H%M'))
                         value.append(float(row[-1])-float(row[-14]))  # 4h
                         # value.append(float(row[-1])-float(row[19]))  # 1h
         self.notifyProgress.emit(20)
     signal_src = np.array((date, value), dtype=np.dtype('a25'))
     signal = signal_src[:, np.logical_not(
         np.isnan(signal_src[1, :].astype(float)))]
     self.notifyProgress.emit(60)
     if self.interpolate:
         self.time = signal_src[0, :].astype(np.datetime64).astype(
             dt.datetime)
         dx = dates.date2num(self.time[1])-dates.date2num(self.time[0])
         cj = cspline1d(signal[1, :].astype(float))
         self.value = cspline1d_eval(cj, dates.date2num(self.time),
                                     dx=dx,
                                     x0=dates.date2num(self.time[0]))
     else:
         self.time = signal[0, :].astype(np.datetime64).astype(
             dt.datetime)
         self.value = signal[1, :].astype(float)
     self.notifyProgress.emit(80)
     self.loaded.emit()
Example #18
def interpol(x1, y1, x_out, plot=False):
    from scipy.signal import cspline1d, cspline1d_eval
    #assumes that data points are evenly spaced

    dx = x1[1] - x1[0]
    cj = cspline1d(y1)
    y_out = cspline1d_eval(cj, x_out, dx=dx, x0=x1[0])
    if plot:
        from pylab import plot, show, legend
        plot(x_out, y_out, 'ob', x1, y1, 'xg', ms=2)
        legend(['interpolated', 'original'])
        show()

    return y_out
Example #19
def interpol(x1, y1, x_out, plot=False):
    from scipy.signal import cspline1d, cspline1d_eval
    # assumes that data points are evenly spaced

    dx = x1[1] - x1[0]
    cj = cspline1d(y1)
    y_out = cspline1d_eval(cj, x_out, dx=dx, x0=x1[0])
    if plot:
        from pylab import plot, show, legend
        plot(x_out, y_out, 'ob', x1, y1, 'xg', ms=2)
        legend(['interpolated', 'original'])
        show()

    return y_out
Example #20
def SplineResample(y, n_x, x_range=None, smoother=3):
    ''' Resample a signal y using spline.

    y: the signal must be sampled on uniform x
    n_x: number of points for the new signal
    x_range: tuple (start x, end x) of the signal
    smoother: spline smoothing strength
    '''

    if x_range is None: x_range = (0., 1.)
    spcoeffs = cspline1d(y, lamb=smoother)
    return cspline1d_eval(
        spcoeffs, linspace(x_range[0], x_range[1], n_x),
        dx=(x_range[1] - x_range[0]) / (len(y) - 1.0), x0=x_range[0])
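A hypothetical use of SplineResample above (assumed data): resample a 100-sample signal onto 250 points over the same range.

import numpy as np

y = np.sin(np.linspace(0.0, 6.0, 100))
y_new = SplineResample(y, n_x=250, x_range=(0.0, 1.0), smoother=3)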
Example #21
def interp_around(X_sc, s_fracpeak, s_before, s_after, kind='cubic'):
    n_c = X_sc.shape[1]
    n_s = s_before + s_after
    Out_sc = np.empty((n_s, n_c), dtype=np.float32)
    for i_c in range(n_c):
        if kind == 'cubic':
            coeffs = cspline1d(X_sc[:, i_c])
            Out_sc[:, i_c] = cspline1d_eval(
                coeffs,
                newx=np.arange(s_fracpeak - s_before, s_fracpeak + s_after,
                               dtype=np.float32))
        elif kind == "linear":
            Out_sc[:, i_c] = interp1d(
                np.arange(X_sc.shape[0]), X_sc[:, i_c],
                bounds_error=True, kind=kind)(
                    np.arange(s_fracpeak - s_before, s_fracpeak + s_after,
                              dtype=np.float32))
        else:
            raise Exception("kind must be 'linear' or 'cubic'")
    return Out_sc
Example #22
def SplineResample(y, n_x, x_range=None, smoother=3):
    ''' Resample a signal y using spline.

    y: the signal must be sampled on uniform x
    n_x: number of points for the new signal
    x_range: tuple (start x, end x) of the signal
    smoother: spline smoothing strength
    '''

    if x_range is None: x_range = (0., 1.)
    spcoeffs = cspline1d(y, lamb=smoother)
    return cspline1d_eval(spcoeffs,
                          linspace(x_range[0], x_range[1], n_x),
                          dx=(x_range[1] - x_range[0]) / (len(y) - 1.0),
                          x0=x_range[0])
Example #23
def evalBeams(arrBeams, guessShearLoad, spc, debugFlag=False):
    
    lastBeam = arrBeams[-1]
    arrStrainEnergy = np.zeros(np.size(arrBeams))
    arrSurfEnergy = np.zeros(np.size(arrBeams))
    lastIndex = np.size(arrBeams)
    maxSurfEnergy = -gamma * lastBeam.w*(lastBeam.Lt - lastBeam.L)
    arrResults = np.array([])
    
    for j,beam in enumerate(arrBeams):
        if debugFlag:
            print('Beam Length: %f of %f'%(beam.L*scale,beam.Lt*scale))
        
        dispSearch(beam=beam,initLoad = guessShearLoad,goal=spc/2/scale,tol=1e-7,right=0,left=0)
        guessShearLoad = beam.shearLoad
        
        if debugFlag:
            print('Solved Beam -- TipDisp: %s Goal: %s Force: %s' % (beam.yTipDisplacement()*scale,spc/2,beam.shearLoad))
        
        arrStrainEnergy[j] = beam.calculateStrainEnergy()
        arrSurfEnergy[j] = -gamma * beam.w *(beam.Lt - beam.L)
        if arrStrainEnergy[j] >= np.abs(maxSurfEnergy): 
            # since there is more bending energy than surface energy stop computing 
            print('Super stiff beam')
            lastIndex = j
            break
    
    if lastIndex > 0:   # This ensures that we have more than one data point before trying to interpolate
        interpLens = np.linspace(arrBeamLens[0],arrBeamLens[lastIndex-1],num=100,endpoint=True) # Generate x values for which to interpolate
        csFit = cspline1d((arrStrainEnergy[0:lastIndex]+arrSurfEnergy[0:lastIndex]))    # Generate cubic spline fit to the sub dataset
        interpTotalEnergy = cspline1d_eval(csFit,interpLens,dx=(arrBeamLens[1]-arrBeamLens[0]), x0 = arrBeamLens[0])    # Generate the interpolated values from the fit and x points
        finalLen = interpLens[interpTotalEnergy.argmin()]   # find the minimum of the energy balance and grab index to choose the appropriate length
        
        if debugFlag:
            print('beamLens shape: %s arrStrain: %s'%(arrBeamLens[0:lastIndex].shape,arrStrainEnergy[0:lastIndex].shape))
            mpl.figure()
            mpl.plot(arrBeamLens[0:lastIndex]*scale,arrStrainEnergy[0:lastIndex]*scale)
            mpl.plot(arrBeamLens[0:lastIndex]*scale,arrSurfEnergy[0:lastIndex]*scale)
            mpl.plot(interpLens*scale,interpTotalEnergy*scale,arrBeamLens[0:lastIndex]*scale,(arrStrainEnergy+arrSurfEnergy)[0:lastIndex]*scale,'o')
        arrResults = np.array([arrBeamLens[0:lastIndex],arrStrainEnergy[0:lastIndex]])
    else:   # since there is only one datapoint then use that as the value
        finalLen = arrBeamLens[lastIndex]
        arrResults = np.array([arrBeamLens[lastIndex], arrStrainEnergy[lastIndex]])

    return (finalLen, arrResults)
Example #24
def peakdetect_spline(y_axis, x_axis, pad_len=20):
    """
    Performs a b-spline interpolation on the data to increase resolution and
    send the data to the 'peakdetect_zero_crossing' function for peak
    detection.

    Omitting the x_axis is forbidden as it would make the resulting x_axis
    value silly if it was returned as the index 50.234 or similar.

    Will find the same number of peaks as the 'peakdetect_zero_crossing'
    function, but might result in a more precise value of the peak.

    keyword arguments:
    y_axis -- A list containing the signal over which to find peaks

    x_axis -- A x-axis whose values correspond to the y_axis list and is used
        in the return to specify the position of the peaks.
        x-axis must be equally spaced.

    pad_len -- The factor by which the time resolution should be increased,
        e.g. 1 doubles the resolution.
        (default: 20)


    return: two lists [max_peaks, min_peaks] containing the positive and
        negative peaks respectively. Each cell of the lists contains a tuple
        of: (position, peak_value)
        to get the average peak value do: np.mean(max_peaks, 0)[1] on the
        results to unpack one of the lists into x, y coordinates do:
        x, y = zip(*max_peaks)
    """
    # check input data
    x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)
    # could perform a check if x_axis is equally spaced
    #if np.std(np.diff(x_axis)) > 1e-15: raise ValueError
    # perform spline interpolations
    dx = x_axis[1] - x_axis[0]
    x_interpolated = np.linspace(x_axis.min(), x_axis.max(),
                                 len(x_axis) * (pad_len + 1))
    cj = cspline1d(y_axis)
    y_interpolated = cspline1d_eval(cj, x_interpolated, dx=dx, x0=x_axis[0])
    # get peaks
    max_peaks, min_peaks = peakdetect_zero_crossing(y_interpolated,
                                                    x_interpolated)

    return [max_peaks, min_peaks]
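A hypothetical call (toy signal; _datacheck_peakdetect and peakdetect_zero_crossing must be available as in the source module):

import numpy as np

x = np.linspace(0.0, 10.0, 200)      # equally spaced, as required
y = np.sin(x)
max_peaks, min_peaks = peakdetect_spline(y, x, pad_len=20)
print(np.mean(max_peaks, 0)[1])      # average peak value, as the docstring suggests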
Example #25
def sr_interpol2(x, y, ytarget, doplot=0, factor=10):
    dx = x[1] - x[0]
    newx = linspace(min(x), max(x), factor * len(x))

    cj = cspline1d(y)
    newy = cspline1d_eval(cj, newx, dx=dx, x0=x[0])

    ysq = (ytarget - newy) ** 2
    index = where(ysq == min(ysq))

    if doplot:
        clf()
        plot(x, y, 'o')
        plot(newx, newy)
        plot(newx[index], newy[index], 'o')
        show()

    return newx[index[0][0]]
Example #26
    def fitSpline(self, degree=2):
        """
        **SUMMARY**

        A function to generate a spline curve fitting over the points in LineScan with
        order of precision given by the parameter degree

        **PARAMETERS**

        * *degree* - the precision of the generated spline 

        **RETURNS**

        The spline as a LineScan fitting over the initial values of LineScan

        **EXAMPLE**

        >>> import matplotlib.pyplot as plt
        >>> img = Image("lenna")
        >>> ls = img.getLineScan(pt1=(10,10), pt2=(20,20)).normalize()
        >>> spline = ls.fitSpline()
        >>> plt.plot(ls)
        >>> plt.show()
        >>> plt.plot(spline)
        >>> plt.show()
        
        **NOTES**

        Implementation taken from http://www.scipy.org/Cookbook/Interpolation  

        """
        if degree > 4:
            degree = 4  # No significant improvement with respect to time usage
        if degree < 1:
            warnings.warn('LineScan.fitSpline - degree needs to be >= 1')
            return None
        retVal = None
        y = np.array(self)
        x = np.arange(0, len(y), 1)
        dx = 1
        newx = np.arange(0, len(y) - 1, pow(0.1, degree))
        cj = sps.cspline1d(y)
        retVal = sps.cspline1d_eval(cj, newx, dx=dx, x0=x[0])
        return retVal
Example #27
def peakdetect_spline(y_axis, x_axis, pad_len=20):
    """
    Performs a b-spline interpolation on the data to increase resolution and
    send the data to the 'peakdetect_zero_crossing' function for peak 
    detection.
    
    Omitting the x_axis is forbidden as it would make the resulting x_axis
    value silly if it was returned as the index 50.234 or similar.
    
    Will find the same number of peaks as the 'peakdetect_zero_crossing'
    function, but might result in a more precise value of the peak.
    
    keyword arguments:
    y_axis -- A list containing the signal over which to find peaks
    
    x_axis -- A x-axis whose values correspond to the y_axis list and is used
        in the return to specify the position of the peaks. 
        x-axis must be equally spaced.
    
    pad_len -- The factor by which the time resolution should be increased,
        e.g. 1 doubles the resolution.
        (default: 20)
    
    
    return: two lists [max_peaks, min_peaks] containing the positive and
        negative peaks respectively. Each cell of the lists contains a tuple
        of: (position, peak_value) 
        to get the average peak value do: np.mean(max_peaks, 0)[1] on the
        results to unpack one of the lists into x, y coordinates do: 
        x, y = zip(*max_peaks)
    """
    # check input data
    x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)
    # could perform a check if x_axis is equally spaced
    #if np.std(np.diff(x_axis)) > 1e-15: raise ValueError
    # perform spline interpolations
    dx = x_axis[1] - x_axis[0]
    x_interpolated = np.linspace(x_axis.min(), x_axis.max(), len(x_axis) * (pad_len + 1))
    cj = cspline1d(y_axis)
    y_interpolated = cspline1d_eval(cj, x_interpolated, dx=dx,x0=x_axis[0])
    # get peaks
    max_peaks, min_peaks = peakdetect_zero_crossing(y_interpolated, x_interpolated)
    
    return [max_peaks, min_peaks]
Example #28
    def fitSpline(self,degree=2):
        """
        **SUMMARY**

        A function to generate a spline curve fitting over the points in LineScan with
        order of precision given by the parameter degree

        **PARAMETERS**

        * *degree* - the precision of the generated spline 

        **RETURNS**

        The spline as a LineScan fitting over the initial values of LineScan

        **EXAMPLE**

        >>> import matplotlib.pyplot as plt
        >>> img = Image("lenna")
        >>> ls = img.getLineScan(pt1=(10,10), pt2=(20,20)).normalize()
        >>> spline = ls.fitSpline()
        >>> plt.plot(ls)
        >>> plt.show()
        >>> plt.plot(spline)
        >>> plt.show()
        
        **NOTES**

        Implementation taken from http://www.scipy.org/Cookbook/Interpolation  

        """
        if degree > 4:
            degree = 4  # No significant improvement with respect to time usage
        if degree < 1:
            warnings.warn('LineScan.fitSpline - degree needs to be >= 1')
            return None
        retVal = None
        y = np.array(self)
        x = np.arange(0,len(y),1)
        dx = 1
        newx = np.arange(0,len(y)-1,pow(0.1,degree))
        cj = sps.cspline1d(y)
        retVal = sps.cspline1d_eval(cj,newx,dx=dx,x0=x[0])
        return retVal
Example #29
def _spline_interpolate(oldx, oldy, newx, smoothing=0.001,fast=True, **kw):
    """
    cubic splines for axis alignment using
    scipy.signal and/or scipy.interpolate

    newy = _spline_interpolate(oldx, oldy, newx, fast=True)
    if fast = True
       1-dimensional cubic spline for cases where
       oldx and newx are on a uniform grid.
    else
       handles multi-dimensional data, non-uniform x-grids, but is
       much slower for 1d cubic splines
    """
    from scipy.interpolate import splrep, splev
    from scipy.signal import cspline1d, cspline1d_eval
    if fast:
        return cspline1d_eval(cspline1d(oldy), newx, dx=oldx[1]-oldx[0],x0=oldx[0])
    else:
        rep = splrep(oldx,oldy,s=smoothing,full_output=False,**kw)
        return splev(newx, rep)
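On a uniform grid the two paths should agree closely away from the edges; a rough check with assumed data:

import numpy as np

oldx = np.linspace(0.0, 10.0, 11)
oldy = oldx ** 2
newx = np.linspace(0.0, 10.0, 101)
fast = _spline_interpolate(oldx, oldy, newx, fast=True)
slow = _spline_interpolate(oldx, oldy, newx, fast=False, smoothing=0)
print(np.max(np.abs(fast - slow)))  # differences concentrate near the edges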
Example #30
    def wiggle(self, values):
        """
        Plot a trace in VAWT(Variable Area Wiggle Trace)
        """
        if self.zmax is None:
            self.zmax = values.size

        # Rescale so that values ranges from -1 to 1
        if self.rescale:
            values = values.astype(float)
            values -= values.min()
            values /= values.ptp()
            values *= 2
            values -= 1

        # Interpolate at resampleRatio x the previous density
        resample_z = np.linspace(0, values.size,
                                 values.size * self.resampleRatio)
        # cubic spline interpolation
        cj = cspline1d(values)
        resample_v = cspline1d_eval(cj, resample_z)
        print(resample_v)
        newz = resample_z
        if self.origin is None:
            self.origin = resample_v.mean()

        # Plot
        if self.posFill is not None:
            self.ax.fill_betweenx(newz,
                                  resample_v,
                                  self.origin,
                                  where=resample_v > self.origin,
                                  facecolor=self.posFill)
        if self.negFill is not None:
            self.ax.fill_betweenx(newz,
                                  resample_v,
                                  self.origin,
                                  where=resample_v < self.origin,
                                  facecolor=self.negFill)
        if self.lineColor is not None:
            self.ax.plot(resample_v, newz, color=self.lineColor, linewidth=.1)
Example #31
def interp_around(X_sc, s_fracpeak, s_before, s_after, kind='cubic'):
    n_c = X_sc.shape[1]
    n_s = s_before + s_after
    Out_sc = np.empty((n_s, n_c), dtype=np.float32)
    for i_c in range(n_c):
        if kind == 'cubic':
            coeffs = cspline1d(X_sc[:, i_c])
            Out_sc[:,
                   i_c] = cspline1d_eval(coeffs,
                                         newx=np.arange(s_fracpeak - s_before,
                                                        s_fracpeak + s_after,
                                                        dtype=np.float32))
        elif kind == "linear":
            Out_sc[:,
                   i_c] = interp1d(np.arange(X_sc.shape[0]),
                                   X_sc[:, i_c],
                                   bounds_error=True,
                                   kind=kind)(np.arange(s_fracpeak - s_before,
                                                        s_fracpeak + s_after,
                                                        dtype=np.float32))
        else:
            raise Exception("kind must be 'linear' or 'cubic'")
    return Out_sc
Example #32
def sr_interpol3(x, y, ytarget, doplot=0, factor=10):
    s = numpy.sign(numpy.diff(y)[0])
    if s == 1:
        y[numpy.argmax(y) + 1:] = 2 * abs(max(y))
    else:
        y[numpy.argmin(y) + 1:] = -2 * abs(max(y))
    dx = x[1] - x[0]
    newx = linspace(min(x), max(x), factor * len(x))

    cj = cspline1d(y)
    newy = cspline1d_eval(cj, newx, dx=dx, x0=x[0])

    ysq = (ytarget - newy) ** 2
    index = where(ysq == min(ysq))

    if doplot:
        clf()
        plot(x, y, 'o')
        plot(newx, newy)
        plot(newx[index], newy[index], 'o')
        show()

    return newx[index[0][0]]
Example #33
    def wiggle(self, values):
        """
        Plot a trace in VAWT(Variable Area Wiggle Trace)
        """
        if self.zmax is None:
            self.zmax = values.size

        # Rescale so that values ranges from -1 to 1
        if self.rescale:
            values = values.astype(float)
            values -= values.min()
            values /= values.ptp()
            values *= 2
            values -= 1

        # Interpolate at resampleRatio x the previous density
        resample_z = np.linspace(0, values.size, values.size * self.resampleRatio)
        # cubic spline interpolation
        cj = cspline1d(values)
        resample_v = cspline1d_eval(cj, resample_z)
        print(resample_v)
        newz = resample_z
        if self.origin is None:
            self.origin = resample_v.mean()

        # Plot
        if self.posFill is not None:
            self.ax.fill_betweenx(newz, resample_v, self.origin,
                                  where=resample_v > self.origin,
                                  facecolor=self.posFill)
        if self.negFill is not None:
            self.ax.fill_betweenx(newz, resample_v, self.origin,
                                  where=resample_v < self.origin,
                                  facecolor=self.negFill)
        if self.lineColor is not None:
            self.ax.plot(resample_v, newz, color=self.lineColor, linewidth=.1)
Example #34
    def fit_spline(self, degree=2):
        """
        Generates a spline curve fitting over the points in LineScan with
        order of precision given by the parameter degree.
        :param degree: the precision of the generated spline.
        :return: the spline as a LineScan fitting over the initial values of
                  LineScan
        Notes:
        Implementation taken from http://www.scipy.org/Cookbook/Interpolation
        """
        if degree > 4:
            degree = 4  # No significant improvement with respect to time usage
        if degree < 1:
            warnings.warn("LineScan.fit_spline - degree needs to be >= 1.")
            return None

        y = np.array(self)
        x = np.arange(0, len(y), 1)
        dx = 1
        newx = np.arange(0, len(y) - 1, pow(0.1, degree))
        cj = signal.cspline1d(y)
        ret = signal.cspline1d_eval(cj, newx, dx=dx, x0=x[0])

        return ret
Example #35
npix = int((finalpixel - initialpixel) / deltapix + 1)
newwave = initialpixel + deltapix * np.arange(npix)

newflux = np.zeros((len(flux), npix + 1))
chisq = np.zeros(len(flux))

#resample spectra at single wavelength spectrum defined above
smoothing_parameter = 3.0
spline_order = 3
number_of_knots = -1

for p in range(len(flux)):
    nonzero = np.where(wavevector[p, :] != 0.)
    fitcoeff = cspline1d(flux[p], lamb=smoothing_parameter)
    newflux[p, :] = cspline1d_eval(fitcoeff,
                                   newwave,
                                   dx=wave1[p],
                                   x0=wavevector[p, 0])
    oldfit = cspline1d_eval(fitcoeff,
                            wavevector[p, nonzero][0],
                            dx=wave1[p],
                            x0=wavevector[p, 0])
    chisq[p] = np.sum(np.sqrt(
        (oldfit - flux[p])**2. * invvar[p])) / np.shape(flux[p])[0]

filename = 'pcaspectra_rest.fits'
pf.writeto(filename, newflux, clobber=True)
pf.append(filename, newwave)
pf.append(filename, z)

t1 = time.time()
print(t1 - t0)
Example #36
y = np.sin(t)

# Object-oriented wrapper around FITPACK
spl1 = interpolate.UnivariateSpline(t, y, s=0)
y1 = spl1(tt)

# Same as UnivariateSpline with s=0
spl2 = interpolate.InterpolatedUnivariateSpline(t, y)
y2 = spl2(tt)

# Procedural (non-object-oriented) wrapper around FITPACK
c1 = interpolate.splrep(t, y)
y3 = interpolate.splev(tt, c1)

# Cubic spline
c2 = signal.cspline1d(y)
y4 = signal.cspline1d_eval(c2, tt)
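# note: dx and x0 default to 1.0 and 0.0, so this (and qspline1d_eval below) assumes t is sampled with unit spacing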

# Quadratic spline
c3 = signal.qspline1d(y)
y5 = signal.qspline1d_eval(c3, tt)

plt.figure()
plt.plot(t, y, "o")
plt.plot(tt, y1)
plt.plot(tt, y2)
plt.plot(tt, y3)
plt.plot(tt, y4)
plt.plot(tt, y5)
plt.show()
Example #37
#
# Example showing how to use B-splines in scipy.signal to do
# interpolation. The input points must be equally spaced to use these
# routine.
#

from numpy import r_, sin
from scipy.signal import cspline1d, cspline1d_eval

x = r_[0:10]
dx = x[1] - x[0]
newx = r_[-3:13:0.1]  # notice outside the original domain
y = sin(x)
cj = cspline1d(y)
newy = cspline1d_eval(cj, newx, dx=dx, x0=x[0])
from pylab import plot, show

plot(newx, newy, x, y, 'o')
show()

# ![](files/Interpolation_attachments/interpolate_figure1.png)
#
# N-D interpolation for equally-spaced data
# =========================================
#
# The scipy.ndimage package also contains spline_filter and
# map_coordinates, which can be used to perform N-dimensional
# interpolation for equally-spaced data. A two-dimensional example is
# sketched below.
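# A minimal two-dimensional sketch (made-up data, not the original cookbook
# figure): map_coordinates samples an equally-spaced grid at fractional
# (row, col) coordinates using spline prefiltering.

import numpy as np
from scipy import ndimage

a = np.arange(12.0).reshape(3, 4)
coords = np.array([[0.5, 2.0], [0.5, 1.0]])  # rows and cols to sample
print(ndimage.map_coordinates(a, coords, order=3))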
Example #38
def doresample(orig_x, orig_y, new_x, method="cubic", padlen=0, antialias=False, debug=False):
    """
    Resample data from one spacing to another.  By default, does not apply any antialiasing filter.

    Parameters
    ----------
    orig_x
    orig_y
    new_x
    method
    padlen

    Returns
    -------

    """
    tstep = orig_x[1] - orig_x[0]
    if padlen > 0:
        rawxpad = np.linspace(0.0, padlen * tstep, num=padlen, endpoint=False)
        frontpad = rawxpad + orig_x[0] - padlen * tstep
        backpad = rawxpad + orig_x[-1] + tstep
        pad_x = np.concatenate((frontpad, orig_x, backpad))
        pad_y = tide_filt.padvec(orig_y, padlen=padlen)
    else:
        pad_x = orig_x
        pad_y = orig_y

    if debug:
        print("padlen=", padlen)
        print("tstep=", tstep)
        print("lens:", len(pad_x), len(pad_y))
        print(pad_x)
        print(pad_y)
        fig = pl.figure()
        ax = fig.add_subplot(111)
        ax.set_title("Original and padded vector")
        pl.plot(orig_x, orig_y + 1.0, pad_x, pad_y)
        pl.show()

    # antialias and ringstop filter
    init_freq = len(pad_x) / (pad_x[-1] - pad_x[0])
    final_freq = len(new_x) / (new_x[-1] - new_x[0])
    if antialias and (init_freq > final_freq):
        aafilterfreq = final_freq / 2.0
        aafilter = tide_filt.NoncausalFilter(filtertype="arb", transferfunc="trapezoidal")
        aafilter.setfreqs(0.0, 0.0, 0.95 * aafilterfreq, aafilterfreq)
        pad_y = aafilter.apply(init_freq, pad_y)

    if method == "cubic":
        cj = signal.cspline1d(pad_y)
        # return tide_filt.unpadvec(
        #   np.float64(signal.cspline1d_eval(cj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])), padlen=padlen)
        return signal.cspline1d_eval(cj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])
    elif method == "quadratic":
        qj = signal.qspline1d(pad_y)
        # return tide_filt.unpadvec(
        #    np.float64(signal.qspline1d_eval(qj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])), padlen=padlen)
        return signal.qspline1d_eval(qj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])
    elif method == "univariate":
        interpolator = sp.interpolate.UnivariateSpline(pad_x, pad_y, k=3, s=0)  # s=0 interpolates
        # return tide_filt.unpadvec(np.float64(interpolator(new_x)), padlen=padlen)
        return np.float64(interpolator(new_x))
    else:
        print("invalid interpolation method")
        return None
Example #39
def interp(dx, knots, xis):
    cspl = cspline1d(knots)  # cubic spline coefficients
    return cspline1d_eval(cspl, xis, dx=dx, x0=0.0)
Example #40
def long_time_errorbars(fnames, fv, frac=0.1, additional_fixed=None, plotresult=False):
    # finding error bar in the "fixed" parameters... I think this is similar to what
    # I did above but with average RChi2 across all traces
    # I tried to recalculate a lower Fx value because DOF increased from 3282 to 3282*len(fnames),
    # but the calc. only could handle <10000 DOF, and Fx only changed from 1.001858 to 1.001845.

    lkeys = ['l0','l1','l2','l3']
    akeys = ['a0','a1','a2','a3']
    fixedparams = [fv]
    if additional_fixed is not None: fixedparams += additional_fixed
    #Fx = 1.001845 # Threshold found from F-statistic on 9999 points. (max of calculator, faking 3282*21).
    Fx = 1.001858 # Threshold found from F-statistic on 3282 pts.
    Fx_list = [] # later we'll sort these in order of increasing parameter value to enable interpolation

    # first get best_avg_RChi2:
    bestfits = []
    chi2 = []
    for fname in fnames:
        bestfit, bestacorr = load_wire( fname )
        bestfits.append( bestfit )
        chi2.append( bestfit['ReducedChi2'] )
    best_avg_RChi2 = np.mean(chi2)
    
    # setup initial coarse scan
    assert "l3" in bestfit
    fv_best = bestfit[fv]
    val_step = frac*fv_best/2.0
    argvals = arange( fv_best*(1.0-frac), fv_best*(1.0+frac)+val_step, val_step )

    def fmin_kernel( twoT, *args ):
        avg_RChi2 = 0.0
        fv_value = args[0]
        longT_floating = [ key for key in ['l1','l2','l3'] if key != fv ] # the two longTime components we're optimizing
        chi2 = []
        for i,fname in enumerate(fnames):
            bestfit = bestfits[i].copy()
            bestfit[ fv ] = fv_value
            bestfit[ longT_floating[0] ] = twoT[0]
            bestfit[ longT_floating[1] ] = twoT[1]
            l = [ bestfit[key] for key in lkeys ]
            a = [ bestfit[key] for key in akeys ]
            irf = bestfit['irf_dispersion']
            # all three long-components are fixed for this fit, but the
            # values they are fixed at are set at different levels:
            # fv is the component we're finding an errorbar for, and
            # it is set by the function long_time_errorbars.
            # The other two are allowed to "float" in response, but
            # not float freely for each trace individually; we're
            # looking for an error bar for the global fit across all
            # cavities on a given sample, so we constrain them for each
            # individual fit but let fmin play with them to minimize
            # the mean reduced Chi squared across all data sets.
            params = do_fit( fname, l, a, ['l1','l2','l3'], irf )
            chi2.append( params['ReducedChi2'] )
        return np.mean(chi2)

    # do a coarse (5-pt) run across the data
    twoT_guess = [ bestfit[key] for key in ['l1','l2','l3'] if key != fv ] # the two longTime components we're optimizing
    for val in argvals:
        res = fmin( fmin_kernel, twoT_guess, args=(val,), xtol=0.005, ftol=0.005, full_output=1 )
        avg_RChi2 = res[1]
        Fx_list.append( [val, avg_RChi2/best_avg_RChi2] )

    assert not all(array(Fx_list)[:,1]>Fx)

    # if the left side (low param value) didn't exceed Fx threshold, extend
    val = argvals[0]
    while Fx_list[0][1] < Fx:
        val -= val_step
        if val < 0: 
            if fv in ['l1', 'l2', 'l3']:
                raise ValueError("long-time component just went negative...")
            else:
                break
        res = fmin( fmin_kernel, twoT_guess, args=(val,), xtol=0.005, ftol=0.005, full_output=1 )
        avg_RChi2 = res[1]
        Fx_list.append( [val, avg_RChi2/best_avg_RChi2] )
        Fx_list.sort( key=lambda x: x[0] ) # sort by first element (parameter value)

    # if the right side (high param value) didn't exceed Fx threshold, extend
    val = argvals[-1]
    while Fx_list[-1][1] < Fx:
        val += val_step
        res = fmin( fmin_kernel, twoT_guess, args=(val,), xtol=0.005, ftol=0.005, full_output=1 )
        avg_RChi2 = res[1]
        Fx_list.append( [val, avg_RChi2/best_avg_RChi2] )
        Fx_list.sort( key=lambda x: x[0] ) # sort by first element (parameter value)


    # interpolate to find values at threshold
    Fx_array = array( Fx_list )
    splines = cspline1d( Fx_array[:,1] )
    interp_val = linspace( Fx_array[0,0], Fx_array[-1,0], 500 )
    interp_Fx = cspline1d_eval( splines, interp_val, dx=val_step, x0=Fx_array[:,0].min() )
    error_bar = [ interp_val[find(interp_Fx<Fx)[0]], interp_val[find(interp_Fx<Fx)[-1]] ]

    if plotresult:
        fig = figure(1)
        #fig.clf()
        ax_chi = gca()
        #ax_chi = fig.add_subplot(111)
        #ax_chi.cla()
        ax_chi.plot( interp_val, interp_Fx, label=fv )
        ax_chi.plot( interp_val, [Fx]*len(interp_val), '--k' )
        ax_chi.plot( Fx_array[:,0], Fx_array[:,1], 'sk' )
        ax_chi.plot( error_bar, [Fx]*2, '-k', lw=3.0 )

        #ax_chi.set_ylim([0.99, 1.01])
        fig.show()
        fig.canvas.draw()

    return error_bar
Example #41
def get_errorbar(fv,
                 fname=None,
                 trace=None,
                 frac=0.1,
                 additional_fixed=None,
                 plotresult=False,
                 guess=dict()):
    """ Use Lakowicz's f-statistic method to find the 95% confidence interval on a fitting parameter.
        fv -- a string/dictionary key identifying the parameter to find a confidence interval on.
        fname -- (optional, can use trace instead) fname of data file to fit.
        trace -- can use trace instead of fname to pass in info on wraptime and irf fname, etc.
        frac -- the fractional change in the parameter to use for the intial 5-point mesh
        additional_fixed -- any parameters you want to hold fixed during the fitting
        plotresult -- plot the f-statistic curve?
        guess -- dictionary of initial parameters for the fit (by default, best-fit values are used)
    """
    #bestfit, bestacorr = load_wire( fname )
    if fname is not None:
        raise ValueError(
            'have modified this to prefer working with actual traces to forward wraptime, etc., to do_fit'
        )
        bestfit = load_wire(fname)
    if trace is not None:
        assert fname is None
        bestfit = trace.fitresults.copy()
    bestChi2 = bestfit['ReducedChi2']
    irf = bestfit['irf_dispersion']
    lkeys = ['l0']
    akeys = ['a0']
    if 'l1' in bestfit:
        lkeys.append('l1')
        akeys.append('a1')
    if 'l2' in bestfit:
        lkeys.append('l2')
        akeys.append('a2')
    if 'l3' in bestfit:
        lkeys.append('l3')
        akeys.append('a3')
    fixedparams = [fv]
    if additional_fixed is not None: fixedparams += additional_fixed
    fv_best = bestfit[fv]

    # If holding l1-l3 lifetimes fixed, as we did
    # for the actual fit, then Fx should equal 1.0 at the 'bestfit' value of the parameter.
    # But if l1-l3 are allowed to vary (making a more conservative error bar), then
    # Fx may be lower than 1.0, and the minimum may not be at the "bestfit" value.
    # This is to say that the bestfit of the constrained fit may not be the bestfit of the unconstrained fit.

    #Fx = 1.0038 # Threshold found from F-statistic 1600 deg. freedom (~half trace points, following Fundamentals Of Fluorescence Spectroscopy)
    Fx = 1.001858  # Threshold found from F-statistic on all 3282 points. Seems easier to justify
    Fx_list = [
    ]  # later we'll sort these in order of increasing parameter value to enable interpolation
    val_step = frac * fv_best / 2
    argvals = arange(fv_best * (1.0 - frac),
                     fv_best * (1.0 + frac) + val_step, val_step)

    # do a coarse (5-pt) run across the data
    for val in argvals:
        bestfit[fv] = val
        l = [bestfit[key] for key in lkeys]
        a = [bestfit[key] for key in akeys]
        params = do_fit(trace, l, a, fixedparams, irf, guess)
        Fx_list.append([val, params['ReducedChi2'] / bestChi2])

    if all(array(Fx_list)[:, 1] > Fx):
        Fx_list = []
        for val in argvals:
            bestfit[fv] = val
            cpy = bestfit.copy()
            for key in lkeys:
                if key not in fixedparams: cpy[key] = 1.25 * bestfit[key]
            for key in akeys:
                if key not in fixedparams: cpy[key] = 1.5 * bestfit[key]
            l = [cpy[key] for key in lkeys]
            a = [cpy[key] for key in akeys]
            params = do_fit(trace, l, a, fixedparams, irf, guess)
            Fx_list.append([val, params['ReducedChi2'] / bestChi2])

    if all(array(Fx_list)[:, 1] > Fx):
        raise ValueError("Problem fitting: always above Fx. Min: %f" %
                         (array(Fx_list)[:, 1].min()))

    # if the left side (low param value) didn't exceed Fx threshold, extend
    val = argvals[0]
    while Fx_list[0][1] < Fx:
        val -= val_step
        if val < 0:
            if fv in ['l1', 'l2', 'l3']:
                raise ValueError("long-time component just went negative...")
            else:
                break
        bestfit[fv] = val
        l = [bestfit[key] for key in lkeys]
        a = [bestfit[key] for key in akeys]
        params = do_fit(trace, l, a, fixedparams, irf, guess)
        Fx_list.append([val, params['ReducedChi2'] / bestChi2])
        Fx_list.sort(
            key=lambda x: x[0])  # sort by first element (parameter value)

    # if the right side (high param value) didn't exceed Fx threshold, extend
    val = argvals[-1]
    while Fx_list[-1][1] < Fx:
        val += val_step
        bestfit[fv] = val
        l = [bestfit[key] for key in lkeys]
        a = [bestfit[key] for key in akeys]
        params = do_fit(trace, l, a, fixedparams, irf, guess)
        Fx_list.append([val, params['ReducedChi2'] / bestChi2])
        Fx_list.sort(
            key=lambda x: x[0])  # sort by first element (parameter value)

    # interpolate to find values at threshold
    Fx_array = array(Fx_list)
    splines = cspline1d(Fx_array[:, 1])
    interp_val = linspace(Fx_array[0, 0], Fx_array[-1, 0], 500)
    interp_Fx = cspline1d_eval(splines,
                               interp_val,
                               dx=val_step,
                               x0=Fx_array[:, 0].min())
    error_bar = [
        interp_val[find(interp_Fx < Fx)[0]],
        interp_val[find(interp_Fx < Fx)[-1]]
    ]

    if plotresult:
        fig = figure(1)
        #fig.clf()
        ax_chi = fig.add_subplot(111)
        ax_chi.cla()
        ax_chi.plot(interp_val, interp_Fx, '-k')
        ax_chi.plot(interp_val, [Fx] * len(interp_val), '--k')
        ax_chi.plot(Fx_array[:, 0], Fx_array[:, 1], 'sg')
        ax_chi.plot(error_bar, [Fx] * 2, '-b', lw=3.0)

        ax_chi.set_ylim([0.99, 1.01])
        fig.show()
        fig.canvas.draw()

    return error_bar
Example #42
def compute(chifile, peaksfile, Tfil, delta, ksq, writefile, minp, maxp,
            lamda):
    xxs, yys = readchi(chifile)

    #####################
    #Read in the peaks file
    found = False
    peaksx = list()
    peaksy = list()
    approx_p = 0.0
    if not (peaksfile is None):
        f = open(peaksfile, 'r')
        for line in f:
            if line[0] == '#':
                continue
            line = line.lstrip().rstrip()
            [fname, Ttemp, lamda, approx_p, line] = line.split(None, 4)
            if fname == chifile:
                lamda = float(lamda)
                approx_p = float(approx_p)
                if Tfil is None:
                    Tfil = float(Ttemp)
                found = True
                break
        if found:
            val = line.split(None, 2)
            if val[0] == '0' or val[0] == '-1':  #No information on peaks
                startloc = 0
            else:
                startloc = float(val[0])  #get the new peak info
                [peaksx.append(float(i)) for i in val[1].split(',')]
                peaksx.sort()
                i = 0
                px = peaksx[i]
                for (j, xx) in enumerate(xxs):
                    if px < xx:
                        peaksy.append(((xx - px) * yys[j] +
                                       (px - xxs[j - 1]) * yys[j - 1]) /
                                      (xxs[j] - xxs[j - 1]))
                        i += 1
                        if i == len(peaksx):
                            break
                        px = peaksx[i]
        else:
            print "\nError: Unable to find chi file " + chifile + " in peaks file " + peaksfile
            print "\nTry calling again without the peaks file arguement."
            exit(0)
    else:
        startloc = xxs[0]

    #####################
    #Load in the MgO data
    mgo_a0 = 4.213
    (ARs, Ts, PPs) = loadMgO('/home/acadien/Documents/ascwork/EOS/mgo_eos.dat')
    As = [i * mgo_a0 for i in ARs]
    if Tfil == 0.0:
        Ps = PPs[:, 0]
    elif Tfil == 300.0:
        As.insert(0, mgo_a0)
        Ps = PPs[:, 1].tolist()
        Ps.insert(0, 1.0)
    else:
        Ps = interp_T(Tfil, Ts[2:], PPs[:, 2:])

    #####################
    #Analyze the spectrum: find the peaks
    inp = list()

    startloc = max(startloc, 5.0)
    for (ind, xx) in enumerate(xxs):
        if xx >= startloc:
            break
    strtind = ind

    while True:
        ind = localMax(yys, ind, delta)
        if ind == -1:
            break
        else:
            mxslp = max(
                [fabs(yys[j] - yys[j + 1]) for j in range(ind - 5, ind + 5)])
            if mxslp > 0.1:
                inp.append(ind)

    rind = 50
    yreverse = yys[::-1]
    while True:
        rind = localMax(yreverse, rind, delta)
        if len(xxs) - rind >= strtind:
            break
        if rind == -1:
            break
        else:
            ind = len(yys) - rind
            found = False
            for pk in inp:
                if abs(pk - ind) < 3:
                    found = True
                    break
            if found == False:
                if ind < 50:
                    continue
                mxslp = max([
                    fabs(yys[j] - yys[j + 1]) for j in range(ind - 5, ind + 5)
                ])
                if mxslp > 0.1:
                    inp.append(ind)

    inp.sort()

    #####################
    #Plot normal peaks
    #Draw the blue peaks first to be overwritten by red peaks
    pl.figure()
    pl.plot(xxs, yys, ls='dotted')
    xx = [xxs[i] for i in inp]
    yy = [yys[i] for i in inp]
    [pl.scatter(a, b) for (a, b) in zip(xx, yy)]
    [pl.scatter(a, b) for (a, b) in zip(peaksx, peaksy)]
    pl.title(chifile + ", T=" + str(Tfil) + ", Kmag=sqrt(" + str(ksq) + ")")

    #####################
    #Calculate the MgO Peaks at this pressure
    #Get the MgO lattice constant for the desired pressure
    mgo_s = UnivariateSpline(Ps, As)
    print Ps
    mgo_sps = linspace(min(Ps), max(Ps), len(Ps))
    mgo_sas = mgo_s(mgo_sps)
    coefs = cspline1d(mgo_sas)
    latconst = cspline1d_eval(coefs, [approx_p],
                              dx=mgo_sps[1] - mgo_sps[0],
                              x0=mgo_sps[0])[0]

    #Get the intensity factors
    mgo_ks = [3, 4, 8, 9, 11, 12]  #Valid K vectors for MgO
    mgo_mp = [8.0, 6.0, 12.0, 6.0, 24.0, 8.0]  #Multiplicity
    mgo_sf = [1.0, 3.0, 3.0, 1.0, 1.0, 3.0]  #Structure Factor
    mgo_Im = [(mgo_sf[i] * mgo_sf[i]) / mgo_mp[i]
              for i in range(len(mgo_sf))]  #Intensity multiplier

    #Find the MgO peaks at this pressure
    mgo_2th = list()
    mgo_I = list()
    for (i, K) in enumerate(mgo_ks):
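        #Bragg's law: lamda = 2*d*sin(theta), with d = latconst/sqrt(K) for the cubic MgO lattice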
        theta = asin(lamda * sqrt(float(K)) * 0.5 / latconst)
        mgo_2th.append(degrees(theta) * 2)
        mgo_I.append(mgo_Im[i] * (1 + cos(theta * 2) * cos(theta * 2)) /
                     (sin(theta) * sin(theta) * cos(theta)))
    mxI = max(mgo_I)
    mxA = sum(xxs) / len(xxs)
    mgo_I = [(i / mxI + 1) * mxA * mxA for i in mgo_I]

    #####################
    #Find possible peak matches
    s = UnivariateSpline(As, Ps)
    sas = linspace(min(As), max(As), len(As))
    sps = s(sas)
    coefs = cspline1d(sps)
    pressure = list()
    ks = list()
    theas = list()
    thetas = list()

    peaksx.extend([xxs[i] for i in inp])
    peaksy.extend([yys[i] for i in inp])

    for j in ksq:  #for each k value given

        thepeak = mgo_2th[mgo_ks.index(next(p for p in mgo_ks if int(j) == p))]
        i = findnearest(peaksx, thepeak)

        #####################
        #Refine the peak using a Spline fit
        ind = findnearest(xxs, peaksx[i])
        lowbnd = ind - 10
        upbnd = ind + 10
        #[strt,end]=findpeakbounds(yys,findnearest(xxs,peaksx[i]),lowbnd,upbnd)
        #[xd,yd]=splinefit(xxs[strt:end],yys[strt:end],10*(end-strt))

        #pl.plot(xd,yd)
        pl.plot([thepeak, thepeak], [0, peaksy[i]], color='red')
        pl.text(thepeak, 5, str(j))

        #pkind=posmax(yd)
        #(theta2,intens)=(xd[pkind],yd[pkind])
        (theta2, intens) = (xxs[ind], yys[ind])
        """if round(theta2,2)==8.84: theta2=8.73"""

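        #invert Bragg's law (lamda = 2*d*sin(theta)) to get the d-spacing from the measured 2-theta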
        d = lamda / (2.0 * sin(radians(theta2) / 2.0))

        a = d * sqrt(float(j))
        if (a > min(As) - tol and a < max(As) + tol):
            #Found a valid MgO peak
            pres = cspline1d_eval(coefs, [a], dx=sas[1] - sas[0], x0=sas[0])[0]
            if pres < minp or pres > maxp:
                continue
            ks.append(j)
            theas.append(a)
            thetas.append(theta2)

            pressure.append(pres)
            pl.scatter(theta2, intens, s=30, c='red', marker='o')
            pl.text(theta2,
                    intens,
                    str(round(theta2, 2)),
                    position=(theta2, intens),
                    size='x-small')

    #####################
    #Print Results

    print "Possible Matches near approximated pressure " + str(approx_p)
    print "kmag\t| theta2(deg)\t| pressure(GPa)\t| a (Angstroms"
    for i in range(len(ks)):
        print str(ks[i]) + "\t| " + str(round(thetas[i], 5)) + "  \t| " + str(
            round(pressure[i], 2)) + "   \t| " + str(round(theas[i], 5))
    avgp = sum(pressure) / len(pressure)
    print "At temperature " + str(Tfil) + "K"
    err = max([fabs(i - avgp) for i in pressure])
    print "Average Pressure: " + str(round(avgp, 2)) + "GPa, Error: " + str(
        round(err, 2)) + "GPa, StdDev: " + str(round(std(pressure), 2))

    pl.show()

    ######################
    #Write results to file
    if writefile is not None:
        print "Appending results to file " + writefile + "."
        file = open(writefile, 'a')
        file.write(chifile + "\t" + str(Tfil) + "\t" + str(round(avgp, 2)) +
                   "\t" + str(round(err, 2)) + "\t" + str(ks) + "\t" +
                   str([str(round(i, 2)) for i in pressure]) + "\n")
Example #43
0
File: med.py Project: FHe/tdl
def spline_interpolate(oldx, oldy, newx, smoothing=0.001, **kw):
    """
    newy = spline_interpolate(oldx, oldy, newx)
    1-dimensional cubic spline, for cases where oldx and newx are on a uniform grid.
    """
    return cspline1d_eval(cspline1d(oldy), newx, dx=oldx[1]-oldx[0],x0=oldx[0])
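
# A minimal usage sketch (synthetic data; spline_interpolate assumes oldx is a
# uniformly spaced grid):
import numpy as np

oldx = np.linspace(0.0, 2.0 * np.pi, 50)   # uniform grid
oldy = np.sin(oldx)
newx = np.linspace(0.0, 2.0 * np.pi, 500)  # denser grid to resample onto
newy = spline_interpolate(oldx, oldy, newx)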
Example #44
0
def compute():
    print ""

    #####################
    #Read in the chi file
    xxs, yys = readchi(chifilename)
    s2ti = int(sum([1 for i in xxs
                    if i < s2t]))  #index of the starting location

    #####################
    #Load in the MgO data
    mgo_a0 = 4.213
    import os
    (ARs, Ts,
     PPs) = loadMgO(os.getenv("HOME") + '/Documents/ascwork/EOS/mgo_eos.dat')
    As = [i * mgo_a0 for i in ARs]
    if Tval == 0.0:
        Ps = PPs[:, 0]
    elif Tval == 300.0:
        As.insert(0, mgo_a0)
        Ps = PPs[:, 1].tolist()
        Ps.insert(0, 1.0)
    else:
        Ps = interp_T(Tval, Ts[2:], PPs[:, 2:])

    #####################
    #Analyze the spectrum: find the peaks
    ind = s2ti
    inpleft = list()
    inpright = list()
    while True:
        ind = localMax(yys, ind, delta)
        if ind == -1:
            break
        else:
            inpleft.append(ind)

    rind = s2ti
    yreverse = yys[::-1]
    while True:
        rind = localMax(yreverse, rind, delta)

        if rind == -1 or rind < s2ti:
            break
        else:
            ind = len(yys) - rind - 1
            inpright.append(ind)

    ysmth = windowavg(yys, 15)
    ind = s2ti
    while True:
        ind = localMax(ysmth, ind, delta)
        if ind == -1:
            break
        else:
            inpleft.append(ind)

    inp = list()
    for i in inpright:
        possibles = [j for j in inpleft if abs(i - j) < 5]
        if len(possibles) > 0:
            inp.append(sum(possibles) / len(possibles))

    inp = sort(list(set(inp)))

    #Write the current peak locations to a file, to be altered by user
    towrite = "peak   leftbnd   rightbnd\n"
    towrite += "".join([str(xxs[i]) + " \n" for i in inp])
    fname = "peaks_" + chifilename.split("_")[2].split(".")[0] + ".dat"

    #Check if the file already exists, if so don't overwrite it!
    exists = False
    try:
        open(fname, "r")
        [centerxsold, startxsold, endxsold] = readpeaks(fname)
        centers2 = theta2index(xxs, centerxsold)
        starts2 = theta2index(xxs, startxsold)
        ends2 = theta2index(xxs, endxsold)
        exists = True
    except IOError as e:
        print "%s File doesn't exist creating new one." % fname
        open(fname, "w").write(towrite)
    except ValueError as e:
        pass

    #Begin interactive portion
    print "List the starting and ending points of the peaks in the file %s." % fname
    print "You can add or delete peaks as needed from here."
    print "Close the graph when done."
    p = Process(target=openemacs, args=(fname, ))
    p.start()

    #Open up a plot so the user can select the peaks
    pl.plot(xxs, yys)
    if exists:
        for i in centers2:
            pl.scatter(xxs[i], yys[i], marker="o")
        for i in starts2:
            pl.text(xxs[i], yys[i], "S")
        for i in ends2:
            pl.text(xxs[i], yys[i], "E")
        pl.legend(["diff pattern", "center", "start", "end"])
    else:
        for i in inp:
            pl.scatter(xxs[i], yys[i])
    pl.show()
    p.join()
    print "Thanks, got it."

    #Read in the newly user generated peaks file
    [centerxs, startxs, endxs] = readpeaks(fname)
    pcenters = theta2index(xxs, centerxs)
    pstarts = theta2index(xxs, startxs)
    pends = theta2index(xxs, endxs)
    peaks = [[pcenters[i], list(),
              list(), list(), list(), 0] for i in range(len(pcenters))]

    #Check if there is enough space between the two points.
    badgap = False
    for i in range(len(pcenters)):
        if pends[i] - pstarts[i] < 8:
            badgap = True
            print ""
            print "Error not enough space between start and end points on peak #%d at %g." % (
                i + 1, xxs[pcenters[i]])
            print "Gap must be at least 7 points wide otherwise fit will be inaccurate."
    if badgap: exit()
    """
    #####################
    #Windowed average to smooth out the plot, useful for multi-peak analysis
    yysmth=windowavg(yys,11)

    #####################
    #Find the groups of peaks from the smooth plot
        (pstarts,pends,pgroups)=findclusterbounds(yysmth,inp)
        ingroup=list()
        for i in inp:
            found=False
            for group in pgroups:
                for j in group:
                    if i==j:
                        found=True
                        break
                if found:
                    break
            if not(found):
                ingroup.append(False)
            else:
                ingroup.append(True)

        ####################
        #Transfer the group peaks location from the smooth plot to the original
        for g in range(len(pgroups)):
            for s in range(len(pgroups[g])):
                smthpeak=xxs[pgroups[g][s]]
                for i in range(len(inp)):
                    if abs(smthpeak-xxs[inp[i]]) < 0.05:
                        pgroups[g][s]=i
                        break
        """
    mpeaks = list()
    for i in range(len(peaks)):
        ind = peaks[i][0]
        print "Fitting peak at %g.\n" % xxs[ind]
        start = pstarts[i]
        end = pends[i]
        [xdata, ydata, coefs] = glinitguess(xxs[start:end], yys[start:end])
        peaks[i][4] = coefs
        [peaks[i][2], peaks[i][3]] = glfit(xdata, ydata, coefs)
        peaks[i][1] = xdata
        peaks[i][5] = localMax(peaks[i][2], 0, 1)  #new peak
        """
        #####################
        #Further Refine peaks by seperating out convoluted peaks

        for i in range(len(pgroups)):
            mpeaks.append(list())
            (start,end,group)=(pstarts[i],pends[i],pgroups[i])
            groupcoefs=list()
            for p in group:
                groupcoefs.append(peaks[p][4])
            [xdata,ydata,initlsq]=mglinitguess(xxs[start:end],yys[start:end],groupcoefs)
            [mgly,lsq]=mglfit(xdata,ydata,initlsq,len(groupcoefs))
            mglx=xdata
            ilsq=extractpks(lsq,len(group))

            for j in range(len(group)):
                peaks[group[j]][3]=ilsq[j]
                [peaks[group[j]][1],peaks[group[j]][2]]=[xdata,glval(xdata,ilsq[j])]
                refpk=localMax(peaks[group[j]][2],0,1)
                peaks[group[j]][5]=refpk
            mpeaks[i].append(mglx)
            mpeaks[i].append(mgly)
        """

    #####################
    #Plot normal peaks
    #Draw the blue peaks first to be overwritten by red peaks
    pl.figure()
    pl.plot(xxs, yys, ls='dotted')
    xx = [xxs[i[0]] for i in peaks]
    yy = [yys[i[0]] for i in peaks]
    [pl.scatter(xx[i], yy[i], label=str(xx[i])) for i in range(len(peaks))]
    #[pl.text(xx[i],yy[i],str(round(xx[i],2)),position=(xx[i],yy[i]),size='x-small') for i in range(len(peaks))]
    pl.title(chifilename + ", T=" + str(Tval) + ", Kmag=sqrt(" + str(ksq) +
             ")")
    #Plot fitted peaks
    for i in range(len(peaks)):
        pl.plot(peaks[i][1], peaks[i][2])
    for i in range(len(mpeaks)):
        pl.plot(mpeaks[i][0], mpeaks[i][1], ls='dashed')

    #Plot initial guesses
    #for i in range(len(peaks)):
    #    pl.plot(peaks[i][1],glval(peaks[i][1],peaks[i][4]))
    #    print peaks[i][4]
    #for i in range(len(mpeaks)):
    #    pl.plot(mpeaks[i][0],mpeaks[i][1],ls='dashed')

    #####################
    #Find possible peak matches
    s = UnivariateSpline(As, Ps)
    sas = linspace(min(As), max(As), len(As))
    sps = s(sas)
    coefs = cspline1d(sps)
    pressure = list()
    ks = list()
    theas = list()
    thetas = list()
    i = 0
    for (ind, xgf, ygf, lsq, initlsq, npk) in peaks:  #for each peak found
        theta2 = xgf[npk]
        intens = ygf[npk]

        i += 1

        d = lamda / (2.0 * sin(radians(theta2) / 2.0))

        for j in ksq:  #for each k value given
            a = d * sqrt(float(j))
            if (a > min(As) - tol and a < max(As) + tol):
                #######################
                #Found an MgO peak
                ks.append(j)
                theas.append(a)
                thetas.append(theta2)

                pressure.append(
                    cspline1d_eval(coefs, [a], dx=sas[1] - sas[0], x0=sas[0]))
                pl.scatter(theta2, intens, s=30, c='red', marker='o')
                pl.text(theta2,
                        intens - 0.5,
                        str(round(theta2, 5)),
                        position=(theta2, intens),
                        size='x-small')

                #ksq.remove(j)
                #break

    #####################
    #Print Results

    print "Possible Matches:"
    print "kmag\t| theta2(deg)\t| pressure(GPa)\t| a (Angstroms"
    for i in range(len(ks)):
        print str(ks[i]) + "\t| " + str(round(thetas[i], 5)) + "  \t| " + str(
            round(pressure[i][0], 2)) + "   \t| " + str(round(theas[i], 5))
    avgp = sum(pressure) / len(pressure)
    print "At temperature " + str(Tval) + "K"
    err = max([fabs(i - avgp) for i in pressure])
    print "Average Pressure: " + str(round(avgp, 2)) + "GPa, Error: " + str(
        round(err, 3)) + "GPa, StdDev: " + str(round(std(pressure), 3))

    pl.show()
Example #45
0
#!/usr/bin/python

#coding: utf-8

from numpy import arange, cos, sin

#two functions from scipy.signal for signal processing
from scipy.signal import cspline1d, cspline1d_eval

#two Matplotlib functions to generate a plot
from pylab import plot, show

x0 = arange(20)
y0 = cos(x0) * sin(x0 / 2)  #y computed from x
dx = x0[1] - x0[0]  #original grid spacing
x1 = arange(-1, 21, 0.1)

#spline coefficients for the one-dimensional array
cj = cspline1d(y0)

#evaluate the spline at a new set of points
y1 = cspline1d_eval(cj, x1, dx=dx, x0=x0[0])

plot(x1, y1, '-g', x0, y0, '^y')  #draw both curves
show()  #display the plot
Example #46
0
def compute(chifile, Tfil, pressure, lamda):
    #####################
    #Get lamda and T from the temperature file
    try:
        Tfil = float(Tfil)
    except ValueError:
        fndT = False
        tlist = open(Tfil, "r")
        for line in tlist:
            if line[0] == '#':
                continue
            line = line.rstrip()
            vals = line.split(None)
            if str(vals[0]) == chifile:
                Tfil = int(vals[1])
                lamda = float(vals[2])
                fndT = True
                break
        tlist.close()
        if fndT == False:
            print "Error: Unable to find chi file " + chifile + " in temperature file " + Tfil
            print "Please manually enter the temperature or enter the temperature into the temperature list file " + Tfil
            exit(0)
    else:
        if lamda == -1.0:
            parser.print_help()
            print "\nError: When temperature is given manually, must use -l arguement to set lambda.\n"
            exit()
        if Tfil != 0 and Tfil != 300 and (Tfil < 500 or Tfil > 3000):
            parser.print_help()
            print "\nError: Need a valid sample temperature value for analysis"
            exit()

    #####################
    #Read in the chi file
    f = open(chifile, 'r')
    i = 0
    for line in f:
        if i < 3:
            i += 1
            continue
        line = line.lstrip().rstrip()
        if i == 3:
            i += 1
            length = int(line)
            break
    xxs = zeros(length)
    yys = zeros(length)
    i = 0
    for line in f:
        line = line.lstrip().rstrip()
        [xxs[i], _, yys[i]] = line.split(' ')
        i += 1

    #####################
    #Load in the MgO data
    mgo_a0 = 4.213
    (ARs, Ts, PPs) = loadMgO('mgo_eos.dat')
    As = [i * mgo_a0 for i in ARs]
    if Tfil == 0.0:
        Ps = PPs[:, 0]
    elif Tfil == 300.0:
        As.insert(0, mgo_a0)
        Ps = PPs[:, 1].tolist()
        Ps.insert(0, 1.0)
    else:
        Ps = interp_T(Tfil, Ts[2:], PPs[:, 2:])

    #Get the MgO lattice constant for the desired pressure
    mgo_s = UnivariateSpline(Ps, As)
    mgo_sps = linspace(min(Ps), max(Ps), len(Ps))
    mgo_sas = mgo_s(mgo_sps)
    coefs = cspline1d(mgo_sas)
    latconst = cspline1d_eval(coefs, [pressure],
                              dx=mgo_sps[1] - mgo_sps[0],
                              x0=mgo_sps[0])[0]

    #Get the intensity factors
    mgo_ks = [3, 4, 8, 11, 12]  #Valid K vectors
    mgo_mp = [8.0, 6.0, 12.0, 24.0, 8.0]  #Multiplicity
    mgo_sf = [1.0, 3.0, 3.0, 1.0, 3.0]  #Structure Factor
    mgo_Im = [(mgo_sf[i] * mgo_sf[i]) / mgo_mp[i]
              for i in range(len(mgo_sf))]  #Intensity multiplicand

    #Find the MgO peaks at this pressure
    mgo_2th = list()
    mgo_I = list()
    for (i, K) in enumerate(mgo_ks):
        print lamda, K, latconst
        theta = asin(lamda * sqrt(K) * 0.5 / latconst)
        mgo_2th.append(degrees(theta) * 2)
        mgo_I.append(mgo_Im[i] * (1 + cos(theta * 2) * cos(theta * 2)) /
                     (sin(theta) * sin(theta) * cos(theta)))
    mxI = max(mgo_I)
    mxA = sum(xxs) / len(xxs)
    mgo_I = [(i / mxI + 1) * mxA * mxA for i in mgo_I]
    pl.figure()
    pl.plot(xxs, yys)
    [pl.plot([x, x], [0, y], color='red') for (x, y) in zip(mgo_2th, mgo_I)]
    [
        pl.text(x, 5, str(z), size='x-small')
        for (x, y, z) in zip(mgo_2th, mgo_I, mgo_ks)
    ]
    pl.title(chifile + ", T=" + str(int(Tfil)) + ", P~" + str(pressure))
    pl.show()
Example #47
0
def splinefit(xdata, ydata):
    xs = arange(min(xdata), max(xdata), 0.1 / float(len(xdata)))
    cj = cspline1d(array(ydata))
    return (xs, cspline1d_eval(cj, xs, dx=xdata[1] - xdata[0], x0=xdata[0]))
Example #48
0
def get_errorbar( fv, fname=None, trace=None, frac=0.1, additional_fixed=None, plotresult=False, guess=dict() ):
    """ Use Lakowicz's f-statistic method to find the 95% confidence interval on a fitting parameter.
        fv -- a string/dictionary key identifying the parameter to find a confidence interval on.
        fname -- (optional, can use trace instead) fname of data file to fit.
        trace -- can use trace instead of fname to pass in info on wraptime and irf fname, etc.
        frac -- the fractional change in the parameter to use for the intial 5-point mesh
        additional_fixed -- any parameters you want to hold fixed during the fitting
        plotresult -- plot the f-statistic curve?
        guess -- dictionary of initial parameters for the fit (by default, best-fit values are used)
    """
    #bestfit, bestacorr = load_wire( fname )
    if fname is not None:
        raise ValueError('have modified this to prefer working with actual traces to forward wraptime, etc., to do_fit')
        bestfit = load_wire( fname )
    if trace is not None:
        assert fname==None
        bestfit = trace.fitresults.copy()
    bestChi2 = bestfit['ReducedChi2']
    irf = bestfit['irf_dispersion']
    lkeys = ['l0']
    akeys = ['a0']
    if bestfit.has_key('l1'):
        lkeys.append('l1')
        akeys.append('a1')
    if bestfit.has_key('l2'):
        lkeys.append('l2')
        akeys.append('a2')
    if bestfit.has_key('l3'):
        lkeys.append('l3')
        akeys.append('a3')
    fixedparams = [fv]
    if additional_fixed is not None: fixedparams += additional_fixed
    fv_best = bestfit[fv]

    # If holding l1-l3 lifetimes fixed, as we did
    # for the actual fit, then Fx should equal 1.0 at the 'bestfit' value of the parameter.
    # But if l1-l3 are allowed to vary (making a more conservative error bar), then
    # Fx may be lower than 1.0, and the minimum may not be at the "bestfit" value.
    # This is to say that the bestfit of the constrained fit may not be the bestfit of the unconstrained fit.

    #Fx = 1.0038 # Threshold found from F-statistic 1600 deg. freedom (~half trace points, following Fundamentals Of Fluorescence Spectroscopy)
    Fx = 1.001858 # Threshold found from F-statistic on all 3282 points. Seems easier to justify
    Fx_list = [] # later we'll sort these in order of increasing parameter value to enable interpolation
    val_step = frac*fv_best/2
    argvals = arange( fv_best*(1.0-frac), fv_best*(1.0+frac)+val_step, val_step )

    # do a coarse (5-pt) run across the data
    for val in argvals:
        bestfit[ fv ] = val
        l = [ bestfit[key] for key in lkeys ]
        a = [ bestfit[key] for key in akeys ]
        params = do_fit( trace, l, a, fixedparams, irf, guess )
        Fx_list.append( [val, params['ReducedChi2']/bestChi2] )

    if all(array(Fx_list)[:,1]>Fx):
        Fx_list = []
        for val in argvals:
            bestfit[ fv ] = val
            cpy = bestfit.copy()
            for key in lkeys:
                if key not in fixedparams: cpy[key] = 1.25*bestfit[key]
            for key in akeys:
                if key not in fixedparams: cpy[key] = 1.5*bestfit[key]
            l = [ cpy[key] for key in lkeys ]
            a = [ cpy[key] for key in akeys ]
            params = do_fit( trace, l, a, fixedparams, irf, guess )
            Fx_list.append( [val, params['ReducedChi2']/bestChi2] )

    if all(array(Fx_list)[:,1]>Fx): raise ValueError("Problem fitting: always above Fx. Min: %f" % (array(Fx_list)[:,1].min()))
    

    # if the left side (low param value) didn't exceed Fx threshold, extend
    val = argvals[0]
    while Fx_list[0][1] < Fx:
        val -= val_step
        if val < 0: 
            if fv in ['l1', 'l2', 'l3']:
                raise ValueError("long-time component just went negative...")
            else:
                break
        bestfit[ fv ] = val
        l = [ bestfit[key] for key in lkeys ]
        a = [ bestfit[key] for key in akeys ]
        params = do_fit( trace, l, a, fixedparams, irf, guess )
        Fx_list.append( [val, params['ReducedChi2']/bestChi2] )
        Fx_list.sort( key=lambda x: x[0] ) # sort by first element (parameter value)

    # if the right side (high param value) didn't exceed Fx threshold, extend
    val = argvals[-1]
    while Fx_list[-1][1] < Fx:
        val += val_step
        bestfit[ fv ] = val
        l = [ bestfit[key] for key in lkeys ]
        a = [ bestfit[key] for key in akeys ]
        params = do_fit( trace, l, a, fixedparams, irf, guess )
        Fx_list.append( [val, params['ReducedChi2']/bestChi2] )
        Fx_list.sort( key=lambda x: x[0] ) # sort by first element (parameter value)


    # interpolate to find values at threshold
    Fx_array = array( Fx_list )
    splines = cspline1d( Fx_array[:,1] )
    interp_val = linspace( Fx_array[0,0], Fx_array[-1,0], 500 )
    interp_Fx = cspline1d_eval( splines, interp_val, dx=val_step, x0=Fx_array[:,0].min() )
    error_bar = [ interp_val[find(interp_Fx<Fx)[0]], interp_val[find(interp_Fx<Fx)[-1]] ]

    if plotresult:
        fig = figure(1)
        #fig.clf()
        ax_chi = fig.add_subplot(111)
        ax_chi.cla()
        ax_chi.plot( interp_val, interp_Fx, '-k' )
        ax_chi.plot( interp_val, [Fx]*len(interp_val), '--k' )
        ax_chi.plot( Fx_array[:,0], Fx_array[:,1], 'sg' )
        ax_chi.plot( error_bar, [Fx]*2, '-b', lw=3.0 )

        ax_chi.set_ylim([0.99, 1.01])
        fig.show()
        fig.canvas.draw()

    return error_bar
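
# The hard-coded Fx thresholds above follow the F-statistic criterion from
# Lakowicz; a sketch of how such a threshold could be derived with scipy.stats
# is shown below. The number of varied parameters p used here is illustrative,
# since the exact p behind 1.001858 is not stated.
from scipy.stats import f

def f_threshold(n_params, dof, confidence=0.95):
    # Lakowicz: chi2(par)/chi2(min) <= 1 + (p/nu) * F(p, nu; P)
    return 1.0 + (float(n_params) / dof) * f.ppf(confidence, n_params, dof)

print f_threshold(2, 3282)  # ~1.0018 for p=2 and 3282 points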
Example #49
0
# Example showing how to use B-splines in scipy.signal to do
# interpolation. The input points must be equally spaced to use these
# routines.
# 
# <codecell>


from numpy import r_, sin
from scipy.signal import cspline1d, cspline1d_eval

x = r_[0:10]
dx = x[1]-x[0]
newx = r_[-3:13:0.1]  # notice outside the original domain 
y = sin(x) 
cj = cspline1d(y)
newy = cspline1d_eval(cj, newx, dx=dx,x0=x[0]) 
from pylab import plot, show
plot(newx, newy, x, y, 'o') 
show()

# <markdowncell>

# ![](files/Interpolation_attachments/interpolate_figure1.png)
# 
# N-D interpolation for equally-spaced data
# =========================================
# 
# The scipy.ndimage package also contains spline\_filter and
# map\_coordinates which can be used to perform N-dimensional
# interpolation for equally-spaced data. A two-dimensional example is
# given below:
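# 
# A sketch of such a two-dimensional interpolation (synthetic data,
# illustrative names):
# 
# <codecell>

import numpy as np
from scipy import ndimage

x, y = np.ogrid[-1:1:5j, -1:1:5j]            # coarse 5x5 grid
fvals = np.sin(x) * np.sin(y)                # samples on the coarse grid
newx, newy = np.mgrid[-1:1:100j, -1:1:100j]  # fine grid to interpolate onto
x0, y0 = x[0, 0], y[0, 0]
dx, dy = x[1, 0] - x0, y[0, 1] - y0
coords = np.array([(newx - x0) / dx,         # fractional row indices
                   (newy - y0) / dy])        # fractional column indices
newf = ndimage.map_coordinates(fvals, coords, order=3)  # cubic B-spline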
Example #50
0
def splinefit(xdata, ydata):
    xs = arange(min(xdata), max(xdata), 0.1 / float(len(xdata)))
    cj = cspline1d(array(ydata))
    return (xs, cspline1d_eval(cj, xs, dx=xdata[1] - xdata[0], x0=xdata[0]))
Example #51
0
def splinefit(xdata, ydata, nx):
    xs = arange(min(xdata), max(xdata), 1.0 / float(nx) / len(xdata))
    cj = cspline1d(ydata)
    return (xs, cspline1d_eval(cj, xs, dx=xdata[1] - xdata[0], x0=xdata[0]))
Example #52
0
def wiggle(values, origin=0, posFill='black', negFill=None, lineColor='black',
           resampleRatio=10, rescale=False, zmin=0, zmax=None, ax=None):
    """
    Plot a trace in VAWT(Variable Area Wiggle Trace)

    Parameters
    ----------
    values: input data (1D numpy array)

    origin: (default, 0) value to fill above or below (float)

    posFill: (default, black)
        color to fill positive wiggles with (string or None)

    negFill: (default, None)
        color to fill negative wiggles with (string or None)

    lineColor: (default, black)
        color of wiggle trace (string or None)

    resampleRatio: (default, 10)
        factor to resample traces by before plotting (1 = raw data) (float)

    rescale: (default, False)
        If True, rescale "values" to be between -1 and 1

    zmin: (default, 0)
        The minimum z to use for plotting

    zmax: (default, len(values))
        The maximum z to use for plotting

    ax: (default, current axis)
        The matplotlib axis to plot onto

    Returns
    -------
    Plot
    """
    if zmax is None:
        zmax = values.size

    # Rescale so that values ranges from -1 to 1
    if rescale:
        values = values.astype(float)  # np.float was removed from modern NumPy; use the builtin
        values -= values.min()
        values /= values.ptp()
        values *= 2
        values -= 1

    # Interpolate at resampleRatio x the previous density
    resample_z = np.linspace(0, values.size, values.size * resampleRatio)
    # cubic spline interpolation
    cj = cspline1d(values)
    resample_v = cspline1d_eval(cj, resample_z)

    # newz = np.linspace(zmax, zmin, resample_z.size)
    # newz = np.linspace(zmin, zmax, resample_z.size)
    newz = resample_z
    if origin is None:
        origin = resample_v.mean()

    # Plot onto the given axis (default: the current axis, per the docstring)
    if ax is None:
        ax = plt.gca()
    if posFill is not None:
        ax.fill_betweenx(newz, resample_v, origin,
                         where=resample_v > origin,
                         facecolor=posFill)
    if negFill is not None:
        ax.fill_betweenx(newz, resample_v, origin,
                         where=resample_v < origin,
                         facecolor=negFill)
    if lineColor is not None:
        ax.plot(resample_v, newz, color=lineColor, linewidth=.1)
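
# A minimal usage sketch for wiggle (synthetic trace; assumes the numpy,
# matplotlib and scipy.signal imports used by the function's module):
import numpy as np
import matplotlib.pyplot as plt

trace = np.cumsum(np.random.randn(200))  # synthetic 1-D trace
fig, ax = plt.subplots()
wiggle(trace, rescale=True, posFill='black', negFill=None, ax=ax)
ax.invert_yaxis()  # depth increasing downward, as in seismic displays
plt.show()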
Example #53
0
def long_time_errorbars(fnames,
                        fv,
                        frac=0.1,
                        additional_fixed=None,
                        plotresult=False):
    # finding error bar in the "fixed" parameters... I think this is similar to what
    # I did above but with average RChi2 across all traces
    # I tried to recalculate a lower Fx value because the DOF increased from 3282 to 3282*len(fnames),
    # but the calculator could only handle <10000 DOF, and Fx only changed from 1.001858 to 1.001845.

    lkeys = ['l0', 'l1', 'l2', 'l3']
    akeys = ['a0', 'a1', 'a2', 'a3']
    fixedparams = [fv]
    if additional_fixed is not None: fixedparams += additional_fixed
    #Fx = 1.001845 # Threshold found from F-statistic on 9999 points. (max of calculator, faking 3282*21).
    Fx = 1.001858  # Threshold found from F-statistic on 3282 pts.
    Fx_list = []  # later we'll sort these in order of increasing parameter value to enable interpolation

    # first get best_avg_RChi2:
    bestfits = []
    chi2 = []
    for fname in fnames:
        bestfit, bestacorr = load_wire(fname)
        bestfits.append(bestfit)
        chi2.append(bestfit['ReducedChi2'])
    best_avg_RChi2 = np.mean(chi2)

    # setup initial coarse scan
    assert bestfit.has_key("l3")
    fv_best = bestfit[fv]
    val_step = frac * fv_best / 2.0
    argvals = arange(fv_best * (1.0 - frac),
                     fv_best * (1.0 + frac) + val_step, val_step)

    def fmin_kernel(twoT, *args):
        avg_RChi2 = 0.0
        fv_value = args[0]
        longT_floating = [key for key in ['l1', 'l2', 'l3']
                          if key != fv]  # the two longTime components we're optimizing
        chi2 = []
        for i, fname in enumerate(fnames):
            bestfit = bestfits[i].copy()
            bestfit[fv] = fv_value
            bestfit[longT_floating[0]] = twoT[0]
            bestfit[longT_floating[1]] = twoT[1]
            l = [bestfit[key] for key in lkeys]
            a = [bestfit[key] for key in akeys]
            irf = bestfit['irf_dispersion']
            # all three long-components are fixed for this fit, but the
            # values they are fixed at are set at different levels:
            # fv is the component we're finding an errorbar for, and
            # it is set by the function long_time_errorbars.
            # The other two are allowed to "float" in response, but
            # not float freely for each trace individually; we're
            # looking for an error bar for the global fit across all
            # cavities on a given sample, so we constrain them for each
            # individual fit but let fmin play with them to minimize
            # the mean reduced Chi squared across all data sets.
            params = do_fit(fname, l, a, ['l1', 'l2', 'l3'], irf)
            chi2.append(params['ReducedChi2'])
        return np.mean(chi2)

    # do a coarse (5-pt) run across the data
    twoT_guess = [bestfit[key] for key in ['l1', 'l2', 'l3']
                  if key != fv]  # the two longTime components we're optimizing
    for val in argvals:
        res = fmin(fmin_kernel,
                   twoT_guess,
                   args=(val, ),
                   xtol=0.005,
                   ftol=0.005,
                   full_output=1)
        avg_RChi2 = res[1]
        Fx_list.append([val, avg_RChi2 / best_avg_RChi2])

    assert not all(array(Fx_list)[:, 1] > Fx)

    # if the left side (low param value) didn't exceed Fx threshold, extend
    val = argvals[0]
    while Fx_list[0][1] < Fx:
        val -= val_step
        if val < 0:
            if fv in ['l1', 'l2', 'l3']:
                raise ValueError("long-time component just went negative...")
            else:
                break
        res = fmin(fmin_kernel,
                   twoT_guess,
                   args=(val, ),
                   xtol=0.005,
                   ftol=0.005,
                   full_output=1)
        avg_RChi2 = res[1]
        Fx_list.append([val, avg_RChi2 / best_avg_RChi2])
        Fx_list.sort(
            key=lambda x: x[0])  # sort by first element (parameter value)

    # if the right side (high param value) didn't exceed Fx threshold, extend
    val = argvals[-1]
    while Fx_list[-1][1] < Fx:
        val += val_step
        res = fmin(fmin_kernel,
                   twoT_guess,
                   args=(val, ),
                   xtol=0.005,
                   ftol=0.005,
                   full_output=1)
        avg_RChi2 = res[1]
        Fx_list.append([val, avg_RChi2 / best_avg_RChi2])
        Fx_list.sort(
            key=lambda x: x[0])  # sort by first element (parameter value)

    # interpolate to find values at threshold
    Fx_array = array(Fx_list)
    splines = cspline1d(Fx_array[:, 1])
    interp_val = linspace(Fx_array[0, 0], Fx_array[-1, 0], 500)
    interp_Fx = cspline1d_eval(splines,
                               interp_val,
                               dx=val_step,
                               x0=Fx_array[:, 0].min())
    error_bar = [
        interp_val[find(interp_Fx < Fx)[0]],
        interp_val[find(interp_Fx < Fx)[-1]]
    ]

    if plotresult:
        fig = figure(1)
        #fig.clf()
        ax_chi = gca()
        #ax_chi = fig.add_subplot(111)
        #ax_chi.cla()
        ax_chi.plot(interp_val, interp_Fx, label=fv)
        ax_chi.plot(interp_val, [Fx] * len(interp_val), '--k')
        ax_chi.plot(Fx_array[:, 0], Fx_array[:, 1], 'sk')
        ax_chi.plot(error_bar, [Fx] * 2, '-k', lw=3.0)

        #ax_chi.set_ylim([0.99, 1.01])
        fig.show()
        fig.canvas.draw()

    return error_bar
Example #54
0
def interp(dx, knots, xis):
    cspl = cspline1d(knots)  # cubic spline coefficients
    return cspline1d_eval(cspl, xis, dx=dx, x0=0.0)
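
# A minimal usage sketch for interp (assumes the knot values are samples on a
# uniform grid of spacing dx starting at x = 0):
import numpy as np

knots = np.sin(np.arange(0.0, 10.0, 0.5))  # samples every dx = 0.5
xis = np.linspace(0.0, 4.5, 100)           # points to evaluate at
fine = interp(0.5, knots, xis)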