Code example #1
File: pyfda_lib.py  Project: euripedesrocha/pyFDA
import numpy as np

def H_mag(num, den, z, H_max, H_min = None, log = False, div_by_0 = 'ignore'):
    """
    Calculate `|H(z)|` at the complex frequency(ies) `z` (scalar or
    array-like).  The function `H(z)` is given in polynomial form with numerator and
    denominator. When log = True, `20 log_10 (|H(z)|)` is returned.

    The result is clipped at H_min, H_max; clipping can be disabled by passing
    None as the argument.

    Parameters
    ----------
    num : float or array-like
        The numerator polynomial of H(z).
    den : float or array-like
        The denominator polynomial of H(z).
    z : float or array-like
        The complex frequency(ies) where `H(z)` is to be evaluated
    H_max : float
        The maximum value to which the result is clipped
    H_min : float, optional
        The minimum value to which the result is clipped (default: None, no lower clipping)
    log : boolean, optional
        When true, return 20 * log10 (|H(z)|). The clipping limits have to
        be given as dB in this case.
    div_by_0 : string, optional
        What to do when division by zero occurs during calculation (default:
        'ignore'). As the denominator of H(z) becomes 0 at each pole, warnings
        are suppressed by default. This parameter is passed to numpy.seterr(),
        hence other valid options are 'warn', 'raise' and 'print'.

    Returns
    -------
    H_mag : float or ndarray
        The magnitude |`H(z)`| for each value of `z`.
    """

    try: len(num)
    except TypeError:
        num_val = abs(num) # numerator is a scalar
    else:
        num_val = abs(np.polyval(num, z)) # evaluate numerator at z
    try: len(den)
    except TypeError:
        den_val = abs(den) # denominator is a scalar
    else:
        den_val = abs(np.polyval(den, z)) # evaluate denominator at z

    olderr = np.geterr()  # store current floating point error behaviour
    # set divide-by-zero behaviour as requested (default 'ignore': just return 'inf'):
    np.seterr(divide = div_by_0)

    if log:
        H_val = 20 * np.log10(num_val / den_val)
    else:
        H_val = num_val / den_val

    np.seterr(**olderr) # restore previous floating point error behaviour

    # clip result to H_min / H_max
    return np.clip(H_val, H_min, H_max)
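A minimal usage sketch (hypothetical values, continuing the snippet above): evaluate the magnitude of H(z) = (z - 1)/(z - 0.5) on the unit circle.

# hypothetical usage: first-order system H(z) = (z - 1)/(z - 0.5)
w = np.linspace(0, np.pi, 512)                  # digital frequencies
z = np.exp(1j * w)                              # points on the unit circle
H_lin = H_mag([1, -1], [1, -0.5], z, H_max=10)  # linear magnitude
H_dB = H_mag([1, -1], [1, -0.5], z, H_max=40, H_min=-80, log=True)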
Code example #2
File: DragCoefficient.py  Project: lrhgit/tkt4140
def cd_sphere_vector_bool(Re):
    from numpy import log10, array, polyval, logical_and, zeros_like
       
    condition1 = Re < 0
    condition2 = logical_and(0 < Re, Re <= 0.5)
    condition3 = logical_and(0.5 < Re, Re <= 100.0)
    condition4 = logical_and(100.0 < Re, Re <= 1.0e4)
    condition5 = logical_and(1.0e4 < Re, Re <= 3.35e5)
    condition6 = logical_and(3.35e5< Re, Re <= 5.0e5)
    condition7 = logical_and(5.0e5 < Re, Re <= 8.0e6)
    condition8 = Re > 8.0e6
    
    cd = zeros_like(Re)
    cd[condition1] = 0.0
    
    cd[condition2] = 24/Re[condition2]
    
    p = array([4.22,-14.05,34.87,0.658])
    cd[condition3] = polyval(p,1.0/Re[condition3]) 
    
    p = array([-30.41,43.72,-17.08,2.41])
    cd[condition4] = polyval(p,1.0/log10(Re[condition4]))
    
    p = array([-0.1584,2.031,-8.472,11.932])
    cd[condition5] = polyval(p,log10(Re[condition5]))
    
    cd[condition6] = 91.08*(log10(Re[condition6]/4.5e5))**4 + 0.0764
    
    p  = array([-0.06338,1.1905,-7.332,14.93])
    cd[condition7] = polyval(p,log10(Re[condition7]))
    
    cd[condition8] = 0.2
    
    return cd
Code example #3
File: DragCoefficient.py  Project: lrhgit/tkt4140
def cd_sphere(Re):
    "Computes the drag coefficient of a sphere as a function of the Reynolds number Re."
    # Curve fit after fig. A-56 in Evett & Liu: "Fluid Mechanics & Hydraulics",
    # Schaum's Solved Problems, McGraw-Hill 1989.
    
    from numpy import log10,array,polyval    
    
    if Re <= 0.0:
        CD = 0.0
    elif Re > 8.0e6:
        CD = 0.2
    elif Re > 0.0 and Re <= 0.5:
        CD = 24.0/Re
    elif Re > 0.5 and Re <= 100.0:
        p = array([4.22,-14.05,34.87,0.658])
        CD = polyval(p,1.0/Re) 
    elif Re > 100.0 and Re <= 1.0e4:
        p = array([-30.41,43.72,-17.08,2.41])
        CD = polyval(p,1.0/log10(Re))
    elif Re > 1.0e4 and Re <= 3.35e5:
        p = array([-0.1584,2.031,-8.472,11.932])
        CD = polyval(p,log10(Re))
    elif Re > 3.35e5 and Re <= 5.0e5:
        x1 = log10(Re/4.5e5)
        CD = 91.08*x1**4 + 0.0764
    else:
        p = array([-0.06338,1.1905,-7.332,14.93])
        CD = polyval(p,log10(Re))
    return CD
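A quick usage sketch with hypothetical Reynolds numbers, one per flow regime (cd_sphere as defined above):

Re_values = [0.1, 50.0, 1.0e3, 1.0e5, 4.0e5, 1.0e6]
cds = [cd_sphere(Re) for Re in Re_values]   # scalar version: one branch per value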
Code example #4
File: ECdata.py  Project: shortda/ECdata
    def calibrate(self, title, rng=None, cal=None):
        """ Returns calibrated bin centers for saved histogram with given title.
        If the cal argument is given, title must exactly match a key in the file
        self.ofile.

        "title" title of histogram. If cal is not None then title must exactly
            match a key in the hdf5 file self.ofile.
        "rng" is the index range of elements to return
        "cal" is a set of polynomial coefficients for np.polyval. A linear
            scaling by a factor s would require cal=[s,0]
        """
        rng = rng or [None, None]
        if cal is None:
            bins = self.bcent(title, rng)
            return np.polyval(self.cal, bins)
        else:
            with h5py.File(self.ofile, 'r') as ofile:
                if title not in ofile.keys():
                    raise ValueError(title + ' is not in file ' + self.ofile)
                elif title[0] == 'b':
                    bins = self.bcent(title[1:], rng)
                    return np.polyval(cal, bins)
                else:
                    axis = ofile[title][rng[0]:rng[1]]
                    return np.polyval(cal, axis)
Code example #5
File: DragCoefficient.py  Project: lrhgit/tkt4140
def cd_sphere_vector(Re):
    "Computes the drag coefficient of a sphere as a function of the Reynolds number Re."
    # Curve fit after fig. A-56 in Evett & Liu: "Fluid Mechanics & Hydraulics",
    # Schaum's Solved Problems, McGraw-Hill 1989.

    from numpy import log10, array, polyval, zeros_like, where
    CD = zeros_like(Re)
   
    CD = where(Re<0,0.0,0.0)     # condition 1
    
    CD = where((Re > 0.0) & (Re <=0.5),24/Re,CD) # condition 2

    p = array([4.22,-14.05,34.87,0.658])
    CD = where((Re > 0.5) & (Re <=100.0),polyval(p,1.0/Re),CD) #condition 3

    p = array([-30.41,43.72,-17.08,2.41])
    CD = where((Re >100.0)  & (Re <=1.0e4) ,polyval(p,1.0/log10(Re)),CD) #condition 4

    p = array([-0.1584,2.031,-8.472,11.932])
    CD = where((Re > 1.0e4)  &  (Re <=3.35e5),polyval(p,log10(Re)),CD) #condition 5

    CD = where((Re > 3.35e5) & (Re <=5.0e5),91.08*(log10(Re/4.5e5))**4 + 0.0764,CD) #condition 6

    p  = array([-0.06338,1.1905,-7.332,14.93])
    CD = where((Re > 5.0e5)  &  (Re <=8.0e6),polyval(p,log10(Re)),CD) #condition 7
    
    CD = where(Re>8.0e6,0.2,CD)  # condition 8

    return CD
Code example #6
import numpy as np
import pandas as pd

def getParams(inputF,cosineScore,error,lib,minimumFeature):
    # parse the argument
    def search(lib,row):
        if row['INCHI'] in lib:
            return lib[row['INCHI']]
        return -1
    df = pd.read_csv(inputF,sep='\t')
    # filter out by cosineScore
    df = df[df.MQScore > cosineScore]
    # search for ki average
    lib_df = pd.read_csv(lib)
    lib_df = lib_df[lib_df.polarity.str.contains('non-polar')]
    lib = pd.Series(lib_df.ki_nonpolar_average.values,index=lib_df.INCHI.values).to_dict()
    df['ki_average'] = df.apply(lambda row:search(lib,row),axis = 1)
    df = df[df.ki_average>0]

    #clean the data for polynomial fitting:
    df = df[df['ki_average']>500]
    df = df[df.RT_Query<800]
    if len(df) < minimumFeature:
        return None
    #simply find the polynomial fitting
    p_a = np.polyfit(df.RT_Query,df.ki_average,2)
    # we fit it twice to have more robust results:
    df = df[abs(df.ki_average-np.polyval(p_a,df.RT_Query))/np.polyval(p_a,df.RT_Query)<error]
    p_b = np.polyfit(df.RT_Query,df.ki_average,2)
    return p_b
Code example #7
File: example08.py  Project: deodeta/6.00SC
def showExamplePolyFit(xs,ys,fitDegree1 = 1,fitDegree2 = 2):
    pylab.figure()    
    pylab.plot(xs,ys,'r.',ms=2.0,label = "measured")

    # polynomial fit of degree fitDegree1
    coeffs1 = numpy.polyfit(xs, ys, fitDegree1)

    # predict the curve
    pys = numpy.polyval(numpy.poly1d(coeffs1), xs)

    se = mse(ys, pys)
    r2 = rSquared(ys, pys)

    pylab.plot(xs,pys, 'g--', lw=5,label="%d degree fit, SE = %0.10f, R2 = %0.10f" %(fitDegree1,se,r2))

    # polynomial fit of degree fitDegree2
    coeffs2 = numpy.polyfit(xs, ys, fitDegree2)

    # predict the curve
    pys = numpy.polyval(numpy.poly1d(coeffs2), xs)

    se = mse(ys, pys)
    r2 = rSquared(ys, pys)

    pylab.plot(xs,pys, 'b--', lw=5,label="%d degree fit, SE = %0.10f, R2 = %0.10f" %(fitDegree2,se,r2))

    pylab.legend()
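A usage sketch, assuming mse and rSquared are helpers defined elsewhere in example08.py; the stand-ins below are minimal assumed implementations:

import numpy
import pylab

def mse(ys, pys):
    # assumed helper: mean squared error
    ys, pys = numpy.array(ys), numpy.array(pys)
    return ((ys - pys) ** 2).mean()

def rSquared(ys, pys):
    # assumed helper: coefficient of determination
    ys, pys = numpy.array(ys), numpy.array(pys)
    return 1 - ((ys - pys) ** 2).sum() / ((ys - ys.mean()) ** 2).sum()

xs = numpy.linspace(0, 10, 50)
ys = 3 * xs ** 2 + numpy.random.normal(0, 5, size=50)
showExamplePolyFit(xs, ys, fitDegree1=1, fitDegree2=2)
pylab.show()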
Code example #8
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

def fit(data, nz):
    x = [0 for iz in range(0, nz, 1)]
    y = [0 for iz in range(0, nz, 1)]
    z = [iz for iz in range(0, nz, 1)]

    for iz in range(0, nz, 1):
        x[iz], y[iz] = ndimage.measurements.center_of_mass(np.array(data[:,:,iz]))

    #Fit centerline in the Z-X plane using polynomial function
    print('\nFit centerline in the Z-X plane using polynomial function...')
    coeffsx = np.polyfit(z, x, 1)
    polyx = np.poly1d(coeffsx)
    x_fit = np.polyval(polyx, z)
    print('x_fit')
    print(x_fit)

    #Fit centerline in the Z-Y plane using polynomial function
    print('\nFit centerline in the Z-Y plane using polynomial function...')
    coeffsy = np.polyfit(z, y, 1)
    polyy = np.poly1d(coeffsy)
    y_fit = np.polyval(polyy, z)


    #### 3D plot
    fig1 = plt.figure()
    ax = Axes3D(fig1)
    ax.plot(x,y,z,zdir='z')
    ax.plot(x_fit,y_fit,z,zdir='z')
    plt.show()
    return x, y, x_fit, y_fit
Code example #9
File: misc.py  Project: achuwilson/openrave
import numpy

def sampleR3(averagedist, boxdims):
    """low-discrepancy sampling using primes.
    The samples are evenly distributed with an average distance of averagedist inside the box with dimensions boxdims.
    Algorithm from "Geometric Discrepancy: An Illustrated Guide" by Jiri Matousek"""
    minaxis = numpy.argmin(boxdims)
    maxaxis = numpy.argmax(boxdims)
    meddimdist = numpy.sort(boxdims)[1]
    # convert average distance to number of samples... do simple 3rd degree polynomial fitting...
    x = meddimdist/averagedist
    if x < 25.6:
        N = int(numpy.polyval([-3.50181522e-01, 2.70202333e+01, -3.10449514e+02, 1.07887093e+03], x))
    elif x < 36.8:
        N = int(numpy.polyval([4.39770585e-03, 1.10961031e+01, -1.40066591e+02, 1.24563464e+03], x))
    else:
        N = int(numpy.polyval([5.60147111e-01, -8.77459988e+01, 7.34286834e+03, -1.67779452e+05], x))
    pts = numpy.zeros((N, 3))
    pts[:, 0] = numpy.linspace(0.0, meddimdist, N)
    pts[:, 1] = meddimdist*numpy.mod(0.5+0.5*numpy.sqrt(numpy.arange(0, 5.0*N, 5.0)), 1.0)
    pts[:, 2] = meddimdist*numpy.mod(0.5+3*numpy.sqrt(numpy.arange(0, 13.0*N, 13.0)), 1.0)
    if boxdims[minaxis] < meddimdist:
        pts = pts[pts[:, minaxis] <= boxdims[minaxis], :]
    if boxdims[maxaxis] > meddimdist:
        # have to copy across the max dimension
        numfullcopies = numpy.floor(boxdims[maxaxis]/meddimdist)
        oldpts = pts
        pts = numpy.array(oldpts)
        for i in range(int(numfullcopies)-1):
            oldpts[:, maxaxis] += meddimdist
            pts = numpy.r_[pts, oldpts]
        if boxdims[maxaxis]/meddimdist > numfullcopies:
            oldpts[:, maxaxis] += meddimdist
            pts = numpy.r_[pts, oldpts[oldpts[:, maxaxis] <= boxdims[maxaxis], :]]
    return pts
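A short usage sketch with hypothetical box dimensions (numpy imported as above):

pts = sampleR3(averagedist=0.05, boxdims=numpy.array([0.5, 0.5, 0.5]))
print(pts.shape)   # (N, 3): points spaced roughly 0.05 apart inside the box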
Code example #10
File: utils.py  Project: jessecusack/ocean-tools
def nan_detrend(x, y, deg=1):
    """Subtract a polynomial fit from the data, ignoring NaNs.

    Parameters
    ----------
    x : array_like
        x data.
    y : array_like
        Data to detrend.
    deg : int
        Degree of polynomial to subtract. (Can be zero i.e. constant)


    Returns
    -------
    y_out : numpy.array
        Detrended data.

    """
    y_out = np.nan*np.zeros_like(y)

    if np.ndim(x) == 1:
        nans = np.isnan(x) | np.isnan(y)
        p = nan_polyfit(x, y, deg)
        y_out[~nans] = y[~nans] - np.polyval(p, x[~nans])
    elif np.ndim(x) == 2:
        for i in range(x.shape[1]):
            nans = np.isnan(x[:, i]) | np.isnan(y[:, i])
            p = nan_polyfit(x[:, i], y[:, i], deg)
            y_out[~nans, i] = y[~nans, i] - np.polyval(p, x[~nans, i])
    else:
        raise RuntimeError('Arguments must be 1 or 2 dimensional arrays.')

    return y_out
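A usage sketch; nan_polyfit is not shown in this snippet, so the version below is an assumed minimal stand-in that fits only the non-NaN pairs:

import numpy as np

def nan_polyfit(x, y, deg):
    # assumed helper: polyfit restricted to non-NaN pairs
    good = ~(np.isnan(x) | np.isnan(y))
    return np.polyfit(x[good], y[good], deg)

x = np.linspace(0.0, 10.0, 50)
y = 2.0 * x + 1.0
y[[3, 17]] = np.nan                # gaps in the record
y_out = nan_detrend(x, y, deg=1)   # residuals; NaN positions stay NaN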
Code example #11
File: winspec.py  Project: antonl/pyWinSpec
    def _make_axes(self):
        '''Construct axes from calibration fields in header file
        '''
        xcalib = self.header.xcalibration
        ycalib = self.header.ycalibration

        xcalib_valid, = struct.unpack('?', xcalib.calib_valid)

        if xcalib_valid:
            xcalib_order, = struct.unpack('>B', xcalib.polynom_order) # polynomial order
            px = xcalib.polynom_coeff[:xcalib_order+1]
            px = np.array(px[::-1]) # reverse coefficients to use numpy polyval
            pixels = np.arange(1, self.header.xdim + 1)
            px = np.polyval(px, pixels)
        else:
            px = np.arange(1, self.header.xdim + 1)

        ycalib_valid, = struct.unpack('?', ycalib.calib_valid)

        if ycalib_valid:
            ycalib_order, = struct.unpack('>B', ycalib.polynom_order) # polynomial order
            py = ycalib.polynom_coeff[:ycalib_order+1]
            py = np.array(py[::-1]) # reverse coefficients to use numpy polyval
            pixels = np.arange(1, self.header.ydim + 1)
            py = np.polyval(py, pixels)
        else:
            py = np.arange(1, self.header.ydim + 1)

        self._xaxis = px
        self._yaxis = py

        return px, py
Code example #12
File: camera.py  Project: softtrainee/arlab
    def set_limits_by_zoom(self, zoom, cx, cy, canvas=None):
        '''
        '''
        def _set_limits(axis_key, px_per_cm, cur_pos, canvas):

            if axis_key == 'x':
                d = self.width
            else:
                d = self.height

            # fall back to the parent canvas
            if canvas is None:
                canvas = self.parent

            if canvas:
                # convert the half-extent from pixels to mm
                d /= 2.0 * px_per_cm / 10.0
                lim = (-d + cur_pos, d + cur_pos)
                canvas.set_mapper_limits(axis_key, lim)

        cdata = self.calibration_data
        xpx_per_cm = np.polyval(cdata.get_xcoeffs(), [zoom])[0]
        ypx_per_cm = np.polyval(cdata.get_ycoeffs(), [zoom])[0]

        _set_limits('x', xpx_per_cm, cx, canvas)
        _set_limits('y', ypx_per_cm, cy, canvas)
Code example #13
def plot_fit(deg, err):
    try:
        polcoefs = np.polyfit(x_rand, y_rand, deg)
    except np.RankWarning:
        # np.RankWarning only surfaces as an exception if warnings are
        # promoted via warnings.simplefilter('error', np.RankWarning);
        # otherwise polyfit merely warns and still returns coefficients
        pass
    if err:
        fig = plt.figure(figsize=(15, 10)) 
        gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
        ax = plt.subplot(gs[0])
    else:
        fig, ax = plt.subplots(1, 1, figsize=(15, 10))
    # data
    ax.plot(x_cont, np.polyval(polcoefs, x_cont))
    ax.legend(['degree {}'.format(deg)])
    plot_dat(ax, err)
    # error
    if err:
        train_error.append(sum((np.polyval(polcoefs, x_rand)-y_rand)**2)/len(x_rand))
        test_error.append(sum((np.polyval(polcoefs, x_test)-y_test)**2)/len(x_test))
        ax = plt.subplot(gs[1])
        ax.plot(deg_list[0:len(train_error)], train_error, marker='o', c=(0, 0, 1))
        ax.plot(deg_list[0:len(test_error)], test_error, marker='o', c=(0.9, 0, 0))
        ax.set_xlabel('degree')
        ax.set_ylabel('error')
        ax.legend(['train', 'test'])
        ax.set_xlim(0.9, deg_list[-1]+0.1)
        ax.set_ylim(0, None)
        ax.set_yticks([])
        # layout
        plt.tight_layout()
Code example #14
File: phasekick.py  Project: ryanpdwyer/pmefm
def measure_dA_dphi_fir(lock, li, tp, dA_dphi_before, dA_dphi_after):
    """Correct for impulsive phase shift at end of pulse time."""

    i_tp = np.arange(lock.t.size)[lock.t < tp][-1]
    # Use 20 data points for interpolating; this is slightly over one
    # cycle of our oscillation
    m = np.arange(-10, 11) + i_tp
    # This interpolator worked reasonably for similar, low-frequency sine waves
    interp = interpolate.KroghInterpolator(lock.t[m], lock.x[m])
    x0 = interp(tp)[()]
    # We only need t0 approximately; the precise value of f0 doesn't matter very much.
    t0 = li.t[(li.t < tp)][-1]
    f0 = li.df[(li.t < tp)][-1] + li.f0(t0)
    v0 = interp.derivative(tp)[()]
    x2 = v0 / (2*np.pi*f0)

    phi0 = np.arctan2(-x2, x0)

    ml = masklh(li.t, tp-t_fit, tp)
    mr = masklh(li.t, tp, tp + t_fit)

    A = abs(li.z_out)
    phi = np.unwrap(np.angle(li.z_out))/(2*np.pi)

    mbAl = np.polyfit(li.t[ml], A[ml], 1)
    mbAr = np.polyfit(li.t[mr], A[mr], 1)

    mb_phi_l = np.polyfit(li.t[ml], phi[ml], 1)
    mb_phi_r = np.polyfit(li.t[mr], phi[mr], 1)

    dA = np.polyval(mbAr, tp) - np.polyval(mbAl, tp)
    dphi = np.polyval(mb_phi_r, tp) - np.polyval(mb_phi_l, tp)

    return phi0, dA, dphi
Code example #15
File: toolsDetectors.py  Project: marcocamma/x3py
def corrNonlinGetPar(linearDet,nonLinearDet,order=2,data_0=0,
    correct_0=0,plot=False,returnCorrectedDet=False):
  """ Find parameters for non linear correction
    *linearDet* should be an 1D array of the detector that is linear
    *nonLinearDet* is the detector that is sussposed to be none linear
    *data_0" is an offset to use for the data (used only if plotting)"
    *correct_0* offset of the "linear detector"""
  p =  np.polyfit(nonLinearDet,linearDet,order)
  p[-1] = p[-1]-correct_0
  if plot:
    d = corrNonlin(nonLinearDet,p,data_0=data_0,correct_0=correct_0)
    plt.plot(linearDet,nonLinearDet,".",label="before correction")
    plt.plot(linearDet,d,".",label="after correction")
    poly_lin = np.polyfit(linearDet,d,1)
    xmin = min(linearDet.min(),0)
    xtemp = np.asarray( (xmin,linearDet.max()) )
    plt.plot(xtemp,np.polyval(poly_lin,xtemp),label="linear fit")
    plt.plot(linearDet,d-np.polyval(poly_lin,linearDet),
       ".",label="difference after-linear")
    plt.xlabel("linearDet")
    plt.ylabel("nonLinearDet")
    plt.legend()
  if order>=2 and p[-3]<0:
    log.warn("corrNonlinGetPar: consistency problem, second order coefficient should \
    be > 0, please double check result (plot=True) or try inverting the data and the\
    correct arguments")

  if returnCorrectedDet:
    return corrNonlin(nonLinearDet,p,data_0=data_0,correct_0=correct_0)
  else:
    return p
Code example #16
File: calibadc.py  Project: GRay63/e20gateway
    def calib(self, name, mac, channel, vnom,
              vmin=None, vmax=None, i2c_command=None):
        self.k.output_state(False)
        response = input('connect to %s' % name)
        if response == ' ' or response == '  ':
            return
        if not vmin:
            vmin = 0.1 * vnom
        if not vmax:
            vmax = 1.05 * vnom
        self.k.select(2)
        self.k.set_voltage(keithley_3p3v)
        self.k.set_current(0.5)
        self.k.output_enable(True)
        self.k.select(1)
        self.k.set_voltage(vmin)
        self.k.set_current(0.1)
        self.k.output_enable(True)
        self.k.output_state(True)
        if i2c_command:
            self.eng.call(eng1_mac, 'i2cInit', False)
            num = self.eng.call(eng1_mac, 'i2cWrite', i2c_command, 10, False)
            print('num:', num)
        vtest = vmin
        vstep = (vmax - vmin) / (number_of_points - 1)
        i = number_of_points
        x = []
        y = []
        while i > 0:
            i -= 1
            self.k.set_voltage(vtest)
            #read_stable(self.k.meas_voltage, 0.005, 4, 10)
            #read_stable(self.fluke.get_read, 0.05, 3, 10)
            sleep(0.5)
            adcs = []
            while len(adcs) < 25:
                adc = self.eng.call(mac, 'readAdc', channel)
                adcs.append(float(adc))
            adc = numpy.average(adcs)
            stddev = numpy.std(adcs)
            #adc = sum(adcs) / len(adcs)
            #print('adc =', adc)
            vmeas = self.fluke.get_read()
            print(self.k.set_voltage(), vmeas, adc, stddev)
            x.append(adc)
            y.append(vmeas)
            vtest += vstep
        p = numpy.polyfit(x, y, 3)
        print('%s_poly = %s' % (name, nice_print(p.tolist())))
        error = 0.0
        i = 0
        while i < len(x):
            v = numpy.polyval(p, x[i])
            e = math.fabs(v - y[i])
            if e > error:
                error = e
            i += 1
        print('%s_error_percent = %.3f' % (name, 100.0 * error / vnom))
        print('%s_range = (%.3f, %.3f)' % (
            name, numpy.polyval(p, 0), numpy.polyval(p, 1023)))
Code example #17
File: postfit.py  Project: eptune/smepycode
def tfit(line):
    """
    Correct for temperature systematics.  Fit a polynomial to (teff, abund)
    and require that the corrected solar value be 0.  We also cut on vsini.

    returns:
    (fitabund,fitpar,t,abund)
    fitabund - the temperature corrected abundance
    fitpar   - the parameters to the polynomial fit
    t        - the temperature array
    abund    - the non-temp-corrected abundances

    """
    deg  = 3 # fit with a 3rd degree polynomial
    #define abundance for the particular line we're looking at
    p = getelnum.Getelnum(line)
    elstr = p.elstr

    conn = sqlite3.connect(os.environ['STARSDB'])
    cur = conn.cursor()

    #pull in the abundances and the non-corrected abundances
    cmd = 'SELECT '+elstr+'_abund_nt,teff FROM mystars WHERE '+globcut(elstr)
    cur.execute(cmd)
    arr = np.array(cur.fetchall() ) 
    abund,t = arr[:,0],arr[:,1]
    abund = abund - p.abnd_sol

    #fit the points
    fitpar = np.polyfit(t,abund,deg)
    #subtract out the fit, while requiring that the solar value be 0.
    fitpar[deg] = fitpar[deg] - np.polyval(fitpar,p.teff_sol)
    fitabund = abund - np.polyval(fitpar,t)
    return (fitabund,fitpar,t,abund)
Code example #18
    def FitLine(self, definition):
        '''
        Fits a 1st order polynomial (a line) to the
        start and end points of a straight path,
        then populates the line with equally spaced
        points, and returns the list of points
        '''
    
        Ax = self.A.get_Pos_X()
        Ay = self.A.get_Pos_Y()

        Bx = self.B.get_Pos_X()
        By = self.B.get_Pos_Y()

        SubWP_No = int(numpy.linalg.norm(numpy.array([Ax-Bx, Ay-By])) * definition * 0.01)
        '''
        If the path is vertical, the X and Y axes must be swapped before the
        polynomial fitting and populating, then switched back to return the
        proper point coordinates
        '''
        if abs(Ax - Bx) < 1:

            self.poly = numpy.polyfit([Ay, By], [Ax, Bx], 1)
            prange = numpy.linspace(Ay, By, SubWP_No)

            values = numpy.polyval(self.poly, prange)
            self.SubWP = numpy.array([prange, values])

        else:
            self.poly = numpy.polyfit([Ax, Bx], [Ay, By], 1)
            prange = numpy.linspace(Ax, Bx, SubWP_No)
            values = numpy.polyval(self.poly, prange)
            self.SubWP = numpy.array([values, prange])
Code example #19
File: helxas.py  Project: aripekka/helxas
    def get_spectrum(self,sample_str,x_scale = 'energy'):

        #normalize the signals to the tube current
        direct_beam = self.scan_groups['direct_beam']
        sample = self.scan_groups[sample_str]

        theta = direct_beam['signal']['theta']

        I0 = direct_beam['signal']['intensity']/direct_beam['signal']['tube_current']
        I0_err = direct_beam['signal']['intensity_error']/direct_beam['signal']['tube_current']

        I = sample['signal']['intensity']/sample['signal']['tube_current']
        I_err = sample['signal']['intensity_error']/sample['signal']['tube_current']

        theta_I0bg = direct_beam['background']['theta']
        theta_Ibg = sample['background']['theta']

        I0_bg =  direct_beam['background']['intensity']/direct_beam['background']['tube_current']
        I_bg =  sample['background']['intensity']/sample['background']['tube_current']

        #fit backgrounds
        p0 = np.polyfit(theta_I0bg,I0_bg,self.background_fit_order)
        p = np.polyfit(theta_Ibg,I_bg,self.background_fit_order)

        #compute mux
        mux = -np.log((I-np.polyval(p,theta))/(I0-np.polyval(p0,theta)))
        mux_error = np.sqrt((I0_err/I0)**2 + (I_err/I)**2)

        if x_scale == 'theta':
            return theta+self.theta_calibration, mux, mux_error
        else:
            return energy(theta+self.theta_calibration,*self.analyser), mux, mux_error
Code example #20
File: earth_orientation.py  Project: ARO-user/astropy
def _precess_from_J2000_Capitaine(epoch):
    """
    Computes the precession matrix from J2000 to the given Julian Epoch.
    Expression from Capitaine et al. 2003 as expressed in the USNO
    Circular 179.  This should match the IAU 2006 standard from SOFA.

    Parameters
    ----------
    epoch : scalar
        The epoch as a julian year number (e.g. J2000 is 2000.0)

    """
    from .angles import rotation_matrix

    T = (epoch - 2000.0) / 100.0
    # from USNO circular
    pzeta = (-0.0000003173, -0.000005971, 0.01801828, 0.2988499, 2306.083227, 2.650545)
    pz = (-0.0000002904, -0.000028596, 0.01826837, 1.0927348, 2306.077181, -2.650545)
    ptheta = (-0.0000001274, -0.000007089, -0.04182264, -0.4294934, 2004.191903, 0)
    zeta = np.polyval(pzeta, T) / 3600.0
    z = np.polyval(pz, T) / 3600.0
    theta = np.polyval(ptheta, T) / 3600.0

    return rotation_matrix(-z, 'z') *\
           rotation_matrix(theta, 'y') *\
           rotation_matrix(-zeta, 'z')
Code example #21
File: earth_orientation.py  Project: ARO-user/astropy
def _precession_matrix_besselian(epoch1, epoch2):
    """
    computes the precession matrix from one Besselian epoch to another using
    Newcomb's method.

    `epoch1` and `epoch2` are in besselian year numbers
    """
    from .angles import rotation_matrix

    # tropical years
    t1 = (epoch1 - 1850.0) / 1000.0
    t2 = (epoch2 - 1850.0) / 1000.0
    dt = t2 - t1

    zeta1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1
    zeta2 = 30.240 - 0.27 * t1
    zeta3 = 17.995
    pzeta = (zeta3, zeta2, zeta1, 0)
    zeta = np.polyval(pzeta, dt) / 3600

    z1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1
    z2 = 109.480 + 0.39 * t1
    z3 = 18.325
    pz = (z3, z2, z1, 0)
    z = np.polyval(pz, dt) / 3600

    theta1 = 20051.12 - 85.29 * t1 - 0.37 * t1 * t1
    theta2 = -42.65 - 0.37 * t1
    theta3 = -41.8
    ptheta = (theta3, theta2, theta1, 0)
    theta = np.polyval(ptheta, dt) / 3600

    return rotation_matrix(-z, 'z') *\
           rotation_matrix(theta, 'y') *\
           rotation_matrix(-zeta, 'z')
Code example #22
File: hrsorder.py  Project: EricDepagne/pyhrs
    def unravel_box(self, box):
        """Convert a rectangular representation of the spectra back to a single
           array

        Parameters
        ----------
        box: ~numpy.ndarray
            Rectangular representation of the flux

        Returns
        -------
        data: ~numpy.ndarray
            Array of flux values extracted from the rectangular representation
        """
        xmax = self.region[1].max()
        xmin = 0
        ymax = self.region[0].max()
        ymin = self.region[0].min()
        ys = ymax - ymin
        xs = xmax - xmin
        coef = np.polyfit(self.region[1], self.region[0], 3)
        xarr = np.arange(xs + 1)
        yarr = np.polyval(coef, xarr) - ymin
        x = self.region[1] - xmin
        y = self.region[0] - ymin - (np.polyval(coef, x) - ymin - yarr.min()).astype(int)
        data = box[y, x]
        return data
Code example #23
File: distributions.py  Project: Marata459/qutip
    def update(self, rho):
        """
        Calculate the probability function for the given state of an harmonic
        oscillator (as density matrix)
        """

        if isket(rho):
            rho = ket2dm(rho)

        self.data = np.zeros(len(self.xvecs[0]), dtype=complex)
        M, N = rho.shape

        for m in range(M):
            k_m = pow(self.omega / pi, 0.25) / \
                sqrt(2 ** m * factorial(m)) * \
                exp(-self.xvecs[0] ** 2 / 2.0) * \
                np.polyval(hermite(m), self.xvecs[0])

            for n in range(N):
                k_n = pow(self.omega / pi, 0.25) / \
                    sqrt(2 ** n * factorial(n)) * \
                    exp(-self.xvecs[0] ** 2 / 2.0) * \
                    np.polyval(hermite(n), self.xvecs[0])

                self.data += np.conjugate(k_n) * k_m * rho.data[m, n]
Code example #24
File: new2.py  Project: rabrahm/zaspe
def get_cont(x,y,n=1,sl=1.,sh=5.):
	orilen = len(x)
	coef = np.polyfit(x,y,n)
	res = y - np.polyval(coef,x)
	IH = np.where(res>0)[0]
	IL = np.where(res<0)[0]
	dev = np.mean(res[IH])
	I = np.where((res>-sl*dev) & (res<sh*dev))[0]
	J1 = np.where(res<=-sl*dev)[0]
	J2 = np.where(res>=sh*dev)[0]
	J = np.unique(np.hstack((J1,J2)))
	cond = True
	if len(J)==0 or len(x)< .3*orilen:
		cond=False
	while cond:
		x = np.delete(x,J)
		y = np.delete(y,J)
		coef = np.polyfit(x,y,n)
		res = y - np.polyval(coef,x)
		IH = np.where(res>0)[0]
		IL = np.where(res<0)[0]
		dev = np.mean(res[IH])
		I = np.where((res>-sl*dev) & (res<sh*dev))[0]
		J1 = np.where(res<=-sl*dev)[0]
		J2 = np.where(res>=sh*dev)[0]
		J = np.unique(np.hstack((J1,J2)))
		cond = True
		if len(J)==0 or len(x)< .1*orilen:
			cond=False
	return coef
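A usage sketch on a hypothetical spectrum (sloped continuum plus one absorption line); get_cont iteratively sigma-clips the line and returns the continuum polynomial coefficients:

import numpy as np

x = np.linspace(4000.0, 4100.0, 500)
y = 1e-3 * (x - 4050.0) + 2.0 - 0.5 * np.exp(-0.5 * ((x - 4050.0) / 0.5) ** 2)
coef = get_cont(x, y, n=1)
continuum = np.polyval(coef, x)   # line points were rejected from the fit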
Code example #25
File: new2.py  Project: rabrahm/zaspe
def get_ratio(sciw,rat,n=3):
	rat = scipy.signal.medfilt(rat,11)
	lori = len(sciw)
	coef = np.polyfit(sciw,rat,n)
	res = rat - np.polyval(coef,sciw)
	rms = np.sqrt(np.mean(res**2))
	I = np.where(res> 3*rms)[0]
	I2 = np.where(res< -3*rms)[0]
	I = np.sort(np.hstack((I,I2)))
	cond = True
	if len(I) == 0 or len(sciw) < .3 * lori:
		cond = False

	while cond:
		#imax = np.argmax(res**2)
		#sciw = np.delete(sciw,imax)
		#rat  = np.delete(rat,imax)
		sciw = np.delete(sciw,I)
		rat  = np.delete(rat,I)
		coef = np.polyfit(sciw,rat,n)
		res = rat - np.polyval(coef,sciw)
		rms = np.sqrt(np.mean(res**2))
		I = np.where(res> 3*rms)[0]
		I2 = np.where(res< -3*rms)[0]
		I = np.sort(np.hstack((I,I2)))
		if len(I) == 0 or len(sciw) < .3 * lori:
			cond = False

	return coef
Code example #26
File: misc.py  Project: xiefengy/GeoPy
def detrend(var, ax=None, lcopy=True, ldetrend=True, ltrend=False, degree=1, rcond=None, w=None,  
            lsmooth=False, lresidual=False, window_len=11, window='hanning'): 
  ''' subtract a (by default linear) trend from a time-series array; in-place only if lcopy=False '''
  # check input
  if not isinstance(var,np.ndarray): raise NotImplementedError # too many checks
  if lcopy: var = var.copy() # make copy - not in-place!
  # fit over entire array (usually not what we want...)
  if ax is None and ldetrend: ax = np.arange(var.size) # make dummy axis, if necessary
  if var.ndim != 1:
    shape = var.shape 
    var = var.ravel() # flatten array, if necessary
  else: shape = None
  # apply optional detrending
  if ldetrend or ltrend:
    # fit linear trend
    trend = np.polyfit(ax, var, deg=degree, rcond=rcond, w=w, full=False, cov=False)
    # evaluate and subtract linear trend
    if ldetrend and ltrend: raise ArgumentError("Can either return trend/polyfit or residuals, not both.")
    elif ldetrend and not ltrend: var -= np.polyval(trend, ax) # subtract the trend: residuals
    elif ltrend and not ldetrend: var = np.polyval(trend, ax) # return the fitted trend itself
  # apply optional smoothing
  if lsmooth and lresidual: raise ArgumentError("Can either return smoothed array or residuals, not both.")
  elif lsmooth: var = smooth(var, window_len=window_len, window=window)  
  elif lresidual: var -= smooth(var, window_len=window_len, window=window)
  # return detrended and/or smoothed time-series
  if shape is not None: var = var.reshape(shape)
  return var
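A usage sketch with defaults (ldetrend=True, no smoothing), on a hypothetical series with a linear trend plus an oscillation:

import numpy as np

t = np.arange(100, dtype=float)
series = 0.05 * t + np.sin(2 * np.pi * t / 20.0)
resid = detrend(series, ax=t)   # returns a copy with the degree-1 trend removed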
Code example #27
File: new2.py  Project: rabrahm/zaspe
def get_rats(ZO,ZI,ZF,pars):
	ords = []
	for i in range(sc.shape[1]):
		J1 = np.where(mw > sc[0,i,-1])[0]
		J2 = np.where(mw < sc[0,i,0])[0]
		if len(J1)>0 and len(J2)>0:
			ords.append(i)
	ords = np.array(ords)
	mf = get_full_model(pars[0],pars[1],pars[2],pars[3],RES_POW)
	tmodf = np.zeros((sc.shape[1],sc.shape[2]))
	tscif = np.zeros((sc.shape[1],sc.shape[2]))
	test_plot = np.zeros((4,sc.shape[1],sc.shape[2]))
	for i in ords:
		I = np.where((mw>sc[0,i,0]) & (mw<sc[0,i,-1]))[0]
		modw = mw[I]
		modf = mf[I]
		sciw = sc[0,i]
		scif = sc[3,i]/np.median(sc[3,i])
		modf = pixelization(modw,modf,sciw)
		#IMB = np.where(mask_bin[i]!=0)[0]
		#modf /= modf[IMB].mean()
		mscif = scipy.signal.medfilt(scif,11)
		rat = modf/mscif
		INF = np.where(mscif!=0)[0]
		coef = get_ratio(sciw[INF],rat[INF])
		scif = scif * np.polyval(coef,sciw)
		mscif = mscif * np.polyval(coef,sciw)
		coef = get_cont(sciw,mscif)
		scif = scif / np.polyval(coef,sciw)
		#plot(sciw,scif)
		coef = get_cont(sciw,modf)
		modf = modf / np.polyval(coef,sciw)
		#plot(sciw,modf)	
		tmodf[i] = modf
		tscif[i] = scif
		test_plot[0,i] = sc[0,i]
		test_plot[1,i] = scif
		test_plot[2,i] = modf
		test_plot[3,i] = mask_bin[i]
	#show()
	#print vcdx
	hdu = pyfits.PrimaryHDU(test_plot)
	os.system('rm example.fits')
	hdu.writeto('example.fits')
	rat = tscif/tmodf

	nejx = np.arange(100)/100.
	ratsout = []

	for i in range(len(ZI)):
		ejy = rat[ZO[i],ZI[i]:ZF[i]]
		ejx = np.arange(len(ejy))/float(len(ejy))
		tck = interpolate.splrep(ejx,ejy,k=3)
		if len(ratsout)==0:
			ratsout = interpolate.splev(nejx,tck)
		else:
			ratsout = np.vstack((ratsout,interpolate.splev(nejx,tck)))
			#plot(interpolate.splev(nejx,tck))
	#show()
	return ratsout
Code example #28
def rssmodelwave(grating,grang,artic,cbin,cols):
#   compute wavelengths from model (this can probably be done using pyraf spectrograph model)
    spec=np.loadtxt(datadir+"spec.txt",usecols=(1,))
    Grat0,Home0,ArtErr,T2Con,T3Con=spec[0:5]
    FCampoly=spec[5:11]
    grname=np.loadtxt(datadir+"gratings.txt",dtype=str,usecols=(0,))
    grlmm,grgam0=np.loadtxt(datadir+"gratings.txt",usecols=(1,2),unpack=True)

    grnum = np.where(grname==grating)[0][0]
    lmm = grlmm[grnum]
    alpha_r = np.radians(grang+Grat0)
    beta0_r = np.radians(artic*(1+ArtErr)+Home0)-alpha_r
    gam0_r = np.radians(grgam0[grnum])
    lam0 = 1e7*np.cos(gam0_r)*(np.sin(alpha_r) + np.sin(beta0_r))/lmm
    ww = lam0/1000. - 4.
    fcam = np.polyval(FCampoly,ww)
    disp = (1e7*np.cos(gam0_r)*np.cos(beta0_r)/lmm)/(fcam/.015)
    dfcam = 3.162*disp*np.polyval([FCampoly[x]*(5-x) for x in range(5)],ww)
    T2 = -0.25*(1e7*np.cos(gam0_r)*np.sin(beta0_r)/lmm)/(fcam/47.43)**2 + T2Con*disp*dfcam
    T3 = (-1./24.)*3162.*disp/(fcam/47.43)**2 + T3Con*disp
    T0 = lam0 + T2 
    T1 = 3162.*disp + 3*T3
    X = (np.array(range(cols))+1-cols/2)*cbin/3162.
    lam_X = T0+T1*X+T2*(2*X**2-1)+T3*(4*X**3-3*X)
    return lam_X
Code example #29
def dualPlot(age, meanWithin, meanBetween, title):

    fig, (within, between) = plt.subplots(1, 2, sharex=True, sharey=False)

    # linear fits to the age relationship
    wP = np.polyfit(age, meanWithin, 1)
    bP = np.polyfit(age, meanBetween, 1)
    xnew = np.arange(age.min() - 1, age.max() + 1, 0.1)
    wFit = np.polyval(wP, xnew)
    bFit = np.polyval(bP, xnew)

    within.set_title("within network")
    between.set_title("between network")

    withinCorr, withinP = st.pearsonr(age, meanWithin)
    within.plot(age, meanWithin, "k.")
    within.plot(xnew, wFit, "r", label=(str(np.round(withinCorr, 2)) + " " + str(np.round(withinP, 4))))
    within.set_xlabel("age")
    within.set_ylabel("mean within-network connectivity")
    within.legend()

    betweenCorr, betweenP = st.pearsonr(age, meanBetween)
    between.plot(age, meanBetween, "k.")
    between.plot(xnew, bFit, "b", label=(str(np.round(betweenCorr, 2)) + " " + str(np.round(betweenP, 4))))
    between.set_xlabel("age")
    between.set_ylabel("mean between-network connectivity")
    between.legend()

    fig.suptitle(title)
    plt.show()
    input("Press Enter to continue...")
    plt.close()
Code example #30
def eeval(expression, w):
	""" evaluate a sympy expression at omega. return magnitude, phase."""
	num, den = e2nd(expression)
	y = numpy.polyval(num, 1j*w) / numpy.polyval(den, 1j*w)
	phase = numpy.arctan2(y.imag, y.real) * 180.0 / numpy.pi
	mag = abs(y)
	return mag, phase
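eeval relies on a helper e2nd (not shown here) that turns a sympy expression into numerator/denominator coefficient lists; the stand-in below is a hypothetical sketch of that contract, followed by a usage example:

import numpy
import sympy

s = sympy.symbols('s')

def e2nd(expr):
    # hypothetical stand-in: rational expression -> (num, den) coefficient
    # lists, highest power first, as numpy.polyval expects
    num, den = sympy.fraction(sympy.together(expr))
    def _coeffs(p):
        return [float(c) for c in sympy.Poly(p, s).all_coeffs()]
    return _coeffs(num), _coeffs(den)

mag, phase = eeval(1 / (s + 1), 1.0)   # first-order low-pass at w = 1 rad/s
# mag is about 0.707 and phase about -45 degrees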
Code example #31
    def get_line_points(self):
        y = np.array(range(0, self.img_size[1]+1, 10), dtype=np.float32)/self.pixels_per_meter[1]
        x = np.polyval(self.poly_coeffs, y)*self.pixels_per_meter[0]
        y *= self.pixels_per_meter[1]
        return np.array([x, y], dtype=np.int32).T
Code example #32
File: LBFGS.py  Project: hoangminhle/Off-Policy
def polyinterp(points, x_min_bound=None, x_max_bound=None, plot=False):
    """
    Gives the minimizer and minimum of the interpolating polynomial over given points
    based on function and derivative information. Defaults to bisection if no critical
    points are valid.
    Based on polyinterp.m Matlab function in minFunc by Mark Schmidt with some slight
    modifications.
    Implemented by: Hao-Jun Michael Shi and Dheevatsa Mudigere
    Last edited 12/6/18.
    Inputs:
        points (nparray): two-dimensional array with each point of form [x f g]
        x_min_bound (float): minimum value that brackets minimum (default: minimum of points)
        x_max_bound (float): maximum value that brackets minimum (default: maximum of points)
        plot (bool): plot interpolating polynomial
    Outputs:
        x_sol (float): minimizer of interpolating polynomial
        F_min (float): minimum of interpolating polynomial
    Note:
      . Set f or g to np.nan if they are unknown
    """
    no_points = points.shape[0]
    order = np.sum(1 - np.isnan(points[:, 1:3]).astype('int')) - 1

    x_min = np.min(points[:, 0])
    x_max = np.max(points[:, 0])

    # compute bounds of interpolation area
    if x_min_bound is None:
        x_min_bound = x_min
    if x_max_bound is None:
        x_max_bound = x_max

    # explicit formula for quadratic interpolation
    if no_points == 2 and order == 2 and plot is False:
        # Solution to quadratic interpolation is given by:
        # a = -(f1 - f2 - g1(x1 - x2))/(x1 - x2)^2
        # x_min = x1 - g1/(2a)
        # if x1 = 0, then is given by:
        # x_min = - (g1*x2^2)/(2(f2 - f1 - g1*x2))

        if points[0, 0] == 0:
            x_sol = -points[0, 2] * points[1, 0]**2 / (
                2 *
                (points[1, 1] - points[0, 1] - points[0, 2] * points[1, 0]))
        else:
            a = -(points[0, 1] - points[1, 1] - points[0, 2] *
                  (points[0, 0] - points[1, 0])) / (points[0, 0] -
                                                    points[1, 0])**2
            x_sol = points[0, 0] - points[0, 2] / (2 * a)

        x_sol = np.minimum(np.maximum(x_min_bound, x_sol), x_max_bound)

    # explicit formula for cubic interpolation
    elif no_points == 2 and order == 3 and plot is False:
        # Solution to cubic interpolation is given by:
        # d1 = g1 + g2 - 3((f1 - f2)/(x1 - x2))
        # d2 = sqrt(d1^2 - g1*g2)
        # x_min = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2))
        d1 = points[0, 2] + points[1, 2] - 3 * ((points[0, 1] - points[1, 1]) /
                                                (points[0, 0] - points[1, 0]))
        d2 = np.sqrt(d1**2 - points[0, 2] * points[1, 2])
        if np.isreal(d2):
            x_sol = points[1, 0] - (points[1, 0] - points[0, 0]) * (
                (points[1, 2] + d2 - d1) /
                (points[1, 2] - points[0, 2] + 2 * d2))
            x_sol = np.minimum(np.maximum(x_min_bound, x_sol), x_max_bound)
        else:
            x_sol = (x_max_bound + x_min_bound) / 2

    # solve linear system
    else:
        # define linear constraints
        A = np.zeros((0, order + 1))
        b = np.zeros((0, 1))

        # add linear constraints on function values
        for i in range(no_points):
            if not np.isnan(points[i, 1]):
                constraint = np.zeros((1, order + 1))
                for j in range(order, -1, -1):
                    constraint[0, order - j] = points[i, 0]**j
                A = np.append(A, constraint, 0)
                b = np.append(b, points[i, 1])

        # add linear constraints on gradient values
        for i in range(no_points):
            if not np.isnan(points[i, 2]):
                constraint = np.zeros((1, order + 1))
                for j in range(order):
                    constraint[0,
                               j] = (order - j) * points[i, 0]**(order - j - 1)
                A = np.append(A, constraint, 0)
                b = np.append(b, points[i, 2])

        # check if system is solvable
        if A.shape[0] != A.shape[1] or np.linalg.matrix_rank(A) != A.shape[0]:
            x_sol = (x_min_bound + x_max_bound) / 2
            f_min = np.inf
        else:
            # solve linear system for interpolating polynomial
            coeff = np.linalg.solve(A, b)

            # compute critical points
            dcoeff = np.zeros(order)
            for i in range(len(coeff) - 1):
                dcoeff[i] = coeff[i] * (order - i)

            crit_pts = np.array([x_min_bound, x_max_bound])
            crit_pts = np.append(crit_pts, points[:, 0])

            if not np.isinf(dcoeff).any():
                roots = np.roots(dcoeff)
                crit_pts = np.append(crit_pts, roots)

            # test critical points
            f_min = np.inf
            x_sol = (x_min_bound + x_max_bound) / 2  # defaults to bisection
            for crit_pt in crit_pts:
                if np.isreal(
                        crit_pt
                ) and crit_pt >= x_min_bound and crit_pt <= x_max_bound:
                    F_cp = np.polyval(coeff, crit_pt)
                    if np.isreal(F_cp) and F_cp < f_min:
                        x_sol = np.real(crit_pt)
                        f_min = np.real(F_cp)

            if (plot):
                plt.figure()
                x = np.arange(x_min_bound, x_max_bound,
                              (x_max_bound - x_min_bound) / 10000)
                f = np.polyval(coeff, x)
                plt.plot(x, f)
                plt.plot(x_sol, f_min, 'x')

    return x_sol
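A usage sketch for the closed-form cubic branch: two points, each of the form [x, f(x), f'(x)]:

import numpy as np

points = np.array([[0.0, 1.0, -1.0],
                   [1.0, 0.5,  0.2]])
x_sol = polyinterp(points)   # approximate minimizer inside [0, 1]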
Code example #33
File: lmfit_src_hmap.py  Project: nkern/capo
    if src.get_jys()[0] < .01: continue
    uv = a.miriad.UV(filename)
    dat = []
    x,y,z = [],[],[]
    for (crd,t,bl),d,f in uv.all(raw=True):
        if f[ch]: continue
        aa.set_jultime(t)
        src.compute(aa)
        if src.alt < opts.altmin * a.img.deg2rad: continue
        xi,yi,zi = src.get_crds('top', ncrd=3)
        x.append(xi); y.append(yi); z.append(zi)
        dat.append(d[ch])
    sdat = n.abs(n.array(dat))
    x,y,z = n.array(x), n.array(y), n.array(z)
    poly = n.polyfit(x, sdat, deg=6)
    sig = n.sqrt(n.average((sdat - n.polyval(poly, x))**2))
    jys = src.get_jys()[0]
    src_dat[src_name] = (x,y,z,sdat,jys,sig)
    #prms[src_name] = pymc.Normal(src_name, jys, 1./sig**2)
    prms[src_name] = jys
    #print src_name, src_dat[src_name][-2:]

#@pymc.deterministic
def infer_hmap(arg=prms):
    alms = a.healpix.Alm(opts.lmax,opts.lmax)
    _hmapr = a.healpix.HealpixMap(hmap.nside(), dtype=n.float)
    _hmapi = a.healpix.HealpixMap(hmap.nside(), dtype=n.float)
    _alm = a.healpix.Alm(opts.lmax,opts.lmax)
    for L,M in zip(*alms.lm_indices()):
        _alm.set_to_zero()
        _alm[L,M] = 1.0
Code example #34
File: imageprocess.py  Project: junruli/odysseus
def normalize_img(img, com, size):
    """Mask off the atoms, then fit linear slopes to the image and normalize

    We assume that there are no atoms left outside 1.5 times the size. This
    seems to be a reasonable assumption, it does not influence the result of
    the normalization.

    **Inputs**

      * img: 2D array, containing the image
      * com: tuple, center of mass coordinates
      * size: float, radial size of the cloud

    **Outputs**

      * normimg: 2D array, the normalized image

    """

    xmax, ymax = img.shape

    # create mask
    x_ind1 = round(com[0] - 1.5 * size)
    x_ind2 = round(com[0] + 1.5 * size)
    y_ind1 = round(com[1] - 1.5 * size)
    y_ind2 = round(com[1] + 1.5 * size)

    # fit first order polynomial along x and y (do not use quadratic terms!!)
    if x_ind1 > 0 and x_ind2 < xmax and y_ind1 > 0 and y_ind2 < ymax:
        normx = np.zeros(x_ind1 + xmax - x_ind2, dtype=float)
        xx = np.ones(normx.shape)
        xx[:x_ind1] = np.arange(x_ind1)
        xx[x_ind1:] = np.arange(x_ind2, xmax)
        normx[:x_ind1] = img[:x_ind1, :].mean(axis=1)
        normx[x_ind1:] = img[x_ind2:, :].mean(axis=1)

        # fit normx vs xx
        fitline = np.polyfit(xx, normx, 1)

        divx = np.ones(img.shape, dtype=float).transpose()*\
             np.polyval(fitline, np.arange(img.shape[0])).transpose()
        normimg = img / divx.transpose()

        normy = np.zeros(y_ind1 + ymax - y_ind2, dtype=float)
        yy = np.ones(normy.shape)
        yy[:y_ind1] = np.arange(y_ind1)
        yy[y_ind1:] = np.arange(y_ind2, ymax)
        normy[:y_ind1] = normimg[:, :y_ind1].mean(axis=0)
        normy[y_ind1:] = normimg[:, y_ind2:].mean(axis=0)

        # fit normx vs yy
        fitline = np.polyfit(yy, normy, 1)

        divy = np.ones(normimg.shape, dtype=float)*\
             np.polyval(fitline, np.arange(normimg.shape[1]))
        normimg = normimg / divy
    else:
        print("atom cloud extends to the edge of the image, can't normalize")
        raise NotImplementedError

    return normimg
Code example #35
File: ps4.py  Project: shivasbasnet/Mooc
    pylab.title("Model is" + "\n" + "r^2 = " + str(r_square))
    pylab.xlabel("Year")
    pylab.ylabel("Temperature")
    pylab.legend()


### Begining of program
raw_data = Climate('data.csv')

## Problem 3
y = []
x = INTERVAL_1
for year in INTERVAL_1:
    y.append(raw_data.get_daily_temp('BOSTON', 1, 10, year))

models = generate_models(x, y, [1])
evaluate_models_on_training(x, y, models)

estimated = np.polyval(models[0], x)
r_square = r_squared(y, estimated)
print(r_square)

## Problem 4: FILL IN MISSING CODE TO GENERATE y VALUES
x1 = INTERVAL_1
x2 = INTERVAL_2
y = []
for year in INTERVAL_1:
    y.append(np.mean(raw_data.get_yearly_temp('BOSTON', year)))
models = generate_models(x, y, [1])
evaluate_models_on_training(x, y, models)
Code example #36
#dax['PCA_2'] = np.dot(pca_components, weights)  # compute the weighted average

# run the analysis with five principal components
pca = KernelPCA(n_components=5).fit(data.apply(scale_function))
pca_components = pca.transform(data)
weights = get_we(pca.lambdas_)  # normalize the weights of the five principal components
dax['PCA_5'] = np.dot(pca_components, weights)  # compute the weighted average

#dax.apply(scale_function).plot(figsize=(8, 4))


mpl_dates = mpl.dates.date2num(data.index.to_pydatetime())

plt.figure(figsize=(8,4))
plt.scatter(dax['PCA_5'], dax['^GDAXI'], c=mpl_dates, s=np.repeat(6, len(dax)))
lin_reg = np.polyval(np.polyfit(dax['PCA_5'], dax['^GDAXI'], 1), dax['PCA_5'])
plt.plot(dax['PCA_5'], lin_reg, 'r', lw=3)
plt.grid(True)
plt.xlabel('PCA_5')
plt.ylabel('^GDAXI')
plt.colorbar(ticks=mpl.dates.DayLocator(interval=250),
             format=mpl.dates.DateFormatter('%d %b %y'))

# interpret each connected line segment as an interval that a single regression model can fit well

cut_date = '2011/7/1'
early_pca = dax[dax.index < cut_date]['PCA_5']
print(len(early_pca))
print(len(dax['^GDAXI'][dax.index < cut_date]))
early_reg = np.polyval(np.polyfit(early_pca, dax['^GDAXI'][dax.index < cut_date], 1), early_pca)
Code example #37
def compute_profile(wl, img, line_wl, line_width=5, n_iter=15, polyorder=5,
                    bad_rows=None, debug=False,
                    op=numpy.sum):

    y,x = numpy.indices(wl.shape)

    #print wl.shape, img.shape

    #fits.PrimaryHDU(data=x).writeto("fiddle_x.fits", clobber=True)
    #fits.PrimaryHDU(data=y).writeto("fiddle_y.fits", clobber=True)

    logger = logging.getLogger("ComputeProfile")
    logger.debug("Using measurement operation: %s" % (str(op)))

    if (debug):
        region_select = img.copy()
        full_select = (wl >= line_wl-line_width) & (wl <= line_wl+line_width)
        region_select[bad_rows.reshape((-1,1))] = numpy.nan
        region_select[~full_select] = numpy.nan
        fits.PrimaryHDU(data=region_select).writeto(
            "slitflat_data_%.f.fits" % (line_wl), clobber=True,
        )


    if (bad_rows is not None):
        wl = wl[~bad_rows, :]
        img = img[~bad_rows, :]
        y = y[~bad_rows, :]
        x = x[~bad_rows, :]

    part_of_line = (wl >= line_wl-line_width) & (wl <= line_wl+line_width)

    # if (debug):
    #     full_select = part_of_line[~bad_rows]
    #     region_select[bad_rows] = numpy.NaN
    #     region_select[~bad_rows][~part_of_line] = numpy.NaN
    #
    cut_wl = wl[part_of_line]
    cut_y = y[part_of_line]
    cut_img = img[part_of_line]

    if (numpy.isnan(cut_img).any()):
        # there are invalid pixels in this bin - be safe and not use this data at all
        logger.debug("found at least one pixel with NaN - skipping this data set")
        return None, None

    #print cut_wl.shape, cut_y.shape, cut_img.shape

    combined = numpy.empty((cut_wl.shape[0], 4))
    combined[:,0] = cut_wl
    combined[:,1] = x[part_of_line]
    combined[:,2] = cut_y
    combined[:,3] = cut_img

    if (debug):
        out_fn = "fiddle_slitflat.comb.%7.2f-%.2f" % (line_wl, line_width)
        numpy.savetxt(out_fn, combined)
        numpy.save(out_fn, combined)
        #print out_fn

    # print "WL shape:", wl.shape

    output = numpy.empty((wl.shape[0],20))
    output[:,:] = numpy.nan

    # print y
    # print y[:,0]

    for iy in range(wl.shape[0]):
        #if (bad_rows is not None and bad_rows[iy]):
        #    continue

        wl_match = (wl[iy,:] >= line_wl-line_width) & (wl[iy,:] <= line_wl+line_width)

        # find line intensity
        flux = op(img[iy, wl_match])
        output[iy,0] = y[iy,0] #iy
        output[iy,1] = flux

    #
    # run a median filter to get rid of individual hot pixels
    #
    logger.debug("median-filtering the line profile to eliminate hot pixels and cosmics")
    fm = scipy.ndimage.median_filter(output[:,1], size=25, mode='constant', cval=0)
    output[:,2] = fm
    _median = numpy.nanmedian(output[:,1])
    # output[:,1][bad_rows] = _median
    # fm = scipy.ndimage.median_filter(output[:,1], size=25, mode='constant', cval=0)
    # output[:,2] = fm

    # print "output shape:", output.shape

    valid = numpy.isfinite(fm)
    fm[~valid] = -1e99 # this is to prevent runtimewarnings about invalid values encountered
    median_intensity = numpy.median(fm[valid])
    valid &= (fm > 0.3*median_intensity)

    # print "valid", valid.shape

    if (debug):
        out_fn = "fiddle_slitflat.comb2.%7.2f-%.2f" % (line_wl, line_width)
        # print out_fn
        with open(out_fn, "w") as f:
            numpy.savetxt(f, output)
            f.write("\n" * 5)
        #print out_fn2

    # print output[:,0][valid]
    # print output[:,2][valid]
    # print "fm:", fm.shape

    fit, poly = None, None
    for iteration in range(n_iter):

        try:
            poly = numpy.polyfit(
                x=output[:,0][valid],
                y=output[:,2][valid],
                deg=polyorder,
            )
        except Exception as e:
            print(e)
            break

        fit = numpy.polyval(poly, output[:,0])
        residual = fm - fit
        _perc = numpy.nanpercentile(residual[valid], [16,84,50])
        _med = _perc[2]
        _sigma = 0.5*(_perc[1] - _perc[0])
        #print _sigma, _perc

        bad = (residual > 3*_sigma) | (residual < -3*_sigma)
        valid[bad] = False

        output[:,iteration+3] = fit

    norm_line = int(0.5*output.shape[0])
    output[:,n_iter+3] = output[:,n_iter+2] / output[norm_line,n_iter+2]

    if (debug):
        out_fn = "fiddle_slitflat.comb2.%7.2f-%.2f" % (line_wl, line_width)
        with open(out_fn, "w") as f:
            numpy.savetxt(f, output)
            f.write("\n" * 5)
        #print out_fn2

    # print "done with fitting one wl profile"
    return fit, poly
Code example #38
File: solve.py  Project: nikolai-piskunov/SME
def synthesize_spectrum(
    sme,
    segments="all",
    passLineList=True,
    passAtmosphere=True,
    passNLTE=True,
    reuse_wavelength_grid=False,
):
    """
    Calculate the synthetic spectrum based on the parameters passed in the SME structure
    The wavelength range of each segment is set in sme.wran
    The specific wavelength grid is given by sme.wave, or is generated on the fly if sme.wave is None

    Will try to fit radial velocity RV and continuum to observed spectrum, depending on vrad_flag and cscale_flag

    Other important fields:
    sme.iptype: instrument broadening type

    Parameters
    ----------
    sme : SME_Struct
        sme structure, with all necessary parameters for the calculation
    passLineList : bool, optional
        whether to pass the linelist to the c library (default: True)
    passAtmosphere : bool, optional
        whether to pass the atmosphere to the c library (default: True)
    passNLTE : bool, optional
        whether to pass NLTE departure coefficients to the c library (default: True)
    reuse_wavelength_grid : bool, optional
        whether to use sme.wint as the output grid of the function or create a new grid (default: False)

    Returns
    -------
    sme : SME_Struct
        same sme structure with synthetic spectrum in sme.smod
    """

    # Define constants
    n_segments = sme.nseg
    nmu = sme.nmu
    cscale_degree = sme.cscale_degree

    # fix impossible input
    if "spec" not in sme:
        sme.vrad_flag = "none"
        sme.cscale_flag = "none"
    if "wint" not in sme:
        reuse_wavelength_grid = False

    if segments == "all":
        segments = range(n_segments)
    else:
        segments = np.atleast_1d(segments)
        if np.any(segments < 0) or np.any(segments >= n_segments):
            raise IndexError("Segment(s) out of range")

    # Prepare arrays
    wran = sme.wran

    wint = [None for _ in range(n_segments)]
    sint = [None for _ in range(n_segments)]
    cint = [None for _ in range(n_segments)]
    vrad = np.zeros(n_segments)

    cscale = np.zeros((n_segments, cscale_degree + 1))
    cscale[:, -1] = 1
    wave = [None for _ in range(n_segments)]
    smod = [[] for _ in range(n_segments)]
    wmod = [[] for _ in range(n_segments)]
    wind = np.zeros(n_segments + 1, dtype=int)

    # If wavelengths are already defined use those as output
    if "wave" in sme:
        wave = [w for w in sme.wave]
        wind = [0, *np.diff(sme.wind)]

    # Input Model data to C library
    if passLineList and not reuse_wavelength_grid:
        dll.SetLibraryPath()
        dll.InputLineList(sme.linelist)
    if passAtmosphere:
        sme = get_atmosphere(sme)
        dll.InputModel(sme.teff, sme.logg, sme.vmic, sme.atmo)
        dll.InputAbund(sme.abund)
        dll.Ionization(0)
        dll.SetVWscale(sme.gam6)
        dll.SetH2broad(sme.h2broad)
    if passNLTE:
        update_nlte_coefficients(sme, dll)

    # Loop over segments
    #   Input Wavelength range and Opacity
    #   Calculate spectral synthesis for each
    #   Interpolate onto geomspaced wavelength grid
    #   Apply instrumental and turbulence broadening
    for il in segments:
        logging.debug("Segment %i", il)
        # Input Wavelength range and Opacity
        vrad_seg = sme.vrad[il]
        wbeg, wend = get_wavelengthrange(wran[il], vrad_seg, sme.vsini)

        dll.InputWaveRange(wbeg, wend)
        dll.Opacity()

        # Reuse adaptive wavelength grid in the jacobians
        wint_seg = sme.wint[il] if reuse_wavelength_grid else None
        # Only calculate line opacities in the first segment
        keep_line_opacity = il != segments[0]
        #   Calculate spectral synthesis for each
        _, wint[il], sint[il], cint[il] = dll.Transf(
            sme.mu,
            sme.accrt,  # threshold line opacity / cont opacity
            sme.accwi,
            keep_lineop=keep_line_opacity,
            wave=wint_seg,
        )

        # Create new geomspaced wavelength grid, to be used for intermediary steps
        wgrid, vstep = new_wavelength_grid(wint[il])

        # Continuum
        # rtint = Radiative Transfer Integration
        cont_flux = integrate_flux(sme.mu, cint[il], 1, 0, 0)
        cont_flux = np.interp(wgrid, wint[il], cont_flux)

        # Broaden Spectrum
        y_integrated = np.empty((nmu, len(wgrid)))
        for imu in range(nmu):
            y_integrated[imu] = np.interp(wgrid, wint[il], sint[il][imu])

        # Turbulence broadening
        # Apply macroturbulent and rotational broadening while integrating intensities
        # over the stellar disk to produce flux spectrum Y.
        flux = integrate_flux(sme.mu, y_integrated, vstep, sme.vsini, sme.vmac)
        # instrument broadening
        if "iptype" in sme:
            ipres = sme.ipres if np.size(sme.ipres) == 1 else sme.ipres[il]
            flux = broadening.apply_broadening(
                ipres, wgrid, flux, type=sme.iptype, sme=sme
            )

        # Divide calculated spectrum by continuum
        if sme.cscale_flag != "fix":
            flux /= cont_flux
        smod[il] = flux
        wmod[il] = wgrid

        # Create a wavelength array if it doesn't exist
        if "wave" not in sme or len(sme.wave[il]) == 0:
            # trim padding
            wbeg, wend = wran[il]
            itrim = (wgrid > wbeg) & (wgrid < wend)
            # Force endpoints == wavelength range
            wave[il] = np.concatenate(([wbeg], wgrid[itrim], [wend]))
            wind[il + 1] = len(wave[il])

    # Fit continuum and radial velocity
    # And interpolate the flux onto the wavelength grid
    cscale, vrad = match_rv_continuum(sme, segments, wmod, smod)
    logging.debug("Radial velocity: %s", str(vrad))
    logging.debug("Continuum coefficients: %s", str(cscale))

    for il in segments:
        if vrad[il] is not None:
            rv_factor = np.sqrt((1 + vrad[il] / clight) / (1 - vrad[il] / clight))
            wmod[il] *= rv_factor
        smod[il] = safe_interpolation(wmod[il], smod[il], wave[il])

        if cscale[il] is not None and not np.all(cscale[il] == 0):
            x = wave[il] - wave[il][0]
            smod[il] *= np.polyval(cscale[il], x)

    # Merge all segments
    # if sme already has a wavelength this should be the same

    sme.wind = wind = np.cumsum(wind)
    sme.wint = wint

    if "wave" not in sme:
        npoints = sum([len(wave[s]) for s in segments])
        sme.wave = np.zeros(npoints)
    if "synth" not in sme:
        sme.smod = np.zeros_like(sme.wob)

    for s in segments:
        sme.wave[s] = wave[s]
        sme.synth[s] = smod[s]

    if sme.cscale_flag != "fix":
        for s in segments:
            sme.cscale[s] = cscale[s]

    sme.vrad = np.asarray(vrad)
    sme.nlte.flags = dll.GetNLTEflags()

    return sme
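
The loop above applies the relativistic Doppler factor sqrt((1 + v/c) / (1 - v/c)) to shift the model wavelengths by the fitted radial velocity. A minimal standalone sketch of that step, assuming clight is the speed of light in km/s to match the km/s radial velocities used here:

import numpy as np

clight = 299792.458                   # speed of light [km/s]
vrad = 30.0                           # example radial velocity [km/s]
wmod = np.array([5000.0, 6000.0])     # model (rest-frame) wavelengths

# relativistic Doppler shift onto the observed frame
rv_factor = np.sqrt((1 + vrad / clight) / (1 - vrad / clight))
print(wmod * rv_factor)               # ~[5000.5, 6000.6]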
コード例 #39
0
# assumed setup for this excerpt (not in the original): a subplot grid
# large enough to index ax[1, 1] and ax[2, 0]
import numpy as np
import scipy.special
import matplotlib.pyplot as plt

fig, ax = plt.subplots(3, 2)

# Airy function
x = np.linspace(-15, 4, 256)
ai, aip, bi, bip = scipy.special.airy(x)
ax[1, 1].plot(x, ai, color='black')
ax[1, 1].plot(x, bi, color='black', dashes=(5, 2))
ax[1, 1].axhline(color="grey", ls="--", zorder=-1)
ax[1, 1].axvline(color="grey", ls="--", zorder=-1)
ax[1, 1].set_xlim(-15, 4)
ax[1, 1].set_ylim(-0.5, 0.6)
ax[1, 1].text(0.5, 0.95, 'Airy', ha='center',
              va='top', transform=ax[1, 1].transAxes)

# Legendre polynomials
x = np.linspace(-1, 1, 256)
lp0 = np.polyval(scipy.special.legendre(0), x)
lp1 = np.polyval(scipy.special.legendre(1), x)
lp2 = scipy.special.eval_legendre(2, x)
lp3 = scipy.special.eval_legendre(3, x)
ax[2, 0].plot(x, lp0, color='black')
ax[2, 0].plot(x, lp1, color='black', dashes=(5, 2))
ax[2, 0].plot(x, lp2, color='black', dashes=(3, 2))
ax[2, 0].plot(x, lp3, color='black', dashes=(1, 2))
ax[2, 0].axhline(color="grey", ls="--", zorder=-1)
ax[2, 0].axvline(color="grey", ls="--", zorder=-1)
ax[2, 0].set_ylim(-1, 1.1)
ax[2, 0].text(0.5, 0.9, 'Legendre', ha='center',
              va='top', transform=ax[2, 0].transAxes)

# Laguerre polynomials
x = np.linspace(-5, 8, 256)
コード例 #40
0
def get_center_shift(coeffs, img_size, pixels_per_meter):
    return np.polyval(coeffs, img_size[1]/pixels_per_meter[1]) - (img_size[0]//2)/pixels_per_meter[0]
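
A hedged usage sketch for get_center_shift; every value below is an illustrative assumption. The coefficients map the vertical coordinate (in meters) to the lane-center x position, and the shift is measured against the horizontal midpoint of the image:

import numpy as np

img_size = (1280, 720)              # assumed (width, height) ordering
pixels_per_meter = (100.0, 50.0)    # assumed (x, y) scale
coeffs = np.array([0.0, 0.0, 9.9])  # straight lane centered at x = 9.9 m

shift = get_center_shift(coeffs, img_size, pixels_per_meter)
print(shift)  # 9.9 - (1280 // 2) / 100.0 = 3.5 m right of image center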
コード例 #41
0
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from scipy.interpolate import interp1d

#%% First function: polynomial fits with monomials

x = np.linspace(-1, 1, num=20, endpoint=True)  # widening the range shows the behaviour of this function more clearly
y = np.exp(1/x)

# First part. Least-squares fits with monomials of increasing degree:

pol3 = np.polyfit(x, y, 3)
val3 = np.polyval(pol3, x) 

pol5 = np.polyfit(x, y, 5)
val5 = np.polyval(pol5, x)

pol10 = np.polyfit(x, y, 10)
val10 = np.polyval(pol10, x)

plt.plot(x, y, label = 'Original function')
plt.plot(x, val3,'-', label = 'Interpolation order 3')
plt.plot(x, val5,'--', label = 'Interpolation order 5')
plt.plot(x, val10,':', label = 'Interpolation order 10')
plt.legend(loc = 'upper left')
plt.xlim(xmin = -1, xmax = 1)
plt.title('Monomial interpolations - Eq. 1', size=15)
plt.ylabel('f(x)', size=10)
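
One caveat worth attaching to this example: exp(1/x) has an essential singularity at x = 0, so monomial fits of increasing degree do not converge to it, and np.polyfit will typically emit a RankWarning by degree 10. A sketch of the better-conditioned numpy.polynomial API, which rescales the sample domain before solving the least-squares problem:

import numpy as np
from numpy.polynomial import Polynomial

x = np.linspace(-1, 1, num=20, endpoint=True)
y = np.exp(1/x)

# Polynomial.fit maps x into a reference window internally, which keeps
# the Vandermonde matrix better conditioned at higher degrees
p10 = Polynomial.fit(x, y, deg=10)
val10 = p10(x)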
コード例 #42
0
"""
@author: zhouning
@file: test_numpy_fit.py
@time: 2018/7/11 17:16
@desc: Polynomial fitting example
"""

import matplotlib.pyplot as plt
import numpy as np

x = np.arange(1, 17, 1)
y = np.array([
    3.10323, 3.20646, 3.30969, 5.41292, 3.51615, 3.61938, 3.72261, 3.82584,
    3.92907, 4.0323, 4.13553, 4.23876, 4.34199, 4.44522, 4.54845, 4.65168
])
z1 = np.polyfit(x, y, 2)  # fit with a 2nd-degree polynomial

p1 = np.poly1d(z1)

print(p1)  # print the fitted polynomial

yvals = np.polyval(z1, x)  # equivalently: yvals = p1(x)
print(type(yvals))
plot1 = plt.plot(x, y, '*', label='original values')
plot2 = plt.plot(x, yvals, 'r', label='polyfit values')
plt.xlabel('x axis')
plt.ylabel('y axis')
plt.legend(loc=4)  # loc=4 places the legend in the lower right
plt.title('polyfitting')
plt.savefig('p1.png')  # save before show(), which clears the current figure
plt.show()
コード例 #43
0
# assumed imports for this excerpt: the vector helpers are taken to come
# from spatialmath.base, as in the Robotics Toolbox for Python
from collections import namedtuple

import numpy as np
from spatialmath.base import getvector, isvector


def tpoly(q0, qf, t, qd0=0, qdf=0):
    """
    Generate scalar polynomial trajectory

    :param q0: initial value
    :type q0: float
    :param qf: final value
    :type qf: float
    :param t: time
    :type t: float or array_like
    :param qd0: initial velocity, optional
    :type qd0: float
    :param qdf: final velocity, optional
    :type qdf: float
    :return: trajectory
    :rtype: namedtuple

    - ``tg = tpoly(q0, qf, t)`` is a scalar trajectory (Mx1) that varies smoothly
      from ``q0`` to ``qf`` using a quintic polynomial.  The initial and final
      velocity and acceleration are zero. Time ``t`` can be either:

        * an integer scalar, indicating the total number of timesteps

            - Velocity is in units of distance per trajectory step, not per
              second.
            - Acceleration is in units of distance per trajectory step squared,
              *not* per second squared. 

        * an array_like, containing the time steps. 
        
            - Results are scaled to units of time.

    - ``tg = tpoly(q0, qf, t, qd0, qdf)`` as above but specify the initial and
      final velocity. The initial and final acceleration are zero.

    The return value is a namedtuple (named ``tpoly``) with elements:

        - ``x``  the time coordinate as a numpy ndarray, shape=(M,)
        - ``y``  the position as a numpy ndarray, shape=(M,)
        - ``yd``  the velocity as a numpy ndarray, shape=(M,)
        - ``ydd``  the acceleration as a numpy ndarray, shape=(M,)

    Notes:

        - The time vector T is assumed to be monotonically increasing, and
          time scaling is based on the first and last element.

    References:
    
    - Robotics, Vision & Control, Chap 3,
      P. Corke, Springer 2011.

    :seealso: :func:`lspb`, :func:`t1plot`, :func:`jtraj`.
    """

    if isinstance(t, int):
        t = np.arange(0, t)
        istime = False
    elif isvector(t):
        t = getvector(t)
        istime = True
    else:
        raise TypeError('bad argument for time, must be int or vector')
                
    tf = max(t)
    # solve for the polynomial coefficients using least squares
    X = [
            [0,             0,           0,           0,       0,   1],
            [tf ** 5,       tf ** 4,     tf ** 3,     tf ** 2, tf,  1],
            [0,             0,           0,           0,       1,   0],
            [5 * tf ** 4,   4 * tf ** 3, 3 * tf ** 2, 2 * tf,  1,   0],
            [0,             0,           0,           2,       0,   0],
            [20 * tf ** 3, 12 * tf ** 2, 6 * tf,      2,       0,   0]
    ]
    coeffs, resid, rank, s =  np.linalg.lstsq(X, np.r_[q0, qf, qd0, qdf, 0, 0], rcond=None)

    # coefficients of derivatives 
    coeffs_d = coeffs[0:5] * np.arange(5, 0, -1)
    coeffs_dd = coeffs_d[0:4] * np.arange(4, 0, -1)

    # evaluate the polynomials
    p = np.polyval(coeffs, t)
    pd = np.polyval(coeffs_d, t)
    pdd = np.polyval(coeffs_dd, t)

    return namedtuple('tpoly', 'x y yd ydd istime')(t, p, pd, pdd, istime)
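
A short usage sketch for tpoly, assuming the imports added at the top of this example are available:

import numpy as np

# integer t: 50 trajectory steps from q0 = 0 to qf = 1
tg = tpoly(0.0, 1.0, 50)
print(tg.y[0], tg.y[-1])    # ~0.0 and ~1.0: boundary positions are met
print(tg.yd[0], tg.yd[-1])  # ~0.0 and ~0.0: zero boundary velocities

# array_like t: results are scaled to the supplied time units
tg = tpoly(0.0, 1.0, np.linspace(0, 2, 100))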
コード例 #44
0
def cdf_to_nc(cdf_filename, atmpres=False):
    """
    Load a "raw" .cdf file and generate a processed .nc file
    """

    # Load raw .cdf data
    ds = xr.open_dataset(cdf_filename)

    # definition of PAR is
    # PAR = Im * 10 ^ ((x-a0)/a1)
    # Where
    # Im is the immersion coefficient
    # a1 is the scaling factor
    # a0 is the voltage offset, typically 0
    # x is the voltage
    # The manufacturer calculates PAR in units of μmol photons/m2/s1
    # from Sea-Bird Scientific, ECO PAR User Manual
    # Document No. par170706, 2017-07-06, Version B
    # https://www.seabird.com/asset-get.download.jsa?id=54627862518

    if "par" in ds.attrs["INST_TYPE"].lower():
        ds["PAR_905"] = ds.attrs["Im"] * 10**(
            (ds["counts"].mean(dim="sample") - ds.attrs["a0"]) /
            ds.attrs["a1"])
        ds["PAR_905"].attrs["units"] = "umol m-2 s-1"
        ds["PAR_905"].attrs[
            "long_name"] = "Photosynthetically active " "radiation"

    if "ntu" in ds.attrs["INST_TYPE"].lower():
        if "user_ntucal_coeffs" in ds.attrs:
            ds["Turb"] = xr.DataArray(
                np.polyval(ds.attrs["user_ntucal_coeffs"], ds["counts"]),
                dims=["time", "sample"],
            ).mean(dim="sample")
            ds["Turb"].attrs["units"] = "1"
            ds["Turb"].attrs["long_name"] = "Turbidity (NTU)"
            ds["Turb"].attrs["standard_name"] = "sea_water_turbidity"
            ds["Turb_std"] = xr.DataArray(
                np.polyval(ds.attrs["user_ntucal_coeffs"], ds["counts"]),
                dims=["time", "sample"],
            ).std(dim="sample")
            ds["Turb_std"].attrs["units"] = "1"
            ds["Turb_std"].attrs[
                "long_name"] = "Turbidity burst standard deviation (NTU)"
            ds["Turb_std"].attrs["standard_name"] = "sea_water_turbidity"
            ds["Turb_std"].attrs["cell_methods"] = "time: standard_deviation"

    ds = ds.drop(["counts", "sample"])

    # Clip data to in/out water times or via good_ens
    ds = utils.clip_ds(ds)

    ds = eco_qaqc(ds)

    # assign min/max:
    ds = utils.add_min_max(ds)

    ds = utils.add_start_stop_time(ds)

    ds = utils.add_delta_t(ds)

    # add lat/lon coordinates
    ds = utils.ds_add_lat_lon(ds)

    ds = ds_add_attrs(ds)

    ds = utils.create_z(ds)

    # add lat/lon coordinates to each variable
    for var in ds.variables:
        if (var not in ds.coords) and ("time" not in var):
            # ds = utils.add_lat_lon(ds, var)
            # ds = utils.no_p_add_depth(ds, var)
            ds = utils.add_z_if_no_pressure(ds, var)
            # cast as float32
            # ds = utils.set_var_dtype(ds, var)

    # Write to .nc file
    print("Writing cleaned/trimmed data to .nc file")
    nc_filename = ds.attrs["filename"] + "-a.nc"

    ds.to_netcdf(nc_filename,
                 unlimited_dims=["time"],
                 encoding={"time": {
                     "dtype": "i4"
                 }})
    utils.check_compliance(nc_filename)
    print("Done writing netCDF file", nc_filename)
コード例 #45
0
# assumed imports for this excerpt (mp/md aliases as used below)
import numpy as np
import matplotlib.pyplot as mp
import matplotlib.dates as md

# set up the major/minor tick locators and the date formatter
ax = mp.gca()
ax.xaxis.set_major_locator(md.WeekdayLocator(byweekday=md.MO))
ax.xaxis.set_major_formatter(md.DateFormatter('%Y/%m/%d'))
ax.xaxis.set_minor_locator(md.DayLocator())
# dates = dates.astype(md.datetime.datetime)
mp.gcf().autofmt_xdate()

# compute the price difference and plot it
diff_prices = bhp_closing_prices - vale_closing_prices
mp.plot(dates,
        diff_prices,
        color='dodgerblue',
        label='diff price',
        linewidth=1,
        linestyle='--')

# fit a polynomial to the difference data and draw the fitted curve
x = dates.astype('M8[D]').astype('int32')
y = diff_prices
P = np.polyfit(x, y, 4)
predict_y = np.polyval(P, x)
mp.plot(dates, predict_y, color='orangered', label='predict', linewidth=1)

# stationary points of the fit: roots of the derivative polynomial
Q = np.polyder(P)
xs = np.roots(Q)
ys = np.polyval(P, xs)
mp.hlines(ys, dates[0], dates[-1], zorder=0.5, color='green', alpha=0.3)

mp.legend()
mp.show()
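
The polyder/roots step above locates the stationary points of the fitted polynomial analytically. A self-contained sketch with a cubic whose extrema can be checked by hand:

import numpy as np

P = np.array([1.0, 0.0, -3.0, 0.0])  # f(x) = x**3 - 3x
Q = np.polyder(P)                    # f'(x) = 3x**2 - 3
xs = np.roots(Q)                     # stationary points at x = +/-1
ys = np.polyval(P, xs)               # f(1) = -2, f(-1) = 2
print(xs, ys)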
コード例 #46
0
def main():

    import argparse
    import matplotlib.pyplot as plt
    import numpy
    import os
    import scipy.optimize as optimize
    from astropy.io import fits as pyfits

    # Parsing arguments -------------------------------------------------------
    parser = argparse.ArgumentParser(description="Fits an existing phase-map.")

    parser.add_argument('filename', type=str, help="Input phase-map name.")

    parser.add_argument('-i',
                        '--interactions',
                        default=5,
                        type=int,
                        help="Number of interactions in the process [5]")

    parser.add_argument('-n',
                        '--npoints',
                        default=2500,
                        type=int,
                        help="Number of points that will be used to fit" +
                        "the phase-map [50]")

    parser.add_argument('-o',
                        '--output',
                        type=str,
                        default=None,
                        help="Name of the output phase-map file.")

    parser.add_argument('-q',
                        '--quiet',
                        action='store_true',
                        help="Run program quietly.")

    parser.add_argument(
        '-s',
        '--show_plots',
        action='store_true',
        help="Show plots (good for checking the quality of the observed "
        "phase-map and the fitting).")

    args = parser.parse_args()
    v = not args.quiet
    if v:
        print("\n Phase-Map Fitting for BTFI")
        print(" by Bruno Quint & Fabricio Ferrari")
        print(" version 0.0a - Jan 2014")
        print("")

    check_dimensions(args.filename, dimensions=2)

    # Loading observed map ----------------------------------------------------
    if v:
        print(" Loading file: %s" % args.filename)
    phase_map = pyfits.open(args.filename)[0]

    # Check if file was obtained with BTFI instrument
    header = phase_map.header
    try:
        if header['INSTRUME'].upper() not in ['BTFI']:
            if v:
                print(
                    " [Warning]: %s file was not obtained with BTFI instrument."
                    % args.filename)
    except KeyError:
        warning("'INSTRUME' card was not found in the files' header.")

    mode = check_mode(args.filename)

    # Fitting Phase-Map for IBTF ----------------------------------------------
    if mode == 'ibtf':
        if v:
            print(" File obtained through an iBTF scan.")

        width = phase_map.header['naxis1']
        height = phase_map.header['naxis2']
        vmin = phase_map.data.mean() - 1.5 * phase_map.data.std()
        vmax = phase_map.data.mean() + 1.5 * phase_map.data.std()
        plt_config = {
            'origin': 'lower',
            'cmap': get_colormap(),
            'interpolation': 'nearest',
            'vmin': vmin,
            'vmax': vmax
        }
        if v:
            print(" Phase-map dimensions: [%d, %d]" % (width, height))
            print(" Done.")

        plt.subplot(131)

        plt.imshow(phase_map.data, **plt_config)
        plt.xticks([]), plt.yticks([])
        plt.xlabel('Observed Map')

        # Starting fitting process ------------------------------------------------
        npoints = numpy.sqrt(args.npoints).astype(int)
        if v:
            print("\n Starting phase-map fitting.")
            print(" %d x %d points will be used in the process." %
                  (npoints, npoints))

        x = (numpy.linspace(0.1, 0.9, npoints) * width).astype(int)
        y = (numpy.linspace(0.1, 0.9, npoints) * height).astype(int)
        x, y = numpy.meshgrid(x, y)
        x = numpy.ravel(x)
        y = numpy.ravel(y)
        z = numpy.ravel(phase_map.data[y, x])

        fit_func = lambda p, x, y: p[0] + p[1] * x + p[2] * y
        err_func = lambda p, x, y, z: z - fit_func(p, x, y)
        params = [z.mean(), 0, 0]

        # Fitting Plane -----------------------------------------------------------
        X = numpy.arange(phase_map.header['naxis1'])
        Y = numpy.arange(phase_map.header['naxis2'])
        X, Y = numpy.meshgrid(X, Y)

        if v: print("")
        for i in range(args.interactions):
            if v:
                print(" Fitting plane - Interaction %d" % (i + 1))

            if i == 0: e = z
            condition = numpy.where(numpy.abs(e - e.mean()) <= e.std())
            xx = x[condition]
            yy = y[condition]
            zz = z[condition]

            params, _ = optimize.leastsq(err_func, params, args=(xx, yy, zz))

            Z = fit_func(params, X, Y)
            error = Z - phase_map.data
            e = numpy.ravel(error[y, x])

        if v:
            p = params
            print("  phi(x,y) = %.2f + %.2fx + %.2fy" % (p[0], p[1], p[2]))
            print("  Error abs min: %f" % numpy.abs(e).min())
            print("  Error avg: %f" % e.mean())
            print("  Error std: %f" % e.std())
            print("  Error rms: %f" % numpy.sqrt(((e**2).mean())))

        plt.scatter(xx, yy, c=zz, cmap=get_colormap())
        plt.xlim(0, width), plt.ylim(0, height)

        plt.subplot(132)
        plt.imshow(error, **plt_config)
        plt.xticks([]), plt.yticks([])
        plt.xlabel("Residual")

        plt.subplot(133)
        plt.imshow(Z, **plt_config)
        plt.xticks([]), plt.yticks([])
        plt.xlabel("Fitted map")

        plt.show()

        ref_x = header['PHMREFX']
        ref_y = header['PHMREFY']
        fname = header['PHMREFF']

        fname = os.path.splitext(fname)[0]
        pyfits.writeto(fname + '--fit_phmap.fits',
                       Z - Z[ref_y, ref_x],
                       header,
                       clobber=True)
        pyfits.writeto(fname + '--res_phmap.fits',
                       Z - phase_map.data,
                       header,
                       clobber=True)

        print("")

    # Fitting phase-map for a Fabry-Perot Map ---------------------------------
    elif mode == 'fp':
        npoints = numpy.sqrt(args.npoints).astype(int)

        if v:
            print(" File obtained through a Fabry-Perot scan.")
            print(" Starting phase-map fitting.")
            print(" %d x %d points will be used in the process." %
                  (npoints, npoints))

        # Read data
        width = header['NAXIS1']
        height = header['NAXIS2']
        ref_x = header['PHMREFX']
        ref_y = header['PHMREFY']
        unit = header['PHMUNIT']
        sampling = header['PHMSAMP']
        FSR = header['PHMFSR']
        phmap = phase_map.data

        # From coordinates to pixels
        try:
            ref_x = header['CRPIX1'] + (ref_x -
                                        header['CRVAL1']) / header['CDELT1']
            ref_y = header['CRPIX2'] + (ref_y -
                                        header['CRVAL2']) / header['CDELT2']
        except KeyError:
            warning(" WCS not found. Using phisical coordinates.")

        phmap = phmap - phmap[ref_y, ref_x]

        x = (numpy.linspace(0.05, 0.95, npoints) * width).astype(int)
        y = (numpy.linspace(0.05, 0.95, npoints) * height).astype(int)
        X, Y = numpy.meshgrid(x, y)
        R = numpy.sqrt((X - ref_x)**2 + (Y - ref_y)**2)
        Z = phmap[Y, X]

        if args.show_plots:
            phmap_figure = plt.figure()
            phmap_axes = phmap_figure.add_subplot(111)
            phmap_imshow = phmap_axes.imshow(phmap,
                                             origin='lower',
                                             interpolation='nearest',
                                             cmap='coolwarm')
            phmap_axes.scatter(ref_x,
                               ref_y,
                               c='orange',
                               s=400,
                               marker="+",
                               lw=3)
            phmap_axes.scatter(X, Y, c='g', s=1, marker=".", alpha=0.7)
            phmap_axes.set_xlabel("X [px]")
            phmap_axes.set_ylabel("Y [px]")
            phmap_axes.set_xlim(0, width)
            phmap_axes.set_ylim(0, height)
            phmap_axes.grid()
            phmap_figure.colorbar(phmap_imshow)

        x = numpy.ravel(X)
        y = numpy.ravel(Y)
        r = numpy.sqrt((x - ref_x)**2 + (y - ref_y)**2)
        z = numpy.ravel(Z)

        condition = numpy.where(z > z.min(), True, False) * \
                    numpy.where(z < z.max(), True, False)

        r = r[condition]
        z = z[condition]

        z = z[numpy.argsort(r)]
        r = numpy.sort(r)

        # Checking if parabola is up or down.
        if v:
            print("\n Checking if parabola is up or down.")

        dz = numpy.diff(z, 1)
        dz_abs = numpy.abs(dz)
        dz_sign = numpy.sign(dz)
        sign = numpy.median(dz_sign[(dz_sign != 0) * (dz_abs <= sampling)])

        if v:
            print("  Parabola is %s" % ('up' if sign > 0 else 'down'))

        # Tell me the limits to fit the first parabola
        where = numpy.argmin(numpy.abs(r[dz_abs >= FSR / 2][0] - r))

        # Plot the gradient
        if args.show_plots:
            plt.figure(figsize=(16, 7))
            plt.subplot(2, 2, 3)
            plt.plot(r[1:], dz, 'b-')
            plt.gca().yaxis.set_label_position("right")
            plt.axvline(r[where], color='black', lw=2, ls='--')
            plt.axhline(FSR / 2, color='red', ls='--', label="FSR")
            plt.axhline(-FSR / 2, color='red', ls='--')
            plt.xlabel('Radius [px]')
            plt.ylabel('Gradient \n [%s]' % unit)
            plt.legend(loc='best')
            plt.grid()

        # This is the first fit
        p = numpy.polyfit(r[:where], z[:where], 2)
        rr = numpy.linspace(r[0], r[where], 1000)
        zz = numpy.polyval(p, rr)

        # Plot the data before correction
        if args.show_plots:
            plt.subplot(2, 2, 1)
            plt.plot(r[:where],
                     z[:where],
                     'b.',
                     alpha=0.25,
                     label='Not to be fixed')
            plt.plot(r[where:],
                     z[where:],
                     'r.',
                     alpha=0.25,
                     label='Data to be fixed')
            plt.plot(rr, zz, 'k-', lw=2)
            plt.axvline(r[where], color='black', lw=2, ls='--')
            plt.gca().yaxis.set_label_position("right")
            plt.xlabel('Radius [px]')
            plt.ylabel('Peak displacement \n [%s]' % unit)
            plt.legend(loc='best')
            plt.grid()

        # Displace the FSR
        error = numpy.abs(z - numpy.polyval(p, r) + sign * FSR)

        # Plot error
        if args.show_plots:
            plt.subplot(2, 2, 4)
            plt.plot(r, error, 'k.', alpha=0.25)
            # plt.gca().yaxis.tick_right()
            plt.gca().yaxis.set_label_position("right")
            plt.xlabel('Radius [px]')
            plt.ylabel('Error \n [%s]' % unit)
            plt.ylim(ymin=-50, ymax=1.1 * error.max())
            plt.grid()

        condition = (error > 2 * sampling)

        # Plot data after correction
        if args.show_plots:
            plt.subplot(2, 2, 2)
            plt.plot(r[condition],
                     z[condition],
                     'b.',
                     alpha=0.25,
                     label='Not fixed data')
            plt.plot(r[~condition],
                     z[~condition] + sign * FSR,
                     'r.',
                     alpha=0.25,
                     label='Fixed data')
            plt.gca().yaxis.set_label_position("right")
            plt.xlabel('Radius [px]')
            plt.ylabel('Peak displacement \n [%s]' % unit)
            plt.grid()

        # This is the second fit
        z = numpy.where(error >= 2 * sampling, z, z + sign * FSR)
        p = numpy.polyfit(r, z, 2)

        if args.show_plots:
            rr = numpy.linspace(r[0], r[-1], 1000)
            zz = numpy.polyval(p, rr)
            plt.plot(rr, zz, 'k-', lw=2, label='Fitted data.')
            plt.legend(loc='best')

        error = z - numpy.polyval(p, r)

        if v:
            print("  phi(x,y) = %.2e x^2 + %.2e x + %.2e " %
                  (p[0], p[1], p[2]))
            print("  Error abs min: %f" % numpy.abs(error).min())
            print("  Error avg: %f" % error.mean())
            print("  Error std: %f" % error.std())
            print("  Error rms: %f" % numpy.sqrt(((error**2).mean())))
            print("  Sampling in Z: %s" % phase_map.header['phmsamp'])
            print(" ")

        x = numpy.arange(width)
        y = numpy.arange(height)
        X, Y = numpy.meshgrid(x, y)
        R = numpy.sqrt((X - ref_x)**2 + (Y - ref_y)**2)
        Z = numpy.polyval(p, R)
        Z = Z - Z[ref_y, ref_x]

        fname = header['PHMREFF']
        fname = os.path.splitext(fname)[0]
        pyfits.writeto(fname + '--fit_phmap.fits', Z, header, clobber=True)
        pyfits.writeto(fname + '--res_phmap.fits',
                       Z - phmap,
                       header,
                       clobber=True)

        if v:
            print(" All done.\n")

        if args.show_plots:
            plt.show()

    else:
        if v: print(" [Warning]: File was not obtained from FP or iBTF.")
        if v: print(" [Warning]: Don't know what to do. Leaving now.\n")
        from sys import exit
        exit()
コード例 #47
0
ファイル: trace_orders.py プロジェクト: lisebc/PyReduce
def mark_orders(
    im,
    min_cluster=None,
    min_width=None,
    filter_size=None,
    noise=None,
    opower=4,
    border_width=None,
    degree_before_merge=2,
    regularization=0,
    closing_shape=(5, 5),
    opening_shape=(2, 2),
    plot=False,
    plot_title=None,
    manual=True,
    auto_merge_threshold=0.9,
    merge_min_threshold=0.1,
    sigma=0,
):
    """ Identify and trace orders

    Parameters
    ----------
    im : array[nrow, ncol]
        order definition image
    min_cluster : int, optional
        minimum cluster size in pixels (default: 500)
    filter_size : int, optional
        size of the running filter (default: 120)
    noise : float, optional
        noise to filter out (default: 8)
    opower : int, optional
        polynomial degree of the order fit (default: 4)
    border_width : int, optional
        number of pixels at the bottom and top borders of the image to ignore for order tracing (default: 5)
    plot : bool, optional
        whether to plot the final order fits (default: False)
    manual : bool, optional
        whether to manually select clusters to merge (strongly recommended) (default: True)

    Returns
    -------
    orders : array[nord, opower+1]
        order tracing coefficients (in numpy order, i.e. largest exponent first)
    """

    # Convert to signed integer, to avoid underflow problems
    im = np.asanyarray(im)
    im = im.astype(int)

    if filter_size is None:
        col = im[:, im.shape[1] // 2]  # central column; shape[1] is the column count
        col = median_filter(col, 5)
        threshold = np.percentile(col, 90)
        npeaks = find_peaks(col, height=threshold)[0].size
        filter_size = im.shape[0] // (npeaks * 2)
        logger.info("Median filter size, estimated: %i", filter_size)
    elif filter_size <= 0:
        raise ValueError(f"Expected filter size > 0, but got {filter_size}")

    if border_width is None:
        # find width of orders, based on central column
        col = im[:, im.shape[1] // 2]
        col = median_filter(col, 5)
        idx = np.argmax(col)
        width = peak_widths(col, [idx])[0][0]
        border_width = int(np.ceil(width))
        logger.info("Image border width, estimated: %i", border_width)
    elif border_width < 0:
        raise ValueError(f"Expected border width > 0, but got {border_width}")

    if min_cluster is None:
        min_cluster = im.shape[1] // 4
        logger.info("Minimum cluster size, estimated: %i", min_cluster)
    elif not np.isscalar(min_cluster):
        raise TypeError(
            f"Expected scalar minimum cluster size, but got {min_cluster}")

    if min_width is None:
        min_width = 0.25
    if min_width == 0:
        pass
    elif isinstance(min_width, (float, np.floating)):
        min_width = int(min_width * im.shape[0])
        logger.info("Minimum order width, estimated: %i", min_width)

    # im[im < 0] = np.ma.masked
    blurred = grey_closing(im, 5)
    # blur image along columns, and use the median + blurred + noise as threshold
    blurred = gaussian_filter1d(blurred, filter_size, axis=0)

    if noise is None:
        tmp = np.abs(blurred.flatten())
        noise = np.percentile(tmp, 5)
        logger.info("Background noise, estimated: %f", noise)
    elif not np.isscalar(noise):
        raise TypeError(f"Expected scalar noise level, but got {noise}")

    mask = im > blurred + noise
    # remove borders
    if border_width != 0:
        mask[:border_width, :] = mask[-border_width:, :] = False
        mask[:, :border_width] = mask[:, -border_width:] = False
    # remove masked areas with no clusters
    mask = np.ma.filled(mask, fill_value=False)
    # close gaps in between clusters
    struct = np.full(closing_shape, 1)
    mask = morphology.binary_closing(mask, struct, border_value=1)
    # remove small lonely clusters
    struct = np.full(opening_shape, 1)
    # struct = morphology.generate_binary_structure(2, 1)
    mask = morphology.binary_opening(mask, struct)

    # label clusters
    clusters, _ = label(mask)

    # remove small clusters
    sizes = np.bincount(clusters.ravel())
    mask_sizes = sizes > min_cluster
    mask_sizes[0] = True  # this is the background, which we don't need to remove
    for i in np.arange(len(sizes))[~mask_sizes]:
        clusters[clusters == i] = 0

    # # Reorganize x, y, clusters into a more convenient "pythonic" format
    # # x, y become dictionaries, with an entry for each order
    # # n is just a list of all orders (ignore cluster == 0)
    n = np.unique(clusters)
    n = n[n != 0]
    x = {i: np.where(clusters == c)[0] for i, c in enumerate(n)}
    y = {i: np.where(clusters == c)[1] for i, c in enumerate(n)}

    def best_fit_degree(x, y):
        L1 = np.sum((np.polyval(np.polyfit(y, x, 1), y) - x)**2)
        L2 = np.sum((np.polyval(np.polyfit(y, x, 2), y) - x)**2)

        # aic1 = 2 + 2 * np.log(L1) + 4 / (x.size - 2)
        # aic2 = 4 + 2 * np.log(L2) + 12 / (x.size - 3)

        if L1 < L2:
            return 1
        else:
            return 2

    if sigma > 0:
        degree = {i: best_fit_degree(x[i], y[i]) for i in x.keys()}
        bias = {i: np.polyfit(y[i], x[i], deg=degree[i])[-1] for i in x.keys()}
        n = list(x.keys())
        yt = np.concatenate([y[i] for i in n])
        xt = np.concatenate([x[i] - bias[i] for i in n])
        coef = np.polyfit(yt, xt, deg=degree_before_merge)

        res = np.polyval(coef, yt)
        cutoff = sigma * (res - xt).std()

        # DEBUG plot
        # uy = np.unique(yt)
        # mask = np.abs(res - xt) > cutoff
        # plt.plot(yt, xt, ".")
        # plt.plot(yt[mask], xt[mask], "r.")
        # plt.plot(uy, np.polyval(coef, uy))
        # plt.show()
        #

        m = {
            i: np.abs(np.polyval(coef, y[i]) - (x[i] - bias[i])) < cutoff
            for i in x.keys()
        }

        k = max(x.keys()) + 1
        for i in range(1, k):
            new_img = np.zeros(im.shape, dtype=int)
            new_img[x[i][~m[i]], y[i][~m[i]]] = 1
            clusters, _ = label(new_img)

            x[i] = x[i][m[i]]
            y[i] = y[i][m[i]]
            if len(x[i]) == 0:
                del x[i], y[i]

            nnew = np.max(clusters)
            if nnew != 0:
                xidx, yidx = np.indices(im.shape)
                for j in range(1, nnew + 1):
                    xn = xidx[clusters == j]
                    yn = yidx[clusters == j]
                    if xn.size >= min_cluster:
                        x[k] = xn
                        y[k] = yn
                        k += 1
                # plt.imshow(clusters, origin="lower")
                # plt.show()

    if plot:  # pragma: no cover
        title = "Identified clusters"
        if plot_title is not None:
            title = f"{plot_title}\n{title}"
        plt.title(title)
        plt.xlabel("x [pixel]")
        plt.ylabel("y [pixel]")
        clusters = np.ma.zeros(im.shape, dtype=int)
        for i in x.keys():
            clusters[x[i], y[i]] = i + 1
        clusters[clusters == 0] = np.ma.masked

        plt.imshow(clusters, origin="lower", cmap="prism")
        plt.show()

    # Merge clusters, if there are even any possible mergers left
    x, y, n = merge_clusters(
        im,
        x,
        y,
        n,
        manual=manual,
        deg=degree_before_merge,
        auto_merge_threshold=auto_merge_threshold,
        merge_min_threshold=merge_min_threshold,
        plot_title=plot_title,
    )

    if min_width > 0:
        sizes = {k: v.max() - v.min() for k, v in y.items()}
        mask_sizes = {k: v > min_width for k, v in sizes.items()}
        for k, v in mask_sizes.items():
            if not v:
                del x[k]
                del y[k]
        n = x.keys()

    orders = fit_polynomials_to_clusters(x, y, n, opower)

    # sort orders from bottom to top, using relative position

    def compare(i, j):
        _, xi, i_left, i_right = i
        _, xj, j_left, j_right = j

        if i_right < j_left or j_right < i_left:
            return xi.mean() - xj.mean()

        left = max(i_left, j_left)
        right = min(i_right, j_right)

        return xi[left:right].mean() - xj[left:right].mean()

    xp = np.arange(im.shape[1])
    keys = [(c, np.polyval(orders[c], xp), y[c].min(), y[c].max())
            for c in x.keys()]
    keys = sorted(keys, key=cmp_to_key(compare))
    key = [k[0] for k in keys]

    n = np.arange(len(n), dtype=int)
    x = {c: x[key[c]] for c in n}
    y = {c: y[key[c]] for c in n}
    orders = np.array([orders[key[c]] for c in n])

    column_range = np.array([[np.min(y[i]), np.max(y[i]) + 1] for i in n])

    if plot:  # pragma: no cover
        plot_orders(im, x, y, n, orders, column_range, title=plot_title)

    return orders, column_range
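
As the docstring notes, each row of the returned orders array holds coefficients in numpy order (largest exponent first), so a traced order can be evaluated directly with np.polyval over its column range. A minimal sketch with illustrative coefficients rather than real tracing output:

import numpy as np

orders = np.array([[1e-5, -0.02, 250.0]])  # one order traced at degree 2
column_range = np.array([[100, 1900]])

i = 0
cols = np.arange(column_range[i, 0], column_range[i, 1])
rows = np.polyval(orders[i], cols)         # row position of order i per column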
コード例 #48
0
def AnalyseDetector(data,
                    bias=[-1],
                    inputpower=6.0,
                    gratingloss=-4.0,
                    figname=None):
    '''Analyse the detector measurement result in 'data' and return:
        - dark current at bias
        - light current at bias
        - responsivity at bias

    Arguments:
    -data: the PortLossMeasurement node, read from XML using lxml.objectify,
        obtained by running the DetectorMeasurement recipe.
    -bias (list): bias voltage(s) (V) at which to evaluate the device parameters
    -inputpower (float): optical power at the tip of the input fiber (dBm)
    -gratingloss (float): optical loss of the input grating (dB).
    -figname (str): name of a file where a plot of the IV measurements will
        be saved. Default is None, in which case no plot will be made.
    
    Returns:
    -result (DetectorMeasurementResult): object that stores all the
        analysis results.
    '''
    result = DetectorAnalysisResult()
    pl = data  #.PortLossMeasurement
    #fill in the measurement conditions in the result object:
    result.Wavelength = float(pl.LightCurrent.attrib['Wavelength'])
    result.LaserPower = float(pl.LightCurrent.attrib['Power'])
    powerunit = pl.LightCurrent.attrib['PowerUnit']
    #if 'dbm' in powerunit.lower():
    #    #convert to mW
    #    result.LaserPower = 10**(result.LaserPower/10)
    lshift = pl.PositionerInfo.LeftFibre.attrib['Shift']
    shiftx = float(lshift[1:-1].split(',')[0])
    shifty = float(lshift[1:-1].split(',')[1])
    result.FiberShift = (shiftx**2 + shifty**2)**0.5
    result.Bias = bias
    result.FiberOutput = inputpower
    result.GratingLoss = gratingloss
    pwratdetect = inputpower + gratingloss
    result.PowerAtDetector = 10**(pwratdetect / 10)
    datetime = time.strptime(pl.attrib['DateStamp'], "%a %b %d %X %Y")
    result.Date = time.strftime("%Y%m%d", datetime)
    result.Time = time.strftime("%X", datetime)
    #extract the detector properties: dark current, light current,
    #responsivity
    darkv = pl.DarkCurrent.V.pyval
    darkv = np.genfromtxt(StringIO(darkv), delimiter=',')
    darki = pl.DarkCurrent.I.pyval
    darki = np.genfromtxt(StringIO(darki), delimiter=',')
    iunit = pl.DarkCurrent.I.attrib['Unit']
    if "symbol='mA'" in iunit:
        darki = darki * 0.001
    lightv = pl.LightCurrent.V.pyval
    lightv = np.genfromtxt(StringIO(lightv), delimiter=',')
    lighti = pl.LightCurrent.I.pyval
    lighti = np.genfromtxt(StringIO(lighti), delimiter=',')
    iunit = pl.LightCurrent.I.attrib['Unit']
    if "symbol='mA'" in iunit:
        lighti = lighti * 0.001
    for b in bias:
        #dark current:
        P = np.polyfit(darkv, darki, 7)
        #result.DarkCurrent.append(np.polyval(P, b))
        dark_idx = np.argwhere(darkv == b)[0]
        result.DarkCurrent.append(darki[dark_idx[0]])
        if figname is not None:
            _, ax = plt.subplots()
            ax.plot(darkv, darki, label='dark')
            # ax.hold(True) removed: axes accumulate artists by default in matplotlib >= 2.0
            ax.plot(darkv[dark_idx[0]], result.DarkCurrent[-1], '^')
            ax.plot(darkv, np.polyval(P, darkv), '.', label='dark_fit')
            ax.grid(True)
        #light current:
        P = np.polyfit(lightv, lighti, 7)
        #result.LightCurrent.append(np.polyval(P, b))
        light_idx = np.argwhere(lightv == b)[0]
        result.LightCurrent.append(lighti[light_idx[0]])
        if figname is not None:
            ax.plot(lightv, lighti, label='light')
            # ax.hold(True) removed: axes accumulate artists by default
            ax.plot(lightv[light_idx[0]], result.LightCurrent[-1], 's')
            ax.plot(lightv, np.polyval(P, lightv), '-.', label='light_fit')
            ax.grid(True)
            handles, labels = ax.get_legend_handles_labels()
            ax.legend(handles, labels, loc='lower right')
            plt.savefig(figname, bbox_inches='tight')
        #responsivity:
        result.Responsivity.append(np.abs(result.LightCurrent[-1] - \
                                          result.DarkCurrent[-1]))
        result.Responsivity[-1] *= 1000 / result.PowerAtDetector
    return result
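
To make the unit handling in the responsivity step explicit: after the mA conversion the currents are in A, PowerAtDetector is in mW, so multiplying the photocurrent by 1000 / P(mW) yields A/W. A worked check with illustrative numbers, matching the default 6 dBm input power and -4 dB grating loss:

light_i = 850e-6                   # 850 uA photocurrent [A], illustrative
dark_i = 10e-9                     # 10 nA dark current [A], illustrative
power_dbm = 6.0 + (-4.0)           # fiber output plus grating loss [dBm]
power_mw = 10**(power_dbm / 10)    # ~1.585 mW at the detector

responsivity = abs(light_i - dark_i) * 1000 / power_mw  # [A/W]
print(round(responsivity, 3))      # ~0.536 A/W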
コード例 #49
0
ファイル: dropAnalysis.py プロジェクト: mxardre/dropAnalysis
def plotDerivative(df,
                   dropMap,
                   label,
                   channel,
                   low=0,
                   high=5,
                   namePath='',
                   nbpt=2):
    choice = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black']

    colors = choice[0:len(label)]

    for j, tmp in enumerate(label):
        y = []
        x = []
        d = []
        m = 0
        R = []

        for i, content in enumerate(dropMap[:, 1]):
            if content == tmp:
                if channel == 'mCherry':
                    p = df[i]
                    y = np.array(np.log(p.fluo_2_mean))
                    x = np.array(p.time / 3600)
                if channel == 'GFP':
                    p = df[i]
                    y = np.array(np.log(p.fluo_3_mean))
                    x = np.array(p.time / 3600)
                if channel == 'PVD':
                    p = df[i]
                    y = np.array(np.log(p.fluo_1_mean))
                    x = np.array(p.time / 3600)

                #dforth=np.gradient(y[0::nbpt],x[0::nbpt])
                #dback=np.gradient(y[1::nbpt],x[1::nbpt])
                dforth = []
                dback = []
                for i in range(0, len(x) - 1, 2):  # step of two to split back and forth
                    xF = np.array(x[i + 0:i + nbpt:2])
                    xB = np.array(x[i + 1:i + nbpt + 1:2])
                    yF = np.array(y[i + 0:i + nbpt:2])
                    yB = np.array(y[i + 1:i + nbpt + 1:2])

                    fF = np.polyfit(xF, yF, 1, full=True)
                    dforth.append(fF[0][0])
                    chi_squaredF = np.sum(
                        (np.polyval(fF[0], xF) - yF)**2) / len(xF)
                    R.append(chi_squaredF)

                    fB = np.polyfit(xB, yB, 1, full=True)
                    dback.append(fB[0][0])
                    chi_squaredB = np.sum(
                        (np.polyval(fB[0], xB) - yB)**2) / len(xB)
                    R.append(chi_squaredB)

                plt.scatter(x[0:-1:2], np.abs(dforth), color='blue')
                plt.scatter(x[1::2], np.abs(dback), color='red')
                plt.plot(x[0:-1:2], np.abs(dforth), color='blue')
                plt.plot(x[1::2], np.abs(dback), color='red')

                if m < np.max(dback):
                    m = np.max(dback)
                if m < np.max(dforth):
                    m = np.max(dforth)

    plt.plot(x, np.ones(len(x)) * m * .5)
    fig1 = plt.gcf()
    plt.ylim([0, 2])
    plt.xlim([0, 40])
    plt.title(' '.join(label) + ' ' + ' '.join(choice[0:len(label)]) +
              "\nchannel=" + str(channel) + "\nnbpts=" + str(nbpt))
    plt.xlabel('time (h)')
    plt.ylabel('fluorescence derivative (a.u.)')
    plt.show()
    if namePath != '':
        fig1.savefig(namePath)

    hist, bins = np.histogram(R, bins=50)
    width = 0.7 * (bins[1] - bins[0])
    center = (bins[:-1] + bins[1:]) / 2
    plt.bar(center, hist, align='center', width=width)
    #    plt.xlim([0, 1])
    plt.ylim([0, 10])

    plt.show()
コード例 #50
0
ファイル: road_detection.py プロジェクト: kalinnorman/SDCars
    def curved_road_2(self, file, imgFromRealSense=False):
        """
        Algorithm to calculate where a curved road is in the image.
        :param file:
        :param imgFromRealSense:
        :return:
        """
        # load the image
        self.load_image(file, imgFromRealSense=imgFromRealSense)
        cv2.imshow('Full image', self.img)

        # Take bottom half of image
        #imgh = self.horizontal_segment_image(self.img, 2, 1)  # first index contains y values, second index contains x values
        imgh = self.img[130:self.img.shape[0], 0:(self.img.shape[1])]
        cv2.imshow('Bottom half', imgh)

        # Detect yellow/white
        imgyh = cd.detect_hue(imgh, "yellow", val_thresh=50, sat_thresh=130)

        # Edge detection
        imgyhe = cv2.Canny(imgyh,
                           rdp.EDGE_LOW_THRESHOLD_DEFAULT,
                           rdp.EDGE_HIGH_THRESHOLD_DEFAULT,
                           edges=rdp.EDGE_TYPE,
                           apertureSize=rdp.EDGE_APERTURE_DEFAULT,
                           L2gradient=rdp.EDGE_L2GRADIENT)
        imgyhe = cv2.blur(imgyhe, (3, 3))
        cv2.imshow('edges', imgyhe)

        #imgyhec = cv2.cvtColor(imgyhe, cv2.COLOR_GRAY2BGR)
        imgyhec = np.zeros((imgyhe.shape[0], imgyhe.shape[1], 3), np.uint8)

        # Hough Lines P
        lines = cv2.HoughLinesP(imgyhe,
                                10,
                                np.pi / 180,
                                200,
                                lines=None,
                                minLineLength=3)
        # HoughLinesP returns None when no segments are found
        if lines is None:
            lines = []

        x_pts = []
        y_pts = []

        rt_x_pts = []
        rt_y_pts = []

        lf_x_pts = []
        lf_y_pts = []

        for seg in lines:
            x_pts.append(seg[0][0])
            x_pts.append(seg[0][2])
            y_pts.append(seg[0][1])
            y_pts.append(seg[0][3])

            if seg[0][0] > 630:
                rt_x_pts.append(seg[0][0])
                rt_x_pts.append(seg[0][2])
                rt_y_pts.append(seg[0][1])
                rt_y_pts.append(seg[0][3])

                cv2.line(imgyhec, (seg[0][0], seg[0][1]),
                         (seg[0][2], seg[0][3]), (0, 255, 255))
            elif (seg[0][0] > 460) & (seg[0][1] > 100):
                lf_x_pts.append(seg[0][0])
                lf_x_pts.append(seg[0][2])
                lf_y_pts.append(seg[0][1])
                lf_y_pts.append(seg[0][3])

                cv2.line(imgyhec, (seg[0][0], seg[0][1]),
                         (seg[0][2], seg[0][3]), (255, 255, 0))

        cv2.imshow('segments', imgyhec)

        curve = np.polyfit(rt_y_pts, rt_x_pts, 2)
        lspace = np.linspace(0, 300, 100)
        draw_y = lspace
        draw_x = np.polyval(curve, draw_y)
        right_points = (np.asarray([draw_x, draw_y]).T).astype(np.int32)
        right_points_img = (np.asarray([draw_x,
                                        draw_y + 130]).T).astype(np.int32)
        cv2.polylines(imgyhec, [right_points],
                      False, (0, 150, 225),
                      thickness=3)
        cv2.polylines(self.imglanes, [right_points_img],
                      False, (255, 0, 225),
                      thickness=3)

        curve = np.polyfit(lf_y_pts, lf_x_pts, 3)
        lspace = np.linspace(0, 300, 100)
        draw_y = lspace
        draw_x = np.polyval(curve, draw_y)
        left_points = (np.asarray([draw_x, draw_y]).T).astype(np.int32)
        left_points_img = (np.asarray([draw_x,
                                       draw_y + 130]).T).astype(np.int32)
        cv2.polylines(imgyhec, [left_points],
                      False, (255, 0, 225),
                      thickness=3)
        cv2.polylines(self.imglanes, [left_points_img],
                      False, (255, 0, 225),
                      thickness=3)

        avg_x_pts = []
        avg_y_pts = []

        for i in range(0, len(right_points)):
            a = left_points[i]
            b = right_points[i]
            avg_x_pts.append((a[0] + b[0]) / 2)
            avg_y_pts.append((a[1] + b[1]) / 2)

        curve = np.polyfit(avg_y_pts, avg_x_pts, 4)
        lspace = np.linspace(0, 300, 100)
        draw_y = lspace
        draw_x = np.polyval(curve, draw_y)
        avg_points = (np.asarray([draw_x, draw_y]).T).astype(np.int32)
        avg_points_img = (np.asarray([draw_x,
                                      draw_y + 130]).T).astype(np.int32)
        cv2.polylines(imgyhec, [avg_points], False, (255, 0, 225), thickness=3)
        cv2.polylines(self.imglanes, [avg_points_img],
                      False, (127, 255, 127),
                      thickness=3)

        cv2.imshow('curve', imgyhec)
        cv2.imshow('Final Result', self.imglanes)

        # save the image
        cv2.imwrite('curved_demo.jpg', self.imglanes)
コード例 #51
0
ファイル: SNMCcalib.py プロジェクト: niyuanqi/SNAP
                                                         N=30,
                                                         plot=plot)
    print(params, params_err)
        #maximum absolute magnitude
        Mst[i][j] = params[1]
        Mst_err[i][j] = params_err[1]
    print(st[i], st_err[i])
    print(Mst[i][0], Mst_err[i][0])

poly = []
disp = []
for i in range(len(band)):
    #fit polynomial to phillips relation, and plot
    print "Fitting phillips relation with polynomial."
    poly.append(np.polyfit(SNsBV[i], SNM[i], npoly))
    fitvals = np.polyval(poly[i], SNsBV[i])
    plt.errorbar(SNsBV[i],
                 SNM[i],
                 xerr=SNsBV_err[i],
                 yerr=SNM_err[i],
                 fmt='r+')
    plt.scatter(SNsBV[i], fitvals)
    disp.append(np.std(SNM[i] - fitvals))
    print(disp[i])
    plt.ylim(-16, -20)
    plt.xlim(1.4, 0.2)
    plt.show()

    #remove some phillips relation outliers, and plot again
    print "Kick out ouliers (>Nsig*dispersion from fit) and fit again."
    sBVcur = SNsBV[i][np.absolute(SNM[i] - fitvals) < Nsig * disp[i]]
コード例 #52
0
ファイル: dropAnalysis.py プロジェクト: mxardre/dropAnalysis
def plotDerivativeSeparate(df,
                           dropMap,
                           label,
                           channel,
                           low=0,
                           high=5,
                           namePath='',
                           nbpt=2):
    choice = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black']

    colors = choice[0:len(label)]

    for j, tmp in enumerate(label):
        y = []
        x = []
        d = []
        m = 0

        for i, content in enumerate(dropMap[:, 1]):
            if content == tmp:
                if channel == 'mCherry':
                    p = df[i]
                    y = np.array(np.log(p.fluo_2_mean))
                    x = np.array(p.time / 3600)
                if channel == 'GFP':
                    p = df[i]
                    y = np.array(np.log(p.fluo_3_mean))
                    x = np.array(p.time / 3600)
                if channel == 'PVD':
                    p = df[i]
                    y = np.array(np.log(p.fluo_1_mean))
                    x = np.array(p.time / 3600)

                #dforth=np.gradient(y[0::nbpt],x[0::nbpt])
                #dback=np.gradient(y[1::nbpt],x[1::nbpt])
                dforth = []
                dback = []
                xForth = []
                xBack = []
                xBF = []
                dBF = []
                RB = []
                RF = []

                fig, ax1 = plt.subplots()  #s(1,3,sharex=True)
                a = np.arange(20)

                for i in range(0, len(x) - 1, 2):  # step of two to split back and forth
                    xF = np.array(x[i + 0:i + nbpt:2])
                    xB = np.array(x[i + 1:i + nbpt + 1:2])
                    yF = np.array(y[i + 0:i + nbpt:2])
                    yB = np.array(y[i + 1:i + nbpt + 1:2])

                    f = np.polyfit(xF, yF, 1, full=True)
                    dforth.append(f[0][0])
                    xForth.append(np.mean(xF))

                    thplot = 0
                    if thplot < xB[0] < thplot + 20:  # plot in the exp phase
                        ax1.plot(x, func(f[0], x))
                        plt.ylim(-6, 2)

                    chi_squaredF = np.sum(
                        (np.polyval(f[0], xF) - yF)**2) / len(xF)
                    RF.append(chi_squaredF)

                    f = np.polyfit(xB, yB, 1, full=True)
                    dback.append(f[0][0])
                    xBack.append(np.mean(xB))
                    if thplot < xB[0] < thplot + 20:  # plot in the exp phase
                        ax1.plot(x, func(f[0], x))
                        plt.ylim(-6, 2)

                    chi_squaredB = np.sum(
                        (np.polyval(f[0], xB) - yB)**2) / len(xB)
                    RB.append(chi_squaredB)

                #ax1.scatter(x[0::2],y[0::2],color='blue')#,marker='o', linestyle='-',color='blue')
                #ax1.scatter(x[1::2],y[1::2],color='red')#,marker='o', linestyle='-',color='red')
                #ax1.set_ylim(-6, 2)
                #a=np.arange(20)
                #ax1.set_title(str(a[:nbpt:2])+' fit')
                #ax1[0].set(adjustable='box-forced', aspect='equal')
                #plt.show()

                #fig, ax1 = plt.subplots()
                ax1.scatter(
                    x[0::2], y[0::2],
                    color='blue')  #,marker='o', linestyle='-',color='blue')
                ax1.scatter(
                    x[1::2], y[1::2],
                    color='red')  #,marker='o', linestyle='-',color='red')
                ax1.set_ylim([-6, 2])
                ax2 = ax1.twinx()
                xBF = xForth + xBack
                xBF[::2] = xBack
                xBF[1::2] = xForth
                dBF = dforth + dback
                dBF[::2] = dback
                dBF[1::2] = dforth
                ax2.plot(xBF, dBF, color='green', marker='*', linestyle='-')
                ax2.set_ylim([0, 1])

                plt.show()

                if m < np.max(dback):
                    m = np.max(dback)
                if m < np.max(dforth):
                    m = np.max(dforth)
コード例 #53
0
ファイル: signaltools.py プロジェクト: chris1869/scipy
def residuez(b, a, tol=1e-3, rtype='avg'):
    """Compute partial-fraction expansion of b(z) / a(z).

    If ``M = len(b)`` and ``N = len(a)``::

                b(z)     b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
        H(z) = ------ = ----------------------------------------------
                a(z)     a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)

                 r[0]                   r[-1]
         = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
           (1-p[0]z**(-1))         (1-p[-1]z**(-1))

    If there are any repeated roots (closer than tol), then the partial
    fraction expansion has terms like::

             r[i]              r[i+1]                    r[i+n-1]
        -------------- + ------------------ + ... + ------------------
        (1-p[i]z**(-1))  (1-p[i]z**(-1))**2         (1-p[i]z**(-1))**n

    See also
    --------
    invresz, poly, polyval, unique_roots

    """
    b, a = map(asarray, (b, a))
    gain = a[0]
    brev, arev = b[::-1], a[::-1]
    krev, brev = polydiv(brev, arev)
    if len(krev) == 0:
        k = []
    else:
        k = krev[::-1]
    b = brev[::-1]
    p = roots(a)
    r = p * 0.0
    pout, mult = unique_roots(p, tol=tol, rtype=rtype)
    p = []
    for n in range(len(pout)):
        p.extend([pout[n]] * mult[n])
    p = asarray(p)
    # Compute the residue from the general formula (for discrete-time)
    #  the polynomial is in z**(-1) and the multiplication is by terms
    #  like this (1-p[i] z**(-1))**mult[i].  After differentiation,
    #  we must divide by (-p[i])**(m-k) as well as (m-k)!
    indx = 0
    for n in range(len(pout)):
        bn = brev.copy()
        pn = []
        for l in range(len(pout)):
            if l != n:
                pn.extend([pout[l]] * mult[l])
        an = atleast_1d(poly(pn))[::-1]
        # bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
        # multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
        sig = mult[n]
        for m in range(sig, 0, -1):
            if sig > m:
                # compute next derivative of bn(s) / an(s)
                term1 = polymul(polyder(bn, 1), an)
                term2 = polymul(bn, polyder(an, 1))
                bn = polysub(term1, term2)
                an = polymul(an, an)
            r[indx + m -
              1] = (polyval(bn, 1.0 / pout[n]) / polyval(an, 1.0 / pout[n]) /
                    factorial(sig - m) / (-pout[n])**(sig - m))
        indx += sig
    return r / gain, p, k
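
A usage sketch for residuez through its public home in scipy.signal, with values chosen so the expansion is checkable by hand: H(z) = 1 / (1 - 0.25 z**-2) factors into 0.5 / (1 - 0.5 z**-1) + 0.5 / (1 + 0.5 z**-1).

import numpy as np
from scipy import signal

b = [1.0]
a = [1.0, 0.0, -0.25]          # 1 - 0.25 z**-2
r, p, k = signal.residuez(b, a)
print(r)                       # ~[0.5, 0.5]
print(p)                       # poles at +0.5 and -0.5 (ordering may vary)
print(k)                       # empty: no direct polynomial term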
コード例 #54
0
def BSM_hedge_run(p=0):
    ''' Implements delta hedging for a single path. '''
    np.random.seed(50000)

    #
    # Initial Delta
    #
    ds = 0.01
    V_1, S, ex, rg, h, dt = BSM_lsm_put_value(S0 + ds, M, I)
    V_2 = BSM_lsm_put_value(S0, M, I)[0]
    del_0 = (V_1 - V_2) / ds

    #
    # Dynamic Hedging
    #
    delt = np.zeros(M + 1, dtype=float)  # vector for deltas
    print("\nAPPROXIMATION OF FIRST ORDER ")
    print("-----------------------------")
    print(" %7s | %7s | %7s " % ('step', 'S_t', 'Delta'))
    for t in range(1, M, 1):
        if ex[t, p] == 0:  # if option is alive
            St = S[t, p]  # relevant index level
            diff = (np.polyval(rg[t], St + ds) - np.polyval(rg[t], St))
        # numerator of difference quotient
        delt[t] = diff / ds  # delta as difference quotient
        print(" %7d | %7.2f | %7.2f" % (t, St, delt[t]))
        if (S[t, p] - S[t - 1, p]) * (delt[t] - delt[t - 1]) < 0:
            print(" wrong")
        else:
            break
    delt[0] = del_0

    print("\nDYNAMIC HEDGING OF AMERICAN PUT (BSM)")
    print("---------------------------------------")
    po = np.zeros(t, dtype=float)  # vector for portfolio values
    vt = np.zeros(t, dtype=float)  # vector for option values
    vt[0] = V_1
    po[0] = V_1
    bo = V_1 - delt[0] * S0  # bond position value
    print("Initial Hedge")
    print("Stocks %8.3f" % delt[0])
    print("Bonds %8.3f" % bo)
    print("Cost %8.3f" % (delt[0] * S0 + bo))

    print("\nRegular Rehedges ")
    print(68 * "-")
    print("step|" + 7 * " %7s|" %
          ('S_t', 'Port', 'Put', 'Diff', 'Stock', 'Bond', 'Cost'))
    for j in range(1, t, 1):
        vt[j] = BSM_lsm_put_value(S[j, p], M - j, I)[0]
        po[j] = delt[j - 1] * S[j, p] + bo * math.exp(r * dt)
        bo = po[j] - delt[j] * S[j, p]  # bond position value
        print("%4d|" % j + 7 * " %7.3f|" %
              (S[j, p], po[j], vt[j],
               (po[j] - vt[j]), delt[j], bo, delt[j] * S[j, p] + bo))

    errs = po - vt  # hedge errors
    print("MSE %7.3f" % (np.sum(errs**2) / len(errs)))
    print("Average Error %7.3f" % (np.sum(errs) / len(errs)))

    print("Total P&L %7.3f" % np.sum(errs))
    return S[:, p], po, vt, errs, t
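
The delta above is a one-sided difference quotient of the regression polynomial. A standalone sketch of the same construction on a known value function, where the exact derivative is available for comparison:

import numpy as np

poly = np.array([2.0, -1.0, 3.0])   # V(S) = 2 S**2 - S + 3
S, ds = 40.0, 0.01
delta_fd = (np.polyval(poly, S + ds) - np.polyval(poly, S)) / ds
delta_exact = np.polyval(np.polyder(poly), S)  # V'(S) = 4 S - 1
print(delta_fd, delta_exact)        # ~159.02 vs 159.0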
コード例 #55
0
import numpy as np

def zmodel(u, zmax, zmax_pfit):
    return zmax + np.polyval(zmax_pfit, zmax) * u**3
コード例 #56
0
ファイル: signaltools.py プロジェクト: chris1869/scipy
def residue(b, a, tol=1e-3, rtype='avg'):
    """
    Compute partial-fraction expansion of b(s) / a(s).

    If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
    expansion H(s) is defined as::

              b(s)     b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
      H(s) = ------ = ----------------------------------------------
              a(s)     a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]

               r[0]       r[1]             r[-1]
           = -------- + -------- + ... + --------- + k(s)
             (s-p[0])   (s-p[1])         (s-p[-1])

    If there are any repeated roots (closer together than `tol`), then H(s)
    has terms like::

            r[i]      r[i+1]              r[i+n-1]
          -------- + ----------- + ... + -----------
          (s-p[i])  (s-p[i])**2          (s-p[i])**n

    Returns
    -------
    r : ndarray
        Residues.
    p : ndarray
        Poles.
    k : ndarray
        Coefficients of the direct polynomial term.

    See Also
    --------
    invres, numpy.poly, unique_roots

    """

    b, a = map(asarray, (b, a))
    rscale = a[0]
    k, b = polydiv(b, a)
    p = roots(a)
    r = p * 0.0
    pout, mult = unique_roots(p, tol=tol, rtype=rtype)
    p = []
    for n in range(len(pout)):
        p.extend([pout[n]] * mult[n])
    p = asarray(p)
    # Compute the residue from the general formula
    indx = 0
    for n in range(len(pout)):
        bn = b.copy()
        pn = []
        for l in range(len(pout)):
            if l != n:
                pn.extend([pout[l]] * mult[l])
        an = atleast_1d(poly(pn))
        # bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
        # multiplicity of pole at po[n]
        sig = mult[n]
        for m in range(sig, 0, -1):
            if sig > m:
                # compute next derivative of bn(s) / an(s)
                term1 = polymul(polyder(bn, 1), an)
                term2 = polymul(bn, polyder(an, 1))
                bn = polysub(term1, term2)
                an = polymul(an, an)
            r[indx + m - 1] = polyval(bn, pout[n]) / polyval(an, pout[n]) \
                          / factorial(sig - m)
        indx += sig
    return r / rscale, p, k
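
A hand-checked usage example (scipy.signal.residue implements this function): H(s) = 1/((s+1)(s+2)) expands to 1/(s+1) - 1/(s+2):

from scipy.signal import residue

r, p, k = residue([1.0], [1.0, 3.0, 2.0])
print(r)  # residues: 1 at the pole s = -1, -1 at s = -2 (ordering may vary)
print(p)  # poles: the roots of s**2 + 3s + 2
print(k)  # no direct polynomial term for this strictly proper fraction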
Code Example #57
0
    def _interpolatedYield(self, t):
        yld = np.polyval(self._coeffs, t)
        return yld
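
The snippet gives no context for self._coeffs; a plausible (hypothetical) setup is a polynomial fitted to observed (maturity, yield) pairs with np.polyfit:

import numpy as np

maturities = np.array([0.5, 1.0, 2.0, 5.0, 10.0])    # years (made-up data)
ylds = np.array([0.010, 0.012, 0.015, 0.020, 0.025]) # observed yields (made-up)
coeffs = np.polyfit(maturities, ylds, deg=3)  # would play the role of self._coeffs
print(np.polyval(coeffs, 3.0))                # interpolated 3-year yield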
Code Example #58
0
# python3
import numpy as np

A = np.array(input().split(), float)
B = float(input())
print(np.polyval(A, B))
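
Hand-checked example: for the input lines "1.1 2 3" and "0", the script prints 3.0, since np.polyval evaluates 1.1*0**2 + 2*0 + 3.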
Code Example #59
0
File: ip_conv.py Project: zenandrea/qmcpack
    elif 'q1' in file:
        res.q1 = r
    #end if
#end for

if len(res)==2:
    q0 = res.q0
    q1 = res.q1

    ts    = q0.timesteps
    ip    = q1.energies-q0.energies
    iperr = sqrt(q1.errs**2+q0.errs**2)

    tfit  = linspace(0,ts.max(),200)

    q0fit = polyval(polyfit(ts,q0.energies,1),tfit)
    q1fit = polyval(polyfit(ts,q1.energies,1),tfit)
    ipfit = polyval(polyfit(ts,ip,1),tfit)


    print()
    print()
    print('Total energy (q=0, eV)')
    for i in range(len(ts)):
        print('  {0:<6.4f}  {1:<6.4f} +/- {2:<6.4f}'.format(ts[i],q0.energies[i],q0.errs[i]))
    #end for
    print('----------------------------------')
    print('  {0:<6.4f}  {1:<6.4f}'.format(0.00,q0fit[0]))

    print()
    print()
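
The fit-and-extrapolate step above reduces to np.polyfit/np.polyval: fit the energy linearly in the timestep and read off the timestep -> 0 limit. A minimal self-contained sketch with synthetic (made-up) data:

import numpy as np

ts = np.array([0.04, 0.02, 0.01, 0.005])         # DMC timesteps
E = np.array([-10.52, -10.48, -10.46, -10.45])   # energies, synthetic values
coeff = np.polyfit(ts, E, 1)                     # linear fit in the timestep
print("E(dt -> 0) = %.4f" % np.polyval(coeff, 0.0))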
Code Example #60
0
def main():
    global Icq1List
    global Icq2List
    global PAcurrentList
    # Configure path loss
    lossName = PM.query("CONFigure:BASE:FDCorrection:CTABle:CATalog?")
    time.sleep(1)
    if lossName.find("CMW_loss") != -1:
        PM.write("CONFigure:BASE:FDCorrection:CTABle:DELete 'CMW_loss'")
    PM.write(
        "CONFigure:BASE:FDCorrection:CTABle:CREate 'CMW_loss', 1920000000, 0.8, 1980000000, 0.8, \
             2110000000, 0.8, 2170000000, 0.8, 1850000000, 0.8, 1910000000, 0.8, 1930000000, 0.8, 1990000000, 0.8,\
             824000000, 0.5, 849000000, 0.5, 869000000, 0.5, 894000000, 0.5, 925000000, 0.5, 960000000, 0.5, \
             880000000, 0.5, 915000000, 0.5, 2350000000, 0.9, 2535000000, 0.9, 2593000000, 0.9"
    )
    PM.write("CONFigure:FDCorrection:ACTivate RF1C, 'CMW_loss', RXTX, RF1"
             )  # apply the Tx/Rx-direction loss on the RF1 COM port
    PM.write("CONFigure:FDCorrection:ACTivate RF1O, 'CMW_loss', TX, RF1"
             )  # apply the Tx-direction loss on the RF1 OUT port

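    # Build the PA bias scan lists. Each entry in PAcurrentList packs two
    # 8-bit bias codes into one word: PAcurrent = Icq1 * 256 + Icq2.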
    if PAcurrentGenerateMode == 0:
        Icq1 = int(PAcurrentInitial / 256)
        Icq2 = int(PAcurrentInitial - Icq1 * 256)
        Icq1List = []
        Icq2List = []
        for ii in range(-LeftExtendPeriodIcq1, RightExtendPeriodIcq1 + 1):
            Temp_Icq1 = list(
                range(Icq1 + ii * period - ScanRangeIcq1,
                      Icq1 + ii * period + ScanRangeIcq1 + 1))
            Icq1List.extend(Temp_Icq1)
        for ii in range(-LeftExtendPeriodIcq2, RightExtendPeriodIcq2 + 1):
            Temp_Icq2 = list(
                range(Icq2 + ii * period - ScanRangeIcq2,
                      Icq2 + ii * period + ScanRangeIcq2 + 1))
            Icq2List.extend(Temp_Icq2)
    if PAcurrentGenerateMode < 2:
        PAcurrentList = zeros(len(Icq1List) * len(Icq2List))
        for ii in range(len(Icq1List)):
            for jj in range(len(Icq2List)):
                PAcurrentList[ii + jj * len(Icq1List)] = (Icq1List[ii] * 256 +
                                                          Icq2List[jj])
        PAcurrentList = list(map(int, PAcurrentList))
        print("Icq1List is %s" % Icq1List)
        print("Icq2List is %s" % Icq2List)
    if PAcurrentListExtendFlag == 1:
        PAcurrentList.extend(PAcurrentListExtend)
    print("PAcurrentList length is %d" % (len(PAcurrentList)))
    print("PAcurrentList is %s" % PAcurrentList)

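    # Pick the channel sweep for the selected mode (0/1: LTE FDD/TDD, 2: WCDMA).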
    if ModeChosen in [0, 1]:
        ChannelList = channelLteList(bandChosen, LteBW, ScanType)
    elif ModeChosen == 2:
        ChannelList = channel3GList(bandChosen, ScanType)
    if ReadCurrentFlag == 0:
        print("expected test time is %3.1f minutes" % float(
            len(PAcurrentList) * len(RGIlist) * len(ChannelList) * 16 / 60))
    else:
        print("Reading current needed, expected test time is %3.1f minutes" %
              float(len(PAcurrentList) * len(RGIlist) * len(ChannelList)))

#########################   Record the screen position of each control   #########################
#########################   Do not touch the mouse or keyboard during the test   #################
    mouse = PyMouse()
    key = PyKeyboard()
    input("Move mouse to 'Tear Down' then press Enter")
    (IsTearDownX, IsTearDownY) = mouse.position()  # capture current cursor position
    print(IsTearDownX, IsTearDownY)
    input("Move mouse to 'Tx Channel' then press Enter")
    (TxChannelX, TxChannelY) = mouse.position()  # capture current cursor position
    print(TxChannelX, TxChannelY)
    input("Move mouse to 'Set Radio Config' then press Enter")
    (SetRadioConfigX, SetRadioConfigY) = mouse.position()  # capture current cursor position
    print(SetRadioConfigX, SetRadioConfigY)
    input("Move mouse to 'RGI' then press Enter")
    (RGIx, RGIy) = mouse.position()  # capture current cursor position
    print(RGIx, RGIy)
    input("Move mouse to 'PA current' then press Enter")
    (PAcurrentX, PAcurrentY) = mouse.position()  # capture current cursor position
    print(PAcurrentX, PAcurrentY)
    input("Move mouse to 'Tx Override' then press Enter")
    (TxOverrideX, TxOverrideY) = mouse.position()  # capture current cursor position
    print(TxOverrideX, TxOverrideY)
    #########################   Record the screen position of each control   #########################
    #########################   Do not touch the mouse or keyboard during the test   #################

    TimeStart = time.perf_counter()
    print("test start. Please check instrument parameters")
    interpolateTemp = numpy.zeros([len(RGIlist), 4])  # buffer for fitting/interpolation, to ease comparison

    for ii in range(len(bandChosen)):
        if ModeChosen == 0:
            PMwrite(PM, "CONFigure:LTE:MEAS:DMODe FDD")
        elif ModeChosen == 1:
            PMwrite(PM, "CONFigure:LTE:MEAS:DMODe TDD")
        if ModeChosen in [0, 1]:
            PMwrite(PM, "ROUTe:LTE:MEAS:SCENario:SALone RF1C, RX1")
            PMwrite(PM, "CONFigure:LTE:MEAS:BAND %s" % bandChosen[ii])
            PMwrite(
                PM, "CONFigure:LTE:MEAS:RFSettings:PCC:FREQuency %dCH" %
                ChannelList[ii][0])
            PMwrite(PM, "CONFigure:LTE:MEAS:MEValuation:REPetition SINGleshot")
            PMwrite(PM, "CONFigure:LTE:MEAS:MEValuation:MSUBframes 0, 10, 2")
            PMwrite(PM, "CONFigure:LTE:MEAS:PCC:CBANdwidth %s" % LteBW)
            PMwrite(
                PM,
                "TRIGger:LTE:MEAS:MEValuation:SOURce 'Free Run (Fast Sync)'")
            PMwrite(PM, "CONFigure:LTE:MEAS:RFSettings:ENPMode MANual")
            PMwrite(PM, "CONFigure:LTE:MEAS:RFSettings:ENPower 24")
            PMwrite(PM, "CONFigure:LTE:MEAS:RFSettings:UMARgin 12")
            PMwrite(PM, "CONFigure:LTE:MEAS:MEValuation:MOEXception ON")
        elif ModeChosen == 2:
            PMwrite(PM, "ROUTe:WCDMa:MEAS:SCENario:SALone RF1C, RX1")
            PMwrite(PM, "CONFigure:WCDMa:MEAS:BAND %s" % bandChosen[ii])
            PMwrite(
                PM, "CONFigure:WCDMa:MEAS:RFSettings:FREQuency %d CH" %
                ChannelList[ii][0])
            PMwrite(PM,
                    "CONFigure:WCDMa:MEAS:MEValuation:REPetition SINGleshot")
            PMwrite(PM,
                    "CONFigure:WCDMa:MEAS:MEValuation:SCOunt:MODulation 10")
            PMwrite(PM, "CONFigure:WCDMa:MEAS:MEValuation:SCOunt:SPECtrum 10")
            PMwrite(PM, "CONFigure:WCDMa:MEAS:MEValuation:MOEXception ON")
            PMwrite(
                PM,
                "TRIGger:WCDMa:MEAS:MEValuation:SOURce 'Free Run (Fast Sync)'")
            PMwrite(PM, "CONFigure:WCDMA:MEAS:RFSettings:ENPower 24")
            PMwrite(PM, "CONFigure:WCDMA:MEAS:RFSettings:UMARgin 12")
        os.system("pause")

        time.sleep(1)
        for jj in range(len(ChannelList[ii])):
            mouse.click(IsTearDownX, IsTearDownY, 1)
            mouse.click(SetRadioConfigX, SetRadioConfigY, 1)
            time.sleep(3)
            mouse.click(IsTearDownX, IsTearDownY, 1)
            mouse.click(TxChannelX, TxChannelY, 1, 2)
            key.type_string(str(ChannelList[ii][jj]))
            mouse.click(SetRadioConfigX, SetRadioConfigY, 1)
            time.sleep(0.2)

            if ModeChosen in [0, 1]:
                PMwrite(
                    PM, "CONFigure:LTE:MEAS:RFSettings:PCC:FREQuency %dCH" %
                    ChannelList[ii][jj])
            elif ModeChosen == 2:
                PMwrite(
                    PM, "CONFigure:WCDMa:MEAS:RFSettings:FREQuency %d CH" %
                    ChannelList[ii][jj])
            time.sleep(1)

            for kk in range(len(PAcurrentList)):
                mouse.click(PAcurrentX, PAcurrentY, 1, 2)
                key.type_string(str(PAcurrentList[kk]))
                time.sleep(0.2)
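                # Unpack the bias word back into its two 8-bit codes for
                # logging: Icqq1 is the high byte, Icqq2 the low byte.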
                Icqq1 = numpy.floor(PAcurrentList[kk] / 256)
                Icqq2 = PAcurrentList[kk] - Icqq1 * 256
                TestFlag = 1
                for ll in range(len(RGIlist)):
                    mouse.click(RGIx, RGIy, 1, 2)
                    key.type_string(str(RGIlist[ll]))
                    time.sleep(0.1)
                    mouse.click(TxOverrideX, TxOverrideY, 1)
                    time.sleep(0.1)

                    try:
                        if ModeChosen in [0, 1]:
                            PM.write("ABORt:LTE:MEAS:MEValuation")
                            PM.write("INIT:LTE:MEAS:MEValuation")
                            time.sleep(0.7)
                            LteAclr = PMquery(
                                PM, "FETCh:LTE:MEAS:MEValuation:ACLR:AVERage?"
                            )  # read the UE transmit power
                            LteAclrList = list(map(float, LteAclr.split(',')))
                            UEPwr = LteAclrList[4]
                            ChPwr = LteAclrList[4]
                            AdjCLRn = LteAclrList[3]
                            AdjCLRp = LteAclrList[5]
                        elif ModeChosen == 2:
                            PMwrite(PM, "ABORt:WCDMa:MEAS:MEValuation")
                            WAclrList = PMqueryWithDelay(
                                PM,
                                "READ:WCDMa:MEAS:MEValuation:SPECtrum:AVERage?"
                            ).split(',')
                            UEPwr = float(WAclrList[15])
                            ChPwr = float(WAclrList[1])
                            AdjCLRn = ChPwr - float(WAclrList[3])
                            AdjCLRp = ChPwr - float(WAclrList[4])
                    except Exception:
                        print("TxPwr underdriven or overdriven, ignored")
                        TestFlag = -1
                        break
                    if (AdjCLRn < 30 and ChPwr < 23):
                        print("ACLR too bad, ignored")
                        TestFlag = 0
                        break

                    if TestFlag == 1:
                        interpolateTemp[ll][0] = AdjCLRn
                        interpolateTemp[ll][1] = ChPwr
                        interpolateTemp[ll][2] = AdjCLRp
                        if ReadCurrentFlag == 1:
                            if ReadCurrentWaitTime == "Manual":
                                os.system("pause")
                            else:
                                time.sleep(int(ReadCurrentWaitTime))
                            if ModeChosen == 1:
                                CurrentLength = 40
                            else:
                                CurrentLength = 15
                            Current = numpy.zeros(CurrentLength)
                            for k in range(CurrentLength):
                                Current[k] = float(
                                    PMquery(PM_DCsupply, "MEASure:CURRent?"))
                                print(Current[k])
                                time.sleep(0.1 + numpy.random.rand() * 0.3)
                            if ModeChosen == 1:
                                temp_current = sorted(Current)
                                interpolateTemp[ll][3] = average(
                                    temp_current[2:(CurrentLength - 2)])
                            else:
                                interpolateTemp[ll][3] = average(Current)
                        else:
                            interpolateTemp[ll][3] = 0.001
                        print("%-4s %d %s %s %3d %3d %3.2f %3.2f %3.2f %1.3f" %(bandChosen[ii], ChannelList[ii][jj],\
                            RGIlist[ll],PAcurrentList[kk],Icqq1, Icqq2, AdjCLRn,ChPwr,AdjCLRp,interpolateTemp[ll][3]))

                    elif TestFlag == -1:
                        print("%-4s %d %s %s %3d %3d underdriven or overdriven, ignored" %(bandChosen[ii], ChannelList[ii][jj],\
                            RGIlist[ll],PAcurrentList[kk],Icqq1, Icqq2))
                    else:
                        print("%-4s %d %s %s %3d %3d ACLR too bad, ignored" %(bandChosen[ii], ChannelList[ii][jj],\
                            RGIlist[ll],PAcurrentList[kk],Icqq1, Icqq2))

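                # Fit ACLR and supply current as polynomials of channel power
                # across the RGI sweep, then evaluate them at the target
                # power IpNP.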
                if TestFlag == 1:
                    Tmp1 = numpy.polyfit(interpolateTemp[:, 1],
                                         interpolateTemp[:, 0],
                                         len(RGIlist) - 1)
                    Tmp2 = numpy.polyfit(interpolateTemp[:, 1],
                                         interpolateTemp[:, 2],
                                         len(RGIlist) - 1)
                    Tmp3 = numpy.polyfit(interpolateTemp[:, 1],
                                         interpolateTemp[:, 3],
                                         len(RGIlist) - 1)
                    interpolateAclrLeft = numpy.polyval(Tmp1, IpNP)
                    interpolateAclrRight = numpy.polyval(Tmp2, IpNP)
                    interpolateCurrent = numpy.polyval(Tmp3, IpNP)
                    print("%-4s %d -- %s %3d %3d %3.2f %3.1f %3.2f %1.3f" %(bandChosen[ii], ChannelList[ii][jj],\
                        PAcurrentList[kk],Icqq1, Icqq2, interpolateAclrLeft,IpNP,interpolateAclrRight, interpolateCurrent))
                    LogfileWrite(LogFile, "%-4s %d -- %s %3d %3d %3.2f %3.1f %3.2f %1.3f\n" %(bandChosen[ii], ChannelList[ii][jj],\
                        PAcurrentList[kk],Icqq1, Icqq2, interpolateAclrLeft,IpNP,interpolateAclrRight, interpolateCurrent))
                elif TestFlag == -1:
                    print("%-4s %d -- %s %3d %3d --   %3.1f   --" %(bandChosen[ii], ChannelList[ii][jj],\
                        PAcurrentList[kk],Icqq1, Icqq2, IpNP))
                    LogfileWrite(LogFile, "%-4s %d -- %s %3d %3d --   %3.1f   --\n" %(bandChosen[ii], ChannelList[ii][jj],\
                        PAcurrentList[kk],Icqq1, Icqq2, IpNP))
                else:
                    print("%-4s %d -- %s %3d %3d bad  %3.1f  bad" %(bandChosen[ii], ChannelList[ii][jj],\
                        PAcurrentList[kk],Icqq1, Icqq2, IpNP))
                    LogfileWrite(LogFile, "%-4s %d -- %s %3d %3d bad  %3.1f  bad\n" %(bandChosen[ii], ChannelList[ii][jj],\
                        PAcurrentList[kk],Icqq1, Icqq2, IpNP))


############################## All test end  ##########################
    TimeEnd = time.perf_counter()
    print("The total test time is %.1f minutes" % ((TimeEnd - TimeStart) / 60))
    endtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print("Test finished at %s" % endtime)
    LogfileWrite(LogFile, "Test finished at %s\n\n" % endtime)