Example #1
def polyFitToCorr(zs, corrs, pnum):
    pfit = scipy.polyfit(zs, corrs, pnum)
    evalpfit = scipy.polyval(pfit, zs)
    newzs = scipy.linspace(zs[0], zs[-1], len(zs) * 10)
    neweval = scipy.polyval(pfit, newzs)
    w = scipy.argmax(neweval)
    return evalpfit, newzs[w]
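A minimal usage sketch of the same peak-refinement idea, written with the NumPy equivalents (the top-level scipy.polyfit/polyval aliases used above were deprecated and later removed from SciPy); the correlation curve below is made up and peaks near z = 0.3.

import numpy as np

# Made-up correlation curve with a maximum near z = 0.3
zs = np.linspace(0.0, 1.0, 21)
corrs = 1.0 - (zs - 0.3) ** 2 + np.random.normal(0.0, 0.01, zs.size)

# Fit a low-order polynomial, resample it on a 10x finer grid and take the
# argmax as the refined peak position, as polyFitToCorr does above
pfit = np.polyfit(zs, corrs, 2)
newzs = np.linspace(zs[0], zs[-1], len(zs) * 10)
best_z = newzs[np.argmax(np.polyval(pfit, newzs))]
print(best_z)  # close to 0.3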
Example #2
def scat_flat(d,c_ob,c_co):
    Centers_co = np.zeros((len(c_co),d.shape[1]))
    Centers_ob = np.zeros((len(c_ob),d.shape[1]))
    ejx = np.arange(d.shape[1])
    ejy = np.arange(d.shape[0])
    for i in range(len(c_co)):
        Centers_co[i,:]=np.around(scipy.polyval(c_co[i],ejx)).astype('int')
        Centers_ob[i,:]=np.around(scipy.polyval(c_ob[i],ejx)).astype('int')
    i = 0
    while i < d.shape[1]:
        line = d[:,i]
        refmins = []
        valmins = []
        for j in range(len(Centers_co)):
            # slice indices must be plain ints
            p1 = int(Centers_ob[j,i])
            p2 = int(Centers_co[j,i])
            minv = line[p1-10:p2+10]
            refv = ejy[p1-10:p2+10]
            im = np.argmin(minv)
            refmins.append(refv[im])
            valmins.append(minv[im])
        refmins,valmins = np.array(refmins),np.array(valmins)
        plot(line)
        plot(refmins,valmins,'ro')
        show()
        break  # inspect only the first column
Example #3
    def plot_separation(self, i, snp, sep, data):
        '''Plot separation of IBD clique at sample i, SNP snp.'''
        parents = [
            self.ped.sample_id[self.ped.graph.predecessors(i)[a]]
            for a in im.constants.ALLELES
        ]
        print data
        print 'data[0]', data[0]
        print 'data[1]', data[1]
        (k00, k10, k01, k11), line = data[0:2]

        P.figure(1)
        P.clf()
        P.hold(True)
        # P.scatter([p], [q], color='k')
        P.scatter(k00, k10, color='r')
        P.scatter(k01, k11, color='b')
        if line is not None:
            a0, b0, a1, b1 = line
            t = linspace(0, max(k00))
            P.plot(t, polyval([a0, b0], t), 'r')
            t = linspace(0, max(k01))
            P.plot(t, polyval([a1, b1], t), 'b')
        P.title('Sample %d, SNP %d, inbreeding=%.4f, separation=%.2f' %
                (i, snp, self.params.kinship(parents[0], parents[1]), sep))
        P.show()
        P.savefig(os.environ['OBER'] + '/doc/poo/sep-chr%d-%d-%d.png' %
                  (self.chrom, i, snp))
Example #4
    def x_do_contact(self, args):
        """
        DEBUG COMMAND to be activated in the future
        """
        xext, ysub, contact = self.find_contact_point2(debug=True)

        contact_plot = self.plots[0]
        contact_plot.add_set(xext, ysub)
        contact_plot.add_set([xext[contact]], [self.plots[0].vectors[0][1][contact]])
        # contact_plot.add_set([first_point[0]],[first_point[1]])
        # contact_plot.add_set([last_point[0]],[last_point[1]])
        contact_plot.styles = [None, None, None, "scatter"]
        self._send_plot([contact_plot])
        return

        index, regr, regr_contact = self.find_contact_point2(debug=True)
        print regr
        print regr_contact
        raw_plot = self.current.curve.default_plots()[0]
        xret = raw_plot.vectors[0][0]
        # nc_line=[(item*regr[0])+regr[1] for item in x_nc]
        nc_line = scipy.polyval(regr, xret)
        c_line = scipy.polyval(regr_contact, xret)

        contact_plot = self.current.curve.default_plots()[0]
        contact_plot.add_set(xret, nc_line)
        contact_plot.add_set(xret, c_line)
        contact_plot.styles = [None, None, None, None]
        # contact_plot.styles.append(None)
        contact_plot.destination = 1
        self._send_plot([contact_plot])
Example #5
def _qfunc_pure(psi, alpha_vec):
    """
    Calculate the Q-function for a pure state.
    """
    n = np.prod(psi.shape)

    # Gengyan: maximum number for which factorial() is used
    nmax = 170

    if isinstance(psi, Qobj):
        psi = psi.full().flatten()
    else:
        psi = psi.T

    if n < nmax:
        qvec = abs(
            polyval(
                fliplr([psi / sqrt(factorial(arange(n)))])[0],
                conjugate(alpha_vec)))**2 * exp(-abs(alpha_vec)**2)
    else:
        # Gengyan: for m < nmax, use factorial()
        qvec = polyval(
            fliplr([psi[0:nmax] / sqrt(factorial(arange(nmax)))])[0],
            conjugate(alpha_vec)) * exp(-abs(alpha_vec)**2 / 2)
        # Gengyan: for m >= nmax, use Stirling's approximation
        for m in range(nmax, n):
            qvec += (conjugate(alpha_vec)/sqrt(m))**m*psi[m] * \
                exp((m-abs(alpha_vec)**2)/2)*(2*pi*m)**(-0.25)
        qvec = abs(qvec)**2

    return np.real(qvec) / pi
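A self-contained numerical check of the n < nmax branch, not taken from QuTiP: for a coherent state |a0> the Q-function has the closed form exp(-|alpha - a0|^2)/pi, so the polynomial-evaluation recipe above can be verified against it (the truncation N = 60 is an arbitrary illustrative choice).

import numpy as np
from scipy.special import factorial

# Coherent state |a0> in the Fock basis, truncated at N terms
a0, N = 1.0 + 0.5j, 60
n = np.arange(N)
psi = np.exp(-abs(a0) ** 2 / 2) * a0 ** n / np.sqrt(factorial(n))

# Q(alpha) = |sum_n psi_n conj(alpha)^n / sqrt(n!)|^2 * exp(-|alpha|^2) / pi,
# evaluated with increasing-order coefficients (hence no fliplr needed here)
alpha = np.array([0.2 + 0.1j, 1.0 + 0.5j, -0.5j])
coeffs = psi / np.sqrt(factorial(n))
q = np.abs(np.polynomial.polynomial.polyval(np.conjugate(alpha), coeffs)) ** 2 \
    * np.exp(-np.abs(alpha) ** 2) / np.pi

print(q)
print(np.exp(-np.abs(alpha - a0) ** 2) / np.pi)  # should agree closely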
Example #6
def time_fitting(x_fit,y_fit):
    """Fit a linear relation to the x_fit and y_fit parameters

    Returns the actual fit and the parameters of the fit.
    """
    import numpy as np
    x_fit = np.array( x_fit )
    y_fit = np.array( y_fit )

    ###First fit iteration and remove outliers
    POLY_FIT_ORDER = 1

    slope,intercept = scipy.polyfit(x_fit,y_fit,POLY_FIT_ORDER)
    fit = scipy.polyval((slope,intercept),x_fit)
    fit_sigma = fit.std()
    include_index = np.where(np.abs(fit-y_fit) < 1.5*fit_sigma)[0]

    if len(include_index) < 4:
        return None,None,False

    ###Final Fit
    x_fit_clipped = x_fit[include_index]
    y_fit_clipped = y_fit[include_index]

    parameters = scipy.polyfit(x_fit_clipped,y_fit_clipped,POLY_FIT_ORDER)
    fit = scipy.polyval(parameters,x_fit)

    return fit,parameters,True
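A made-up usage sketch of the same clip-and-refit idea, using np.polyfit/np.polyval; unlike time_fitting above, the clipping threshold here is taken from the scatter of the residuals rather than from fit.std().

import numpy as np

x = np.arange(20, dtype=float)
y = 2.0 * x + 1.0 + np.random.normal(0.0, 0.2, x.size)
y[5] += 10.0  # one strong outlier

# First pass: fit everything, then keep points with residuals under 1.5 sigma
slope, intercept = np.polyfit(x, y, 1)
resid = y - np.polyval((slope, intercept), x)
keep = np.abs(resid) < 1.5 * resid.std()

# Final pass: refit on the clipped points, evaluate on the full x range
parameters = np.polyfit(x[keep], y[keep], 1)
fit = np.polyval(parameters, x)
print(parameters)  # roughly (2.0, 1.0)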
Example #7
def LinearAdjust():
    from scipy import polyfit
    from scipy import polyval
    from sklearn import metrics

    global PredictedMsdRange1
    global ScoreR2Range1
    global TotalMsdAdjustRange1

    global PredictedMsdRange2
    global ScoreR2Range2
    global TotalMsdAdjustRange2

    global PredictedMsdRange3
    global ScoreR2Range3
    global TotalMsdAdjustRange3

    TotalMsdAdjustRange1 = polyfit(TimeRange1, TotalMsdRange1, 1)
    PredictedMsdRange1 = polyval(TotalMsdAdjustRange1, TimeRange1)
    ScoreR2Range1 = metrics.r2_score(TotalMsdRange1, PredictedMsdRange1)
    print("The total msd linear regression was done in range 1, r2 = " +
          str(ScoreR2Range1))

    TotalMsdAdjustRange2 = polyfit(TimeRange2, TotalMsdRange2, 1)
    PredictedMsdRange2 = polyval(TotalMsdAdjustRange2, TimeRange2)
    ScoreR2Range2 = metrics.r2_score(TotalMsdRange2, PredictedMsdRange2)
    print("The total msd linear regression was done in range 2, r2 = " +
          str(ScoreR2Range2))

    TotalMsdAdjustRange3 = polyfit(TimeRange3, TotalMsdRange3, 1)
    PredictedMsdRange3 = polyval(TotalMsdAdjustRange3, TimeRange3)
    ScoreR2Range3 = metrics.r2_score(TotalMsdRange3, PredictedMsdRange3)
    print("The total msd linear regression was done in range 3, r2 = " +
          str(ScoreR2Range3))
Example #8
def test_scipy():
    #Sample data creation
    #number of points
    n=50
    t=linspace(-5,5,n)
    #parameters
    a=0.8; b=-4
    x=polyval([a,b],t)
    #add some noise
    xn=x+randn(n)

    #Linear regression with polyfit - polyfit can also fit higher-order polynomials
    (ar,br)=polyfit(t,xn,1)
    xr=polyval([ar,br],t)
    #compute the root mean square error
    err=sqrt(sum((xr-xn)**2)/n)

    print('Linear regression using polyfit')
    print('parameters: a=%.2f b=%.2f \nregression: a=%.2f b=%.2f, ms error= %.3f' % (a,b,ar,br,err))

    #matplotlib plotting
    title('Linear Regression Example')
    plot(t,x,'g.--')
    plot(t,xn,'k.')
    plot(t,xr,'r.-')
    legend(['original','plus noise', 'regression'])

    show()

    #Linear regression using stats.linregress
    (a_s,b_s,r,tt,stderr)=stats.linregress(t,xn)
    print('Linear regression using stats.linregress')
    print('parameters: a=%.2f b=%.2f \nregression: a=%.2f b=%.2f, std error= %.3f' % (a,b,a_s,b_s,stderr))
Example #9
def main(FileName):
    
    for trial in kep.iofiles.passFileToTrialList(FileName):
        tr = kep.pipelinepars.keptrial(trial)
        kw = kep.keplc.kw(**tr.kw)
        X = kep.keplc.keplc(tr.kid)
        X.runPipeline(kw)
        lc = X.lcData.lcData
        Y = kep.qats.qatslc(X.lcFinal,X.KID)
        idx = num.where((X.lcFinal['eMask'] == False))[0]
        Y.padLC(flagids=idx)
        Y.addNoise()
        Y.runQATS(f=0.01)
        
        P,ignored = calcPeriodogram(Y.periods, lc['x'], lc['ydt'], lc['yerr']) ### ADDED ###
                
        coeff = num.polyfit(num.log10(Y.periods),num.log10(Y.snrLC),1)
        outy = scipy.polyval(coeff,num.log10(Y.periods))
        normalizedPower = 10**(outy)
                
        coeff2 = num.polyfit(num.log10(Y.periods),num.log10(Y.snrFLAT),1) ### ADDED ###
        outy2 = scipy.polyval(coeff2,num.log10(Y.periods)) ### ADDED ###
        normalizedPower2 = 10**(outy2) ### ADDED ###
                
        fittedr=Y.snrFLAT/normalizedPower2 ### ADDED ###
        fittedb=Y.snrLC/normalizedPower2 ### ADDED ###
        normalizedPower3 = normalizedPower2/normalizedPower2 ### ADDED ###
                
        plt.subplot(211) ### ADDED ###
        plt.title('unflipped,'+' KID = '+X.KID)
        plt.plot(Y.periods,Y.snrFLAT/normalizedPower,'r-') 
        plt.plot(Y.periods,Y.snrLC/normalizedPower,'b-')
        plt.plot(Y.periods,normalizedPower/normalizedPower,'k-')
                #plt.plot(Y.periods,normalizedPower2, 'k-') ### ADDED ###
        plt.setp(plt.gca().set_xscale('log'))
        plt.ylabel('Signal Power')
        plt.subplot(212)
        plt.plot(Y.periods,P,'g-') ### ADDED ###
        plt.setp(plt.gca().set_xscale('log')) ### ADDED ###
        plt.xlabel('Period (days)') ### MOVED ###
        plt.ylabel('F Transform')
        plt.savefig('sn.SG0XX.unflipped.'+X.KID+'.png') ### MOVED ###
        #coeff = num.polyfit(num.log10(Y.periods),num.log10(Y.snrLC),1)
        #outy = scipy.polyval(coeff,num.log10(Y.periods))
        #normalizedPower = 10**(outy)
        
        #plt.plot(Y.periods,Y.snrFLAT,'r-') 
        #plt.plot(Y.periods,Y.snrLC,'b-')
        #plt.plot(Y.periods,normalizedPower,'k-')
        #plt.setp(plt.gca().set_xscale('log'))
        #plt.savefig('sn.'+X.KID+'.png')
        
        dfile = open('signal.'+X.KID+'.data','w')
        print >> dfile,'#',X.KID,'|', Y.periods[num.argmax(Y.snrLC/normalizedPower)],\
                Y.periods[num.argmax(Y.snrLC)], max(Y.snrLC/normalizedPower), max(Y.snrLC)
        for i in range(len(Y.SignalPower)):
            print >> dfile, Y.periods[i],'|',Y.snrLC[i],'|',\
                            Y.snrFLAT[i],'|',normalizedPower[i]
        dfile.close()
Example #10
def spl3(x, y, x2, s=3):
    polycoeffs = array(scipy.polyfit(x, y, s)).flatten()

    y2 = scipy.polyval(polycoeffs, x2)
    dercoeffs = scipy.polyder(polycoeffs, 1)

    dy2 = scipy.polyval(dercoeffs, x2)
    return y2, dy2
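A short sketch of the same value-plus-derivative evaluation with the NumPy equivalents (np.polyfit, np.polyval, np.polyder); the sine samples are illustrative.

import numpy as np

x = np.linspace(0.0, np.pi, 50)
y = np.sin(x)
x2 = np.linspace(0.0, np.pi, 200)

# Fit a cubic, then evaluate it and its first derivative on the finer grid,
# as spl3 does with the old scipy aliases
coeffs = np.polyfit(x, y, 3)
y2 = np.polyval(coeffs, x2)
dy2 = np.polyval(np.polyder(coeffs, 1), x2)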
Example #11
def go(
    mags=(15, 30.1, 0.5), redshifts=(0.01, 12, 0.01), coeff_file="prior_K_zmax7_coeff.dat", outfile="prior_K_extend.dat"
):

    fp = open(coeff_file)
    lines = fp.readlines()
    fp.close()

    mag_list = np.cast[float](lines[0].split()[2:])
    z0 = np.cast[float](lines[1].split()[1:])
    gamma = np.cast[float](lines[2].split()[1:])

    z_grid = np.arange(redshifts[0], redshifts[1], redshifts[2])
    NZ = z_grid.shape[0]

    mag_grid = np.arange(mags[0], mags[1], mags[2])
    NM = mag_grid.shape[0]

    #### Polynomial extrapolation not reliable
    # p_z0 = scipy.polyfit(mag_list, z0, order)
    # z0_grid = np.maximum(scipy.polyval(p_z0, mag_grid), 0.05)
    # p_gamma = scipy.polyfit(mag_list, gamma, order)
    # gamma_grid = np.maximum(scipy.polyval(p_gamma, mag_grid), 0.05)

    #### Interpolations on the defined grid
    z0_grid = np.interp(mag_grid, mag_list, z0)
    gamma_grid = np.interp(mag_grid, mag_list, gamma)

    #### Linear extrapolations of fit coefficients
    p_z0 = scipy.polyfit(mag_list[:3], z0[:3], 1)
    z0_grid[mag_grid < mag_list[0]] = np.maximum(scipy.polyval(p_z0, mag_grid[mag_grid < mag_list[0]]), 0.05)
    p_z0 = scipy.polyfit(mag_list[-3:], z0[-3:], 1)
    z0_grid[mag_grid > mag_list[-1]] = np.maximum(scipy.polyval(p_z0, mag_grid[mag_grid > mag_list[-1]]), 0.05)

    p_gamma = scipy.polyfit(mag_list[:3], gamma[:3], 1)
    gamma_grid[mag_grid < mag_list[0]] = np.maximum(scipy.polyval(p_gamma, mag_grid[mag_grid < mag_list[0]]), 0.05)
    p_gamma = scipy.polyfit(mag_list[-3:], gamma[-3:], 1)
    gamma_grid[mag_grid > mag_list[-1]] = np.maximum(scipy.polyval(p_gamma, mag_grid[mag_grid > mag_list[-1]]), 0.05)

    out_matrix = np.zeros((NZ, NM + 1))
    out_matrix[:, 0] = z_grid

    for i in range(NM):
        pz = z_grid * np.exp(-(z_grid / z0_grid[i]) ** gamma_grid[i])
        pz /= np.trapz(pz, z_grid)
        plt.plot(z_grid, pz, label=mag_grid[i])
        out_matrix[:, i + 1] = pz

    plt.legend(ncol=3, loc="upper right")

    header = "# z "
    for m in mag_grid:
        header += "%6.1f" % (m)

    fp = open(outfile, "w")
    fp.write(header + "\n")
    np.savetxt(fp, out_matrix, fmt="%6.3e")
    fp.close()
Example #12
def concat_extrap_ends(x, npts, polyorder=1, lowside=True, highside=True):
    i=numpy.arange(npts, dtype='float64')
    if lowside:
        ans=scipy.polyfit(-1*(i+1.), x[:npts], polyorder)
        x=numpy.concatenate([scipy.polyval(list(ans), i[::-1]), x])
    if highside:
        ans=scipy.polyfit(-1*(i[::-1]-1.), x[-1*npts:], polyorder)
        x=numpy.concatenate([x, scipy.polyval(list(ans), i)])
    return x    
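A self-contained sketch of the low-side branch above, assuming the NumPy equivalents; x here is an arbitrary smooth series and npts/polyorder are illustrative.

import numpy as np

x = np.linspace(0.0, 1.0, 20) ** 2
npts, polyorder = 5, 1

# Fit the first npts samples against positions -1, -2, ..., -npts, then
# evaluate the fit at positions npts-1, ..., 0 to synthesize npts samples
# ahead of the start of the series (same convention as concat_extrap_ends)
i = np.arange(npts, dtype=float)
low_fit = np.polyfit(-1 * (i + 1.0), x[:npts], polyorder)
x_ext = np.concatenate([np.polyval(low_fit, i[::-1]), x])
print(len(x_ext))  # 25 samples: 5 extrapolated + the original 20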
Example #13
    def anaAuxFunc(self, ep, points, n=-1):
        r"""
        Evaluate the analytic auxiliary functions reconstruction, based on g and h
        Taylor expansion at `points`.


        From the definitions of \(g\) and \(h\) the two eigenvalues can be written as 
        $$
        \begin{align}
        \lambda_{+} &= \frac{g+\sqrt{h}}{2}, \\
        \lambda_{-} &= \frac{g-\sqrt{h}}{2}.
        \end{align}
        $$
        
        These expressions can be approximated through the truncated Taylor series \( T_h\) and \(T_g\)
        $$
        \begin{align}
        {A}_{\lambda_{+}} &= \frac{T_g+\sqrt{T_h}}{2}, \\
        {A}_{\lambda_{-}} &= \frac{T_g-\sqrt{T_h}}{2}.
        \end{align}
        $$

        Parameters
        ----------
        ep: EP instance
            The EP instance that store h and g successive derivatives
        points: array-like
            the values where the series is evaluated. Give the absolute value, not the value relative to nu0.
        n: integer [optional]
            The number of terms considered in the expansion
            if no value is given or if n=-1, the size of the array dlda is considered.

        Returns
        -------
        ldap,ldam : array_like 
            the reconstructed eigenvalue pair
        """

        # compute the analytic auxiliary functions g and h from the lda derivatives
        ep._dh()
        ep._dg()
        if n > -1:
            # truncate
            dgTay = ep._dgTay[:n]
            dhTay = ep._dhTay[:n]
        else:
            dgTay = ep._dgTay
            dhTay = ep._dhTay

        # evaluate Taylor series
        mapg = sp.polyval(dgTay[::-1], points - self.nu0)
        maph = sp.polyval(dhTay[::-1], points - self.nu0)
        ldap = 0.5 * (mapg + np.sqrt(maph))
        ldam = 0.5 * (mapg - np.sqrt(maph))
        return ldap, ldam
Example #14
 def mNm_2_raw(self, calib, tq):
     #ticks: calibrated sensor value
     #calib: yaml config calibration map
     if self.ctype == 'sea_vertx_14bit':
         val = (tq - calib['cb_bias']) / calib['cb_scale']
         val = float(polyval(calib['cb_inv_torque'], val))
         return max(0, min(val, VERTX_14BIT_MAX))
     if self.ctype == 'adc_poly':
         val = (tq - calib['cb_bias']) / calib['cb_scale']
         val = float(polyval(calib['cb_inv_torque'], val))
         return max(0, min(val, M3EC_ADC_TICKS_MAX))
Example #15
def main():
    # Load JSON
    filename = raw_input('Name of Data file?')
    with open(filename) as f:
        data = json.load(f)

    # Extract (ID, time) points from data
    points = extract_id(data)

    # Fit line onto points
    coeff = polyfit(points[0], points[1], 1)

    # Calculate endpoints of line for drawing the line
    x1 = min(points[0])
    x2 = max(points[0])
    y1 = polyval(coeff, x1)
    y2 = polyval(coeff, x2)

    # Calculate throughput points (ID/time)
    throughput = ([], [])
    for i in range(len(points[0])):
        id1 = points[0][i]
        t = points[1][i]
        through = id1 * 1000 / t
        throughput[1].append(through)
        throughput[0].append(id1)
        
    # Calculate the ID/MT points. Sort by x-axis. Plot.
    coeff = polyfit(throughput[0], throughput[1], 1)    
    # Calculate endpoints of line for drawing the line
    x11 = min(throughput[0])
    x21 = max(throughput[0])
    y11 = polyval(coeff, x11)
    y21 = polyval(coeff, x21)

    # Draw everything
    plt.figure(num="Samples")
    plt.xlabel("Index of Difficulty")
    plt.ylabel("Movement Time (ms)")
    plt.xlim(0, max(points[0]) * 1.2)
    plt.ylim(0, max(points[1]) * 1.2)
    plt.plot(points[0], points[1], "bo", label="Linear Regression")
    plt.plot([x1,x2], [y1, y2], "r-")
    
    plt.figure(num="Throughput")
    plt.xlabel("Index of Difficulty")
    plt.ylabel("Throughput (bits/s)")
    plt.xlim(0, max(throughput[0]) * 1.2)
    plt.ylim(0, max(throughput[1]) * 1.2)
    plt.plot(throughput[0], throughput[1], "yo", label="Throughput")
    plt.plot([x11,x21], [y11, y21], "r")
 
    print("Regression coefficients: A={}, B={}".format(coeff[0] / 1000, coeff[1] / 1000))
    plt.show()
Example #16
    def calculo(self):
        self.entrada = self.kwargs["entrada"]
        self.rendimientoCalculado = Dimensionless(self.kwargs["rendimiento"])

        if self.kwargs["Pout"]:
            DeltaP = Pressure(self.kwargs["Pout"] - self.entrada.P)
        elif self.kwargs["deltaP"]:
            DeltaP = Pressure(self.kwargs["deltaP"])
        elif self.kwargs["Carga"]:
            DeltaP = Pressure(self.kwargs["Carga"] * self.entrada.Liquido.rho *
                              g)
        else:
            DeltaP = Pressure(0)

        if self.kwargs["usarCurva"]:
            if self.kwargs["diametro"] != self.kwargs["curvaCaracteristica"][
                    0] or self.kwargs["velocidad"] != self.kwargs[
                        "curvaCaracteristica"][1]:
                self.curvaActual = self.calcularCurvaActual()
            else:
                self.curvaActual = self.kwargs["curvaCaracteristica"]
            self.Ajustar_Curvas_Caracteristicas()

        if not self.kwargs["usarCurva"]:
            head = Length(DeltaP / g / self.entrada.Liquido.rho)
            power = Power(head * g * self.entrada.Liquido.rho *
                          self.entrada.Q / self.rendimientoCalculado)
            P_freno = Power(power * self.rendimientoCalculado)
        elif not self.kwargs["incognita"]:
            head = Length(polyval(self.CurvaHQ, self.entrada.Q))
            self.DeltaP = Pressure(head * g * self.entrada.Liquido.rho)
            power = Power(self.entrada.Q * DeltaP)
            P_freno = Power(polyval(self.CurvaPotQ, self.entrada.Q))
            self.rendimientoCalculado = Dimensionless(power / P_freno)
        else:
            head = Length(self.DeltaP / g / self.entrada.Liquido.rho)
            caudalvolumetrico = roots(
                [self.CurvaHQ[0], self.CurvaHQ[1], self.CurvaHQ[2] - head])[0]
            power = Power(caudalvolumetrico * self.DeltaP)
            self.entrada = Corriente(
                self.entrada.T, self.entrada.P.atm,
                caudalvolumetrico * self.entrada.Liquido.rho * 3600,
                self.entrada.mezcla, self.entrada.solido)
            P_freno = Power(polyval(self.CurvaPotQ, caudalvolumetrico))
            self.rendimientoCalculado = Dimensionless(power / P_freno)

        self.headCalculada = head
        self.power = power
        self.P_freno = P_freno
        self.salida = [self.entrada.clone(P=self.entrada.P + DeltaP)]
        self.Pin = self.entrada.P
        self.PoutCalculada = self.salida[0].P
        self.Q = self.entrada.Q.galUSmin
        self.volflow = self.entrada.Q
Example #17
	def mNm_2_raw(self,calib,tq):
		#ticks: calibrated sensor value
		#calib: yaml config calibration map
		if self.ctype=='sea_vertx_14bit':
			val=(tq-calib['cb_bias'])/calib['cb_scale']
			val=float(polyval(calib['cb_inv_torque'],val))
			return max(0,min(val,VERTX_14BIT_MAX))
		if self.ctype=='adc_poly':
			val=(tq-calib['cb_bias'])/calib['cb_scale']
			val=float(polyval(calib['cb_inv_torque'],val))
			return max(0,min(val,M3EC_ADC_TICKS_MAX))
Example #18
    def calculo(self):
        entrada = self.kwargs["entrada"]
        self.rendimientoCalculado = Dimensionless(self.kwargs["rendimiento"])

        if self.kwargs["Pout"]:
            DeltaP = Pressure(self.kwargs["Pout"] - entrada.P)
        elif self.kwargs["deltaP"]:
            DeltaP = Pressure(self.kwargs["deltaP"])
        elif self.kwargs["Carga"]:
            DeltaP = Pressure(self.kwargs["Carga"] * entrada.Liquido.rho * g)
        else:
            DeltaP = Pressure(0)

        if self.kwargs["usarCurva"]:
            b1 = self.kwargs["diametro"] != self.kwargs["curvaCaracteristica"][
                0]  # noqa
            b2 = self.kwargs["velocidad"] != self.kwargs[
                "curvaCaracteristica"][1]  # noqa
            if b1 or b2:
                self.curvaActual = self.calcularCurvaActual()
            else:
                self.curvaActual = self.kwargs["curvaCaracteristica"]
            self.Ajustar_Curvas_Caracteristicas()

        if not self.kwargs["usarCurva"]:
            head = Length(DeltaP / g / entrada.Liquido.rho)
            power = Power(head * g * entrada.Liquido.rho * entrada.Q /
                          self.rendimientoCalculado)
            P_freno = Power(power * self.rendimientoCalculado)
        elif not self.kwargs["incognita"]:
            head = Length(polyval(self.CurvaHQ, entrada.Q))
            DeltaP = Pressure(head * g * entrada.Liquido.rho)
            power = Power(entrada.Q * DeltaP)
            P_freno = Power(polyval(self.CurvaPotQ, entrada.Q))
            self.rendimientoCalculado = Dimensionless(power / P_freno)
        else:
            head = Length(self.DeltaP / g / entrada.Liquido.rho)
            poli = [self.CurvaHQ[0], self.CurvaHQ[1], self.CurvaHQ[2] - head]
            Q = roots(poli)[0]
            power = Power(Q * self.DeltaP)
            entrada = entrada.clone(split=Q / entrada.Q)
            P_freno = Power(polyval(self.CurvaPotQ, Q))
            self.rendimientoCalculado = Dimensionless(power / P_freno)

        self.deltaP = DeltaP
        self.headCalculada = head
        self.power = power
        self.P_freno = P_freno
        self.salida = [entrada.clone(P=entrada.P + DeltaP)]
        self.Pin = entrada.P
        self.PoutCalculada = self.salida[0].P
        self.volflow = entrada.Q
        self.cp_cv = entrada.Liquido.cp_cv
Example #19
def estimate_rate_func(t, T, N, plot_flag=False, method='central diff'):

    t_est_pts = scipy.linspace(t.min(), t.max(), N + 2)
    interp_func = scipy.interpolate.interp1d(t, T, 'linear')
    T_est_pts = interp_func(t_est_pts)

    if plot_flag == True:
        pylab.figure()
        pylab.subplot(211)
        pylab.plot(t_est_pts, T_est_pts, 'or')

    # Estimate slopes
    slope_pts = scipy.zeros((N, ))
    T_slope_pts = scipy.zeros((N, ))

    if method == 'local fit':
        for i in range(1, (N + 1)):
            mask0 = t > 0.5 * (t_est_pts[i - 1] + t_est_pts[i])
            mask1 = t < 0.5 * (t_est_pts[i + 1] + t_est_pts[i])
            mask = scipy.logical_and(mask0, mask1)
            t_slope_est = t[mask]
            T_slope_est = T[mask]
            local_fit = scipy.polyfit(t_slope_est, T_slope_est, 2)
            dlocal_fit = scipy.polyder(local_fit)
            slope_pts[i - 1] = scipy.polyval(dlocal_fit, t_est_pts[i])
            T_slope_pts[i - 1] = scipy.polyval(local_fit, t_est_pts[i])
            if plot_flag == True:
                t_slope_fit = scipy.linspace(t_slope_est[0], t_slope_est[-1],
                                             100)
                T_slope_fit = scipy.polyval(local_fit, t_slope_fit)
                pylab.plot(t_slope_fit, T_slope_fit, 'g')
    elif method == 'central diff':
        dt = t_est_pts[1] - t_est_pts[0]
        slope_pts = (T_est_pts[2:] - T_est_pts[:-2]) / (2.0 * dt)
        T_slope_pts = T_est_pts[1:-1]
    else:
        raise ValueError, 'unknown method %s' % (method, )

    # Fit line to slope estimates
    fit = scipy.polyfit(T_slope_pts, slope_pts, 1)

    if plot_flag == True:
        T_slope_fit = scipy.linspace(T_slope_pts.min(), T_slope_pts.max(), 100)
        slope_fit = scipy.polyval(fit, T_slope_fit)
        pylab.subplot(212)
        pylab.plot(T_slope_fit, slope_fit, 'r')
        pylab.plot(T_slope_pts, slope_pts, 'o')
        pylab.show()

    rate_slope = fit[0]
    rate_offset = fit[1]
    return rate_slope, rate_offset
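A self-contained check of what the 'central diff' branch estimates, on made-up Newton-cooling data: for dT/dt = k*(T - T_env) the fitted slope approximates k and the offset approximates -k*T_env.

import numpy as np

k, T_env, T0 = -0.15, 20.0, 90.0
t = np.linspace(0.0, 30.0, 200)
T = T_env + (T0 - T_env) * np.exp(k * t)

# Central-difference slope estimates at the interior points
dt = t[1] - t[0]
slope_pts = (T[2:] - T[:-2]) / (2.0 * dt)
T_slope_pts = T[1:-1]

# Fit dT/dt as a linear function of T
rate_slope, rate_offset = np.polyfit(T_slope_pts, slope_pts, 1)
print(rate_slope, rate_offset)  # close to -0.15 and 3.0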
Example #20
def rms_ms(coeffs_pix2wav, pixel_centers, wavelengths, npix, Cheby=False):
    " Returns rms deviation of best fit in m/s"

    if (Cheby):
        residuals = Cheby_eval(coeffs_pix2wav,pixel_centers,npix) - wavelengths
        central_wav = 0.5 * (Cheby_eval(coeffs_pix2wav,50.,npix) + Cheby_eval(coeffs_pix2wav,npix-50,npix))
    else:
        residuals = scipy.polyval(coeffs_pix2wav,pixel_centers) - wavelengths
        central_wav = 0.5 * (scipy.polyval(coeffs_pix2wav,50.) + scipy.polyval(coeffs_pix2wav,npix-50))

    rms_ms = np.sqrt( np.var( residuals ) ) * 299792458.0 / central_wav
    
    return rms_ms, residuals
Example #21
def estimate_rate_func(t, T, N, plot_flag=False, method='central diff'):

    t_est_pts = scipy.linspace(t.min(), t.max(), N+2) 
    interp_func = scipy.interpolate.interp1d(t,T,'linear')
    T_est_pts = interp_func(t_est_pts)
 
    if plot_flag == True:
        pylab.figure()
        pylab.subplot(211)
        pylab.plot(t_est_pts, T_est_pts,'or')

    # Estimate slopes
    slope_pts = scipy.zeros((N,)) 
    T_slope_pts = scipy.zeros((N,))  

    if method == 'local fit':
        for i in range(1,(N+1)):
            mask0 = t > 0.5*(t_est_pts[i-1] + t_est_pts[i])
            mask1 = t < 0.5*(t_est_pts[i+1] + t_est_pts[i])
            mask = scipy.logical_and(mask0, mask1)
            t_slope_est = t[mask]
            T_slope_est = T[mask]
            local_fit = scipy.polyfit(t_slope_est,T_slope_est,2)
            dlocal_fit = scipy.polyder(local_fit)
            slope_pts[i-1] = scipy.polyval(dlocal_fit,t_est_pts[i]) 
            T_slope_pts[i-1] = scipy.polyval(local_fit,t_est_pts[i])
            if plot_flag == True:
                t_slope_fit = scipy.linspace(t_slope_est[0], t_slope_est[-1], 100)
                T_slope_fit = scipy.polyval(local_fit,t_slope_fit)
                pylab.plot(t_slope_fit, T_slope_fit,'g')
    elif method == 'central diff':
        dt = t_est_pts[1] - t_est_pts[0]
        slope_pts = (T_est_pts[2:] - T_est_pts[:-2])/(2.0*dt)
        T_slope_pts = T_est_pts[1:-1]
    else:
        raise ValueError, 'unknown method %s'%(method,)
        
    # Fit line to slope estimates
    fit = scipy.polyfit(T_slope_pts, slope_pts,1)

    if plot_flag == True:
        T_slope_fit = scipy.linspace(T_slope_pts.min(), T_slope_pts.max(),100)
        slope_fit = scipy.polyval(fit,T_slope_fit)
        pylab.subplot(212)
        pylab.plot(T_slope_fit, slope_fit,'r')
        pylab.plot(T_slope_pts, slope_pts,'o')
        pylab.show()

    rate_slope = fit[0]
    rate_offset = fit[1]
    return rate_slope, rate_offset 
Example #22
    def calculo(self):
        entrada = self.kwargs["entrada"]
        self.rendimientoCalculado = Dimensionless(self.kwargs["rendimiento"])

        if self.kwargs["Pout"]:
            DeltaP = Pressure(self.kwargs["Pout"]-entrada.P)
        elif self.kwargs["deltaP"]:
            DeltaP = Pressure(self.kwargs["deltaP"])
        elif self.kwargs["Carga"]:
            DeltaP = Pressure(self.kwargs["Carga"]*entrada.Liquido.rho*g)
        else:
            DeltaP = Pressure(0)

        if self.kwargs["usarCurva"]:
            b1 = self.kwargs["diametro"] != self.kwargs["curvaCaracteristica"][0]  # noqa
            b2 = self.kwargs["velocidad"] != self.kwargs["curvaCaracteristica"][1]  # noqa
            if b1 or b2:
                self.curvaActual = self.calcularCurvaActual()
            else:
                self.curvaActual = self.kwargs["curvaCaracteristica"]
            self.Ajustar_Curvas_Caracteristicas()

        if not self.kwargs["usarCurva"]:
            head = Length(DeltaP/g/entrada.Liquido.rho)
            power = Power(head*g*entrada.Liquido.rho*entrada.Q /
                          self.rendimientoCalculado)
            P_freno = Power(power*self.rendimientoCalculado)
        elif not self.kwargs["incognita"]:
            head = Length(polyval(self.CurvaHQ, entrada.Q))
            DeltaP = Pressure(head*g*entrada.Liquido.rho)
            power = Power(entrada.Q*DeltaP)
            P_freno = Power(polyval(self.CurvaPotQ, entrada.Q))
            self.rendimientoCalculado = Dimensionless(power/P_freno)
        else:
            head = Length(self.DeltaP/g/entrada.Liquido.rho)
            poli = [self.CurvaHQ[0], self.CurvaHQ[1], self.CurvaHQ[2]-head]
            Q = roots(poli)[0]
            power = Power(Q*self.DeltaP)
            entrada = entrada.clone(split=Q/entrada.Q)
            P_freno = Power(polyval(self.CurvaPotQ, Q))
            self.rendimientoCalculado = Dimensionless(power/P_freno)

        self.deltaP = DeltaP
        self.headCalculada = head
        self.power = power
        self.P_freno = P_freno
        self.salida = [entrada.clone(P=entrada.P+DeltaP)]
        self.Pin = entrada.P
        self.PoutCalculada = self.salida[0].P
        self.volflow = entrada.Q
        self.cp_cv = entrada.Liquido.cp_cv
Example #23
def LinearRegressInverse(data, year, kind):
    '''
        Use the linear regression of the ratio of the winner's votes to the loser's votes.
    '''

    Dcontested = []
    Rcontested = []

    for D, R, I in data[year][kind]:
        if D is None or R is None:
            continue
        if D > R:
            Dcontested.append((D, R, I))
        else:
            Rcontested.append((D, R, I))

    Dinverse = [(D, float(D) / R) for D, R, _ in Dcontested]
    Rinverse = [(R, float(R) / D) for D, R, _ in Rcontested]

    Da_s, Db_s, Dr, Dtt, Derr = scipy.stats.linregress(
        [V for V, R in Dinverse], [R for V, R in Dinverse])
    Deval = scipy.polyval([Da_s, Db_s], [V for V, R in Dinverse])
    Ra_s, Rb_s, Rr, Rtt, Rerr = scipy.stats.linregress(
        [V for V, R in Rinverse], [R for V, R in Rinverse])
    Reval = scipy.polyval([Ra_s, Rb_s], [V for V, R in Rinverse])

    if hasPyLab:
        pylab.title('Inverse regression')
        pylab.plot([V for V, R in Dinverse], [R for V, R in Dinverse], 'b.')
        pylab.plot([V for V, R in Dinverse], Deval, 'b-')
        pylab.plot([V for V, R in Rinverse], [R for V, R in Rinverse], 'r.')
        pylab.plot([V for V, R in Rinverse], Reval, 'r-')
        pylab.legend(['D ratio', 'D l-r', 'R ratio', 'R l-r'])
        pylab.show()

    results = []
    for D, R, I in data[year][kind]:
        Ik = I if I is not None else 0
        if D is None:
            Dk = .75 * R / scipy.polyval([Ra_s, Rb_s], R)
        else:
            Dk = D
        if R is None:
            Rk = .75 * D / scipy.polyval([Da_s, Db_s], D)
        else:
            Rk = R
        results.append((Dk, Rk, Ik))

    return results
Example #24
def fit_curve():
	global data, running
	if running == True or len(data[0])==0:
		return
	aa = []
	bb = []
	for k in range(len(data[0])):
		if data[1][k] > 1.0:
			aa.append(data[0][k])
			bb.append(data[1][k])
	x  = array(bb)
	y  = array(aa)
	from scipy import polyfit, polyval
	(ar,br)=polyfit(x,y,1)
	print polyval([ar,br],[0])
Example #25
def fit_curve():
    global data, running
    if running == True or len(data[0]) == 0:
        return
    aa = []
    bb = []
    for k in range(len(data[0])):
        if data[1][k] > 1.0:
            aa.append(data[0][k])
            bb.append(data[1][k])
    x = array(bb)
    y = array(aa)
    from scipy import polyfit, polyval
    (ar, br) = polyfit(x, y, 1)
    print polyval([ar, br], [0])
Example #26
def ctrl_voltage(ND):
    coefs = array([
        3.85013993e-18, -6.61616152e-15, 4.62228606e-12, -1.68733555e-09,
        3.43138077e-07, -3.82875899e-05, 2.20822016e-03, -8.38473034e-02,
        1.52678586e+00
    ])
    return scipy.polyval(coefs, ND)
Example #27
def interpMaskedAreas(x, dtfunc):
    idx = num.where(dtfunc == -1)[0]
    # find consecutive points of dtfunc set at -1
    diffs = idx - num.roll(idx, 1)
    setIdx1 = num.where(diffs > 1)[0]
    setIdx2 = num.where(diffs > 1)[0] - 1
    setIdx1 = num.hstack( (num.array([0]), setIdx1) )
    setIdx2 = num.hstack( (setIdx2, num.array([-1])) )
    # each int in setIdx1 is beginning of a set of points
    # each int in setIdx2 is end of a set of points
    for i in range(len(setIdx1)):
        i1 = setIdx1[i]
        i2 = setIdx2[i]
        vals_to_interp = dtfunc[ idx[i1]:idx[i2]+1]
        
        # construct interpolation from points on either side of vals
        # first ydata
        nVals = len(vals_to_interp)
        left = num.array( \
            [ dtfunc[idx[i1]-1-nVals], dtfunc[idx[i1]-1] ])
        right = num.array( \
            [ dtfunc[idx[i2]+1], dtfunc[idx[i2]+1+nVals] ])
        vals_to_construct_interp = num.hstack( (left, right) )
        vtci = vals_to_construct_interp
        
        # now xdata
        leftx = num.array([ x[idx[i1]-1-nVals], x[idx[i1]-1] ])
        rightx = num.array([ x[idx[i2]+1], x[idx[i2]+1+nVals] ])
        x_vtci = num.hstack( (leftx, rightx) )
        # conduct interpolation
        intpCoeffs = scipy.polyfit(x_vtci, vtci, 3)
        dtfunc[ idx[i1]:idx[i2]+1 ] = \
            scipy.polyval(intpCoeffs, x[ idx[i1]:idx[i2]+1 ])
        
    return dtfunc
Example #28
def scatter_fits(sys, scores, values, R, pval, aucname, show=False):
    auclabel=get_label(aucname)
    if sys=='apo':
        format='ko'
        label=get_label(sys)
    if sys=='bi':
        format='ro'
    if sys=='car':
        format='bo'
    if pval< 0.0001:
        pval=0.0001
    pylab.figure()
    pylab.plot(scores, values, format)
    (ar,br)=polyfit(scores, values, 1)
    xr=polyval([ar,br], scores)
    pylab.plot(scores,xr,'%s-' % format[0], label='R=%s, pval=%s' %
            (round(R,2), round(pval,4)))
    if aucname=='types':
        pylab.plot(range(0, 15), [0.5]*len(range(0,15)), 'k--', label='Random Disc.')
        pylab.ylim(0.3, 0.9)
    else:
        pylab.ylim(0.5, 1.0)
    pylab.xlim(0, 15)
    lg=pylab.legend()
    lg.draw_frame(False)
    pylab.title('%s States' % label)
    pylab.xticks(range(0, 15), [' ']*2+ ['inactive']+[' ']*(len(range(0,15))-6)+['active']+[' ']*2)
    pylab.xlabel('Pathway Progress')
    pylab.ylabel('%s Aucs' % (auclabel))
    pylab.savefig('%s_%saucs.png' % (sys, aucname), dpi=300)
    if show==True:
        pylab.show()
Example #29
def PCoeff(filename,b,Aperture,RON,Gain,NSigma,S,N,marsh_alg,min_col,max_col):
    hdulist = pyfits.open(filename)                  # Here we obtain the image...
    data=hdulist[0].data                             # ... and we obtain the image matrix.
    Result=Marsh.ObtainP((data.flatten()).astype(double),scipy.polyval(b,numpy.arange(data.shape[1])).astype(double),data.shape[0],data.shape[1],data.shape[1],Aperture,RON,Gain,NSigma,S,N,marsh_alg,min_col,max_col)
    FinalMatrix=asarray(Result)                      # After the function, we convert our list to a Numpy array.
    FinalMatrix.resize(data.shape[0],data.shape[1])  # And return the array in matrix-form.
    return FinalMatrix
Example #30
def main(FileName):
    
    for trial in kep.iofiles.passFileToTrialList(FileName):
        tr = kep.pipelinepars.keptrial(trial)
        kw = kep.keplc.kw(**tr.kw)
        X = kep.keplc.keplc(tr.kid)
        X.runPipeline(kw)
        Y = kep.qats.qatslc(X.lcFinal,X.KID,maske=kw.maske)
        Y.padLC()
        Y.addNoise()
        Y.runQATS(f=0.01)
        
        coeff = num.polyfit(num.log10(Y.periods),num.log10(Y.snrLC),1)
        outy = scipy.polyval(coeff,num.log10(Y.periods))
        normalizedPower = 10**(outy)
        
        plt.plot(Y.periods,Y.snrFLAT,'r-') 
        plt.plot(Y.periods,Y.snrLC,'b-')
        plt.plot(Y.periods,normalizedPower,'k-')
        plt.setp(plt.gca().set_xscale('log'))
        plt.savefig('sn.'+X.KID+'.png')
        
        dfile = open('signal.'+X.KID+'.data','w')
        print >> dfile,'#',X.KID,'|', Y.periods[num.argmax(Y.snrLC/normalizedPower)],\
                Y.periods[num.argmax(Y.snrLC)], max(Y.snrLC/normalizedPower), max(Y.snrLC)
        for i in range(len(Y.SignalPower)):
            print >> dfile, Y.periods[i],'|',Y.snrLC[i],'|',\
                            Y.snrFLAT[i],'|',normalizedPower[i],'|',\
                            Y.tmin[i],'|',Y.tmax[i],'|',Y.q[i]
        dfile.close()
Example #31
    def transform(self, rmout):
        irt = {}
        for rawspectrum in self.rt:
            for peptide in self.rt[rawspectrum]:
                self.irt[rawspectrum][peptide] = scipy.polyval(
                    [self.a[rawspectrum], self.b[rawspectrum]],
                    self.rt[rawspectrum][peptide])
                if peptide not in irt:
                    irt[peptide] = []
                irt[peptide].append(self.irt[rawspectrum][peptide])

        for peptide in irt:
            if len(irt) == 1:
                self.irt_merged[peptide] = round(irt[peptide][0], 5)
            else:
                if rmout and len(irt) > 2:
                    self.irt_merged[peptide] = round(
                        lmedian(
                            array(irt[peptide])[invert(
                                chauvenet(array(irt[peptide]),
                                          array(irt[peptide])))]), 5)
                else:
                    self.irt_merged[peptide] = round(lmedian(irt[peptide]), 5)

        for i in range(0, len(self.blocks)):
            self.blocks[i].replace(self.irt_merged[self.blocks[i].peptide])
Example #32
def _getACDataAndCharPulse(ps):
    """Create a data array and characteristic pulse array by summing the AC data
    and characteristic pulse for each APD. Background fluctuations are removed
    from the data by fitting using a 2nd order polynomial. The two arrays are
    then scaled so they are approximately the same size. This is all done in
    order to facilitate finding the t0 point.

    Parameters:
    ps -- A PolySegData object.

    Returns: A tuple (dataArray, charPulseArray)
    """
    # First we sum the data from the APDs to make the pulse easier to find.
    yData = zeros_like(ps.acq_voltData[0])
    for data in ps.acq_voltData:
        yData += data

    # Fit the averaged data with a polynomial to (hopefully) fit the 
    # background. Currently using a 2nd degree polynomial.
    xData = arange(ps.acq_voltData.shape[1])
    ps.t0ACBgCoeffs = polyfit(xData, yData, 2) # The coefficients
    yFit = polyval(ps.t0ACBgCoeffs, xData) # The polynomial as an array

    yData -= yFit

    # Create a composite characteristic pulse by summing.
    cPulse = zeros_like(ps.calib.charPulseAC[0])
    for data in ps.calib.charPulseAC:
        cPulse += data

    # Normalize yData to the size of cPulse.
    # I don't know if this is really necessary.
    yData *= min(cPulse) / min(yData)

    return (yData, cPulse)
Example #33
def fit_data(xdata, ydata):
    """ Fit a regression line to shift data points

    Parameters
    ----------
    xdata : astropy.table.column.Column
        A list of x values (time)
    ydata : astropy.table.column.Column
        A list of y values (shifts)

    Returns
    -------
    fit : ndarray
        The fit line
    xdata : astropy.table.column.Column
        List of x values for fit
    parameters : tuple
        fitting parameters
    err : int
        Value returned indicating whether the fit was a success.
    """
    stats = linregress(xdata, ydata)

    parameters = (stats[0], stats[1])
    err = 0
    fit = scipy.polyval(parameters, xdata)

    return fit, xdata, parameters, err
Example #34
def draw(_case, specie, foo, color, label):
    stats = gen_stats(foo, _case, specie)
    x = range(len(stats))
    y = polyfit(x, stats, 4)
    y_pred = polyval(y, x)
    plt.plot(stats, '%s--' % color, label=label)
    plt.plot(y_pred, '%s-' % color, label='_nolegend_')
Example #35
	def linearRegression(self,Data,Flag):
		savePath = '/home/yotoo/Project/comparison/'
		# plot original data
		title = 'test data'
		if (Flag == '19 + Ranked'):
			title = 'Linear Regression Result(Border size: 19; Game Type: Ranked)'
		elif (Flag == '19 + Free'):
			title = 'Linear Regression Result(Border size: 19; Game Type: Free)'
		elif (Flag == '9 + Free'):
			title = 'Linear Regression Result(Border size: 9; Game Type: Free)'

		degreeArray = []
		playerArray = []
		degreeList = []
		playerList = []

		for record in Data:
			record = record.split()
			degree = int(record[0])
			player = int(record[1])
			degreeList.append(degree)
			playerList.append(player)

			degree = float(record[0])
			player = float(record[1])
			degreeArray.append(math.log10(degree))
			playerArray.append(math.log10(player))

		degreeArray = np.array(degreeArray)
		playerArray = np.array(playerArray)


		# linear regression
		n = len(degreeList)
		slope,intercept = polyfit(degreeArray,playerArray,1)
		playerRegression=polyval([slope,intercept],degreeArray)  # coefficients come from the polyfit above
		ms_error=sqrt(sum((playerRegression-playerArray)**2)/n)

		# plot the original data points
		plt.grid(True)
		plt.xlabel('Log Degree')
		plt.ylabel('Log Players')
		plt.title(title)
		plt.plot(degreeArray,playerArray,'bo')


		# error
		# plot regression line
		x = [x for x in range(0,5)]
		y = [i*slope+intercept for i in x]

		regressLineText = 'Slope = %.3f \nIntercept = %.3f \nMean Square Error = %.3f' %(slope,intercept,ms_error)
		plt.plot(x,y,'r.-',label = regressLineText)

		plt.legend()
		plt.savefig(savePath + title + ".png")

		plt.show()

		print regressLineText
Example #36
	def raw_2_deg(self,calib,qei_on,qei_period,qei_rollover, qs_to_qj = 1.0):
		#qei: raw sensor value
		#calib: yaml config calibration map
			
		if self.ctype=="vertx_14bit":
			val = qs_to_qj*360.0*(qei_on)/VERTX_14BIT_MAX
			return val*calib['cb_scale']+calib['cb_bias']
		
		if self.ctype=="ma3_12bit":
			if qei_period==0.0:
				return 0.0
			val = ((qei_on*MA3_12BIT_PERIOD_US)/qei_period)-1 
			val= qs_to_qj*360.0*(val/MA3_12BIT_MAX_PULSE_US)
			return val*calib['cb_scale']+calib['cb_bias']
		
		if self.ctype=="ma3_10bit":
			if qei_period==0.0:
				return 0.0
			val = ((qei_on*MA3_10BIT_PERIOD_US)/qei_period)-1
			val= qs_to_qj*360.0*(val/MA3_10BIT_MAX_PULSE_US)
			return val*calib['cb_scale']+calib['cb_bias']
		
		if self.ctype=="ma3_12bit_poly":
			if qei_period==0.0:
				return 0.0
			val = qs_to_qj*(((qei_on*MA3_12BIT_PERIOD_US)/qei_period)-1)
			val=float(polyval(calib['cb_theta'],val))
			return val*calib['cb_scale']+calib['cb_bias']
Example #37
    def getDiameterQuantilesAlongSinglePath(self,path,G,counter=None):
        
        G=self.filterPathDiameters(path, G,self.__scale)
        x=[]
        y=[]
        length=0
        vprop=G.vertex_properties["vp"]
        for i in path:
            length+=1
            if vprop[i]['diameter'] > 0:
                x.append(length)
                y.append(vprop[i]['diameter'])
        coeffs=polyfit(x,y,1)

        besty = polyval(coeffs, x)
        
        self.__io.saveArray(x,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_DiameterX')
        self.__io.saveArray(y,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_DiameterY')

        l=len(y)-1
        l25=int(l*0.25)
        l50=int(l*0.5)
        l75=int(l*0.75)
        l90=int(l*0.90)
        
        d25=np.average(y[:l25])
        d50=np.average(y[l25:l50])
        d75=np.average(y[l50:l75])
        d90=np.average(y[l90:])

        
        self.__io.saveArray(y,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_DiameterHistoTP')
        
        return d25,d50,d75,d90
Example #38
def plotLinearRegression(item, indicatorList):
    """Saves to file a linear regression plot of any indicator in item
    that contains at least 30 values"""
    country = loadPickle(item)
    print len(item[1])*"-" + "\n" + item[1] + "\n" + len(item[1]) * "-"
    country.indicators.sort()
    for indicator in country.indicators:
	indicator.values.sort()
	if indicator.id in indicatorList:
	#if len(indicator.values) >= 40:
	#if indicator.id == "NY.GDP.MKTP.KD":
	    print "  ", indicator.name
	    xvals = []
	    yvals = []
	    for entry in indicator.values:
		xvals.append(entry.year)
		yvals.append(entry.value)
	    (a, b, r, tt, err) = stats.linregress(xvals, yvals)
	    regVals = scipy.polyval([a, b], xvals)
	    plt.title(indicator.name + "\n" + item[1])
	    plt.plot(xvals, yvals, 'k.')
	    plt.plot(xvals, regVals, 'r.-')
	    plt.legend(['data', 'regression'])

	    dir = os.path.join(os.pardir, 'plots', 'linear_regressions',
		indicator.id)
	    if not os.path.exists(dir):
		os.makedirs(dir)

	    plt.savefig(os.path.join(dir,item[0] + ".pdf"))
	    plt.clf()
Example #39
def linearRegression(histogram):
    x=[]
    y=[]
    for i in range(256):
        for k in range(256):
            if histogram.GetBinContent(i,k)!=0:
                x.append(i)
                y.append(k)
    (ar,br)=polyfit(x,y,1)
    xr=polyval([ar,br],x)
    err=sqrt(sum((xr-y)**2)/len(y))
    if abs(ar)>1.0:
        (ar,br)=polyfit(y,x,1)
        xr=polyval([ar,br],y)
        err=sqrt(sum((xr-x)**2)/len(y))
    return err
Example #40
def construct_linear_regression(x_learn, y_dev_learn, x_test, y_dev_test):

    regression = sp.polyfit(x_learn, y_dev_learn, 1)

    y_exp_learn = sp.polyval(regression, x_learn)
    y_exp_test = sp.polyval(regression, x_test)

    print("For train dataset:\n\ty = a*x + b\n\ta = %f\n\tb = %f" % (regression[0], regression[1]))

    mse_learn = np.sqrt(np.mean((y_exp_learn - y_dev_learn) ** 2))
    mse_test = np.sqrt(np.mean((y_exp_test - y_dev_test) ** 2))
    mse_total = np.sqrt((((mse_learn**2) * learn_set_size)
                         + ((mse_test**2) * (dataset_size - learn_set_size)))
                        / dataset_size)
    print("Train MSE = %f\nTest MSE = %f\nTotal MSE = %f" % (mse_learn, mse_test,mse_total ))
    return regression, mse_learn, mse_test, mse_total, y_exp_learn, y_exp_test
Example #41
	def raw_2_mA(self,calib,ticks_a,ticks_b):
		#ticks: adc sensor value
		#calib: yaml config calibration map
		if self.ctype=='adc_linear_5V':
			mV_a = 5000.0*(ticks_a-calib['cb_ticks_at_zero_a'])/M3EC_ADC_TICKS_MAX
			mV_b = 5000.0*(ticks_b-calib['cb_ticks_at_zero_b'])/M3EC_ADC_TICKS_MAX
			i_a = 1000.0*mV_a/calib['cb_mV_per_A']
			i_b = 1000.0*mV_b/calib['cb_mV_per_A']
			val=max(abs(i_a), abs(i_b))
			return max(0,val*calib['cb_scale']+calib['cb_bias'])	
		if self.ctype=='adc_poly':
			i_a= float(polyval(calib['cb_current_a'],ticks_a))
			i_b= float(polyval(calib['cb_current_b'],ticks_b))
			val=max(abs(i_a), abs(i_b))
			return val*calib['cb_scale']+calib['cb_bias']
		return 0.0
Example #42
def graph_stock_regression(data, filename):
    """
    This function should take a list containing two lists of the form
    returned by get_yahoo_data (list of date, adj. close tuples) and
    save the graph of the series of daily return pairs as well as
    the regression line. The graph should be saved to the given
    filename.
    """
    one = []
    two = []
    for i in xrange(1, len(data[0])):
        one.append((data[0][i][1]-data[0][i-1][1])/data[0][i-1][1])
        two.append((data[1][i][1]-data[1][i-1][1])/data[1][i-1][1])
        
    (a_s, b_s, r, tt, stderr) = linregress(one, two)
    line = scipy.polyval([a_s, b_s], one)
    
    title = filename.split('.')[0]
    axies = title.split('vs')
    
    pylab.title(title)
    pylab.plot(one, two, 'r.', one, line, 'k')
    pylab.xlabel(axies[0])
    pylab.ylabel(axies[1])
    pylab.legend(['data', 'regression'])
    pylab.savefig(filename)
Example #43
def calculate_polyfit(x, y):
    """ Calculate a linear regression using polyfit instead """

    # FIXME(pica) This doesn't work for large ranges (tens of orders of
    # magnitude). No idea why as the fixRange() function above should make
    # all values greater than one. The trouble mainly seems to happen when
    # log10(min(x)) is negative.

    # Check the smallest value in x or y isn't below 2x10-7 otherwise
    # we hit numerical instabilities.
    xFactorFix, xFactor = False, 0
    yFactorFix, yFactor = False, 0
    minX = min(x)
    minY = min(y)
    if minX < 2e-7:
        x, xFactor, xFactorFix = fixRange(x)
    if minY < 2e-7:
        y, yFactor, yFactorFix = fixRange(y)

    (ar, br) = polyfit(x, y, 1)
    yf = polyval([ar, br], x)
    xf = x

    if xFactorFix:
        xf = xf / 10**xFactor
    if yFactorFix:
        yf = yf / 10**yFactor

    return xf, yf
Example #44
def plot_rate_vs_mass(array_vs,ks):
  #array_v=array_vs
  #k_growth = ks
  colors = ['#800000','#c87942']#,'#008080','#82bcd3']
  #col='#82bcd3'
  f1 = p.figure()
  for array_v, k_growth,col in zip(array_vs,ks,colors):
    zahl = max([len(i) for i in array_v['R'].flat if i]) # longest list of X values
    nr_cells=array_v.size
    tmp2=zeros((nr_cells,zahl),dtype=float)
    tmp3=zeros((nr_cells,zahl),dtype=float)

    for k1, vector in enumerate(array_v.flat):
      if vector['R']:
	  for k2, r, am, ad, v in zip(range(zahl-len(vector['R']),zahl),vector['R'],vector['B_Am'],vector['B_Ad'],vector['V']):
	    if vector['S_entry'] != 0:
	      tmp2[k1,k2] = log2(r + am + ad) # cell mass
	      tmp3[k1,k2] = log2(r * (am + ad) * k_growth / v) # metabolic rate
    
    tmp2 = array([i for i in tmp2 if i[-1]!=0])
    tmp3 = array([i for i in tmp3 if i[-1]!=0])
    p.plot(tmp2[:,-1],tmp3[:,-1],'o',color=col)
    
    (a_s,b_s,r,tt,stderr)=stats.linregress(tmp2[:,-1],tmp3[:,-1])
    p.text(min(tmp2[:,-1])+1,min(tmp3[:,-1]),'slope = %s'%(round(a_s,2)))
    p.plot(linspace(min(tmp2[:,-1]),max(tmp2[:,-1]),2),polyval([a_s,b_s],linspace(min(tmp2[:,-1]),max(tmp2[:,-1]),2)),'-',color='#25597c',lw=3)

  p.xlabel('Cell mass')
  p.ylabel('Metabolic rate')
  f1.savefig('rate_vs_mass_0.02_and_0.03_SG2_68_G2_cells.pdf')
Example #45
	def raw_2_mNm(self,calib,ticks):
		#ticks: raw sensor value
		#calib: yaml config calibration map
		if self.ctype=='sea_vertx_14bit' or self.ctype=='adc_poly':
			val=float(polyval(calib['cb_torque'],ticks))
			return val*float(calib['cb_scale'])+float(calib['cb_bias'])
		return 0.0
Example #46
def session_fits(session_id, fit_length=50,
        output_filename='results/check_fit.dat',
        write=True):
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)
    runs = session.experiments[0].runs

    data = []
    for run in runs:
        parameter = run.parameters['release_rate']
        fitness = run.get_objective('pi_fit')
        if fitness is not None:
            data.append((parameter, fitness))

    fit_sorted_data = sorted(data, key=operator.itemgetter(1))
    px, py = zip(*fit_sorted_data[:fit_length])
    coeffs, R, n, svs, rcond = scipy.polyfit(px, py, 2, full=True)

    x, y = zip(*sorted(data))

    peak = float(-coeffs[1] / (2 * coeffs[0]))
    fit = float(R/fit_length)

    if write:
        p = scipy.polyval(coeffs, x)
        header = '# Parabola peak at %s\n# Parabola R^2/n = %s\n'
        rows = zip(x, y, p)
        _small_writer(output_filename, rows, ['release_rate', 'pi_fit', 'parabola'],
                header=header % (peak, fit))

    return session.parameters.get('release_cooperativity'), peak, fit
Example #47
	def raw_2_C(self,calib,ticks):
		print "type1:", self.ctype
		#ticks: adc sensor value
		#calib: yaml config calibration map
		if self.ctype=='adc_linear_3V3':
			mV=ticks/(M3EC_ADC_TICKS_MAX/3300.0)
			bias = 25.0-(calib['cb_mV_at_25C']/calib['cb_mV_per_C'])
			val = mV/calib['cb_mV_per_C'] + bias
		if self.ctype=='adc_linear_5V':
			mV=ticks/(M3EC_ADC_TICKS_MAX/5000.0)
			bias = 25.0-(calib['cb_mV_at_25C']/calib['cb_mV_per_C'])
			val = mV/calib['cb_mV_per_C'] + bias
		if self.ctype=='adc_linear_5V_ns':
			mV=ticks/(M3EC_ADC_TICKS_MAX/3300.0)
			bias = 25.0-(calib['cb_mV_at_25C']/calib['cb_mV_per_C'])
			val = mV/calib['cb_mV_per_C'] + bias
		if self.ctype=='adc_poly':
			val = float(polyval(calib['cb_temp'],ticks))
		if self.ctype=='temp_25C':
			return 25.0
		if self.ctype=='none':
			return 0.0
		if self.ctype == 'dsp_calib':
			val = ticks
			print "type2:", self.ctype
		return val*calib['cb_scale']+calib['cb_bias']
Example #48
def session_fits(session_id,
                 fit_length=50,
                 output_filename='results/check_fit.dat',
                 write=True):
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)
    runs = session.experiments[0].runs

    data = []
    for run in runs:
        parameter = run.parameters['release_rate']
        fitness = run.get_objective('pi_fit')
        if fitness is not None:
            data.append((parameter, fitness))

    fit_sorted_data = sorted(data, key=operator.itemgetter(1))
    px, py = zip(*fit_sorted_data[:fit_length])
    coeffs, R, n, svs, rcond = scipy.polyfit(px, py, 2, full=True)

    x, y = zip(*sorted(data))

    peak = float(-coeffs[1] / (2 * coeffs[0]))
    fit = float(R / fit_length)

    if write:
        p = scipy.polyval(coeffs, x)
        header = '# Parabola peak at %s\n# Parabola R^2/n = %s\n'
        rows = zip(x, y, p)
        _small_writer(output_filename,
                      rows, ['release_rate', 'pi_fit', 'parabola'],
                      header=header % (peak, fit))

    return session.parameters.get('release_cooperativity'), peak, fit
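A minimal, made-up sketch of the parabola-vertex step used above: fit degree-2 coefficients to (parameter, fitness) pairs with full=True and read the peak off -b/(2a). The parameter range and noise level are illustrative only.

import numpy as np

rng = np.random.default_rng(0)
px = np.linspace(0.5, 2.0, 30)                          # e.g. release_rate values
py = (px - 1.2) ** 2 + rng.normal(0.0, 0.01, px.size)   # fitness, best near 1.2

coeffs, R, rank, svs, rcond = np.polyfit(px, py, 2, full=True)
peak = float(-coeffs[1] / (2 * coeffs[0]))  # vertex of the fitted parabola
fit_quality = float(R[0] / px.size)         # summed squared residual per point
print(peak)  # close to 1.2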
Example #49
 def setBestFit(self):
     imin = self.chiSqrs.argmin()
     self.polynom = scipy.polyval(self.coeffs[imin][:-1], \
         self.periods)
     self.bestFit = self.polynom + self.coeffs[imin][-1] * \
         firstFareyVals(self.periods, self.periods[imin])
     self.minSqr  = self.chiSqrs[imin]
Example #50
    def getDiameterQuantilesAlongSinglePath(self,path,G,counter=None):
        
        G=self.filterPathDiameters(path, G,self.__scale)
        x=[]
        y=[]
        length=0
        vprop=G.vertex_properties["vp"]
        for i in path:
            length+=1
            if vprop[i]['diameter'] > 0:
                x.append(length)
                y.append(vprop[i]['diameter'])
        coeffs=polyfit(x,y,1)

        besty = polyval(coeffs, x)
        
        self.__io.saveArray(x,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_DiameterX')
        self.__io.saveArray(y,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_DiameterY')

        l=len(y)-1
        l25=int(l*0.25)
        l50=int(l*0.5)
        l75=int(l*0.75)
        l90=int(l*0.90)
        
        d25=np.average(y[:l25])
        d50=np.average(y[l25:l50])
        d75=np.average(y[l50:l75])
        d90=np.average(y[l90:])

        
        self.__io.saveArray(y,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_DiameterHistoTP')
        
        return d25,d50,d75,d90
Example #51
def scatter_fits(sys, scores, values, R, pval, aucname, show=False):
    auclabel=get_label(aucname)
    if sys=='apo':
        format='ko'
        label=get_label(sys)
    if sys=='bi':
        format='ro'
        label=get_label(sys)
    if sys=='car':
        format='bo'
        label=get_label(sys)
    if pval< 0.0001:
        pval=0.0001
    pylab.figure()
    pylab.plot(scores, values, format)
    (ar,br)=polyfit(scores, values, 1)
    xr=polyval([ar,br], scores)
    pylab.plot(scores,xr,'%s-' % format[0], label='R=%s, pval=%s' %
            (round(R,2), round(pval,4)))
    if aucname=='types':
        pylab.plot(range(0, 15), [0.5]*len(range(0,15)), 'k--', label='Random Disc.')
        pylab.ylim(0.3, 0.9)
    else:
        pylab.ylim(0.5, 1.0)
    pylab.xlim(0, 15)
    lg=pylab.legend()
    lg.draw_frame(False)
    pylab.title('%s States' % label)
    pylab.xticks(range(0, 15), [' ']*2+ ['inactive']+[' ']*(len(range(0,15))-6)+['active']+[' ']*2)
    pylab.xlabel('Pathway Progress')
    pylab.ylabel('%s Aucs' % (auclabel))
    pylab.savefig('%s_%saucs.png' % (sys, aucname), dpi=300)
    if show==True:
        pylab.show()
Example #52
def show_dialog_len():
    import numpy as np
    from scipy import polyfit,polyval
    import matplotlib.pyplot as plt
    import LetsgoCorpus as lc
    import operator

    corpus = lc.Corpus('D:/Data/training',prep=True)
    
    l = [];avg_cs = []
    for dialog in corpus.dialogs():
        if len(dialog.turns) > 40:
            continue
        l.append(len(dialog.turns))
        avg_cs.append(reduce(operator.add,map(lambda x:x['CS'],dialog.turns))/l[-1])
    (ar,br) = polyfit(l,avg_cs,1)
    csr = polyval([ar,br],l)
    plt.plot(l,avg_cs,'g.',alpha=0.75)
    plt.plot(l,csr,'r-',alpha=0.75,linewidth=2)
    plt.axis([0,50,0,1.0])
    plt.xlabel('Dialog length (turn)')
    plt.ylabel('Confidence score')
    plt.title('Dialog length vs. Confidence score')
    plt.grid(True)
    plt.savefig('img/'+'Dialog length vs Confidence score'+'.png')
#    plt.show()
    plt.clf()
Example #53
def plot_ci_bootstrap(xs, ys, resid, nboot=500, ax=None):
    """Return an axes of confidence bands using a bootstrap approach.

    Notes
    -----
    The bootstrap approach iteratively resamples the residuals.
    It plots `nboot` straight lines and outlines the shape of a band.
    The density of overlapping lines indicates improved confidence.

    Returns
    -------
    ax : axes
        - Cluster of lines
        - Upper and Lower bounds (high and low) (optional)  Note: sensitive to outliers

    References
    ----------
    .. [1] J. Stults. "Visualizing Confidence Intervals", Various Consequences.
       http://www.variousconsequences.com/2010/02/visualizing-confidence-intervals.html

    """
    if ax is None:
        ax = plt.gca()

    bootindex = scipy.random.randint

    for _ in range(nboot):
        resamp_resid = resid[bootindex(0, len(resid) - 1, len(resid))]
        # Make coeffs of for polys
        pc = scipy.polyfit(xs, ys + resamp_resid, 1)                  
        # Plot bootstrap cluster
        ax.plot(xs, scipy.polyval(pc, xs), "b-", linewidth=2, alpha=3.0 / float(nboot))

    return ax
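A self-contained usage sketch of the residual-resampling band, assuming NumPy and matplotlib; the straight-line data are made up and 200 bootstrap lines are drawn, mirroring the loop above.

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
xs = np.linspace(0.0, 10.0, 40)
ys = 1.5 * xs + 2.0 + rng.normal(0.0, 1.0, xs.size)

# Fit once and keep the residuals
coeffs = np.polyfit(xs, ys, 1)
resid = ys - np.polyval(coeffs, xs)

# Redraw the residuals with replacement, refit, and overplot each line;
# the overlapping cluster outlines the confidence band
ax = plt.gca()
ax.plot(xs, ys, "ko")
nboot = 200
for _ in range(nboot):
    resamp = resid[rng.integers(0, resid.size, resid.size)]
    pc = np.polyfit(xs, ys + resamp, 1)
    ax.plot(xs, np.polyval(pc, xs), "b-", linewidth=2, alpha=3.0 / nboot)
plt.show()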
Example #54
def calculatePolyfit(x, y):
    """ Calculate a linear regression using polyfit instead """

    # FIXME(pica) This doesn't work for large ranges (tens of orders of
    # magnitude). No idea why as the fixRange() function above should make
    # all values greater than one. The trouble mainly seems to happen when
    # log10(min(x)) is negative.

    # Check the smallest value in x or y isn't below 2x10-7 otherwise
    # we hit numerical instabilities.
    xFactorFix, xFactor = False, 0
    yFactorFix, yFactor = False, 0
    minX = min(x)
    minY = min(y)
    if minX < 2e-7:
        x, xFactor, xFactorFix = fixRange(x)
    if minY < 2e-7:
        y, yFactor, yFactorFix = fixRange(y)

    (ar, br) = polyfit(x, y, 1)
    yf = polyval([ar, br], x)
    xf = x

    if xFactorFix:
        xf = xf / 10**xFactor
    if yFactorFix:
        yf = yf / 10**yFactor

    return xf, yf
Example #55
    def objective_scale(p, Ax, data, spec_wave, fit_mask, sivarf, Nphot, Next, return_coeffs):
        """
        Objective function for fitting for a scale term between photometry and
        spectra
        """
        import scipy.optimize
        from scipy import polyval

        scale = np.ones(Ax.shape[1])
        scale[:-Nphot] = polyval(p[::-1]/10., (spec_wave-1.e4)/1000.)
        AxT = Ax*scale

        # Remove scaling from background component
        for i in range(Next):
            AxT[i, :] /= scale

        #AxT = AxT[:,fit_mask].T
        # (Ax*scale)[:,fit_mask].T
        #AxT[:,:Next] = 1.

        coeffs, rnorm = scipy.optimize.nnls(AxT[:, fit_mask].T, data[fit_mask])

        full = np.dot(coeffs, AxT/sivarf)
        resid = data/sivarf - full  # - background
        chi2 = np.sum(resid[fit_mask]**2*sivarf[fit_mask]**2)

        #print('{0} {1}'.format(p, chi2))

        if return_coeffs:
            return coeffs, full, resid, chi2, AxT
        else:
            return chi2
Example #56
    def acceptable_fit(self):
        ordered_y, ordered_x = zip(
            *sorted(self.pairs, key=operator.itemgetter(1)))
        parabola_y = scipy.polyval(self.coeffs, ordered_x)

        if self.plot:
            import matplotlib.pyplot
            pyplot = matplotlib.pyplot

            pyplot.ion()
            pyplot.draw()

            a = pyplot.subplot(1, 1, 1)
            a.clear()

            #            a.set_xscale('log')
            #            a.set_yscale('log')

            pyplot.plot(ordered_x, ordered_y, 'ro')
            pyplot.plot(ordered_x, parabola_y, 'b-')

            pyplot.axvline(self.parabola_peak, 0, 1, linestyle=':', color='g')

            pyplot.draw()

        return ((self.last_parabola_peak is not None)
                and ((abs(self.parabola_peak - self.last_parabola_peak) /
                      self.parabola_peak) < self.parameter_tolerance))
Example #57
def linearRegression(histogram):
    x = []
    y = []
    for i in range(256):
        for k in range(256):
            if histogram.GetBinContent(i, k) != 0:
                x.append(i)
                y.append(k)
    (ar, br) = polyfit(x, y, 1)
    xr = polyval([ar, br], x)
    err = sqrt(sum((xr - y)**2) / len(y))
    if abs(ar) > 1.0:
        (ar, br) = polyfit(y, x, 1)
        xr = polyval([ar, br], y)
        err = sqrt(sum((xr - x)**2) / len(y))
    return err
Example #58
def show_correlation_num_comments_and_socre():
	f = open(os.getcwd()+"\\data\\all_data.json","r")
	data = json.loads(f.read())
	f.close()
	#for data,sr in data:
	x = []
	y = []
	for item in data['children']:
		x.append(item['num_comment'])
		y.append(item['ups']-item['downs'])
	slope, intercept, r, p, std = stats.linregress(x,y)
	ry = polyval([slope, intercept], x)

	print stats.pearsonr(x,y)

	print(slope, intercept, r, p, std)
		#print(ry)
	plot(x,y, 'k.')
	plot(x,ry, 'r.-')
	title("all data")
	pylab.xlabel('num_comments')
	pylab.ylabel('score')
	#legend(['original', 'regression'])

	show()
Example #59
def ar1fit(ts):
    '''
    Fits an AR(1) model to the time series data ts.  AR(1) is a
    linear model of the form
       x_t = beta * x_{t-1} + c + e_{t-1}
    where beta is the coefficient of term x_{t-1}, c is a constant
    and e_{t-1} is an i.i.d. noise term.  Here we assume that e_{t-1}
    is normally distributed. 
    Returns the tuple (beta, c, sigma).
    '''
    # Fitting AR(1) entails finding beta, c, and the noise term.
    # Beta is well approximated by the coefficient of OLS regression
    # on the lag of the data with itself.  Since the noise term is
    # assumed to be i.i.d. and normal, we must only estimate sigma,
    # the standard deviation.

    # Estimate beta
    x = ts[0:-1]
    y = ts[1:]
    p = sp.polyfit(x, y, 1)
    beta = p[0]

    # Estimate c
    c = sp.mean(ts) * (1 - beta)

    # Estimate the variance from the residuals of the OLS regression.
    yhat = sp.polyval(p, x)
    variance = sp.var(y - yhat)
    sigma = sp.sqrt(variance)

    return beta, c, sigma
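A self-contained check of the recipe above on simulated data: generate an AR(1) series with known beta, c and sigma, then recover them with the same lag regression (written with np.polyfit; sp above is the old scipy namespace).

import numpy as np

rng = np.random.default_rng(1)
beta, c, sigma = 0.7, 0.5, 0.3

# Simulate x_t = beta*x_{t-1} + c + e_{t-1}
x = np.empty(5000)
x[0] = c / (1.0 - beta)  # start at the stationary mean
for t in range(1, x.size):
    x[t] = beta * x[t - 1] + c + rng.normal(0.0, sigma)

# Same estimates as ar1fit
p = np.polyfit(x[:-1], x[1:], 1)
beta_hat = p[0]
c_hat = x.mean() * (1.0 - beta_hat)
sigma_hat = np.sqrt(np.var(x[1:] - np.polyval(p, x[:-1])))
print(beta_hat, c_hat, sigma_hat)  # roughly 0.7, 0.5, 0.3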
Example #60
 def setBestFit(self):
     imin = self.chiSqrs.argmin()
     self.polynom = scipy.polyval(self.coeffs[imin][:-1], \
         self.periods[:-1])
     self.bestFit = self.polynom + self.coeffs[imin][-1] * \
         1. / sqrtMN(self.periods[imin], self.periods)
     self.minSqr  = self.chiSqrs[imin]