Example #1
  def linear_regression_numpy(xs, ys):
    # pylint: disable=wrong-import-order, wrong-import-position
    import numpy as np
    from numpy import sum
    xs = np.asarray(xs, dtype=float)
    ys = np.asarray(ys, dtype=float)

    # First do a simple least squares fit for y = a + bx over all points.
    b, a = np.polyfit(xs, ys, 1)

    n = len(xs)
    if n < 10:
      return a, b
    else:
      # Refine this by throwing out outliers, according to Cook's distance.
      # https://en.wikipedia.org/wiki/Cook%27s_distance
      sum_x = sum(xs)
      sum_x2 = sum(xs**2)
      errs = a + b * xs - ys
      s2 = sum(errs**2) / (n - 2)
      if s2 == 0:
        # It's an exact fit!
        return a, b
      h = (sum_x2 - 2 * sum_x * xs + n * xs**2) / (n * sum_x2 - sum_x**2)
      cook_ds = 0.5 / s2 * errs**2 * (h / (1 - h)**2)

      # Re-compute the regression, excluding those points with Cook's distance
      # greater than 0.5, and weighting by the inverse of x to give a more
      # stable y-intercept (as small batches have relatively more information
      # about the fixed overhead).
      weight = (cook_ds <= 0.5) / xs
      b, a = np.polyfit(xs, ys, 1, w=weight)
      return a, b
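The comments above describe a two-pass fit: an ordinary least-squares line, then a refit that drops points whose Cook's distance exceeds 0.5 and weights the remaining points by 1/x. A minimal usage sketch, assuming the function above is available at module level; the synthetic data and the injected outlier are illustrative assumptions:

import numpy as np

xs = np.arange(1.0, 21.0)
ys = 2.0 + 3.0 * xs          # exact line y = 2 + 3x ...
ys[5] += 40.0                # ... plus one outlier the Cook's-distance test should reject

a, b = linear_regression_numpy(xs, ys)
print(a, b)                  # should come out close to (2.0, 3.0) once the outlier is excluded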
Example #2
def var_fit(var_mean,var_name):
	size_list = []
	throuput_list = []
	for var in var_mean:
		get_per,set_per = normal_ops
		get_per_s = "%.2f" % get_per
		set_per_s = "%.2f" % set_per
		if (var_name=='key'):
			upperlimit = 64
			filename = 'data_collected/'+str(var)+'_'+str(normal_value)+'_'+str(normal_hash)+'_'+get_per_s+'_'+set_per_s+'.out'
		elif (var_name=='value'):
			upperlimit = 2048
			filename = 'data_collected/'+str(normal_key)+'_'+str(var)+'_'+str(normal_hash)+'_'+get_per_s+'_'+set_per_s+'.out'
		with open(filename,'r') as f:
			load, time = zip(*[(int(line.strip().split(',')[0]), float(line.strip().split(',')[1])) for line in f])			
			z = np.polyfit(time,load,2)
			p = np.poly1d(z)
			size = var
			throuput = p(1)
			size_list.append(size)
			throuput_list.append(throuput)
	#Record raw data point
	with open ('data_collected/'+var_name+'_throuput','w') as g:
		for size,throuput in zip(size_list,throuput_list):
			g.write(str(size)+','+str(throuput)+'\n')	

	#Record fitted data points
	z = np.polyfit(np.array(size_list),np.array(throuput_list),1)
	p = np.poly1d(z)
	size_fit_list = [i for i in range(1,upperlimit)]
	throuput_fit_list = [p(i) for i in size_fit_list]
	with open ('data_collected/'+var_name+'_throuput_fit','w') as g:
		for size,throuput in zip(np.array(size_fit_list),np.array(throuput_fit_list)):
			g.write(str(size)+','+str(throuput)+'\n')
	var_plot(var_name,list(z))
Example #3
def line_of_best_fit():
    PDout_Weights=[]
    PDin_Weights=[]

    for num in my_range(0, len(x), 1000):
        x_d = x[num:num+1000]
        pdin_d = pdin[num:num+1000]
        pdout_d = pdout[num:num+1000]

        par_pdin = np.polyfit(x_d, pdin_d, 1, full=True)
        par_pdout = np.polyfit(x_d, pdout_d, 1, full=True)

        slope_pdin = par_pdin[0][0]
        slope_pdout = par_pdout[0][0]

        intercept_pdin = par_pdin[0][1]
        intercept_pdout = par_pdout[0][1]

        line_equation_out={}
        line_equation_out['k'] = (x_d[0], x_d[len(x_d)-1])
        line_equation_out['w1_out']= slope_pdout
        line_equation_out['w0_out']= intercept_pdout
        PDout_Weights.append(line_equation_out)

        line_equation_in={}
        line_equation_in['k'] = (x_d[0], x_d[len(x_d)-1])
        line_equation_in['w1_in']= slope_pdin
        line_equation_in['w0_in']= intercept_pdin
        PDin_Weights.append(line_equation_in)

    return (PDout_Weights, PDin_Weights)
Example #4
 def test_polyfit(self):
     "Tests polyfit"
     # On ndarrays
     x = np.random.rand(10)
     y = np.random.rand(20).reshape(-1, 2)
     assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3))
     # ON 1D maskedarrays
     x = x.view(MaskedArray)
     x[0] = masked
     y = y.view(MaskedArray)
     y[0, 0] = y[-1, -1] = masked
     #
     (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True)
     (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, full=True)
     for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
         assert_almost_equal(a, a_)
     #
     (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True)
     (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True)
     for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
         assert_almost_equal(a, a_)
     #
     (C, R, K, S, D) = polyfit(x, y, 3, full=True)
     (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True)
     for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
         assert_almost_equal(a, a_)
Example #5
def meanclip3(xx,yy,slope, clipsig=3.0, maxiter=5, converge_num=0.1, verbose=0):
    from numpy import array, polyfit
    import numpy
    xx=array(xx)
    yy=array(yy)
    xx0=array(xx[:])
    yy0=array(yy[:])
    ct=len(yy)
    iter = 0; c1 = 1.0 ; c2 = 0.0
    while (c1 >= c2) and (iter < maxiter):
        lastct = ct
        pol = polyfit(xx0,yy0,1,full=True) ###
        mean0=pol[0][1]
        slope=pol[0][0]
        sig=numpy.std(yy0-mean0-slope*xx0)
        wsm = numpy.where( abs(yy0-xx0*slope) < mean0+clipsig*sig )
        ct = len(wsm[0])
        if ct > 0:
            xx0=xx0[wsm]
            yy0=yy0[wsm]
        c1 = abs(ct - lastct)
        c2 = converge_num * lastct
        iter += 1
# End of while loop
    pol = polyfit(xx0,yy0,1,full=True) ###
    mean0=pol[0][1]
    slope=pol[0][0]
    sig=numpy.std(yy0-mean0-slope*xx0)
    if verbose: pass
    return mean0, sig,slope,yy0,xx0
Example #6
def getParams(inputF,cosineScore,error,lib,minimumFeature):
    # parse the argument
    def search(lib,row):
        if row['INCHI'] in lib:
            return lib[row['INCHI']]
        return -1
    df = pd.read_csv(inputF,sep='\t')
    # filter out by cosineScore
    df = df[df.MQScore > cosineScore]
    # search for ki average
    lib_df = pd.read_csv(lib)
    lib_df = lib_df[lib_df.polarity.str.contains('non-polar')]
    lib = pd.Series(lib_df.ki_nonpolar_average.values,index=lib_df.INCHI.values).to_dict()
    df['ki_average'] = df.apply(lambda row:search(lib,row),axis = 1)
    df = df[df.ki_average>0]

    #clean the data for polynomial fitting:
    df = df[df['ki_average']>500]
    df = df[df.RT_Query<800]
    if len(df) < minimumFeature:
        return None
    #simply find the polynomial fitting
    p_a = np.polyfit(df.RT_Query,df.ki_average,2)
    # we fit it twice to have more robust results:
    df =df[abs(df.ki_average-np.polyval(p_a,df.RT_Query))/np.polyval(p_a,df.RT_Query)<error]
    p_b = np.polyfit(df.RT_Query,df.ki_average,2)
    return p_b
Example #7
def showExamplePolyFit(xs,ys,fitDegree1 = 1,fitDegree2 = 2):
    pylab.figure()    
    pylab.plot(xs,ys,'r.',ms=2.0,label = "measured")

    # poly fit to noise
    coeeff = numpy.polyfit(xs, ys, fitDegree1)

    # Predict the curve
    pys = numpy.polyval(numpy.poly1d(coeeff), xs)

    se = mse(ys, pys)
    r2 = rSquared(ys, pys)

    pylab.plot(xs,pys, 'g--', lw=5,label="%d degree fit, SE = %0.10f, R2 = %0.10f" %(fitDegree1,se,r2))

    # Poly fit to noise
    coeeffs = numpy.polyfit(xs, ys, fitDegree2)

    # Predict the curve
    pys = numpy.polyval(numpy.poly1d(coeeffs), xs)

    se = mse(ys, pys)
    r2 = rSquared(ys, pys)

    pylab.plot(xs,pys, 'b--', lw=5,label="%d degree fit, SE = %0.10f, R2 = %0.10f" %(fitDegree2,se,r2))

    pylab.legend()
Example #8
	def plot(self):
		self.worksheet()
		fig, ax = plt.subplots()
		axes = [ax, ax.twinx(), ax.twinx()]
		fig.subplots_adjust(right=0.75)
		axes[-1].spines['right'].set_position(('axes', 1.2))
		colors = ('Green', 'Red', 'Blue')
		cur=np.poly1d(np.polyfit(self.DISCHARGE,self.current,2))
		eff=np.poly1d(np.polyfit(self.DISCHARGE,self.EFFICIENCY,2))
		head=np.poly1d(np.polyfit(self.DISCHARGE,self.del_head,2))
		dis=np.linspace(self.DISCHARGE[0],self.DISCHARGE[9],500)
		#Efficiency Axis Plotting
		axes[2].plot(dis,eff(dis), color=colors[0])
		axes[2].plot(self.DISCHARGE,self.EFFICIENCY,'ko',color=colors[0])
		axes[2].set_ylabel('Efficiency (%)', color=colors[0])
		axes[2].tick_params(axis='y', colors=colors[0])
		#Current Axis Plotting
		axes[1].plot(dis,cur(dis), color=colors[1])
		axes[1].plot(self.DISCHARGE,self.current,'k+',color=colors[1])
		axes[1].set_ylabel('Current (A)', color=colors[1])
		axes[1].tick_params(axis='y', colors=colors[1])
		#Head Axis Plotting
		axes[0].plot(dis,head(dis), color=colors[2])
		axes[0].plot(self.DISCHARGE,self.del_head,'kx',color=colors[2])
		axes[0].set_ylabel('Head (m)', color=colors[2])
		axes[0].tick_params(axis='y', colors=colors[2])
		axes[0].set_xlabel('Discharge in lps')
		plt.grid()
		plt.show()
		self.DISCHARGE = []
		self.EFFICIENCY= []
Example #9
def polysmooth(x,y,z,NI,NJ):

    # size of the incoming array
    Nx, Ny = np.shape(z)
    x1d = x[:,0]
    y1d = y[0,:]

    # Get the C coefficients
    #NI = 7
    CIj = np.zeros((NI,Ny))
    for j in range (Ny):
        CIj[:,j] = np.flipud(np.polyfit(x1d,z[:,j],NI-1))

    # Get the D coefficients
    #NJ = 7
    DIJ = np.zeros((NI,NJ))
    for I in range (NI):
        DIJ[I,:] = np.flipud(np.polyfit(y1d,CIj[I,:],NJ-1))
    
    # Reconstruct the entire surface
    zsmooth = np.zeros((Nx,Ny))
    for I in range(NI):
        for J in range(NJ):
            zsmooth += DIJ[I,J]*x**I*y**J

    return zsmooth
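polysmooth above fits each column of z with a degree NI-1 polynomial in x, fits the resulting coefficients with degree NJ-1 polynomials in y, and rebuilds the whole surface from the combined coefficients. A small usage sketch on an illustrative grid (the data and NI = NJ = 3 are assumptions):

import numpy as np

x1d = np.linspace(0.0, 1.0, 20)
y1d = np.linspace(0.0, 2.0, 30)
x, y = np.meshgrid(x1d, y1d, indexing='ij')   # polysmooth reads its axes from x[:, 0] and y[0, :]
z = 1.0 + 2.0 * x + 3.0 * y**2 + 0.01 * np.random.randn(20, 30)

zsmooth = polysmooth(x, y, z, 3, 3)           # 3 coefficients per direction, i.e. quadratic in x and in y
print(np.abs(zsmooth - z).max())              # residual should be on the order of the added noise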
Example #10
def extractfeatures(buf):
    global yp1, yp2, yp3, x
    x = range(len(buf))
    features = []
    link1 = []
    link2 = []
    link3 = []
    for p in buf:
        link1.append(p[0])
        link2.append(p[1])
        link3.append(p[2])
    features.append(avgdelta(link1))
    features.append(avgdelta(link2))
    features.append(avgdelta(link3))
    features.append(abs(link1[0]-link1[len(link1)-1]))
    features.append(abs(link2[0]-link2[len(link2)-1]))
    features.append(abs(link3[0]-link3[len(link3)-1]))
    z1 = np.polyfit(x,link1,2)
    z2 = np.polyfit(x,link2,2)
    z3 = np.polyfit(x,link3,2)
    abslink1 = [abs(z1[i]) for i in range(len(z1)-1)]
    abslink2 = [abs(z2[i]) for i in range(len(z2)-1)]
    abslink3 = [abs(z3[i]) for i in range(len(z3)-1)]
    for a in abslink1:
        features.append(a)
    for a in abslink2:
        features.append(a)
    for a in abslink3:
        features.append(a)
    yp1 = np.poly1d(z1)
    yp2 = np.poly1d(z2)
    yp3 = np.poly1d(z3)
    return features
Example #11
def test_run():
    # Read data
    dates = pd.date_range('2009-01-01', '2012-12-31')  # four years of dates
    symbols = ['SPY','XOM','GLD']
    df = get_data(symbols, dates)
    plot_data(df)

    # Compute daily returns
    daily_returns = compute_daily_returns(df)
    #plot_data(daily_returns, title="Daily returns", ylabel="Daily returns")

    # Scatterplot SPY vs XOM
    daily_returns.plot(kind='scatter',x='SPY',y='XOM')
    beta_XOM,alpha_XOM=np.polyfit(daily_returns['SPY'],daily_returns['XOM'],1)
    print "beta_XOM= ",beta_XOM
    print "alpha_XOM= ",alpha_XOM
    plt.plot(daily_returns['SPY'],beta_XOM*daily_returns['SPY']+alpha_XOM,'-',color='r')
    plt.grid()
    plt.show()

    # Scatterplot SPY vs GLD
    daily_returns.plot(kind='scatter',x='SPY',y='GLD')
    beta_GLD,alpha_GLD=np.polyfit(daily_returns['SPY'],daily_returns['GLD'],1)
    print "beta_GLD= ",beta_GLD
    print "alpha_GLD= ",alpha_GLD
    plt.plot(daily_returns['SPY'],beta_GLD*daily_returns['SPY']+alpha_GLD,'-',color='r')
    plt.grid()
    plt.show()

    # Calculate correlation coefficient
    print(daily_returns.corr(method='pearson'))
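The degree-1 fit above is what turns daily returns into CAPM-style coefficients: the slope is beta and the intercept is alpha. A self-contained sketch with synthetic returns (the numbers are illustrative assumptions):

import numpy as np

spy_ret = np.random.normal(0.0, 0.01, 250)                            # market daily returns
xom_ret = 0.8 * spy_ret + 0.0002 + np.random.normal(0.0, 0.002, 250)

beta, alpha = np.polyfit(spy_ret, xom_ret, 1)                         # slope = beta, intercept = alpha
print(beta, alpha)                                                    # roughly 0.8 and 0.0002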
Example #12
def next_start(X, F, G, i, j,scandirec=0,radius=1.,method='default'):
        radius*=np.sqrt(abs(F[i-1]**2+G[i-1]**2))
        df=j*radius*np.cos(scandirec)
        dg=j*radius*np.sin(scandirec)
        if 'default' in method: #use old root position as starting point, plus potential scandirec modifications
                fval=F[i]+df
                gval=G[i]+dg
        elif 'predict' in method: #use first order prediction of the root's new position
                if i>1:
                        p=np.polyfit(X[i-2:i+1],F[i-2:i+1],2)
                        fval=p[0]*X[i+1]**2+p[1]*X[i+1]+p[2]+df
                        p=np.polyfit(X[i-2:i+1],G[i-2:i+1],2)
                        gval=p[0]*X[i+1]**2+p[1]*X[i+1]+p[2]+dg                     
                elif i==1:
                        fval=F[i]+np.diff(F)[i-1]/np.diff(X)[i-1]*np.diff(X)[i]+df
                        gval=G[i]+np.diff(G)[i-1]/np.diff(X)[i-1]*np.diff(X)[i]+dg
                else:
                        fval=F[i]
                        gval=G[i]
                if 'ldr' in method:
                        gval=-0.75*abs(G[i]+np.diff(G)[i-1]/np.diff(X)[i-1]*np.diff(X)[i])+dg
        elif 'ldr' in method: #lower damping rate (designed to prefer more weakly damped modes)
                gval=-0.75*abs(G[i])+dg #always aim for lower damping rate so the less damped mode will be chosen
                fval=F[i]+0.03*F[i]*np.sign(np.diff(X)[-1])+df #include slight frequency shift (depending on the scan direction) in order to help jump across resonances
        #lower limit for damping rates, to prevent runaway to extremely small values 
        if abs(gval)<1e-13: gval=np.sign(gval)*1e-13
        return [fval,gval] 
Example #13
def fit(data, nz):
    x = [0 for iz in range(0, nz, 1)]
    y = [0 for iz in range(0, nz, 1)]
    z = [iz for iz in range(0, nz, 1)]

    for iz in range(0, nz, 1):
        x[iz], y[iz] = ndimage.measurements.center_of_mass(np.array(data[:,:,iz]))

    #Fit centerline in the Z-X plane using polynomial function
    print('\nFit centerline in the Z-X plane using polynomial function...')
    coeffsx = np.polyfit(z, x, 1)
    polyx = np.poly1d(coeffsx)
    x_fit = np.polyval(polyx, z)
    print('x_fit')
    print(x_fit)

    #Fit centerline in the Z-Y plane using polynomial function
    print('\nFit centerline in the Z-Y plane using polynomial function...')
    coeffsy = np.polyfit(z, y, 1)
    polyy = np.poly1d(coeffsy)
    y_fit = np.polyval(polyy, z)


    #### 3D plot
    fig1 = plt.figure()
    ax = Axes3D(fig1)
    ax.plot(x,y,z,zdir='z')
    ax.plot(x_fit,y_fit,z,zdir='z')
    plt.show()
    return x, y, x_fit, y_fit
Example #14
def DFA(indata,scale,q,m):
    y = np.cumsum(indata-indata.mean())             #Equation 1 in paper
    RMSt = []                                       #Temporary RMS variable: contain F(s,v) value
    F = []                                          #F: Fluctuation function
    N = len(indata)
    print('len indata: ', N)
    for i in range(len(scale)):
        ns = int(np.floor(len(y)/scale[i]))         #number of segments: Ns = int(N/s)
        for v in range(2*ns):
            if v < ns:
                index_start = v*scale[i]
                index_end = (v+1)*scale[i]
            else:
                index_start = N - (v-ns)*scale[i]-scale[i]
                index_end = N - (v-ns)*scale[i]
            index = range(index_start,index_end)    #calculate index for each segment
            yv = y[index_start:index_end]           #Extract values of time series for each segments
            c = np.polyfit(index,yv,m)
            fit = np.polyval(c,index)
            RMSt.append(math.sqrt(np.mean((yv-fit)**2))) #Equation 2. But calculating only F(v,s) not F(v,s)**2
        RMS = np.asarray(RMSt)                      #Convert RMSt to array
        qRMS = RMS**q
        F.append(np.mean(qRMS)**(1.0/q))              #Equation 4
        del RMSt[:]                                 #Reset RMSt[:]
    C = np.polyfit(np.log2(scale),np.log2(F),1)
    H = C[0]                                        #Hurst parameter
    return (H,scale,F)
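A minimal call of the DFA function above, assuming numpy and math are imported in its module; for uncorrelated white noise the fitted Hurst exponent should come out near 0.5 (the data and scales are illustrative assumptions):

import numpy as np

indata = np.random.randn(4096)
scale = np.array([16, 32, 64, 128, 256, 512])
H, scale, F = DFA(indata, scale, q=2, m=1)
print(H)                                       # expected to be roughly 0.5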
Example #15
def dfa(X, Ave = None, L = None, sampling= 1):
    """
    WIP on this function. It is basically copied and pasted from [PYEEG]_, without verification of the maths or unittests.
    """
    X = np.array(X)
    if Ave is None:
        Ave = np.mean(X)
    Y = np.cumsum(X)
    Y -= Ave
    if not L:
        max_power = np.int(np.log2(len(X)))-4
        L = X.size / 2 ** np.arange(4,max_power)
    if len(L)<2:
        raise Exception("Too few values for L. Time series too short?")
    F = np.zeros(len(L)) # F(n) of different given box length n

    for i,n in enumerate(L):
        sampled = 0
        for j in xrange(0,len(X) -n ,n):

            if np.random.rand() < sampling:
                F[i] += np.polyfit(np.arange(j,j+n), Y[j:j+n],1, full=True)[1]
                sampled += 1
        if sampled > 0:
            F[i] /= float(sampled)

    LF = np.array([(l,f) for l,f in zip(L,F) if l>0]).T

    F = np.sqrt(LF[1])
    Alpha = np.polyfit(np.log(LF[0]), np.log(F),1)[0]
    return Alpha
Example #16
def corrNonlinGetPar(linearDet,nonLinearDet,order=2,data_0=0,
    correct_0=0,plot=False,returnCorrectedDet=False):
  """ Find parameters for non linear correction
    *linearDet* should be an 1D array of the detector that is linear
    *nonLinearDet* is the detector that is sussposed to be none linear
    *data_0" is an offset to use for the data (used only if plotting)"
    *correct_0* offset of the "linear detector"""
  p =  np.polyfit(nonLinearDet,linearDet,order)
  p[-1] = p[-1]-correct_0
  if plot:
    d = corrNonlin(nonLinearDet,p,data_0=data_0,correct_0=correct_0)
    plt.plot(linearDet,nonLinearDet,".",label="before correction")
    plt.plot(linearDet,d,".",label="after correction")
    poly_lin = np.polyfit(linearDet,d,1)
    xmin = min(linearDet.min(),0)
    xtemp = np.asarray( (xmin,linearDet.max()) )
    plt.plot(xtemp,np.polyval(poly_lin,xtemp),label="linear fit")
    plt.plot(linearDet,d-np.polyval(poly_lin,linearDet),
       ".",label="difference after-linear")
    plt.xlabel("linearDet")
    plt.ylabel("nonLinearDet")
    plt.legend()
  if order>=2 and p[-3]<0:
    log.warn("corrNonlinGetPar: consistency problem, second order coefficient should \
    be > 0, please double check result (plot=True) or try inverting the data and the\
    correct arguments")

  if returnCorrectedDet:
    return corrNonlin(nonLinearDet,p,data_0=data_0,correct_0=correct_0)
  else:
    return p
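A minimal sketch of calling corrNonlinGetPar with plot=False, assuming numpy is imported as np in the snippet's module; only np.polyfit is exercised, and the data are chosen so the fitted second-order coefficient is positive and the log.warn branch is not hit. The detector response is an illustrative assumption:

import numpy as np

linear = np.linspace(0.0, 10.0, 200)           # the detector assumed to be linear
nonlinear = linear - 0.02 * linear**2          # mild saturation in the other detector

p = corrNonlinGetPar(linear, nonlinear, order=2)
print(p)                                       # polynomial mapping nonLinearDet back onto linearDet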
Example #17
def calc_def_potential(info_file):
	data =[]
	counter = 0
	with open(info_file, 'r') as energy_info:
#		enfo.seek(0)
#		next(enfo)
			for row in energy_info:
				counter += 1
				if counter > 1:
					data.append([float(i) for i in row.split()])
	
	data = np.mat(data)
	eqidx = np.where(data[:,0]==0)[0]
#	eqidx = abs(int(data[0,0])) + 1
	eqvol = data[eqidx,1]
	data[:,1] -= eqvol 
	data[:,4] -= data[:,3]
	data[:,5] -= data[:,3]
	
	x = np.array(np.ravel(data[:,1]))
	y = np.array(np.ravel(data[:,4]))
	z = np.polyfit(x, y, 6)
	VBM_deformation = abs(z[-2]*eqvol)
	
	x = np.array(np.ravel(data[:,1]))
	y = np.array(np.ravel(data[:,5]))
	z = np.polyfit(x, y, 6)
	CBM_deformation = abs(z[-2]*eqvol)
	
	return float(VBM_deformation), float(CBM_deformation)
Example #18
def measure_dA_dphi_fir(lock, li, tp, dA_dphi_before, dA_dphi_after):
    """Correct for impulsive phase shift at end of pulse time."""

    i_tp = np.arange(lock.t.size)[lock.t < tp][-1]
    # Use 20 data points for interpolating; this is slightly over one
    # cycle of our oscillation
    m = np.arange(-10, 11) + i_tp
    # This interpolator worked reasonably for similar, low-frequency sine waves
    interp = interpolate.KroghInterpolator(lock.t[m], lock.x[m])
    x0 = interp(tp)[()]
    # We only need t0 approximately; the precise value of f0 doesn't matter very much.
    t0 = li.t[(li.t < tp)][-1]
    f0 = li.df[(li.t < tp)][-1] + li.f0(t0)
    v0 = interp.derivative(tp)[()]
    x2 = v0 / (2*np.pi*f0)

    phi0 = np.arctan2(-x2, x0)

    ml = masklh(li.t, tp-t_fit, tp)
    mr = masklh(li.t, tp, tp + t_fit)

    A = abs(li.z_out)
    phi = np.unwrap(np.angle(li.z_out))/(2*np.pi)

    mbAl = np.polyfit(li.t[ml], A[ml], 1)
    mbAr = np.polyfit(li.t[mr], A[mr], 1)

    mb_phi_l = np.polyfit(li.t[ml], phi[ml], 1)
    mb_phi_r = np.polyfit(li.t[mr], phi[mr], 1)

    dA = np.polyval(mbAr, tp) - np.polyval(mbAl, tp)
    dphi = np.polyval(mb_phi_r, tp) - np.polyval(mb_phi_l, tp)

    return phi0, dA, dphi
Example #19
    def splitBimodal(self, x, y, largepoly=30):
        p = np.polyfit(x, y, largepoly) # polynomial coefficients for fit

        extrema = np.roots(np.polyder(p))
        extrema = extrema[np.isreal(extrema)]
        extrema = extrema[(extrema - x[1]) * (x[-2] - extrema) > 0] # exclude the endpoints due to false maxima during fitting
        try:
            root_vals = [sum([p[::-1][i]*(root**i) for i in range(len(p))]) for root in extrema]
            peaks = extrema[np.argpartition(root_vals, -2)][-2:] # find two peaks of bimodal distribution

            # want data points between the peaks
            mid, = np.where((x - peaks[0]) * (peaks[1] - x) > 0)
        except:
            warnings.warn("Peak finding failed!")
            return None

        try:
            p_mid = np.polyfit(x[mid], y[mid], 2) # fit middle section to a parabola
            midpoint = np.roots(np.polyder(p_mid))[0]
        except:
            warnings.warn("Polynomial fit between peaks of distribution poorly conditioned. Falling back on using the minimum! May result in inaccurate split determination.")
            if len(mid) == 0:
                return None

            midx = np.argmin(y[mid])
            midpoint = x[mid][midx]

        return midpoint
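A standalone sketch of the same idea on an illustrative bimodal curve: fit a high-order polynomial, keep the real roots of its derivative inside the data range, and look for the two peaks and the valley between them. The curve and the order 12 are assumptions (the method above defaults to order 30):

import numpy as np

x = np.linspace(0.0, 1.0, 400)
y = np.exp(-(x - 0.3)**2 / 0.03) + 0.8 * np.exp(-(x - 0.7)**2 / 0.03)

p = np.polyfit(x, y, 12)
extrema = np.roots(np.polyder(p))
extrema = np.real(extrema[np.isreal(extrema)])
extrema = extrema[(extrema - x[1]) * (x[-2] - extrema) > 0]   # keep interior extrema only
print(np.sort(extrema))   # should include maxima near 0.3 and 0.7 and the valley between them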
Example #20
    def estimate_linear_fit(self, data, split_b, less_than=True):
        """Estimate a linear fit by taking log of data.

        Parameters
        ----------
        data : array
            An array containing the data to be fit

        split_b : float
            The b value to split the data

        less_than : bool
            If True, splitting occurs for bvalues less than split_b

        Returns
        -------
        S0 : float
            The estimated S0 value. (intercept)

        D : float
            The estimated value of D.
        """
        if less_than:
            bvals_split = self.gtab.bvals[self.gtab.bvals <= split_b]
            D, neg_log_S0 = np.polyfit(bvals_split,
                                       -np.log(data[self.gtab.bvals <=
                                                    split_b]), 1)
        else:
            bvals_split = self.gtab.bvals[self.gtab.bvals >= split_b]
            D, neg_log_S0 = np.polyfit(bvals_split,
                                       -np.log(data[self.gtab.bvals >=
                                                    split_b]), 1)

        S0 = np.exp(-neg_log_S0)
        return S0, D
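The fit above relies on the mono-exponential model S(b) = S0 * exp(-b * D): taking -log of the signal makes it linear in b, so the degree-1 fit returns D as the slope and -log(S0) as the intercept. A standalone sketch of the same log-linear trick outside the class (values are illustrative assumptions):

import numpy as np

S0_true, D_true = 100.0, 1.5e-3
bvals = np.array([0.0, 200.0, 400.0, 600.0, 800.0])
signal = S0_true * np.exp(-bvals * D_true)

D, neg_log_S0 = np.polyfit(bvals, -np.log(signal), 1)
S0 = np.exp(-neg_log_S0)
print(S0, D)        # recovers roughly 100.0 and 1.5e-3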
Example #21
def bsa_count(diams, style='single'):
    ''' Returns bsa molecules per unit surface area given a particle diameter,
    and a fitting style.  Essentially just returns the y value of a fit curve
    given x (diameter).'''

    if style=='single':
        z=np.polyfit(x, y, 1)  
        p=np.poly1d(z)        
        return p(diams)
                        
    elif style=='dual':
        dout=[]

        x1=x[0:2] #Make x[0:2]
        y1=y[0:2]# ditto
        z1=np.polyfit(x1, y1, 1)  
        p1=np.poly1d(z1)         
            
        x2=x[1:3]   #Make x[1:3]
        y2=y[1:3] # ditto
        z2=np.polyfit(x2, y2, 1)  
        p2=np.poly1d(z2)         
                
        for d in diams:
            if d < x[1]:  #If d < 30
                dout.append(p1(d))
            else:
                dout.append(p2(d))
        return dout
         
    else:
        raise AttributeError('style must be "single" or "dual", not %s'%style)
Example #22
    def get_spectrum(self,sample_str,x_scale = 'energy'):

        #normalize the signals to the tube current
        direct_beam = self.scan_groups['direct_beam']
        sample = self.scan_groups[sample_str]

        theta = direct_beam['signal']['theta']

        I0 = direct_beam['signal']['intensity']/direct_beam['signal']['tube_current']
        I0_err = direct_beam['signal']['intensity_error']/direct_beam['signal']['tube_current']

        I = sample['signal']['intensity']/sample['signal']['tube_current']
        I_err = sample['signal']['intensity_error']/sample['signal']['tube_current']

        theta_I0bg = direct_beam['background']['theta']
        theta_Ibg = sample['background']['theta']

        I0_bg =  direct_beam['background']['intensity']/direct_beam['background']['tube_current']
        I_bg =  sample['background']['intensity']/sample['background']['tube_current']

        #fit backgrounds
        p0 = np.polyfit(theta_I0bg,I0_bg,self.background_fit_order)
        p = np.polyfit(theta_Ibg,I_bg,self.background_fit_order)

        #compute mux
        mux = -np.log((I-np.polyval(p,theta))/(I0-np.polyval(p0,theta)))
        mux_error = np.sqrt((I0_err/I0)**2 + (I_err/I)**2)

        if x_scale == 'theta':
            return theta+self.theta_calibration, mux, mux_error
        else:
            return energy(theta+self.theta_calibration,*self.analyser), mux, mux_error
Example #23
def fitPlotFW(nGraphs, nNodesIni, nNodesFin, step,sparseFactor =0.25):

    times1 = timeDijkstraMAllPairs(nGraphs, nNodesIni, nNodesFin, step, sparseFactor)
    times2 = timeFloydWarshall(nGraphs, nNodesIni, nNodesFin, step, sparseFactor)

    x = []
    for z in range(nNodesIni, nNodesFin, step):
        x.append(z)

    ydata1 = np.polyfit(x, times1, 2)
    f1 = np.poly1d(ydata1)
    yfit1 = np.linspace(nNodesIni, nNodesFin, (nNodesFin - nNodesIni) // step)

    ydata2 = np.polyfit(x, times2, 2)
    f2 = np.poly1d(ydata2)
    yfit2 = np.linspace(nNodesIni, nNodesFin, (nNodesFin - nNodesIni) // step)

    plt.plot(x,times1,"b.", label="valor real Dijkstra")
    plt.plot(yfit1, f1(yfit1),"r-", label='Dijkstra')
    plt.plot(x,times2, "r.", label="valor real FloydWarshall")
    plt.plot(yfit2,f2(yfit2),"b-", label ='FloydWarshall')
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=2, mode="expand", borderaxespad=0.)
    plt.show()

    return
Example #24
    def FitLine(self, definition):
        '''
        Fits a 1st order polynom (line) to the
        start and end points of a straight path,
        then populates the line with equally spaced
        points, and returns the list of points
        '''
    
        Ax = self.A.get_Pos_X();
        Ay = self.A.get_Pos_Y();
        
        Bx = self.B.get_Pos_X();
        By = self.B.get_Pos_Y();
        
        SubWP_No = int(numpy.linalg.norm(numpy.array([Ax-Bx,Ay-By])) * definition * 0.01);
        '''
        If the path is vertical, the X and Y axes must be swapped before the
        polynom fitting and populating, then switched back to return the
        proper point coordinates
        ''' 
        if abs(Ax - Bx) < 1:

            self.poly = numpy.polyfit([Ay, By], [Ax, Bx], 1);
            prange = numpy.linspace(Ay, By, SubWP_No);

            values = numpy.polyval(self.poly, prange);
            self.SubWP = numpy.array([prange, values]);
            
        else:            
            self.poly = numpy.polyfit([Ax, Bx], [Ay, By], 1);
            prange = numpy.linspace(Ax, Bx, SubWP_No);
            values = numpy.polyval(self.poly, prange);
            self.SubWP = numpy.array([values, prange]);
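A standalone sketch of the non-vertical branch of FitLine: fit a first-order polynomial through the two endpoints, then populate the segment with points spaced according to definition. The endpoint coordinates and the definition value are illustrative assumptions; the method above stores the result on self.SubWP instead of returning it:

import numpy as np

Ax, Ay = 0.0, 0.0
Bx, By = 10.0, 5.0
definition = 100

n_points = int(np.linalg.norm([Ax - Bx, Ay - By]) * definition * 0.01)   # cast to int for np.linspace
poly = np.polyfit([Ax, Bx], [Ay, By], 1)
prange = np.linspace(Ax, Bx, n_points)
values = np.polyval(poly, prange)
sub_waypoints = np.array([values, prange])
print(sub_waypoints.shape)        # (2, 11) for these endpoints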
Example #25
def polyfit(x, y, deg, rcond=None, full=False):
    'units wrapped numpy.polyfit'
    
    _polyfit = np.polyfit

    if full:
        dP, residuals, rank, singular_values, rcond  = np.polyfit(np.array(x), np.array(y), deg, rcond, full)
    else:
        dP = np.polyfit(np.array(x), np.array(y), deg, rcond, full)

    # now we need to put units on P
    # p(x) = p[0] * x**deg + ... + p[deg]
    P = []
    for i, p in enumerate(dP):
        power = deg - i
        X = x**power

        # units on ith element of P from y / X
        uX = Unit(1.0, X.exponents, X.label)
        uy = Unit(1.0, y.exponents, y.label)
        uPi = uy / uX
        # so annoying. if you do dP[i] * uP you lose units.
        P += [uPi * p]

    if full:
        return P, residuals, rank, singular_values, rcond
    else:
        return P
Example #26
def plot_reml(C_eta, sel, sel_label, ax, ax2, sigma_diag):
    coh_arr = np.linspace(0.00, 0.30, 16)
    reml_arr = reml(C_eta, coh_arr, sel, sel_label, ax2, sigma_diag)
    ax.plot(coh_arr, reml_arr, 'm+', label=sel_label)
    ax.set_xlabel(r'$\sigma_i$')
    ax.set_ylabel('REML')
    ax.set_xlim(-0.02,0.30)

    # Fit a polynomial to the points
    coeff=np.polyfit(coh_arr, reml_arr, 4)
    p=np.poly1d(coeff)
    coh_arr2=np.linspace(0.00, 0.20, 201)
    fit=p(coh_arr2)
    ax.plot(coh_arr2,fit)

    # Determine where the minimum occurs
    minimum=sp.fmin(p,0.1,disp=False)
    print("Minimum at %4.3f" % (minimum[0]))
    # The uncertainty occurs when the REML increases by 1 - need to double check
    # To find where the REML increases by 1, we look for the roots
    coeff=np.polyfit(coh_arr, reml_arr-p(minimum[0])-1, 4)
    p=np.poly1d(coeff)
    sol=sp.root(p,[0.0,0.2])

    m=minimum[0]
    upper=sol.x[1]-m
    lower=m-sol.x[0]
    ax.plot(coh_arr2,fit,label="Min at %4.2f+%4.2f-%4.2f" % (m,upper,lower))
    
    return
Example #27
 def homogenize(self, other, maxdiff=1):
     """ Return overlapping part of self and other as (self, other) tuple.
     Homogenize intensities so that the images can be used with
     combine_frequencies. Note that this works best when most of the 
     picture is signal, so use :py:meth:`in_interval` to select the subset
     of your image before applying this method.
     
     Parameters
     ----------
     other : CallistoSpectrogram
         Spectrogram to be homogenized with the current one.
     maxdiff : float
         Threshold for which frequencies are considered equal.
     """
     one, two = self._overlap(other)
     pairs_indices, factors, constants = one._homogenize_params(
         two, maxdiff
     )
     # XXX: Maybe (xd.freq_axis[x] + yd.freq_axis[y]) / 2.
     pairs_freqs = [one.freq_axis[x] for x, y in pairs_indices]
     
     # XXX: Extrapolation does not work this way.
     # XXX: Improve.
     f1 = np.polyfit(pairs_freqs, factors, 3)
     f2 = np.polyfit(pairs_freqs, constants, 3)
     
     return (
         one,
         two * polyfun_at(f1, two.freq_axis)[:, np.newaxis] +
             polyfun_at(f2, two.freq_axis)[:, np.newaxis]
     )
Example #28
def poleward_speed(KD):

    N = int(KD.params['N'])
    trans_MA = KD.params['t_A']
    dt = KD.params['dt']
    start = int(trans_MA/dt)

    trans_MA = KD.params['t_A']
    dt = KD.params['dt']
    trans_AB = anaph_transAB(KD)

    pole_speeds = []
    for ch in KD.chromosomes:
        r_stop = int(ch.cen_A.toa/dt)
        if r_stop - start > 2:
            r_dist = - KD.spbR.traj[start:r_stop] + ch.cen_A.traj[start:r_stop]
            elapsed = np.r_[trans_MA:ch.cen_A.toa:dt]
            (ra,rb) = np.polyfit(elapsed, r_dist, 1)
            pole_speeds.append(ra)
        l_stop = int(ch.cen_B.toa/dt)
        if l_stop - start > 2:
            l_dist = KD.spbL.traj[start:l_stop] - ch.cen_B.traj[start:l_stop]
            elapsed = np.r_[trans_MA:ch.cen_B.toa:dt]
            (la,lb) = np.polyfit(elapsed, l_dist, 1)
            pole_speeds.append(la)

    pole_speeds = np.array(pole_speeds)

    return pole_speeds.mean(), pole_speeds.std()
Example #29
    def fit(self, data):
        magnitude = data[0]
        time = data[1]

        global m_21
        global m_31
        global m_32

        Nsf = 100
        Np = 100
        sf1 = np.zeros(Nsf)
        sf2 = np.zeros(Nsf)
        sf3 = np.zeros(Nsf)
        f = interp1d(time, magnitude)

        time_int = np.linspace(np.min(time), np.max(time), Np)
        mag_int = f(time_int)

        for tau in np.arange(1, Nsf):
            sf1[tau-1] = np.mean(np.power(np.abs(mag_int[0:Np-tau] - mag_int[tau:Np]) , 1.0))
            sf2[tau-1] = np.mean(np.abs(np.power(np.abs(mag_int[0:Np-tau] - mag_int[tau:Np]) , 2.0)))
            sf3[tau-1] = np.mean(np.abs(np.power(np.abs(mag_int[0:Np-tau] - mag_int[tau:Np]) , 3.0)))
        sf1_log = np.log10(np.trim_zeros(sf1))
        sf2_log = np.log10(np.trim_zeros(sf2))
        sf3_log = np.log10(np.trim_zeros(sf3))

        m_21, b_21 = np.polyfit(sf1_log, sf2_log, 1)
        m_31, b_31 = np.polyfit(sf1_log, sf3_log, 1)
        m_32, b_32 = np.polyfit(sf2_log, sf3_log, 1)

        return m_21
Example #30
def optimum_polyfit(x, y, score=functoolz.compose(np.max, np.abs), max_degree=50, stop_at=1e-10):
    """
    Optimize the degree of a polyfit polynomial so that score(y - poly(x)) is minimized.

    :param max_degree: The maximum degree to try. LinAlgErrors are automatically ignored.
    :param stop_at: If a score lower than this is reached, the function returns early
    :param score: The score function that is applied to y - poly(x). Default: max deviation.
    :return: A tuple (poly1d object, degree, score)
    """
    scores = np.empty(max_degree - 1, dtype=np.float64)
    # Ignore rank warnings now, but do not ignore for the final polynomial if not early returning
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', np.RankWarning)
        for deg in range(1, max_degree):
            # Set score to max float value
            try:
                poly = np.poly1d(np.polyfit(x, y, deg))
            except np.linalg.LinAlgError:
                scores[deg - 1] = np.finfo(np.float64).max
                continue
            scores[deg - 1] = score(y - poly(x))
            # Early return if we found a polynomial that is good enough
            if scores[deg - 1] <= stop_at:
                return poly, deg, scores[deg - 1]
    # Find minimum score
    deg = np.argmin(scores) + 1
    # Compute polyfit for that degree
    poly = np.poly1d(np.polyfit(x, y, deg))
    return poly, deg, np.min(scores)
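A minimal call of optimum_polyfit above, assuming numpy, warnings, and functoolz (e.g. toolz.functoolz) are imported in its module; the cubic test data is an illustrative assumption:

import numpy as np

x = np.linspace(-1.0, 1.0, 200)
y = 0.5 * x**3 - x + 0.25            # an exact cubic

poly, degree, best_score = optimum_polyfit(x, y)
print(degree, best_score)            # should stop early at degree 3 with a near-zero max deviation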
Example #31
        s = next_s
        if done:
            train(replay_records)
            score_list.append(score)
            print('episode:', i, 'score:', score, 'max:', max(score_list))
            break
    # Stop and save the model once the average score of the last 10 episodes exceeds 195
    if np.mean(score_list[-10:]) > 195:
        model.save('CartPole-v0-pg.h5')
        break
env.close()

# Plot a chart; the three extra polynomial-fit lines show the overall score trend more clearly
plt.plot(score_list)
x = np.array(range(len(score_list)))
smooth_func = np.poly1d(np.polyfit(x, score_list, 3))
plt.plot(x, smooth_func(x), label='Mean', linestyle='--')
plt.show()
# Test: test_policy_gradient.py
import time
import numpy as np
import gym
from tensorflow.keras import models

saved_model = models.load_model('CartPole-v0-pg.h5')
env = gym.make('CartPole-v0')
for i in range(5):
    s = env.reset()
    score = 0
    while True:
        time.sleep(0.01)
Example #32
def full_plot(args):

    DATA = get_dataset()
    VC, SE, ECR, ICR, TAU2, TAU1, DUR, MONKEY = [], [], [], [], [], [], [], []
    for i in range(len(DATA)):
        args.data_index = i
        params = get_minimum_params(args)
        for vec, VEC in zip(params, [VC, SE, ECR, ICR, TAU2, TAU1]):
            VEC.append(vec)
        DUR.append(DATA[i]['duration'])
        MONKEY.append(DATA[i]['Monkey'])

    # vc
    fig1, ax1 = plt.subplots(1, figsize=(1.5,2.3));plt.subplots_adjust(bottom=.4, left=.6)
    ax1.fill_between([-1., 1.], np.ones(2)*args.vc[0], np.ones(2)*args.vc[1],
                       color='lightgray', alpha=.8, label=r'$\mathcal{D}$ domain')
    ax1.bar([0], [np.array(VC).mean()], yerr=[np.array(VC).std()],
               color='lightgray', edgecolor='k', lw=3)
    ax1.legend(frameon=False)
    print('Vc = ', round(np.array(VC).mean()), '+/-', round(np.array(VC).std()), 'mm/s')
    set_plot(ax1, ['left'], xticks=[], ylabel='$v_c$ (mm/s)')
    # connectivity
    fig2, ax2 = plt.subplots(1, figsize=(2.,2.3));plt.subplots_adjust(bottom=.4, left=.6)
    ax2.bar([0], [np.array(ECR).mean()], yerr=[np.array(ECR).std()],
               color='lightgray', edgecolor='g', lw=3, label='$l_{exc}$')
    print('Ecr=', round(np.array(ECR).mean(),1), '+/-', round(np.array(ECR).std(),1), 'mm/s')
    ax2.bar([1.5], [np.array(ICR).mean()], yerr=[np.array(ICR).std()],
               color='lightgray', edgecolor='r', lw=3, label='$l_{inh}$')
    print('Icr=', round(np.array(ICR).mean(),1), '+/-', round(np.array(ICR).std(),1), 'mm/s')
    ax2.fill_between([-1., 2.5], np.ones(2)*args.Econn_radius[0],
                       np.ones(2)*args.Econn_radius[1],
                       color='lightgray', alpha=.8)
    ax2.legend(frameon=False)
    ax2.annotate("p=%.1e" % ttest_rel(ECR, ICR).pvalue, (0.1, .1), xycoords='figure fraction')
    set_plot(ax2, ['left'], xticks=[], ylabel='extent (mm)')
    # stim extent
    fig3, ax3 = plt.subplots(1, figsize=(1.5,2.3));plt.subplots_adjust(bottom=.4, left=.6)
    ax3.bar([0], [np.array(SE).mean()], yerr=[np.array(SE).std()],
               color='lightgray', edgecolor='k', lw=3)
    print('Se=', round(np.array(SE).mean(),1), '+/-', round(np.array(SE).std(),1), 'mm')
    ax3.fill_between([-1., 1.], np.ones(2)*args.stim_extent[0], np.ones(2)*args.stim_extent[1],
                       color='lightgray', alpha=.8)
    set_plot(ax3, ['left'], xticks=[], ylabel='$l_{stim}$ (mm)', yticks=[0,1,2])

    DUR, TAU1, TAU2 = np.array(DUR), 1e3*np.array(TAU1), 1e3*np.array(TAU2)
    
    fig4, ax4 = plt.subplots(1, figsize=(2.5,2.3));plt.subplots_adjust(bottom=.4, left=.6)
    for d in np.unique(DUR):
        ax4.errorbar([d], [TAU1[DUR==d].mean()], yerr=[TAU1[DUR==d].std()], marker='o', color='k')
    ax4.plot([DUR.min(), DUR.max()],
             np.polyval(np.polyfit(DUR, TAU1, 1), [DUR.min(), DUR.max()]), 'k--', lw=0.5)
    ax4.fill_between([DUR.min(), DUR.max()],
                     1e3*np.ones(2)*args.Tau1[0], 1e3*np.ones(2)*args.Tau1[1],
                       color='lightgray', alpha=.8)
    ax4.annotate("c=%.1e" % pearsonr(DUR, TAU1)[0], (0.1, .2), xycoords='figure fraction')
    ax4.annotate("p=%.1e" % pearsonr(DUR, TAU1)[1], (0.1, .1), xycoords='figure fraction')
    set_plot(ax4, xticks=[10, 50, 100],
             xlabel='$T_{stim}$ (ms)', ylabel='$\\tau_1$ (ms)', yticks=[0, 25, 50])
    
    fig5, ax5 = plt.subplots(1, figsize=(2.5,2.3));plt.subplots_adjust(bottom=.4, left=.6)
    for d in np.unique(DUR):
        ax5.errorbar([d], [TAU2[DUR==d].mean()], yerr=[TAU2[DUR==d].std()], marker='o', color='k')
    ax5.plot([DUR.min(), DUR.max()],
             np.polyval(np.polyfit(DUR, TAU2, 1), [DUR.min(), DUR.max()]), 'k--', lw=0.5)
    ax5.fill_between([DUR.min(), DUR.max()],
                     1e3*np.ones(2)*args.Tau2[0], 1e3*np.ones(2)*args.Tau2[1],
                       color='lightgray', alpha=.8)
    ax5.annotate("c=%.1e" % pearsonr(DUR, TAU2)[0], (0.1, .2), xycoords='figure fraction')
    ax5.annotate("p=%.1e" % pearsonr(DUR, TAU2)[1], (0.1, .1), xycoords='figure fraction')
    set_plot(ax5, xticks=[10, 50, 100],
             xlabel='$T_{stim}$ (ms)', ylabel='$\\tau_2$ (ms)', yticks=[40, 120, 200])
Example #33
def windows(img, min_pix=1, margin=100, num_wind=9, windows_flag=True):
    global left_p, left_q, left_r, right_p, right_q, right_r
    l_point = np.empty(3)
    r_point = np.empty(3)
    out_img = np.dstack((img, img, img)) * 255

    histogram = get_hist(img)
    # find peaks of left and right halves
    mid_point = int(histogram.shape[0] / 2)

    # Creating the base of bins/windows for  left and right lanes
    left_bin_base = np.argmax(histogram[:mid_point])
    right_bin_base = np.argmax(histogram[mid_point:]) + mid_point

    # Creating empty lists to receive left and right lane pixel indices
    leftlane_indices = []
    rightlane_indices = []

    # Setting the height of windows
    bin_h = int(img.shape[0] / num_wind)

    # Finding the x and y positions of all nonzero pixels
    pixel_indices = img.nonzero()
    pixel_y = np.array(pixel_indices[0])
    pixel_x = np.array(pixel_indices[1])

    # Current position to be updated for each window
    current_bin_left = left_bin_base
    current_bin_right = right_bin_base

    # Iterating over the bins/windows
    for w in range(num_wind):
        # Identify window boundaries in x and y (and right and left)
        w_y_bottom = img.shape[0] - (w + 1) * bin_h
        w_y_top = img.shape[0] - w * bin_h

        w_xleft_bottom = current_bin_left - margin
        w_xleft_top = current_bin_left + margin

        w_xright_bottom = current_bin_right - margin
        w_xright_top = current_bin_right + margin

        # Draw the windows on the  image
        if windows_flag == True:
            cv2.rectangle(out_img, (w_xleft_bottom, w_y_bottom),
                          (w_xleft_top, w_y_top), (100, 255, 255), 3)
            cv2.rectangle(out_img, (w_xright_bottom, w_y_bottom),
                          (w_xright_top, w_y_top), (100, 255, 255), 3)

        # Finding the nonzero pixels in x and y within the window
        req_left_pixels = ((pixel_y >= w_y_bottom) & (pixel_y < w_y_top) &
                           (pixel_x >= w_xleft_bottom) &
                           (pixel_x < w_xleft_top)).nonzero()[0]

        req_right_pixels = ((pixel_y >= w_y_bottom) & (pixel_y < w_y_top) &
                            (pixel_x >= w_xright_bottom) &
                            (pixel_x < w_xright_top)).nonzero()[0]

        # Append these indices to the corresponding lists
        leftlane_indices.append(req_left_pixels)
        rightlane_indices.append(req_right_pixels)

        # If we found > minpix pixels, recenter next window on their mean position
        if len(req_left_pixels) > min_pix:
            current_bin_left = int(np.mean(pixel_x[req_left_pixels]))
        if len(req_right_pixels) > min_pix:
            current_bin_right = int(np.mean(pixel_x[req_right_pixels]))

# Concatenate the arrays of left and right lane pixel indices
    leftlane_indices = np.concatenate(leftlane_indices)
    rightlane_indices = np.concatenate(rightlane_indices)

    # Calculating the left and right lane pixel positions
    leftlane_x_pixels = pixel_x[leftlane_indices]
    leftlane_y_pixels = pixel_y[leftlane_indices]

    rightlane_x_pixels = pixel_x[rightlane_indices]
    rightlane_y_pixels = pixel_y[rightlane_indices]

    # Fitting a second order polynomial to each lane
    leftlane_fit = np.polyfit(leftlane_y_pixels, leftlane_x_pixels, 2)
    rightlane_fit = np.polyfit(rightlane_y_pixels, rightlane_x_pixels, 2)

    left_p.append(leftlane_fit[0])
    left_q.append(leftlane_fit[1])
    left_r.append(leftlane_fit[2])

    right_p.append(rightlane_fit[0])
    right_q.append(rightlane_fit[1])
    right_r.append(rightlane_fit[2])

    l_point[0] = np.mean(left_p[-10:])
    l_point[1] = np.mean(left_q[-10:])
    l_point[2] = np.mean(left_r[-10:])

    r_point[0] = np.mean(right_p[-10:])
    r_point[1] = np.mean(right_q[-10:])
    r_point[2] = np.mean(right_r[-10:])

    # Generating x and y values for plotting
    y_values = np.linspace(0, img.shape[0] - 1, img.shape[0])
    leftlane_fit_x = l_point[0] * y_values**2 + l_point[
        1] * y_values + l_point[2]
    rightlane_fit_x = r_point[0] * y_values**2 + r_point[
        1] * y_values + r_point[2]

    out_img[pixel_y[leftlane_indices],
            pixel_x[leftlane_indices]] = [255, 0, 100]
    out_img[pixel_y[rightlane_indices],
            pixel_x[rightlane_indices]] = [0, 100, 255]

    return out_img, (leftlane_fit_x, rightlane_fit_x), (l_point,
                                                        r_point), y_values
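Note the argument order in the two fits above: because lane lines are close to vertical, x is fitted as a second-order polynomial of y, i.e. np.polyfit(y, x, 2). A tiny self-check with synthetic pixel coordinates (values are illustrative assumptions):

import numpy as np

y_pixels = np.arange(0.0, 720.0, 10.0)
x_pixels = 2e-4 * y_pixels**2 + 0.1 * y_pixels + 300.0

fit = np.polyfit(y_pixels, x_pixels, 2)                    # same (y, x) order as above
print(np.allclose(np.polyval(fit, y_pixels), x_pixels))    # True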
Example #34
#6,31 	23,13
#9,15 	24,68
#5,06 	21,89

# Solution.
# Author: Facundo A. Lucianna
# Date: 15/07/19

import matplotlib.pyplot as plt
import numpy as np

X = [7.5, 4.48, 8.60, 7.73, 5.28, 4.25, 6.99, 6.31, 9.15, 5.06]
Y = [28.66, 20.37, 22.33, 26.35, 22.29, 21.74, 23.11, 23.13, 24.68, 21.89]

# Perform a linear fit and a cubic fit
polLineal = np.poly1d(np.polyfit(X, Y, 1))
pol3 = np.poly1d(np.polyfit(X, Y, 3))

# Find the fitted points for each case
Xsorted = X
Xsorted.sort()
AjusteLineal = polLineal(Xsorted)
AjustePol3 = pol3(Xsorted)

fig, ax = plt.subplots()
ax.scatter(X, Y, label='Datos')
ax.plot(X,
        AjusteLineal,
        linewidth=1,
        linestyle='--',
        color='black',
Example #35
U_kenn, N = np.genfromtxt('Kennlinie.dat', unpack=True)

U_zaehl, I = np.genfromtxt('Zaehlrohrstrom.dat', unpack=True)

N_err = np.sqrt(N)

N_miterr = unp.uarray(N, N_err)

I_miterr = unp.uarray(I * 1e-06, 0.05e-06)

# Plots ######################################################################

# Plot of the characteristic curve (Kennlinie) of the Geiger-Müller counter tube

par, cov = np.polyfit(U_kenn[3:36], N[3:36], deg=1, cov=True)
err = np.sqrt(np.diag(cov))

plt.errorbar(U_kenn,
             N,
             yerr=stds(N_miterr),
             fmt='kx',
             label='Messwerte mit Fehler')
x_plot = np.linspace(350, 680, 10000)
plt.plot(x_plot, par[0] * x_plot + par[1], 'r-', label='Fitkurve')
plt.legend(loc="best")
plt.xlabel(r'Spannung $U \:/\:V$')
plt.ylabel(r'Intensität $I\:/\:Imp/60s$')
plt.grid()
plt.tight_layout()
plt.savefig('build/plot_kenn.pdf')
Example #36
    def extractDoppler(self):
        """
        Return the doppler centroid as defined in the HDF5 file.
        """
        import numpy as np

        quadratic = {}
        midtime = (self.rangeLastTime +
                   self.rangeFirstTime) * 0.5 - self.rangeRefTime

        fd_mid = 0.0
        x = 1.0
        for ind, coeff in enumerate(self.dopplerRangeTime):
            fd_mid += coeff * x
            x *= midtime

        ####insarApp style
        quadratic['a'] = fd_mid / self.frame.getInstrument(
        ).getPulseRepetitionFrequency()
        quadratic['b'] = 0.
        quadratic['c'] = 0.

        ####For roiApp more accurate
        ####Convert stuff to pixel wise coefficients
        from isceobj.Util import Poly1D

        coeffs = self.dopplerRangeTime
        dr = self.frame.getInstrument().getRangePixelSize()
        rref = 0.5 * Const.c * self.rangeRefTime
        r0 = self.frame.getStartingRange()
        norm = 0.5 * Const.c / dr

        dcoeffs = []
        for ind, val in enumerate(coeffs):
            dcoeffs.append(val / (norm**ind))

        poly = Poly1D.Poly1D()
        poly.initPoly(order=len(coeffs) - 1)
        poly.setMean((rref - r0) / dr - 1.0)
        poly.setCoeffs(dcoeffs)

        pix = np.linspace(0,
                          self.frame.getNumberOfSamples(),
                          num=len(coeffs) + 1)
        evals = poly(pix)
        fit = np.polyfit(pix, evals, len(coeffs) - 1)
        self.frame._dopplerVsPixel = list(fit[::-1])
        print('Doppler Fit: ', fit[::-1])

        #EMG - 20160420 This section was introduced in the populateMetadata method by EJF in r2022
        #Its purpose seems to be to set self.doppler_coeff and self.azfmrate_coeff, which don't seem
        #to be used anywhere in ISCE. Need to take time to understand the need for this and consult
        #with EJF.
        #
        ## save the Doppler centroid coefficients, converting units from .h5 file
        ## units in the file are quadratic coefficients in Hz, Hz/sec, and Hz/(sec^2)
        ## ISCE expects Hz, Hz/(range sample), Hz/(range sample)^2
        ## note that RS2 Doppler values are estimated at time dc.dopplerCentroidReferenceTime,
        ## so the values might need to be adjusted for ISCE usage
        ## adapted from RS2 version EJF 2015/09/05
        #        poly = self.frame._dopplerVsPixel
        #        rangeSamplingRate = self.frame.getInstrument().getPulseRepetitionFrequency()
        #        # need to convert units
        #        poly[1] = poly[1]/rangeSamplingRate
        #        poly[2] = poly[2]/rangeSamplingRate**2
        #        self.doppler_coeff = poly
        #
        ## similarly save Doppler azimuth fm rate values, converting units
        ## units in the file are quadratic coefficients in Hz, Hz/sec, and Hz/(sec^2)
        ## units are already converted below
        ## Guessing that ISCE expects Hz, Hz/(azimuth line), Hz/(azimuth line)^2
        ## note that RS2 Doppler values are estimated at time dc.dopplerRateReferenceTime,
        ## so the values might need to be adjusted for ISCE usage
        ## modified from RS2 version EJF 2015/09/05
        ## CSK Doppler azimuth FM rate not yet implemented in reading section, set to zero for now
        #
        #        fmpoly = self.dopplerRateCoeffs
        #        # don't need to convert units
        ##        fmpoly[1] = fmpoly[1]/rangeSamplingRate
        ##        fmpoly[2] = fmpoly[2]/rangeSamplingRate**2
        #        self.azfmrate_coeff = fmpoly
        #EMG - 20160420

        return quadratic
Example #37
    def get_text_lines(self, text_proposals, scores, im_size):
        """
        text_proposals:boxes
        
        """
        # tp=text proposal
        # First build the graph to determine which small proposal boxes make up each text line
        tp_groups=self.group_text_proposals(text_proposals, scores, im_size)
        
        # Create an [l, 8] array meaning [xmin, ymin, xmax, ymax, avg_score, k, b, avg_height+2.5]
        # k and b are the slope and intercept of the line fitted through the box centers,
        # which can later be used to rotate/deskew the bitmap
        text_lines=np.zeros((len(tp_groups), 8), np.float32)

        for index, tp_indices in enumerate(tp_groups):
            # All the small boxes belonging to this text line
            text_line_boxes=text_proposals[list(tp_indices)]

            # Compute the center x, y coordinates of each small box
            X = (text_line_boxes[:,0] + text_line_boxes[:,2]) / 2
            Y = (text_line_boxes[:,1] + text_line_boxes[:,3]) / 2
            
            # Polynomial fit: fit a straight line (least squares) through the centers computed above
            z1 = np.polyfit(X,Y,1)

            # Minimum x coordinate of the text line
            x0=np.min(text_line_boxes[:, 0])
            # Maximum x coordinate of the text line
            x1=np.max(text_line_boxes[:, 2])

            # Half the width of a small box
            offset=(text_line_boxes[0, 2]-text_line_boxes[0, 0])*0.5

            # Fit a line through the top-left corners of all small boxes, then evaluate the y values
            # at the leftmost and rightmost x coordinates of the text line
            lt_y, rt_y=self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 1], x0+offset, x1-offset)
            # Fit a line through the bottom-left corners of all small boxes, then evaluate the y values
            # at the leftmost and rightmost x coordinates of the text line
            lb_y, rb_y=self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 3], x0+offset, x1-offset)

            # Use the mean score of all small boxes as the text line score
            score=scores[list(tp_indices)].sum()/float(len(tp_indices))

            text_lines[index, 0]=x0
            text_lines[index, 1]=min(lt_y, rt_y)  # smaller y coordinate of the text line's top edge
            text_lines[index, 2]=x1
            text_lines[index, 3]=max(lb_y, rb_y)  # larger y coordinate of the text line's bottom edge
            text_lines[index, 4]=score  # text line score
            text_lines[index, 5]=z1[0]  # k and b of the line fitted through the centers
            text_lines[index, 6]=z1[1]
            height = np.mean( (text_line_boxes[:,3]-text_line_boxes[:,1]) )  # average height of the small boxes
            text_lines[index, 7]= height + 2.5

        # Fine-tune further to get an [l, 9] array: the coordinates of the 4 corner points plus a confidence score
        text_recs = np.zeros((len(text_lines), 9), float)
        index = 0
        for line in text_lines:
            b1 = line[6] - line[7] / 2  # from the height and the fitted center line, get the b values of the top and bottom lines
            b2 = line[6] + line[7] / 2
            x1 = line[0]
            y1 = line[5] * line[0] + b1  # top left
            x2 = line[2]
            y2 = line[5] * line[2] + b1  # top right
            x3 = line[0]
            y3 = line[5] * line[0] + b2  # bottom left
            x4 = line[2]
            y4 = line[5] * line[2] + b2  # bottom right
            disX = x2 - x1
            disY = y2 - y1
            width = np.sqrt(disX * disX + disY * disY)  # text line width

            fTmp0 = y3 - y1  # text line height
            fTmp1 = fTmp0 * disY / width
            x = np.fabs(fTmp1 * disX / width)  # compensation
            y = np.fabs(fTmp1 * disY / width)
            if line[5] < 0:
                x1 -= x
                y1 += y
                x4 += x
                y4 -= y
            else:
                x2 += x
                y2 += y
                x3 -= x
                y3 -= y
            text_recs[index, 0] = x1
            text_recs[index, 1] = y1
            text_recs[index, 2] = x2
            text_recs[index, 3] = y2
            text_recs[index, 4] = x3
            text_recs[index, 5] = y3
            text_recs[index, 6] = x4
            text_recs[index, 7] = y4
            text_recs[index, 8] = line[4]
            index = index + 1

        return text_recs
Example #38
def fit_exp_linear(t, y, C=0):
    y = y - C
    y = np.log(y)
    K, A_log = np.polyfit(t, y, 1)
    A = np.exp(A_log)
    return A, K
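fit_exp_linear above linearizes y = A*exp(K*t) + C by subtracting the known offset C and taking the log, so a degree-1 fit returns K as the slope and log(A) as the intercept. A short sketch with illustrative values:

import numpy as np

A_true, K_true, C = 2.5, -0.8, 1.0
t = np.linspace(0.0, 4.0, 50)
y = A_true * np.exp(K_true * t) + C

A, K = fit_exp_linear(t, y, C)
print(A, K)          # recovers roughly 2.5 and -0.8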
Example #39
contours_codeStrap = cv2.findContours(canny_codeStrap, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
contoured_codeStrap = cv2.drawContours(codeStrap.copy(), contours_codeStrap, -1, (0, 255, 0), 1)
show(contoured_codeStrap, "contoured_codeStrap")

pointsX = []
pointsY = []
for contour in contours_codeStrap:
    moment = cv2.moments(contour)
    cX = (moment["m10"] / moment["m00"])
    cY = (moment["m01"] / moment["m00"])
    pointsX.append(cX)
    pointsY.append(cY)
xArr = np.array(pointsX)
yArr = np.array(pointsY)
m, b = np.polyfit(xArr, yArr, 1)
angle = ((math.atan(m) / math.pi) * 180)
if angle >= 0:
    angle = angle - 90
else:
    angle = 90 + angle
pointsY, pointsX = zip(*sorted(zip(pointsY, pointsX)))
xCenter = sum(pointsX)/len(pointsX)
yCenter = sum(pointsY)/len(pointsY)
image_center = (xCenter, yCenter)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1)
fixed = cv2.warpAffine(colorMasked.copy(), rot_mat, colorMasked.shape[1::-1], flags=cv2.INTER_CUBIC)
show(fixed, "fixed")

######################################
Example #40
def render_voxel_ani(vox_list):
    '''Render latent space traversal as animation.
       Displays np array in the browser as a mesh using np2vox func and three.js lib'''    
    verts_list = []
    faces_list = []
    for vox in vox_list:
        # compute the mesh data for each voxel array
        verts, faces = np2vox(vox)
        verts_list.append(verts)
        faces_list.append(faces)
    # json version of list of mesh data
    mesh_data = {'verts': verts_list, 'faces': faces_list}
    json_meshes = json.dumps(mesh_data)
    html = Template('''<html>
            <head>
                <title>Viewer</title>
                <meta charset="utf-8">
                <meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
                <link rel="stylesheet" type="text/css" href="css/styles1.css"/>
            </head>
            <body>
                <canvas id="canvas"></canvas>
                <div id="top_panel"></div>
                <div id="bottom_panel">  
                </div>

                <script src="js/three.min.js"></script>
                <script src="js/OrbitControls.js"></script>
              
                <script>
                    var renderer = new THREE.WebGLRenderer({canvas: document.getElementById('canvas'), antialias: true});
                    var camera = new THREE.PerspectiveCamera(70, window.innerWidth / window.innerHeight, 0.1, 10000);
                    var scene = new THREE.Scene();

                    renderer.setClearColor(0x37373B); 
                    renderer.setSize(window.innerWidth, window.innerHeight);
                    renderer.setPixelRatio(window.devicePixelRatio);
                    document.body.appendChild(renderer.domElement);
                    
                    //scene and camera setup
                    camera.position.set(-200, 200, -200);
                    camera.up = new THREE.Vector3(0, 1, 0);
                    camera.lookAt(new THREE.Vector3(0, 0, 0))
                    scene.add(camera);
                
                    //controls
                    var controls = new THREE.OrbitControls(camera, renderer.domElement);
                    controls.target.set( 0, 0, 0);
                    controls.update();
                    //controls.minDistance = 150;
                    controls.maxDistance = 2000;
                    controls.zoomSpeed = 0.5;
                    controls.enablePan = true;
                    controls.rotateSpeed = 0.5;

                    //lights
                    var light1 = new THREE.AmbientLight(0xffffff, 1.0);
                    scene.add(light1);
                    var light2 = new THREE.PointLight(0xff3333, 0.75);
                    light2.position.set(100, 200, 500);
                    scene.add(light2)
                    var light4 = new THREE.PointLight(0x3333ff, 0.75);
                    light2.position.set(-100, 200, -500);
                    scene.add(light4)
                    var light3 = new THREE.SpotLight(0xddddff, 1);
                    light3.position.set(-300, -300, 300);
                    scene.add(light3);

                    window.addEventListener('resize',windowResize, false);

                    function windowResize(){
                        camera.aspect = window.innerWidth / window.innerHeight;
                        camera.updateProjectionMatrix();
                        renderer.setSize( window.innerWidth, window.innerHeight);
                    }

                    function render() {
                        requestAnimationFrame(render);
                        renderer.render(scene, camera);
                    }

                    var material = new THREE.MeshLambertMaterial({color: 0x616c72});
                    var material1 = new THREE.MeshLambertMaterial({
                                color: 0x000000,
                                wireframe: true,
                                transparent: true,
                                opacity: 0.7,                                
                    });

                    var geo = new THREE.PlaneGeometry(200, 200);
                    var mat = new THREE.MeshLambertMaterial({
                                color: 0xe0e3e5,
                                wireframe: false,
                                transparent: true,
                                opacity: 0.7,
                                side: THREE.DoubleSide 
                    });

                    var plane = new THREE.Mesh(geo, mat);
                    plane.rotateX(Math.PI / 2);
                    scene.add(plane);
                    var x = {{x}};
                    var y = {{y}};
                    var z = {{z}};
                    var cube_geo = new THREE.BoxGeometry(5, 5, 5);
                    var cube_mat = new THREE.MeshLambertMaterial({ color: 0x442222});
                    var cube = new THREE.Mesh(cube_geo, cube_mat);
                    scene.add(cube);
                    cube.position.set(-102.5, 0, -102.5);
                    
                    // mesh
                    var mesh_data = JSON.parse(JSON.stringify({{ mesh_list }}));
                    var verts_list = mesh_data.verts;
                    var faces_list = mesh_data.faces;

                    for (j=0; j < verts_list.length; j++)
                    {
                        // build each object as a three js mesh
                        verts = verts_list[j];
                        faces = faces_list[j];
                        
                        var geometry = new THREE.Geometry();
                        for (i=0; i < verts.length; i++){
                            geometry.vertices.push(new THREE.Vector3(verts[i][1], verts[i][2], verts[i][0]));
                        }
                        for (i=0; i < faces.length; i++){
                            geometry.faces.push(new THREE.Face3(faces[i][0], faces[i][1], faces[i][2]));
                            geometry.faces.push(new THREE.Face3(faces[i][2], faces[i][3], faces[i][0]));
                        }
                        geometry.computeBoundingSphere();
                        geometry.computeFaceNormals();
                        geometry.computeVertexNormals();
                        var voxmesh = new THREE.Mesh(geometry, material);
                        voxmesh.name = 'mesh' + j.toString();
                        voxmesh.visible = false;
                        voxmesh.position.set(x, y, z);
                        scene.add(voxmesh);
                        console.log('built mesh');
                    }

                    var delay = {{ speed }};
                    var i = 7;
                    setInterval(function()
                    {
                        if (i == scene.children.length - 1){
                            scene.children[i-1].visible = false;
                            i = 7;
                        } else {
                            if (i != 7){
                                scene.children[i-1].visible = false;
                            }
                            scene.children[i].visible = true;
                            i++;
                        }
                    }, delay);

                    render();
                </script>
            </body>
        </html>''')

    # animation speed line eq
    x = [5, 20]
    y = [75, 8]
    c = np.polyfit(x, y, 1)
    line = np.poly1d(c)
    new_html = html.render(mesh_list=json_meshes, 
                            x=-vox_list[0].shape[0], 
                            y=vox_list[0].shape[1] / 2, 
                            z=-vox_list[0].shape[2],
                            speed=line(len(vox_list)) * len(vox_list))
    
    path = get_path('templates', 'template_ani.html')
    with open(path, 'w') as f:
        f.write(new_html)
    webbrowser.open(path, new=2) 
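A quick standalone check of the "animation speed line eq" above: the two anchor points (5 frames, 75) and (20 frames, 8) define a line whose value, multiplied by the frame count, becomes the delay handed to setInterval in the template. The frame counts below are illustrative assumptions; the sketch only prints what that expression evaluates to.

import numpy as np

x = [5, 20]
y = [75, 8]
line = np.poly1d(np.polyfit(x, y, 1))

# assumed: the product is interpreted as the per-frame delay (ms) by setInterval
for n_frames in (5, 10, 20):
    print(n_frames, 'frames ->', round(line(n_frames) * n_frames, 1), 'ms delay')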
Ejemplo n.º 41
0
 def reg_func(_x, _y):
     return np.polyval(np.polyfit(_x, _y, order), grid)
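reg_func above is a nested helper: order and grid are taken from its enclosing scope. A minimal self-contained usage sketch, where the sample data, order, and grid are assumptions made for illustration only:

import numpy as np

order = 1                         # assumed polynomial order from the enclosing scope
grid = np.linspace(0, 10, 50)     # assumed evaluation grid from the enclosing scope

def reg_func(_x, _y):
    return np.polyval(np.polyfit(_x, _y, order), grid)

_x = np.array([0.0, 2.0, 4.0, 6.0, 8.0, 10.0])
_y = 3.0 * _x + 1.0
y_hat = reg_func(_x, _y)          # fitted values evaluated on the grid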
Ejemplo n.º 42
0
def get_r2_numpy(x, y):
    slope, intercept = np.polyfit(x, y, 1)
    r_squared = 1 - (sum(
        (y - (slope * x + intercept))**2) / (len(y) * np.var(y)))
    return r_squared
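Since np.var defaults to the population variance (ddof=0), len(y) * np.var(y) equals the total sum of squares, so the expression above is the usual R-squared. A small check on made-up data (the sample arrays are assumptions):

import numpy as np

x = np.arange(10, dtype=float)
y = 2.0 * x + 1.0 + np.array([0.1, -0.2, 0.05, 0.3, -0.1, 0.0, 0.2, -0.3, 0.15, -0.05])

slope, intercept = np.polyfit(x, y, 1)
ss_res = np.sum((y - (slope * x + intercept)) ** 2)
ss_tot = len(y) * np.var(y)       # identical to np.sum((y - y.mean()) ** 2)
print('R^2 =', 1 - ss_res / ss_tot)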
Ejemplo n.º 43
0
    def _draw_attack_annotation(self, win, interval=0, traj='straight'):
        """

        Args:
            win:
            interval:
            traj: 'straight', 'parabola','updown'

        Returns:

        """
        #TODO change the time from contact to distance dependent
        if self.attack_anno_counter != 0:
            fps = 100  #from game class
            start_x = self.bullet_from_x
            start_y = self.bullet_from_y
            end_x = self.attacked_enemy.x  #- self.attacked_enemy.width//3
            end_y = self.attacked_enemy.y  # - self.attacked_enemy.height//3
            start_angle = 45  #deg
            end_angle = -80
            counter_max = fps * interval

            if self.attack_anno_counter < counter_max * 0.9:
                angle = (
                    end_angle - start_angle
                ) * self.attack_anno_counter / counter_max + start_angle

                if traj == 'straight':
                    draw_x = (
                        end_x - start_x
                    ) * self.attack_anno_counter / counter_max + start_x
                    draw_y = (
                        end_y - start_y
                    ) * self.attack_anno_counter / counter_max + start_y
                    attack_img = self.attack_img

                elif traj == 'parabola':
                    draw_x = (
                        end_x - start_x
                    ) * self.attack_anno_counter / counter_max + start_x
                    middle_x = (start_x + end_x) // 2
                    middle_y = (start_y + end_y) // 2 - 50
                    a, b, c = np.polyfit([start_x, middle_x, end_x],
                                         [start_y, middle_y, end_y], 2)
                    draw_y = draw_x**2 * a + draw_x * b + c
                    attack_img = pygame.transform.rotate(
                        self.attack_img, angle)

                elif traj == 'updown':
                    if self.attack_anno_counter < counter_max * 3 // 4:
                        draw_x = start_x
                        ratio = (1 -
                                 self.attack_anno_counter / counter_max * 2)
                        draw_y = int(start_y * ratio)
                        attack_img = self.attack_img
                    else:
                        draw_x = end_x - self.attack_img.get_width() // 2
                        ratio = (self.attack_anno_counter -
                                 counter_max * 3 // 4) / counter_max * 4
                        draw_y = int(
                            (end_y + self.attacked_enemy.height) * ratio)
                        attack_img = pygame.transform.flip(
                            self.attack_img, False, True)
                # draw the attack image at the computed position
                win.blit(attack_img, (draw_x, draw_y))
                self.attack_anno_counter += 1
            else:
                self.attacked_enemy.hit(self.damage)
                self.attack_anno_counter = 0
                self.attacked_enemy = []
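The 'parabola' branch above fits a quadratic through the start point, a raised midpoint, and the end point, then evaluates it at the interpolated x position. A standalone sketch of just that step; the coordinates and interpolation fractions are assumptions for illustration:

import numpy as np

start_x, start_y = 100, 300
end_x, end_y = 400, 320
middle_x = (start_x + end_x) // 2
middle_y = (start_y + end_y) // 2 - 50   # raise the midpoint so the projectile arcs

a, b, c = np.polyfit([start_x, middle_x, end_x], [start_y, middle_y, end_y], 2)

for frac in (0.0, 0.25, 0.5, 0.75, 1.0):
    draw_x = (end_x - start_x) * frac + start_x
    draw_y = draw_x ** 2 * a + draw_x * b + c
    print(round(draw_x), round(draw_y))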
Ejemplo n.º 44
0
#141206150652 2549262 512 ver: 5 of: 0 mapr: 0 mapw: 0 redr: 0 redw: 0 red_input 28073830 linecount: 0 start 141206150454 mapend: 141206150644 end 141206150652
import numpy as np
import matplotlib.pyplot as plt
#map input, mapr, mapw, redr, redw, red input
data = np.loadtxt("terasort.txt", usecols=(3, 9, 11, 13, 15, 17), delimiter=" ")

mapr = np.polyfit(data[:,0], data[:,1]/(1024*1024), 1)
mapw = np.polyfit(data[:,0], data[:,2]/(1024*1024), 1)
redr = np.polyfit(data[:,5]/(1024*1024), data[:,3]/(1024*1024), 1)
redw = np.polyfit(data[:,5]/(1024*1024), data[:,4]/(1024*1024), 1)

x = np.array(range(1,11)) * 1024
y_mapr = x * mapr[0] + mapr[1]
plt.subplot(2,2,1)
plt.plot(data[:,0], data[:,1]/(1024*1024), "*", x, y_mapr, "-")
plt.xlabel("Map Input (MBytes) %f * x + %f" % (mapr[0], mapr[1]))
plt.ylabel("Remote Read (MBytes)")

y_mapw = x * mapw[0] + mapw[1]
plt.subplot(2,2,2)
plt.plot(data[:,0], data[:,2]/(1024*1024), "*", x, y_mapw, "-")
plt.xlabel("Map Input (MBytes) %f * x + %f" % (mapw[0], mapw[1]))
plt.ylabel("Remote Write (MBytes)")

x = np.array(range(1,11)) * 1024
y_redr = x * redr[0] + redr[1]
plt.subplot(2,2,3)
plt.plot(data[:,5]/(1024*1024), data[:,3]/(1024*1024), "*", x, y_redr, "-")
plt.xlabel("Reduce Input (MBytes) %f * x + %f" % (redr[0], redr[1]))
plt.ylabel("Remote Read (MBytes)")
Ejemplo n.º 45
0
def get_regi_params(array_3d,
                    ref_ind_num=0,
                    sig_mask=3,
                    thres_rel=0.2,
                    poly_deg=2,
                    rotation_multplier=1,
                    translation_multiplier=1,
                    diagnostic=False,
                    show_trace=True,
                    use_ransac=False,
                    frame_rate=3.33):
    """
	Get the rigid registration parameters.

	Parameters
	----------
	array_3d : ndarray
		3d ndarray from tif file.
	ref_ind_num : int, optional
		reference index number.
	sig_mask : float, optional
		Sigma for mask generation.
	thres_rel : float, optional
		Relative threshold for mask generation.
	poly_deg : int, optional
		Polynomial degree.
	rotation_multplier : float, optional
		Use this number to manually control the rotation amplitude.
	translation_multiplier : float, optional
		Use this number to manually control the translation amplitude.
	diagnostic : bool, optional
		If True, run the diagnostic plot.
	show_trace : bool, optional
		If True, overlay the mask boundary on the reference image in the diagnostic plot.
	use_ransac : bool, optional
		If True, use the RANSAC polynomial fit for the rotation angle.
	frame_rate : float, optional
		Frame rate used to convert frame index to time in the diagnostic plot.

	Returns
	-------
	regi_params_array_2d: ndarray
		Registration parameters for each frame.
		Five columns: 'x0', 'y0', 'angle', 'delta_x', 'delta_y'.
	"""

    # """
    # ~~~~~~~~~~~~~~~~~~~~~~~~Get Ref_mask and Ref_props~~~~~~~~~~~~~~~~~~~~~~~~
    # """

    Ref = array_3d[ref_ind_num]
    Ref_mask = get_mask(Ref, sig=sig_mask, thres_rel=thres_rel)
    Ref_props = regionprops(Ref_mask)

    # """
    # ~~~~~~~~~~~~~~~~Get registration parameters for each frame~~~~~~~~~~~~~~~~
    # """

    regi_params_array_2d = np.zeros((len(array_3d), 5))
    for i in range(len(array_3d)):
        Reg = array_3d[i]
        Reg_mask = get_mask(Reg, sig=sig_mask, thres_rel=thres_rel)
        Reg_props = regionprops(Reg_mask)

        # params for rotation
        angle = (Ref_props[0].orientation -
                 Reg_props[0].orientation) / np.pi * 180
        center = (Reg_props[0].centroid[1], Reg_props[0].centroid[0])

        # params for translation
        delta_r = Reg_props[0].centroid[0] - Ref_props[0].centroid[0]
        delta_c = Reg_props[0].centroid[1] - Ref_props[0].centroid[1]
        delta_y, delta_x = delta_r, delta_c

        regi_params_array_2d[i] = np.array(
            [center[0], center[1], angle, delta_x, delta_y])

        print("Get params NO.%d is done!" % i)

    # """
    # ~~~~~~~~~~~~~~~~Polynomial fitting of the rotation~~~~~~~~~~~~~~~~
    # """

    index = range(len(regi_params_array_2d))

    angle_raw = np.array(regi_params_array_2d[:, 2])
    poly_params1 = np.polyfit(index, angle_raw, poly_deg)
    poly_params2 = ransac_polyfit(index,
                                  angle_raw,
                                  poly_deg,
                                  min_sample_num=len(index) // 2,
                                  residual_thres=0.1,
                                  max_trials=300)
    p1 = np.poly1d(poly_params1)
    p2 = np.poly1d(poly_params2)
    angle_fit1 = p1(index) * rotation_multplier
    angle_fit2 = p2(index) * rotation_multplier
    if use_ransac:
        regi_params_array_2d[:, 2] = np.array(angle_fit2)
    else:
        regi_params_array_2d[:, 2] = np.array(angle_fit1)

    regi_params_array_2d[:, 3] = regi_params_array_2d[:, 3] * translation_multiplier
    regi_params_array_2d[:, 4] = regi_params_array_2d[:, 4] * translation_multiplier

    # """
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Diagnostic~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # """

    if diagnostic:
        fig, ax = plt.subplots(1, 2, figsize=(12, 6))
        index = np.array(index) / frame_rate
        ax[0].plot(index, angle_raw, '.-r', label='RAW')
        ax[0].plot(index, angle_fit1, '-k', label='POLY')
        ax[0].plot(index, angle_fit2, '-b', label='RANSAC')
        ax[0].legend(loc='upper right')

        if show_trace:
            Ref = adjust_gamma(Ref, gain=1)
            trace = mark_boundaries(Ref, Reg_mask)
            ax[1].imshow(trace, cmap='gray')
        else:
            ax[1].imshow(Reg_mask, cmap='gray')

        anno_ellipse(ax[1], Ref_props)
        anno_ellipse(ax[1], Reg_props)

        format_ax(ax[0],
                  xlabel=r'$\mathbf{Time (s)}$',
                  ylabel=r'$\mathbf{\Delta\theta (deg)}$',
                  label_fontsize=15,
                  label_fontweight='bold',
                  tklabel_fontsize=13,
                  tklabel_fontweight='bold',
                  ax_is_box=True)

        ax[1].set_xticks([])
        ax[1].set_yticks([])
        plt.show()

    return regi_params_array_2d
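get_regi_params relies on a project helper, ransac_polyfit, that is not shown in this snippet. Below is a minimal sketch of what such a helper might look like, matching the call signature used above (subsample, fit, count inliers, keep the best fit); the real helper may differ, for instance by refitting on the final inlier set. The seed argument is an addition for reproducibility of the sketch.

import numpy as np

def ransac_polyfit(x, y, deg, min_sample_num, residual_thres, max_trials, seed=0):
    """RANSAC-style np.polyfit: keep the fit whose inlier count is largest."""
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    rng = np.random.default_rng(seed)
    best_params = np.polyfit(x, y, deg)      # fall back to the plain fit
    best_inliers = -1
    for _ in range(max_trials):
        idx = rng.choice(len(x), size=min_sample_num, replace=False)
        params = np.polyfit(x[idx], y[idx], deg)
        residuals = np.abs(np.polyval(params, x) - y)
        n_inliers = int(np.sum(residuals < residual_thres))
        if n_inliers > best_inliers:
            best_inliers = n_inliers
            best_params = params
    return best_params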
Ejemplo n.º 46
0
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """
    NOTE: this is the function you might want to use as a starting point once you want to 
    average/extrapolate the line segments you detect to map out the full
    extent of the lane (going from the result shown in raw-lines-example.mp4
    to that shown in P1_example.mp4).  
    
    Think about things like separating line segments by their 
    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
    line vs. the right line.  Then, you can average the position of each of 
    the lines and extrapolate to the top and bottom of the lane.
    
    This function draws `lines` with `color` and `thickness`.    
    Lines are drawn on the image inplace (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below
    """
    # breakpoint()

    # y_size = img.shape[0]
    original_image = img.copy()
    y_size = int(img.shape[0])
    x_size = img.shape[1]

    #min_y = y_size * (3/5)
    min_y = int(y_size * (3/5))

    def _return_slope(x0, y0, x1, y1):
        return ((y1 - y0)/(x1 - x0))

    right_lines, left_lines = [], []

    for line in lines:
        # breakpoint()
        line_slope = _return_slope(*line.T)
        target = left_lines if line_slope > 0 else right_lines
        target.append(line)

    if (not left_lines) or (not right_lines):
        return img

    def _break_into_x_y(lines):
        x_coordinates = []
        y_coordinate = []
        for line in lines:
            for x0, y0, x1, y1 in line:
                x_coordinates.extend([x0, x1])
                y_coordinate.extend([y0, y1])

        return x_coordinates, y_coordinate

    right_x_coordinates, right_y_coordinates = _break_into_x_y(right_lines)
    left_x_coordinates, left_y_coordinates = _break_into_x_y(left_lines)

    line_right = np.poly1d(np.polyfit(right_y_coordinates, right_x_coordinates, deg=1))
    line_left = np.poly1d(np.polyfit(left_y_coordinates, left_x_coordinates, deg=1))

    # Regression evaluation
    left_x0_coordinate = int(line_left(y_size))
    left_x1_coordinate = int(line_left(min_y))


    right_x0_coordinate = int(line_right(y_size))
    right_x1_coordinate = int(line_right(min_y))


    # breakpoint()
    cv2.line(img, (left_x0_coordinate, y_size), (left_x1_coordinate, min_y), color, thickness)
    cv2.line(img, (right_x0_coordinate, y_size), (right_x1_coordinate, min_y), color, thickness)

    weighted_img(img, original_image)

    return img
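draw_lines fits x as a function of y (np.polyfit(y_coords, x_coords, 1)), which keeps near-vertical lane lines well conditioned, then evaluates the fit at the image bottom and at min_y. A standalone illustration of that step with made-up pixel coordinates (the arrays and image height are assumptions):

import numpy as np

img_height = 540
min_y = int(img_height * (3 / 5))

# hypothetical left-lane pixel coordinates (y grows downward in image space)
ys = np.array([540.0, 500.0, 460.0, 420.0, 380.0])
xs = np.array([120.0, 150.0, 182.0, 210.0, 241.0])

lane_fit = np.poly1d(np.polyfit(ys, xs, deg=1))   # x as a function of y
x_bottom, x_top = int(lane_fit(img_height)), int(lane_fit(min_y))
print((x_bottom, img_height), '->', (x_top, min_y))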
Ejemplo n.º 47
0
    #print(xfin[:100])
    J0 = pickle.load(op)
    #successcheck =  pickle.load(op)
    maxmomentum = pickle.load(op)

    #magnitude[magnitude>0.0]=-10.0

    x = np.array([
        0.0, 0.0213, 0.0260, 0.0307, 0.0587, 0.1053, 0.1333, 0.1660, 0.1846,
        0.1893, 0.2080, 0.1800, 0.2033, 0.1660
    ])
    y = np.array([
        0.0, 0.1446, 0.2893, 0.4572, 0.4619, 0.5319, 0.6018, 0.6112, 0.6765,
        0.7278, 0.7698, 0.9191, 1.1198, 1.1804
    ]) * -1
    z = np.polyfit(x, y, 7)

    plt.figure(1)
    p = np.poly1d(z)
    plt.plot(x, y)
    plt.scatter(x, y)

    xc = np.array([
        0.19 - 1.03j, 0.15 - 1.05j, 0.1 - 1.08j, 0.11 - 1.12j, 0.14 - 1.15j,
        0.17 - 1.18j
    ])
    plt.plot(real(xc), imag(xc), color='g')
    plt.scatter(real(xc), imag(xc), color='g')
    print('Hi')

    plotvalue = yfinbar  #exp(1j*phaseangle)*magnitude
Ejemplo n.º 48
0
    def form_table(self):

        # Normalize the cointegration coefficients; these are capital weights, so they must be converted to lot weights according to the stock prices.
        for i in range(len(self.name)):

            total = abs(self.weight.w1[i]) + abs(self.weight.w2[i])

            self.weight.w2[i] = (self.weight.w2[i] / total)
            self.weight.w1[i] = (self.weight.w1[i] / total)

        con = pd.concat([self.name, self.select_model, self.weight], axis=1)

        #print("共整合係數標準化 done in " + str(end - start))
        #------------------------------------------------------------------------------
        # Compute the spread series, run unit-root tests, and drop non-stationary spreads

        spread = np.zeros((len(self.data), len(con)))

        for i in range(len(con)):
            spread[:, i] = con.w1[i] * self.data[
                con.stock1[i]] + con.w2[i] * self.data[con.stock2[i]]

        self.spread = pd.DataFrame(spread)

        #print("刪除非定態spread序列 done in " + str(end - start))
        #------------------------------------------------------------------------------
        # Compute the signal-to-noise ratio ( spread )------------------------------

        for i in range(len(self.spread.T)):

            y = self.spread.iloc[:, i]
            self.s_n_r.append(snr(y, 100))

        self.s_n_r = pd.DataFrame(self.s_n_r)
        self.s_n_r.columns = ["snr"]

        #print("計算信噪比 done in " + str(end - start))
        #------------------------------------------------------------------------------
        # Compute the zero-crossing rate ( spread )------------------------------

        #pool = Pool(processes=16)
        Boot = 500

        for j in range(len(self.spread.T)):
            y = self.spread.iloc[:, j]
            #t = pool.apply_async(zcr, (y,Boot,con.stock1[j],con.stock2[j],),callback=self.append_zcr_result)
            self.z_c_r.append(zcr(y, Boot))
        #pool.close()
        #pool.join()

        #self.stock1_name = pd.DataFrame(self.stock1_name) ; self.stock1_name.columns = ["stock1"]
        #self.stock2_name = pd.DataFrame(self.stock2_name) ; self.stock2_name.columns = ["stock2"]
        self.z_c_r = pd.DataFrame(self.z_c_r)
        self.z_c_r.columns = ["zcr"]

        #mix = pd.concat([self.stock1_name,self.stock2_name,self.z_c_r],axis=1)

        #print("計算過零率 done in " + str(end - start))
        #------------------------------------------------------------------------------
        # Entry threshold, exit threshold and skewness--------------------------------------

        for i in range(len(self.spread.T)):

            y = self.spread.iloc[:, i]

            # Models with a time-trend term must be handled separately
            if con.model_type[i] == 'model4':

                x = np.arange(0, len(y))
                b1, b0 = np.polyfit(x, y, 1)

                trend_line = x * b1 + b0
                y = y - trend_line

                # After detrending the spread, compute mu and std
                self.ave.append(np.mean(y))
                self.std.append(np.std(y))
                self.ske.append(skew(y))

            else:

                self.ave.append(np.mean(y))
                self.std.append(np.std(y))
                self.ske.append(skew(y))

        self.ave = pd.DataFrame(self.ave)
        self.ave.columns = ["mu"]
        self.std = pd.DataFrame(self.std)
        self.std.columns = ["stdev"]
        self.ske = pd.DataFrame(self.ske)
        self.ske.columns = ["skewness"]
        #print("開倉門檻and平倉門檻 done in " + str(end - start))
        #------------------------------------------------------------------------------
        # Assemble the table
        #start = datetime.now()

        #con = pd.concat([con,self.s_n_r],axis=1)

        #con = pd.merge( con , mix , on=["stock1","stock2"] , how="outer" )

        con = pd.concat(
            [con, self.s_n_r, self.z_c_r, self.ave, self.std, self.ske],
            axis=1)

        #end = datetime.now()
        del self.s_n_r
        del self.z_c_r
        del self.ave
        del self.std
        del self.ske
        del self.select_model
        del self.weight
        del self.name
        #print("整理表格 done in " + str(end - start))
        #print(con)
        return con
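In the 'model4' branch above, the spread is detrended with a degree-1 np.polyfit before its mean, standard deviation, and skewness are computed. A self-contained illustration of that detrending step; the synthetic spread below is an assumption standing in for self.spread.iloc[:, i]:

import numpy as np

rng = np.random.default_rng(42)
y = np.cumsum(rng.normal(size=250)) * 0.1 + 0.05 * np.arange(250)  # spread with a drift
x = np.arange(0, len(y))

b1, b0 = np.polyfit(x, y, 1)       # slope and intercept of the time trend
trend_line = x * b1 + b0
y_detrended = y - trend_line

mu, stdev = np.mean(y_detrended), np.std(y_detrended)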
Ejemplo n.º 49
0
def plot_save_superposition(which_cell, expDir, use_mod_resp=0, fitType=2, excType=1, useHPCfit=1, conType=None, lgnFrontEnd=None, force_full=1, f1_expCutoff=2, to_save=1):

  if use_mod_resp == 2:
    rvcAdj   = -1; # this means vec corrected F1, not phase adjustment F1...
    _applyLGNtoNorm = 0; # don't apply the LGN front-end to the gain control weights
    recenter_norm = 1;
    newMethod = 1; # yes, use the "new" method for mrpt (not that new anymore, as of 21.03)
    lossType = 1; # sqrt
    _sigmoidSigma = 5;

  basePath = os.getcwd() + '/'
  if 'pl1465' in basePath or useHPCfit:
    loc_str = 'HPC';
  else:
    loc_str = '';

  rvcName = 'rvcFits%s_220531' % loc_str if expDir=='LGN/' else 'rvcFits%s_220609' % loc_str
  rvcFits = None; # pre-define this as None; will be overwritten if available/needed
  if expDir == 'altExp/': # we don't adjust responses there...
    rvcName = None;
  dFits_base = 'descrFits%s_220609' % loc_str if expDir=='LGN/' else 'descrFits%s_220631' % loc_str
  if use_mod_resp == 1:
    rvcName = None; # Use NONE if getting model responses, only
    if excType == 1:
      fitBase = 'fitList_200417';
    elif excType == 2:
      fitBase = 'fitList_200507';
    lossType = 1; # sqrt
    fitList_nm = hf.fitList_name(fitBase, fitType, lossType=lossType);
  elif use_mod_resp == 2:
    rvcName = None; # Use NONE if getting model responses, only
    if excType == 1:
      fitBase = 'fitList%s_210308_dG' % loc_str
      if recenter_norm:
        #fitBase = 'fitList%s_pyt_210312_dG' % loc_str
        fitBase = 'fitList%s_pyt_210331_dG' % loc_str
    elif excType == 2:
      fitBase = 'fitList%s_pyt_210310' % loc_str
      if recenter_norm:
        #fitBase = 'fitList%s_pyt_210312' % loc_str
        fitBase = 'fitList%s_pyt_210331' % loc_str
    fitList_nm = hf.fitList_name(fitBase, fitType, lossType=lossType, lgnType=lgnFrontEnd, lgnConType=conType, vecCorrected=-rvcAdj);

  # ^^^ EDIT rvc/descrFits/fitList names here; 

  ############
  # Before any plotting, fix plotting parameters
  ############
  plt.style.use('https://raw.githubusercontent.com/paul-levy/SF_diversity/master/paul_plt_style.mplstyle');
  from matplotlib import rcParams
  rcParams['font.size'] = 20;
  rcParams['pdf.fonttype'] = 42 # should be 42, but there are kerning issues
  rcParams['ps.fonttype'] = 42 # should be 42, but there are kerning issues
  rcParams['lines.linewidth'] = 2.5;
  rcParams['axes.linewidth'] = 1.5;
  rcParams['lines.markersize'] = 8; # this is in style sheet, just being explicit
  rcParams['lines.markeredgewidth'] = 0; # no edge, since weird tings happen then

  rcParams['xtick.major.size'] = 15
  rcParams['xtick.minor.size'] = 5; # no minor ticks
  rcParams['ytick.major.size'] = 15
  rcParams['ytick.minor.size'] = 0; # no minor ticks

  rcParams['xtick.major.width'] = 2
  rcParams['xtick.minor.width'] = 2;
  rcParams['ytick.major.width'] = 2
  rcParams['ytick.minor.width'] = 0

  rcParams['font.style'] = 'oblique';
  rcParams['font.size'] = 20;

  ############
  # load everything
  ############
  dataListNm = hf.get_datalist(expDir, force_full=force_full);
  descrFits_f0 = None;
  dLoss_num = 2; # see hf.descrFit_name/descrMod_name/etc for details
  if expDir == 'LGN/':
    rvcMod = 0; 
    dMod_num = 1;
    rvcDir = 1;
    vecF1 = -1;
  else:
    rvcMod = 1; # i.e. Naka-rushton (1)
    dMod_num = 3; # d-dog-s
    rvcDir = None; # None if we're doing vec-corrected
    if expDir == 'altExp/':
      vecF1 = 0;
    else:
      vecF1 = 1;

  dFits_mod = hf.descrMod_name(dMod_num)
  descrFits_name = hf.descrFit_name(lossType=dLoss_num, descrBase=dFits_base, modelName=dFits_mod, phAdj=1 if vecF1==-1 else None);

  ## now, let it run
  dataPath = basePath + expDir + 'structures/'
  save_loc = basePath + expDir + 'figures/'
  save_locSuper = save_loc + 'superposition_220713/'
  if use_mod_resp == 1:
    save_locSuper = save_locSuper + '%s/' % fitBase

  dataList = hf.np_smart_load(dataPath + dataListNm);
  print('Trying to load descrFits at: %s' % (dataPath + descrFits_name));
  descrFits = hf.np_smart_load(dataPath + descrFits_name);
  if use_mod_resp == 1 or use_mod_resp == 2:
    fitList = hf.np_smart_load(dataPath + fitList_nm);
  else:
    fitList = None;

  if not os.path.exists(save_locSuper):
    os.makedirs(save_locSuper)

  cells = np.arange(1, 1+len(dataList['unitName']))

  zr_rm = lambda x: x[x>0];
  # more flexible - only get values where x AND z are greater than some value "gt" (e.g. 0, 1, 0.4, ...)
  zr_rm_pair = lambda x, z, gt: [x[np.logical_and(x>gt, z>gt)], z[np.logical_and(x>gt, z>gt)]];
  # zr_rm_pair = lambda x, z: [x[np.logical_and(x>0, z>0)], z[np.logical_and(x>0, z>0)]] if np.logical_and(x!=[], z!=[])==True else [], [];

  # here, we'll save measures we are going to use for analysis purposes - e.g. suppression index, c50
  curr_suppr = dict();

  ############
  ### Establish the plot, load cell-specific measures
  ############
  nRows, nCols = 6, 2;
  cellName = dataList['unitName'][which_cell-1];
  expInd = hf.get_exp_ind(dataPath, cellName)[0]
  S = hf.np_smart_load(dataPath + cellName + '_sfm.npy')
  expData = S['sfm']['exp']['trial'];

  # 0th, let's load the basic tuning characterizations AND the descriptive fit
  try:
    dfit_curr = descrFits[which_cell-1]['params'][0,-1,:]; # single grating, highest contrast
  except:
    dfit_curr = None;
  # - then the basics
  try:
    basic_names, basic_order = dataList['basicProgName'][which_cell-1], dataList['basicProgOrder']
    basics = hf.get_basic_tunings(basic_names, basic_order);
  except:
    try:
      # we've already put the basics in the data structure... (i.e. post-sorting 2021 data)
      basic_names = ['','','','',''];
      basic_order = ['rf', 'sf', 'tf', 'rvc', 'ori']; # order doesn't matter if they are already loaded
      basics = hf.get_basic_tunings(basic_names, basic_order, preProc=S, reducedSave=True)
    except:
      basics = None;

  ### TEMPORARY: save the "basics" in curr_suppr; should live on its own, though; TODO
  curr_suppr['basics'] = basics;

  try:
    oriBW, oriCV = basics['ori']['bw'], basics['ori']['cv'];
  except:
    oriBW, oriCV = np.nan, np.nan;
  try:
    tfBW = basics['tf']['tfBW_oct'];
  except:
    tfBW = np.nan;
  try:
    suprMod = basics['rfsize']['suprInd_model'];
  except:
    suprMod = np.nan;
  try:
    suprDat = basics['rfsize']['suprInd_data'];
  except:
    suprDat = np.nan;

  try:
    cellType = dataList['unitType'][which_cell-1];
  except:
    # TODO: note, this is dangerous; thus far, only V1 cells don't have 'unitType' field in dataList, so we can safely do this
    cellType = 'V1';


  ############
  ### compute f1f0 ratio, and load the corresponding F0 or F1 responses
  ############
  f1f0_rat = hf.compute_f1f0(expData, which_cell, expInd, dataPath, descrFitName_f0=descrFits_f0)[0];
  curr_suppr['f1f0'] = f1f0_rat;
  respMeasure = 1 if f1f0_rat > 1 else 0;

  if vecF1 == 1:
    # get the correct, adjusted F1 response
    if expInd > f1_expCutoff and respMeasure == 1:
      respOverwrite = hf.adjust_f1_byTrial(expData, expInd);
    else:
      respOverwrite = None;

  if (respMeasure == 1 or expDir == 'LGN/') and expDir != 'altExp/' : # i.e. if we're looking at a simple cell, then let's get F1
    if vecF1 == 1:
      spikes_byComp = respOverwrite
      # then, sum up the valid components per stimulus component
      allCons = np.vstack(expData['con']).transpose();
      blanks = np.where(allCons==0);
      spikes_byComp[blanks] = 0; # just set it to 0 if that component was blank during the trial
    else:
      if rvcName is not None:
        try:
          rvcFits = hf.get_rvc_fits(dataPath, expInd, which_cell, rvcName=rvcName, rvcMod=rvcMod, direc=rvcDir, vecF1=vecF1);
        except:
          rvcFits = None;
      else:
        rvcFits = None
      spikes_byComp = hf.get_spikes(expData, get_f0=0, rvcFits=rvcFits, expInd=expInd);
    spikes = np.array([np.sum(x) for x in spikes_byComp]);
    rates = True if vecF1 == 0 else False; # when we get the spikes from rvcFits, they've already been converted into rates (in hf.get_all_fft)
    baseline = None; # f1 has no "DC", yadig?
  else: # otherwise, if it's complex, just get F0
    respMeasure = 0;
    spikes = hf.get_spikes(expData, get_f0=1, rvcFits=None, expInd=expInd);
    rates = False; # get_spikes without rvcFits is directly from spikeCount, which is counts, not rates!
    baseline = hf.blankResp(expData, expInd)[0]; # we'll plot the spontaneous rate
    # why mult by stimDur? well, spikes are not rates but baseline is, so we convert baseline to count (i.e. not rate, too)
    spikes = spikes - baseline*hf.get_exp_params(expInd).stimDur; 

  #print('###\nGetting spikes (data): rates? %d\n###' % rates);
  _, _, _, respAll = hf.organize_resp(spikes, expData, expInd, respsAsRate=rates); # only using respAll to get variance measures
  resps_data, stimVals, val_con_by_disp, _, _ = hf.tabulate_responses(expData, expInd, overwriteSpikes=spikes, respsAsRates=rates, modsAsRate=rates);

  if fitList is None:
    resps = resps_data; # otherwise, we'll still keep resps_data for reference
  elif fitList is not None: # OVERWRITE the data with the model spikes!
    if use_mod_resp == 1:
      curr_fit = fitList[which_cell-1]['params'];
      modResp = mod_resp.SFMGiveBof(curr_fit, S, normType=fitType, lossType=lossType, expInd=expInd, cellNum=which_cell, excType=excType)[1];
      if f1f0_rat < 1: # then subtract baseline..
        modResp = modResp - baseline*hf.get_exp_params(expInd).stimDur; 
      # now organize the responses
      resps, stimVals, val_con_by_disp, _, _ = hf.tabulate_responses(expData, expInd, overwriteSpikes=modResp, respsAsRates=False, modsAsRate=False);
    elif use_mod_resp == 2: # then pytorch model!
      resp_str = hf_sf.get_resp_str(respMeasure)
      curr_fit = fitList[which_cell-1][resp_str]['params'];
      model = mrpt.sfNormMod(curr_fit, expInd=expInd, excType=excType, normType=fitType, lossType=lossType, lgnFrontEnd=lgnFrontEnd, newMethod=newMethod, lgnConType=conType, applyLGNtoNorm=_applyLGNtoNorm)
      ### get the vec-corrected responses, if applicable
      if expInd > f1_expCutoff and respMeasure == 1:
        respOverwrite = hf.adjust_f1_byTrial(expData, expInd);
      else:
        respOverwrite = None;

      dw = mrpt.dataWrapper(expData, respMeasure=respMeasure, expInd=expInd, respOverwrite=respOverwrite); # respOverwrite defined above (None if DC or if expInd=-1)
      modResp = model.forward(dw.trInf, respMeasure=respMeasure, sigmoidSigma=_sigmoidSigma, recenter_norm=recenter_norm).detach().numpy();

      if respMeasure == 1: # make sure the blank components have a zero response (we'll do the same with the measured responses)
        blanks = np.where(dw.trInf['con']==0);
        modResp[blanks] = 0;
        # next, sum up across components
        modResp = np.sum(modResp, axis=1);
      # finally, make sure this fills out a vector of all responses (just have nan for non-modelled trials)
      nTrialsFull = len(expData['num']);
      modResp_full = np.nan * np.zeros((nTrialsFull, ));
      modResp_full[dw.trInf['num']] = modResp;

      if respMeasure == 0: # if DC, then subtract baseline..., as determined from data (why not model? we aren't yet calc. response to no stim, though it can be done)
        modResp_full = modResp_full - baseline*hf.get_exp_params(expInd).stimDur;

      # TODO: This is a work around for which measures are in rates vs. counts (DC vs F1, model vs data...)
      stimDur = hf.get_exp_params(expInd).stimDur;
      asRates = False;
      #divFactor = stimDur if asRates == 0 else 1;
      #modResp_full = np.divide(modResp_full, divFactor);
      # now organize the responses
      resps, stimVals, val_con_by_disp, _, _ = hf.tabulate_responses(expData, expInd, overwriteSpikes=modResp_full, respsAsRates=asRates, modsAsRate=asRates);

  predResps = resps[2];

  respMean = resps[0]; # equivalent to resps[0];
  respStd = np.nanstd(respAll, -1); # take std of all responses for a given condition
  # compute SEM, too
  findNaN = np.isnan(respAll);
  nonNaN  = np.sum(findNaN == False, axis=-1);
  respSem = np.nanstd(respAll, -1) / np.sqrt(nonNaN);

  ############
  ### first, fit a smooth function to the overall predicted vs. measured responses
  ### --- from this, we can measure how each example superposition deviates from a central tendency
  ### --- i.e. the residual relative to the "standard" input:output relationship
  ############
  all_resps = respMean[1:, :, :].flatten() # all disp>0
  all_preds = predResps[1:, :, :].flatten() # all disp>0
  # a model which allows negative fits
  #         myFit = lambda x, t0, t1, t2: t0 + t1*x + t2*x*x;
  #         non_nan = np.where(~np.isnan(all_preds)); # cannot fit negative values with naka-rushton...
  #         fitz, _ = opt.curve_fit(myFit, all_preds[non_nan], all_resps[non_nan], p0=[-5, 10, 5], maxfev=5000)
  # naka rushton
  myFit = lambda x, g, expon, c50: hf.naka_rushton(x, [0, g, expon, c50]) 
  non_neg = np.where(all_preds>0) # cannot fit negative values with naka-rushton...
  try:
    if use_mod_resp == 1: # the reference will ALWAYS be the data -- redo the above analysis for data
      predResps_data = resps_data[2];
      respMean_data = resps_data[0];
      all_resps_data = respMean_data[1:, :, :].flatten() # all disp>0
      all_preds_data = predResps_data[1:, :, :].flatten() # all disp>0
      non_neg_data = np.where(all_preds_data>0) # cannot fit negative values with naka-rushton...
      fitz, _ = opt.curve_fit(myFit, all_preds_data[non_neg_data], all_resps_data[non_neg_data], p0=[100, 2, 25], maxfev=5000)
    else:
      fitz, _ = opt.curve_fit(myFit, all_preds[non_neg], all_resps[non_neg], p0=[100, 2, 25], maxfev=5000)
    rel_c50 = np.divide(fitz[-1], np.max(all_preds[non_neg]));
  except:
    fitz = None;
    rel_c50 = -99;

  ############
  ### organize stimulus information
  ############
  all_disps = stimVals[0];
  all_cons = stimVals[1];
  all_sfs = stimVals[2];

  nCons = len(all_cons);
  nSfs = len(all_sfs);
  nDisps = len(all_disps);

  maxResp = np.maximum(np.nanmax(respMean), np.nanmax(predResps));
  # by disp
  clrs_d = cm.viridis(np.linspace(0,0.75,nDisps-1));
  lbls_d = ['disp: %s' % str(x) for x in range(nDisps)];
  # by sf
  val_sfs = hf.get_valid_sfs(S, disp=1, con=val_con_by_disp[1][0], expInd=expInd) # pick 
  clrs_sf = cm.viridis(np.linspace(0,.75,len(val_sfs)));
  lbls_sf = ['sf: %.2f' % all_sfs[x] for x in val_sfs];
  # by con
  val_con = all_cons;
  clrs_con = cm.viridis(np.linspace(0,.75,len(val_con)));
  lbls_con = ['con: %.2f' % x for x in val_con];

  ############
  ### create the figure
  ############
  fSuper, ax = plt.subplots(nRows, nCols, figsize=(10*nCols, 8*nRows))
  sns.despine(fig=fSuper, offset=10)

  allMix = [];
  allSum = [];

  ### plot reference tuning [row 1 (i.e. 2nd row)]
  ## on the right, SF tuning (high contrast)
  sfRef = hf.nan_rm(respMean[0, :, -1]); # high contrast tuning
  ax[1, 1].plot(all_sfs, sfRef, 'k-', marker='o', label='ref. tuning (d0, high con)', clip_on=False)
  ax[1, 1].set_xscale('log')
  ax[1, 1].set_xlim((0.1, 10));
  ax[1, 1].set_xlabel('sf (c/deg)')
  ax[1, 1].set_ylabel('response (spikes/s)')
  ax[1, 1].set_ylim((-5, 1.1*np.nanmax(sfRef)));
  ax[1, 1].legend(fontsize='x-small');

  #####
  ## then on the left, RVC (peak SF)
  #####
  sfPeak = np.argmax(sfRef); # stupid/simple, but just get the rvc for the max response
  v_cons_single = val_con_by_disp[0]
  rvcRef = hf.nan_rm(respMean[0, sfPeak, v_cons_single]);
  # now, if possible, let's also plot the RVC fit
  if rvcFits is not None:
    rvcFits = hf.get_rvc_fits(dataPath, expInd, which_cell, rvcName=rvcName, rvcMod=rvcMod);
    rel_rvc = rvcFits[0]['params'][sfPeak]; # we get 0 dispersion, peak SF
    plt_cons = np.geomspace(all_cons[0], all_cons[-1], 50);
    c50, pk = hf.get_c50(rvcMod, rel_rvc), rvcFits[0]['conGain'][sfPeak];
    c50_emp, c50_eval = hf.c50_empirical(rvcMod, rel_rvc); # determine c50 by optimization, numerical approx.
    if rvcMod == 0:
      rvc_mod = hf.get_rvc_model();
      rvcmodResp = rvc_mod(*rel_rvc, plt_cons);
    else: # i.e. mod=1 or mod=2
      rvcmodResp = hf.naka_rushton(plt_cons, rel_rvc);
    if baseline is not None:
      rvcmodResp = rvcmodResp - baseline; 
    ax[1, 0].plot(plt_cons, rvcmodResp, 'k--', label='rvc fit (c50=%.2f, gain=%0f)' %(c50, pk))
    # and save it
    curr_suppr['c50'] = c50; curr_suppr['conGain'] = pk;
    curr_suppr['c50_emp'] = c50_emp; curr_suppr['c50_emp_eval'] = c50_eval
  else:
    curr_suppr['c50'] = np.nan; curr_suppr['conGain'] = np.nan;
    curr_suppr['c50_emp'] = np.nan; curr_suppr['c50_emp_eval'] = np.nan;

  ax[1, 0].plot(all_cons[v_cons_single], rvcRef, 'k-', marker='o', label='ref. tuning (d0, peak SF)', clip_on=False)
  #         ax[1, 0].set_xscale('log')
  ax[1, 0].set_xlabel('contrast (%)');
  ax[1, 0].set_ylabel('response (spikes/s)')
  ax[1, 0].set_ylim((-5, 1.1*np.nanmax(rvcRef)));
  ax[1, 0].legend(fontsize='x-small');

  # plot the fitted model on each axis
  pred_plt = np.linspace(0, np.nanmax(all_preds), 100);
  if fitz is not None:
    ax[0, 0].plot(pred_plt, myFit(pred_plt, *fitz), 'r--', label='fit')
    ax[0, 1].plot(pred_plt, myFit(pred_plt, *fitz), 'r--', label='fit')

  for d in range(nDisps):
    if d == 0: # we don't care about single gratings!
      dispRats = [];
      continue; 
    v_cons = np.array(val_con_by_disp[d]);
    n_v_cons = len(v_cons);

    # plot split out by each contrast [0,1]
    for c in reversed(range(n_v_cons)):
      v_sfs = hf.get_valid_sfs(S, d, v_cons[c], expInd)
      for s in v_sfs:
        mixResp = respMean[d, s, v_cons[c]];
        allMix.append(mixResp);
        sumResp = predResps[d, s, v_cons[c]];
        allSum.append(sumResp);
  #      print('condition: d(%d), c(%d), sf(%d):: pred(%.2f)|real(%.2f)' % (d, v_cons[c], s, sumResp, mixResp))
        # PLOT in by-disp panel
        if c == 0 and s == v_sfs[0]:
          ax[0, 0].plot(sumResp, mixResp, 'o', color=clrs_d[d-1], label=lbls_d[d], clip_on=False)
        else:
          ax[0, 0].plot(sumResp, mixResp, 'o', color=clrs_d[d-1], clip_on=False)
        # PLOT in by-sf panel
        sfInd = np.where(np.array(v_sfs) == s)[0][0]; # will only be one entry, so just "unpack"
        try:
          if d == 1 and c == 0:
            ax[0, 1].plot(sumResp, mixResp, 'o', color=clrs_sf[sfInd], label=lbls_sf[sfInd], clip_on=False);
          else:
            ax[0, 1].plot(sumResp, mixResp, 'o', color=clrs_sf[sfInd], clip_on=False);
        except:
          pass;
          #pdb.set_trace();
        # plot baseline, if f0...
  #       if baseline is not None:
  #         [ax[0, i].axhline(baseline, linestyle='--', color='k', label='spon. rate') for i in range(2)];


    # plot averaged across all cons/sfs (i.e. average for the whole dispersion) [1,0]
    mixDisp = respMean[d, :, :].flatten();
    sumDisp = predResps[d, :, :].flatten();
    mixDisp, sumDisp = zr_rm_pair(mixDisp, sumDisp, 0.5);
    curr_rats = np.divide(mixDisp, sumDisp)
    curr_mn = geomean(curr_rats); curr_std = np.std(np.log10(curr_rats));
  #  curr_rat = geomean(np.divide(mixDisp, sumDisp));
    ax[2, 0].bar(d, curr_mn, yerr=curr_std, color=clrs_d[d-1]);
    ax[2, 0].set_yscale('log')
    ax[2, 0].set_ylim(0.1, 10);
  #  ax[2, 0].yaxis.set_ticks(minorticks)
    dispRats.append(curr_mn);
  #  ax[2, 0].bar(d, np.mean(np.divide(mixDisp, sumDisp)), color=clrs_d[d-1]);

    # also, let's plot the (signed) error relative to the fit
    if fitz is not None:
      errs = mixDisp - myFit(sumDisp, *fitz);
      ax[3, 0].bar(d, np.mean(errs), yerr=np.std(errs), color=clrs_d[d-1])
      # -- and normalized by the prediction output response
      errs_norm = np.divide(mixDisp - myFit(sumDisp, *fitz), myFit(sumDisp, *fitz));
      ax[4, 0].bar(d, np.mean(errs_norm), yerr=np.std(errs_norm), color=clrs_d[d-1])

    # and set some labels/lines, as needed
    if d == 1:
        ax[2, 0].set_xlabel('dispersion');
        ax[2, 0].set_ylabel('suppression ratio (linear)')
        ax[2, 0].axhline(1, ls='--', color='k')
        ax[3, 0].set_xlabel('dispersion');
        ax[3, 0].set_ylabel('mean (signed) error')
        ax[3, 0].axhline(0, ls='--', color='k')
        ax[4, 0].set_xlabel('dispersion');
        ax[4, 0].set_ylabel('mean (signed) error -- as frac. of fit prediction')
        ax[4, 0].axhline(0, ls='--', color='k')

    curr_suppr['supr_disp'] = dispRats;

  ### plot averaged across all cons/disps
  sfInds = []; sfRats = []; sfRatStd = []; 
  sfErrs = []; sfErrsStd = []; sfErrsInd = []; sfErrsIndStd = []; sfErrsRat = []; sfErrsRatStd = [];
  curr_errNormFactor = [];
  for s in range(len(val_sfs)):
    try: # not all sfs will have legitimate values;
      # only get mixtures (i.e. ignore single gratings)
      mixSf = respMean[1:, val_sfs[s], :].flatten();
      sumSf = predResps[1:, val_sfs[s], :].flatten();
      mixSf, sumSf = zr_rm_pair(mixSf, sumSf, 0.5);
      rats_curr = np.divide(mixSf, sumSf); 
      sfInds.append(s); sfRats.append(geomean(rats_curr)); sfRatStd.append(np.std(np.log10(rats_curr)));

      if fitz is not None:
        #curr_NR = myFit(sumSf, *fitz); # unvarnished
        curr_NR = np.maximum(myFit(sumSf, *fitz), 0.5); # thresholded at 0.5...

        curr_err = mixSf - curr_NR;
        sfErrs.append(np.mean(curr_err));
        sfErrsStd.append(np.std(curr_err))

        curr_errNorm = np.divide(mixSf - curr_NR, mixSf + curr_NR);
        sfErrsInd.append(np.mean(curr_errNorm));
        sfErrsIndStd.append(np.std(curr_errNorm))

        curr_errRat = np.divide(mixSf, curr_NR);
        sfErrsRat.append(np.mean(curr_errRat));
        sfErrsRatStd.append(np.std(curr_errRat));

        curr_normFactors = np.array(curr_NR)
        curr_errNormFactor.append(geomean(curr_normFactors[curr_normFactors>0]));
      else:
        sfErrs.append([]);
        sfErrsStd.append([]);
        sfErrsInd.append([]);
        sfErrsIndStd.append([]);
        sfErrsRat.append([]);
        sfErrsRatStd.append([]);
        curr_errNormFactor.append([]);
    except:
      pass

  # get the offset/scale of the ratio so that we can plot a rescaled/flipped version of
  # the high con/single grat tuning for reference...does the suppression match the response?
  offset, scale = np.nanmax(sfRats), np.nanmax(sfRats) - np.nanmin(sfRats);
  sfRef = hf.nan_rm(respMean[0, val_sfs, -1]); # high contrast tuning
  sfRefShift = offset - scale * (sfRef/np.nanmax(sfRef))
  ax[2,1].scatter(all_sfs[val_sfs][sfInds], sfRats, color=clrs_sf[sfInds], clip_on=False)
  ax[2,1].errorbar(all_sfs[val_sfs][sfInds], sfRats, sfRatStd, color='k', linestyle='-', clip_on=False, label='suppression tuning')
  #         ax[2,1].plot(all_sfs[val_sfs][sfInds], sfRats, 'k-', clip_on=False, label='suppression tuning')
  ax[2,1].plot(all_sfs[val_sfs], sfRefShift, 'k--', label='ref. tuning', clip_on=False)
  ax[2,1].axhline(1, ls='--', color='k')
  ax[2,1].set_xlabel('sf (cpd)')
  ax[2,1].set_xscale('log')
  ax[2,1].set_xlim((0.1, 10));
  #ax[2,1].set_xlim((np.min(all_sfs), np.max(all_sfs)));
  ax[2,1].set_ylabel('suppression ratio');
  ax[2,1].set_yscale('log')
  #ax[2,1].yaxis.set_ticks(minorticks)
  ax[2,1].set_ylim(0.1, 10);        
  ax[2,1].legend(fontsize='x-small');
  curr_suppr['supr_sf'] = sfRats;

  ### residuals from fit of suppression
  if fitz is not None:
    # mean signed error: and labels/plots for the error as f'n of SF
    ax[3,1].axhline(0, ls='--', color='k')
    ax[3,1].set_xlabel('sf (cpd)')
    ax[3,1].set_xscale('log')
    ax[3,1].set_xlim((0.1, 10));
    #ax[3,1].set_xlim((np.min(all_sfs), np.max(all_sfs)));
    ax[3,1].set_ylabel('mean (signed) error');
    ax[3,1].errorbar(all_sfs[val_sfs][sfInds], sfErrs, sfErrsStd, color='k', marker='o', linestyle='-', clip_on=False)
    # -- and normalized by the sum of the predicted (NR) and measured responses
    val_errs = np.logical_and(~np.isnan(sfErrsRat), np.logical_and(np.array(sfErrsIndStd)>0, np.array(sfErrsIndStd) < 2));
    norm_subset = np.array(sfErrsInd)[val_errs];
    normStd_subset = np.array(sfErrsIndStd)[val_errs];
    ax[4,1].axhline(0, ls='--', color='k')
    ax[4,1].set_xlabel('sf (cpd)')
    ax[4,1].set_xscale('log')
    ax[4,1].set_xlim((0.1, 10));
    #ax[4,1].set_xlim((np.min(all_sfs), np.max(all_sfs)));
    ax[4,1].set_ylim((-1, 1));
    ax[4,1].set_ylabel('error index');
    ax[4,1].errorbar(all_sfs[val_sfs][sfInds][val_errs], norm_subset, normStd_subset, color='k', marker='o', linestyle='-', clip_on=False)
    # -- AND simply the ratio between the mixture response and the mean expected mix response (i.e. Naka-Rushton)
    # --- equivalent to the suppression ratio, but relative to the NR fit rather than perfect linear summation
    val_errs = np.logical_and(~np.isnan(sfErrsRat), np.logical_and(np.array(sfErrsRatStd)>0, np.array(sfErrsRatStd) < 2));
    rat_subset = np.array(sfErrsRat)[val_errs];
    ratStd_subset = np.array(sfErrsRatStd)[val_errs];
    #ratStd_subset = (1/np.log(2))*np.divide(np.array(sfErrsRatStd)[val_errs], rat_subset);
    ax[5,1].scatter(all_sfs[val_sfs][sfInds][val_errs], rat_subset, color=clrs_sf[sfInds][val_errs], clip_on=False)
    ax[5,1].errorbar(all_sfs[val_sfs][sfInds][val_errs], rat_subset, ratStd_subset, color='k', linestyle='-', clip_on=False, label='suppression tuning')
    ax[5,1].axhline(1, ls='--', color='k')
    ax[5,1].set_xlabel('sf (cpd)')
    ax[5,1].set_xscale('log')
    ax[5,1].set_xlim((0.1, 10));
    ax[5,1].set_ylabel('suppression ratio (wrt NR)');
    ax[5,1].set_yscale('log', basey=2)
  #         ax[2,1].yaxis.set_ticks(minorticks)
    ax[5,1].set_ylim(np.power(2.0, -2), np.power(2.0, 2));
    ax[5,1].legend(fontsize='x-small');
    # - compute the variance - and put that value on the plot
    errsRatVar = np.var(np.log2(sfErrsRat)[val_errs]);
    curr_suppr['sfRat_VAR'] = errsRatVar;
    ax[5,1].text(0.1, 2, 'var=%.2f' % errsRatVar);

    # compute the unsigned "area under curve" for the sfErrsInd, and normalize by the octave span of SF values considered
    val_errs = np.logical_and(~np.isnan(sfErrsRat), np.logical_and(np.array(sfErrsIndStd)>0, np.array(sfErrsIndStd) < 2));
    val_x = all_sfs[val_sfs][sfInds][val_errs];
    ind_var = np.var(np.array(sfErrsInd)[val_errs]);
    curr_suppr['sfErrsInd_VAR'] = ind_var;
    # - and put that value on the plot
    ax[4,1].text(0.1, -0.25, 'var=%.3f' % ind_var);
  else:
    curr_suppr['sfErrsInd_VAR'] = np.nan
    curr_suppr['sfRat_VAR'] = np.nan

  #########
  ### NOW, let's evaluate the derivative of the SF tuning curve and get the correlation with the errors
  #########
  mod_sfs = np.geomspace(all_sfs[0], all_sfs[-1], 1000);
  mod_resp = hf.get_descrResp(dfit_curr, mod_sfs, DoGmodel=dMod_num);
  deriv = np.divide(np.diff(mod_resp), np.diff(np.log10(mod_sfs)))
  deriv_norm = np.divide(deriv, np.maximum(np.nanmax(deriv), np.abs(np.nanmin(deriv)))); # make the maximum response 1 (or -1)
  # - then, what indices to evaluate for comparing with sfErr?
  errSfs = all_sfs[val_sfs][sfInds];
  mod_inds = [np.argmin(np.square(mod_sfs-x)) for x in errSfs];
  deriv_norm_eval = deriv_norm[mod_inds];
  # -- plot on [1, 1] (i.e. where the data is)
  ax[1,1].plot(mod_sfs, mod_resp, 'k--', label='fit (g)')
  ax[1,1].legend();
  # Duplicate "twin" the axis to create a second y-axis
  ax2 = ax[1,1].twinx();
  ax2.set_xscale('log'); # have to re-inforce log-scale?
  ax2.set_ylim([-1, 1]); # since the g' is normalized
  # make a plot with different y-axis using second axis object
  ax2.plot(mod_sfs[1:], deriv_norm, '--', color="red", label='g\'');
  ax2.set_ylabel("deriv. (normalized)",color="red")
  ax2.legend();
  sns.despine(ax=ax2, offset=10, right=False);
  # -- and let's plot rescaled and shifted version in [2,1]
  offset, scale = np.nanmax(sfRats), np.nanmax(sfRats) - np.nanmin(sfRats);
  derivShift = offset - scale * (deriv_norm/np.nanmax(deriv_norm));
  ax[2,1].plot(mod_sfs[1:], derivShift, 'r--', label='deriv(ref. tuning)', clip_on=False)
  ax[2,1].legend(fontsize='x-small');
  # - then, normalize the sfErrs/sfErrsInd and compute the correlation coefficient
  if fitz is not None:
    norm_sfErr = np.divide(sfErrs, np.nanmax(np.abs(sfErrs)));
    norm_sfErrInd = np.divide(sfErrsInd, np.nanmax(np.abs(sfErrsInd))); # remember, sfErrsInd is normalized per condition; this is overall
    non_nan = np.logical_and(~np.isnan(norm_sfErr), ~np.isnan(deriv_norm_eval))
    corr_nsf, corr_nsfN = np.corrcoef(deriv_norm_eval[non_nan], norm_sfErr[non_nan])[0,1], np.corrcoef(deriv_norm_eval[non_nan], norm_sfErrInd[non_nan])[0,1]
    curr_suppr['corr_derivWithErr'] = corr_nsf;
    curr_suppr['corr_derivWithErrsInd'] = corr_nsfN;
    ax[3,1].text(0.1, 0.25*np.nanmax(sfErrs), 'corr w/g\' = %.2f' % corr_nsf)
    ax[4,1].text(0.1, 0.25, 'corr w/g\' = %.2f' % corr_nsfN)
  else:
    curr_suppr['corr_derivWithErr'] = np.nan;
    curr_suppr['corr_derivWithErrsInd'] = np.nan;

  # make a polynomial fit
  try:
    hmm = np.polyfit(allSum, allMix, deg=1) # returns [a, b] in ax + b 
  except:
    hmm = [np.nan];
  curr_suppr['supr_index'] = hmm[0];

  for j in range(1):
    for jj in range(nCols):
      ax[j, jj].axis('square')
      ax[j, jj].set_xlabel('prediction: sum(components) (imp/s)');
      ax[j, jj].set_ylabel('mixture response (imp/s)');
      ax[j, jj].plot([0, 1*maxResp], [0, 1*maxResp], 'k--')
      ax[j, jj].set_xlim((-5, maxResp));
      ax[j, jj].set_ylim((-5, 1.1*maxResp));
      ax[j, jj].set_title('Suppression index: %.2f|%.2f' % (hmm[0], rel_c50))
      ax[j, jj].legend(fontsize='x-small');

  fSuper.suptitle('Superposition: %s #%d [%s; f1f0 %.2f; szSupr[dt/md] %.2f/%.2f; oriBW|CV %.2f|%.2f; tfBW %.2f]' % (cellType, which_cell, cellName, f1f0_rat, suprDat, suprMod, oriBW, oriCV, tfBW))

  if fitList is None:
    save_name = 'cell_%03d.pdf' % which_cell
  else:
    save_name = 'cell_%03d_mod%s.pdf' % (which_cell, hf.fitType_suffix(fitType))
  pdfSv = pltSave.PdfPages(str(save_locSuper + save_name));
  pdfSv.savefig(fSuper)
  pdfSv.close();

  #########
  ### Finally, add this "superposition" result to the newest analysis file
  #########

  if to_save:

    if fitList is None:
      from datetime import datetime
      suffix = datetime.today().strftime('%y%m%d')
      super_name = 'superposition_analysis_%s.npy' % suffix;
    else:
      super_name = 'superposition_analysis_mod%s.npy' % hf.fitType_suffix(fitType);

    pause_tm = 5*np.random.rand();
    print('sleeping for %d secs (#%d)' % (pause_tm, which_cell));
    time.sleep(pause_tm);

    if os.path.exists(dataPath + super_name):
      suppr_all = hf.np_smart_load(dataPath + super_name);
    else:
      suppr_all = dict();
    suppr_all[which_cell-1] = curr_suppr;
    np.save(dataPath + super_name, suppr_all);
  
  return curr_suppr;
Ejemplo n.º 50
0
    def makeModel(self):
        # step1: use the statistics of the observed speeds to obtain the basis axes
        speeds = np.array(self.speeds)
        speedDirs = speeds[:, 3:4]
        axis = getAxis(speedDirs)
        axis0 = axis[0]
        axis1 = axis[1]
        mainAxis = axis0 if abs(axis0) > abs(axis1) else axis1
        secondAxis = axis0 if abs(axis0) < abs(axis1) else axis1
        if mainAxis > 0:
            mainAxis -= math.pi / 2
        print('Main-axis angle: ' + str(mainAxis) + ', secondary-axis angle: ' + str(secondAxis))
        if self.revise:
            transformMat, invTransformMat = getTransformMat([mainAxis, secondAxis])
        else:
            mainAxis = -math.pi/2
            secondAxis = 0
            transformMat, invTransformMat = getTransformMat([-math.pi/2, 0])

        # step2: compute characteristic paths; filter and classify the data
        importPaths = []
        longPaths = []
        for path in self.paths:
            path = straighten(path, stepTh=self.avgBbox*self.stopTh)  # filter out stop points
            if len(path['zipDots']) > 20:
                path = pathFitting(path, invTransformMat)
                # get the maximum displacement and determine the direction:
                tMain = path['fun_dots'][:, 0]
                tD = tMain[-1] - tMain[1]
                if tD > 2 * self.avgBbox:
                    importPaths.append(path)
                longPaths.append(path)
        print('Found ' + str(len(importPaths)) + ' good-quality paths')
        if len(importPaths) == 0:
            print('No useful paths were found; aborting modeling')
            exit()
        elif len(importPaths) < 50:
            print('Too few useful paths were found; modeling may fail')
        # step3: extract the key data
        # 3.1 extract the number of lanes
        startVar = []
        for path in importPaths:
            startVar.append(path['fun_dots'][0, 1])
        startVar = np.array(startVar)
        laneNum, _ = gaussianCumulative(startVar, self.avgBbox * self.sigma)
        print('Number of lanes: ' + str(laneNum))
        startK = KMeans(n_clusters=laneNum)
        startK.fit(startVar[:, np.newaxis])

        # 3.2 extract the number of exit directions:
        endVar = []
        for path in importPaths:
            endVar.append(path['fun_dots'][-1, 1])
        endVar = np.array(endVar)
        dirNum, _ = gaussianCumulative(endVar, self.avgBbox * self.sigma)
        if dirNum > 3:
            dirNum = 3
        print('Number of exit directions: ' + str(dirNum))
        endK = KMeans(n_clusters=dirNum)
        endK.fit(endVar[:, np.newaxis])

        # step4: build the model:
        # step4.1 main-area mask
        stopMap, speedMap = getMap(importPaths, self.mapSize, self.avgBbox / 100)

        # step4.2 per-lane masks
        lanePaths = {}
        for i in range(laneNum):
            lanePaths[i] = []
        for path in importPaths:
            cls = startK.predict([[path['fun_dots'][0, 1]]])[0]
            lanePaths[cls].append(path)

        laneMaps = {}
        for i in lanePaths:
            tStopMap, tSpeedMap = getMap(lanePaths[i], self.mapSize, self.avgBbox / 100)
            laneMaps[i] = {
                'speedMap': tSpeedMap,
                'stopMap': tStopMap
            }

        # step4.3 get the stop point of each lane
        stopDots = []
        for i in laneMaps:
            tStopMap = laneMaps[i]['stopMap']
            index = np.argmax(tStopMap)
            y = index // tStopMap.shape[1]
            x = index - y * tStopMap.shape[1]
            # add the offset:
            x = int(x + self.avgBbox * math.cos(mainAxis) * 0.5)
            y = int(y + self.avgBbox * math.sin(mainAxis) * 0.5)
            stopDots.append([x, y])
        # step4.4 get the stop line:
        stopLine = [0, 0]
        if len(stopDots) == 1:
            stopDot = stopDots[0]
            k = math.tan(secondAxis)
            b = stopDot[0] - k*stopDot[1]
            stopLine = [k, b]
        elif len(stopDots) > 1:
            stopDots = np.array(stopDots)
            ys = stopDots[:, 1]
            xs = stopDots[:, 0]
            arg = np.polyfit(xs, ys, 1)
            k = arg[0]
            b = arg[1]
            stopLine = [k, b]
        else:
            print('Warning: no stop points were found! Cannot derive the stop line')
        print('stop line: y = {}*x + {}'.format(stopLine[0], stopLine[1]))
        # compute the entry/exit reachability matrix
        reachableMat = np.zeros((int(laneNum), int(dirNum)))
        for path in importPaths:
            lane = startK.predict([[path['fun_dots'][0, 1]]])[0]
            dir = endK.predict([[path['fun_dots'][-1, 1]]])[0]
            reachableMat[lane][dir] += 1
        reachableMat = reachableMat > np.max(reachableMat)/10
        print('Reachability matrix: ')
        print(reachableMat)

        # save the model:
        model = {
            'type': 'crossroad',
            'avg_bbox':self.avgBbox ,
            'main_axis': mainAxis,
            'second_axis': secondAxis,
            'transform_mat': transformMat,
            'inv_transform_mat': invTransformMat,
            'main_map': speedMap,
            'stop_line': stopLine,
            'lane_maps': laneMaps,
            'lane_classifier': startK,
            'dir_classifier': endK,
            'reachable_mat':reachableMat
        }
        with open(self.modelPath, 'wb') as f:
            pickle.dump(model, f)
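
A minimal loading sketch for the model pickled above. It is not part of the original snippet: the file name is a placeholder (use the same modelPath as above), the entry/exit y-coordinates are purely illustrative, and scikit-learn must be importable so the KMeans classifiers can be unpickled.

import pickle

with open('crossroad_model.pkl', 'rb') as f:  # placeholder path; use the modelPath written above
    model = pickle.load(f)

lane = model['lane_classifier'].predict([[120.0]])[0]       # illustrative entry y-coordinate
direction = model['dir_classifier'].predict([[480.0]])[0]   # illustrative exit y-coordinate
print('lane -> direction reachable:', model['reachable_mat'][lane][direction])
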
    def lids_summary(self, lids, verbose=False):
        r'''Calculate summary statistics for LIDS.

        Fit all LIDS-transformed bouts and calculate the mean period, the mean
        MRI, the mean number of LIDS cycles and the dampening factor of the
        mean LIDS profile.

        Parameters
        ----------
        lids: list of pandas.Series
            Output data from LIDS transformation.
        verbose: bool, optional
            If set to True, print summary statistics.
            Default is False.

        Returns
        -------
        summary: dict
            Dictionary with the summary statistics.
        '''

        ilids = []  # LIDS profiles
        periods = []  # List of LIDS periods
        mris = []  # MRI indices
        ncycles = []  # Number of LIDS cycles/sleep bout

        for idx, s in enumerate(lids):
            # Fit LIDS data
            self.lids_fit(s, verbose=False)

            # Verify LIDS period
            period = self.lids_period(freq='s')

            # Calculate MRI
            mri = self.lids_mri(s)

            # Calculate the number of LIDS cycles (as sleep bout length/period):
            ncycle = s.index.values.ptp() / np.timedelta64(1, 's')
            ncycle /= period.astype(float)

            if verbose:
                print('-' * 20)
                print('Sleep bout nr {}'.format(idx))
                print('- Period: {!s}'.format(period))
                print('- MRI: {}'.format(mri))
                print('- Number of cycles: {}'.format(ncycle))

            # Rescale LIDS timeline to LIDS period
            rescaled_lids = self.lids_convert_to_internal_time(s)

            periods.append(period)
            mris.append(mri)
            ncycles.append(ncycle)
            ilids.append(rescaled_lids)

        # Create the mean LIDS profile
        lids_profile = reduce(
            (lambda x, y: x.add(y, fill_value=0)),
            ilids
        ) / len(ilids)

        # Fit the mean LIDS profile with a 1st-order polynomial (linear trend)
        fit_params = np.polyfit(
            x=range(len(lids_profile.index)),
            y=lids_profile.values,
            deg=1
        )

        # LIDS summary
        summary = {}
        summary['Mean number of LIDS cycles'] = np.mean(ncycles)
        summary['Mean LIDS period (s)'] = np.mean(periods).astype(float)
        summary['Mean MRI'] = np.mean(mris)
        summary[
            'LIDS dampening factor (counts/{})'.format(self.freq)
        ] = fit_params[0]

        return summary
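
A minimal usage sketch for lids_summary. It is not part of the original snippet: `lids_obj` stands for an already-configured object exposing the LIDS methods used above, and `bouts` for a list of LIDS-transformed sleep bouts (pandas.Series).

# bouts = [...]  # list of pandas.Series from the LIDS transformation (placeholder)
summary = lids_obj.lids_summary(bouts, verbose=True)
print(summary['Mean LIDS period (s)'], summary['Mean MRI'])
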
Ejemplo n.º 52
0
# imports and initial state (assumed; not shown in the original snippet)
from random import randint
import numpy as np
import matplotlib.pyplot as plt

t = 0
time = [0]
price = [50]  # assumed starting price, matching the equilibrium value used in potential()


#drift velocity (defined here but not used in the walk below)
def vel(time):
    if time > 20 and time < 100:
        return 0.5
    else:
        return -0.3


#potential
def potential(time):
    return 0.01 * (50 - price[time - 1])


#perform walk
while t < 2000:
    rand = randint(0, 1)
    if rand == 0:
        rand = -1
    else:
        rand = 1
    t = t + 1
    time.append(t)
    price.append(price[t - 1] + rand + potential(t))

#plot results as well as regression line
plt.plot(time, price)
linetool = np.polyfit(time, price, 1)
line = np.poly1d(linetool)
plt.plot(time, line(time))
plt.show()
Ejemplo n.º 53
0
def sliding_window(img, nwindows=15, margin=50, minpix=1, draw_windows=True):
    global left_a, left_b, left_c, right_a, right_b, right_c
    left_fit_ = np.empty(3)
    right_fit_ = np.empty(3)
    out_img = np.dstack((img, img, img)) * 255

    histogram = get_hist(img)
    # find peaks of left and right halves
    midpoint = int(histogram.shape[0] / 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # Set height of windows
    window_height = int(img.shape[0] / nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base

    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []

    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = img.shape[0] - (window + 1) * window_height
        win_y_high = img.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        if draw_windows:
            cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high),
                          (100, 255, 255), 1)
            cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high),
                          (100, 255, 255), 1)
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))

    #        if len(good_right_inds) > minpix:
    #            rightx_current = np.int(np.mean([leftx_current +900, np.mean(nonzerox[good_right_inds])]))
    #        elif len(good_left_inds) > minpix:
    #            rightx_current = np.int(np.mean([np.mean(nonzerox[good_left_inds]) +900, rightx_current]))
    #        if len(good_left_inds) > minpix:
    #            leftx_current = np.int(np.mean([rightx_current -900, np.mean(nonzerox[good_left_inds])]))
    #        elif len(good_right_inds) > minpix:
    #            leftx_current = np.int(np.mean([np.mean(nonzerox[good_right_inds]) -900, leftx_current]))

    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    if leftx.size and rightx.size:
        # Fit a second order polynomial to each
        left_fit = np.polyfit(lefty, leftx, 2)
        right_fit = np.polyfit(righty, rightx, 2)

        left_a.append(left_fit[0])
        left_b.append(left_fit[1])
        left_c.append(left_fit[2])

        right_a.append(right_fit[0])
        right_b.append(right_fit[1])
        right_c.append(right_fit[2])

        left_fit_[0] = np.mean(left_a[-10:])
        left_fit_[1] = np.mean(left_b[-10:])
        left_fit_[2] = np.mean(left_c[-10:])

        right_fit_[0] = np.mean(right_a[-10:])
        right_fit_[1] = np.mean(right_b[-10:])
        right_fit_[2] = np.mean(right_c[-10:])

        # Generate x and y values for plotting
        ploty = np.linspace(0, img.shape[0] - 1, img.shape[0])

        left_fitx = left_fit_[0] * ploty ** 2 + left_fit_[1] * ploty + left_fit_[2]
        right_fitx = right_fit_[0] * ploty ** 2 + right_fit_[1] * ploty + right_fit_[2]

        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 100]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 100, 255]

        return out_img, (left_fitx, right_fitx), (left_fit_, right_fit_), ploty
    else:
        return img,(0,0),(0,0),0
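
A minimal usage sketch for sliding_window. The helper get_hist and the global coefficient lists are not shown in the snippet, so the versions below are assumptions (a column-wise sum over the lower half of the binary image, and empty running-average lists); the input image is synthetic.

import numpy as np

def get_hist(img):
    # Assumed helper: column-wise histogram of the lower half of the binary image.
    return np.sum(img[img.shape[0] // 2:, :], axis=0)

# Assumed initial state for the running averages used inside sliding_window.
left_a, left_b, left_c = [], [], []
right_a, right_b, right_c = [], [], []

binary_warped = np.zeros((720, 1280), dtype=np.uint8)
binary_warped[:, 300:320] = 1   # synthetic "left lane" pixels
binary_warped[:, 950:970] = 1   # synthetic "right lane" pixels

out_img, (left_fitx, right_fitx), fits, ploty = sliding_window(binary_warped, draw_windows=False)
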
Ejemplo n.º 54
0
def process_video(image):
    '''
    This function takes in an image either from a
    sequence of images or a single image and calls
    all methods responsible for undistorting,
    warping, and thresholding the image.
    '''

    img = np.copy(image)

    # This converts the image to HLS
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]

    # This grabs the binary values
    gradx = abs_sobel_thresh(l_channel, sx_thresh=(7, 255))
    mag_binary = mag_thresh(s_channel, sobel_kernel=15, mag_thresh=(50, 255))
    dir_binary = dir_threshold(s_channel, sobel_kernel=21, thresh=(0, 1.3))
    # s_binary = color_threshold(s_channel, s_thresh=(170, 255)) --NOT IN USE--

    # This combines all the binary thresholds into one so that each can contribute its advantages
    combined = np.zeros_like(dir_binary)
    combined[((gradx == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1

    # ############################## PERSPECTIVE TRANSFORM ############################## #

    # This reads in the saved camera matrix and distortion coefficients
    # These are the arrays calculated using cv2.calibrateCamera()
    dist_pickle = pickle.load(open("calibration_wide/19.p", "rb"))
    mtx = dist_pickle["mtx"]
    dist = dist_pickle["dist"]

    # This is the perspective transform method
    warped_im, undist, Minv = warp(combined, mtx, dist)

    # ############################## FIND LINES AND RADII ############################## #

    # This defines conversions in x and y from pixels space to meters
    ym_per_pix = 30 / 720  # meters per pixel in y dimension
    xm_per_pix = 3.7 / 700  # meters per pixel in x dimension

    def get_radius():
        '''
        This function determines the radii from the polyfit
        '''

        # This gets a new polyfit with pixel-meter conversions
        left_fit = np.polyfit(yaxis * ym_per_pix, leftx * xm_per_pix, 2)
        right_fit = np.polyfit(yaxis * ym_per_pix, rightx * xm_per_pix, 2)

        # This defines a y-value where we want radius of curvature
        # The maximum y-value is chosen, corresponding to the bottom of the image
        y_eval = np.max(ploty)
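        # The two radii below apply the standard radius-of-curvature formula for a
        # quadratic fit x = A*y^2 + B*y + C:
        #     R = (1 + (2*A*y + B)^2)^(3/2) / |2*A|
        # evaluated at the bottom of the image, with y rescaled to meters.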
        left_curverad = (
            (1 + (2 * left_fit[0] * y_eval * ym_per_pix + left_fit[1])**2)**
            1.5) / np.absolute(2 * left_fit[0])
        right_curverad = (
            (1 + (2 * right_fit[0] * y_eval * ym_per_pix + right_fit[1])**2)**
            1.5) / np.absolute(2 * right_fit[0])
        return left_curverad, right_curverad

    # This finds the lines through sliding window search
    lines, yaxis, leftx, rightx, dfc = find_lines(warped_im)

    dfc = dfc * ym_per_pix * .01

    # Polyfit
    left_fit = np.polyfit(yaxis, leftx, 2)
    right_fit = np.polyfit(yaxis, rightx, 2)

    ploty = np.linspace(0, warped_im.shape[0] - 1, warped_im.shape[0])
    left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]

    # This is the radius of curvature
    left_rad, right_rad = get_radius()

    # ############################## DRAW LINES ############################## #

    # This draws the lines on an image either of a sequence of images or on a single image.
    result = draw_lines(warped_im, image, left_fitx, right_fitx, ploty, Minv,
                        undist, [left_rad, right_rad], dfc)

    return result
Ejemplo n.º 55
0
def training_loop(hyperparameters):
    print(f"Starting training with hyperparameters: {hyperparameters}")
    save_path = hyperparameters["save_path"]
    load_path = hyperparameters["load_path"]

    # create the save path and save hyperparameter configuration
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    else:
        a = input("Warning, Directory already exists. Dou want to continue?")
        if a not in ["Y","y"]:
            raise Exception("Path already exists, please start with another path.")

    with open(save_path+ "/parameters.json", "w") as f:
        json.dump(hyperparameters, f)

    # general configurations
    state_dim=18
    action_dim=4
    max_action=1
    iterations=hyperparameters["max_iterations"]
    batch_size=hyperparameters["batch_size"]
    max_episodes=hyperparameters["max_episodes"]
    train_mode = hyperparameters["train_mode"]
    closeness_factor=hyperparameters["closeness_factor"]
    c = closeness_factor

    # init the agent
    agent1 = TD3Agent([state_dim + action_dim, 256, 256, 1],
                        [state_dim, 256, 256, action_dim],
                        optimizer=hyperparameters["optimizer"],
                        policy_noise=hyperparameters["policy_noise"],
                        policy_noise_clip=hyperparameters["policy_noise_clip"],
                        gamma=hyperparameters["gamma"],
                        delay=hyperparameters["delay"],
                        tau=hyperparameters["tau"],
                        lr=hyperparameters["lr"],
                        max_action=max_action,
                        weight_decay=hyperparameters["weight_decay"])

    # load the agent if given
    loaded_state=False
    if load_path:
        agent1.load(load_path)
        loaded_state=True

    # define opponent
    if hyperparameters["self_play"]:
        agent2=agent1
    else:
        agent2 = h_env.BasicOpponent(weak=hyperparameters["weak_agent"])

    # load environment and replay buffer
    replay_buffer = ReplayBuffer(state_dim, action_dim)

    if train_mode == "defense":
        env = h_env.HockeyEnv(mode=h_env.HockeyEnv.TRAIN_DEFENSE)
    elif train_mode == "shooting":
        env = h_env.HockeyEnv(mode=h_env.HockeyEnv.TRAIN_SHOOTING)
    else:
        env = h_env.HockeyEnv()


    # add figure to plot later
    if hyperparameters["plot_performance"]:
        fig, (ax_loss, ax_reward) = plt.subplots(2)
        ax_loss.set_xlim(0, max_episodes)
        ax_loss.set_ylim(0, 20)
        ax_reward.set_xlim(0, max_episodes)
        ax_reward.set_ylim(-30, 20)

    with HiddenPrints():
        # first sample enough data to start:
        obs_last = env.reset()
        for i in range(batch_size*100):
            a1 = env.action_space.sample()[:4] if not loaded_state else agent1.act(env.obs_agent_two())
            a2 = agent2.act(env.obs_agent_two())
            obs, r, d, info = env.step(np.hstack([a1,a2]))
            done = 1 if d else 0
            replay_buffer.add(obs_last, a1, obs, r, done)
            obs_last=obs
            if d:
                obs_last = env.reset()

    print("Finished collection of data prior to training")

    # tracking of performance
    episode_critic_loss=[]
    episode_rewards=[]
    win_count=[]
    if not os.path.isfile(save_path + "/performance.csv"):
        pd.DataFrame(data={"Episode_rewards":[], "Episode_critic_loss":[], "Win/Loss":[]}).to_csv(save_path + "/performance.csv", sep=",", index=False)

    # Then start training
    for episode_count in range(max_episodes+1):
        obs_last = env.reset()
        total_reward=0
        critic_loss=[]

        for i in range(iterations):
            # run the environment
            with HiddenPrints():
                with torch.no_grad():
                    a1 =  agent1.act(env.obs_agent_two()) + np.random.normal(loc=0, scale=hyperparameters["exploration_noise"], size=action_dim)
                a2 = agent2.act(env.obs_agent_two())
                obs, r, d, info = env.step(np.hstack([a1,a2]))
            total_reward+=r
            done = 1 if d else 0

            # modify reward with closeness-to-puck reward
            if hyperparameters["closeness_decay"]:
                c = closeness_factor *(1 - episode_count/max_episodes)
            newreward = r + c * info["reward_closeness_to_puck"] 

            # add to replaybuffer
            replay_buffer.add(obs_last, a1, obs, newreward, done)
            obs_last=obs
            
            # sample minibatch and train
            states, actions, next_states, reward, done = replay_buffer.sample(batch_size)
            loss = agent1.train(states, actions, next_states, reward, done)
            critic_loss.append(loss.detach().numpy())

            # if done, finish episode
            if d:
                episode_rewards.append(total_reward)
                episode_critic_loss.append(np.mean(critic_loss))
                win_count.append(info["winner"])
                print(f"Episode {episode_count} finished after {i} steps with a total reward of {total_reward}")
                
                # Online plotting
                if hyperparameters["plot_performance"] and episode_count>40 :
                    ax_loss.plot(list(range(-1, episode_count-29)), moving_average(episode_critic_loss, 30), 'r-')
                    ax_reward.plot(list(range(-1, episode_count-29)), moving_average(episode_rewards, 30), "r-")
                    plt.draw()
                    plt.pause(1e-17)

                break
        
        # Intermediate evaluation of win/loss and saving of model
        if episode_count % 500 ==0 and episode_count != 0:
                print(f"The agents win ratio in the last 500 episodes was {win_count[-500:].count(1)/500}")
                print(f"The agents loose ratio in the last 500 episodes was {win_count[-500:].count(-1)/500}")
                try:
                    agent1.save(save_path)
                    print("saved model")
                except Exception:
                    print("Saving Failed model failed")
                pd.DataFrame(data={"Episode_rewards": episode_rewards[-500:], "Episode_critic_loss": episode_critic_loss[-500:], "Win/Loss": win_count[-500:]}).to_csv(save_path + "/performance.csv", sep=",", index=False, mode="a", header=False)
                    
    print(f"Finished training with a final mean reward of {np.mean(episode_rewards[-500:])}")





    # plot the performance summary
    if hyperparameters["plot_performance_summary"]:
            try:
                fig, (ax1, ax2) = plt.subplots(2)
                x = list(range(len(episode_critic_loss)))
                coef = np.polyfit(x, episode_critic_loss,1)
                poly1d_fn = np.poly1d(coef)
                ax1.plot(episode_critic_loss)
                ax1.plot(poly1d_fn(list(range(len(episode_critic_loss)))))


                x = list(range(len(episode_rewards)))
                coef = np.polyfit(x, episode_rewards,1)
                poly1d_fn = np.poly1d(coef)
                ax2.plot(episode_rewards)
                ax2.plot(poly1d_fn(list(range(len(episode_rewards)))))
                fig.show()
                fig.savefig(save_path + "/performance.png", bbox_inches="tight")
            except Exception:
                print("Failed to save the figure")
Ejemplo n.º 56
0
                         **common_opts)
ax3.set_xlim(left=2, right=1e4)
ax3.set_ylim(bottom=0.13, top=99.87)
ax3.grid()
plt.yticks([0.5, 2, 10, 30, 50, 70, 90, 98, 99.5])

plt.tight_layout()
plt.title('Extracted data-Probability y scale')
plt.show()

# ### Fitting Polynomial Equation

# In[116]:

poly_order = 7  # Fitting Polynomial order
z = np.polyfit(R / 1000, CDF, poly_order)

# In[117]:

R / 1000

# In[118]:

z

# In[119]:

p = np.poly1d(z)

# In[120]:
Ejemplo n.º 57
0
    def fit_line(self, method='TLS', max_error=None):
        """
        Fit a line to the set of points of the object.

        Parameters
        ----------
        method : string
            The method used to fit the line. Options:
            - Ordinary Least Squares: 'OLS'
            - Total Least Squares: 'TLS'
        max_error : float or int
            The maximum error (average distance points to line) the
            fitted line is allowed to have. A ThresholdError will be
            raised if this max error is exceeded.

        Attributes
        ----------
        a, b, c : float
            The coefficients of the fitted line in the form
            a*x + b*y + c = 0.

        Raises
        ------
        NotImplementedError
            If a non-existing method is chosen.
        ThresholdError
            If the error of the fitted line (average distance points to
            line) exceeds the given max error.
        """
        if len(self.points) < 2:
            raise ValueError('Not enough points to fit a line.')
        elif len(self.points) == 2:
            dx, dy = np.diff(self.points, axis=0)[0]
            if dx == 0:
                self.a = 0
            else:
                self.a = dy / dx
            self.b = -1
            self.c = (np.mean(self.points[:, 1]) -
                      np.mean(self.points[:, 0]) * self.a)
        elif all(self.points[0, 0] == self.points[:, 0]):
            self.a = 1
            self.b = 0
            self.c = -self.points[0, 0]
        elif all(self.points[0, 1] == self.points[:, 1]):
            self.a = 0
            self.b = 1
            self.c = -self.points[0, 1]
        else:
            if method == 'OLS':
                self.a, self.c = np.polyfit(self.points[:, 0],
                                            self.points[:, 1], 1)
                self.b = -1
            elif method == 'TLS':
                _, eigenvectors = PCA(self.points)
                self.a = eigenvectors[1, 0] / eigenvectors[0, 0]
                self.b = -1
                self.c = (np.mean(self.points[:, 1]) -
                          np.mean(self.points[:, 0]) * self.a)
            else:
                raise NotImplementedError("Chosen method not available.")

            if max_error is not None:
                error = self.error()
                if error > max_error:
                    raise utils.error.ThresholdError(
                        "Could not fit a proper line. Error: {}".format(error)
                    )

        self._create_line_segment()
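
A minimal usage sketch for fit_line. It is not part of the original snippet: `segment` stands for a hypothetical instance of the class above whose `points` attribute is an (n, 2) array of x/y coordinates.

import numpy as np

# Illustrative points lying roughly on y = 2x + 1.
segment.points = np.array([[0.0, 1.1], [1.0, 2.9], [2.0, 5.2], [3.0, 6.8]])
segment.fit_line(method='OLS', max_error=0.5)
print(segment.a, segment.b, segment.c)   # coefficients of a*x + b*y + c = 0
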
Ejemplo n.º 58
0
        latIndex1 = np.abs(SScapitalLatLon[0, c] -
                           latc).argmin()  # nearest lat pixel to factory
        lonIndex1 = np.abs(SScapitalLatLon[1, c] -
                           lonc).argmin()  # nearest lon pixel to factory

        lons = np.array([lonIndex0, lonIndex1])
        lats = np.array([latIndex0, latIndex1])
        iminlon = lons.argmin()
        imaxlon = lons.argmax()

        # x = each lon pixel between the cites
        x = np.zeros(shape=(abs(lonIndex1 - lonIndex0)))
        for i in range(abs(lonIndex1 - lonIndex0)):
            x[i] = lons[iminlon] + i

        m, b = np.polyfit([lons[iminlon], lons[imaxlon]],
                          [lats[iminlon], lats[imaxlon]], 1)
        yfit = m * x + b  # yfit = each (basically) lat pixel between the cities

        # add each pixel between the two cities
        for k in range(len(yfit)):
            costtmp = truckCostMap[int(np.round(yfit[k], 0)), int(x[k])]
            truckCostVector[f, c] += costtmp

        # average the costs
        truckCostVector[f, c] = truckCostVector[f, c] / len(yfit)
        print(countrycosted[f], subsaharancountry[c],
              np.round(truckCostVector[f, c], 3))

plt.clf()
plt.imshow(truckCostVector, cmap=cm.jet)
plt.title('Trucking Cost ($/tonne*km)')
Ejemplo n.º 59
0
                ax2a.plot(pixels,
                          yCorrected,
                          color=colours[temperatureIndex],
                          label="%0.1fC" % measurementTemperature)

                pixelShiftPerFrame.append(
                    (solarLinePixelNoShift - minimumPixel))

        pixelShift.append(np.mean(pixelShiftPerFrame))
        measurementTemperatures.append(measurementTemperature)

    pixelShift = np.asfarray(pixelShift)
    measurementTemperatures = np.asfarray(measurementTemperatures)

    linearCoefficients = np.polyfit(measurementTemperatures, pixelShift, 1)

    print("temperature shift coefficients order %i =" % diffractionOrder,
          linearCoefficients)

    with open(os.path.join(BASE_DIRECTORY, "output.txt"), "a") as f:
        f.write("%i, %0.3f, %0.5f, %0.5f\n" %
                (diffractionOrder, minimumPixel, linearCoefficients[0],
                 linearCoefficients[1]))

    ax1a.legend()
    ticks = ax1a.get_xticks()
    ax1a.set_xticks(np.arange(ticks[0], ticks[-1], 1.0))
    ax1b.set_xticks(np.arange(ticks[0], ticks[-1], 1.0))
    ax1a.grid()
    ax1b.grid()
Ejemplo n.º 60
0
def _fit_polynomial(y, X, order=2):
    # Generating weights and model for polynomial function with a given degree
    y_predicted = np.polyval(np.polyfit(X, y, order), X)
    return y_predicted
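
A small self-contained check of the helper above, fitting a quadratic to noisy quadratic data:

import numpy as np

rng = np.random.default_rng(0)
X = np.linspace(-3, 3, 50)
y = 2.0 * X**2 - X + 1.0 + rng.normal(scale=0.1, size=X.size)

y_hat = _fit_polynomial(y, X, order=2)
print(float(np.mean((y - y_hat) ** 2)))  # should be close to the noise variance (~0.01)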