Example #1
def exponential_fit(ac, use_function='single'):
    """Perform a single- or double- exponential fit on an autocorrelation curve.
    
    RETURNS
    yFit  - the y-values of the fit curve."""
    
    nsteps = ac.shape[0]
    if use_function == 'single':
        v0 = [0.0, 1.0 , 4000.]  # Initial guess [a0, a1, tau1] for a0 + a1*exp(-(x/tau1))
        popt, pcov = curve_fit(single_exp_decay, np.arange(nsteps), ac, p0=v0, maxfev=10000)  # ignore last bin, which has 0 counts
        yFit_data = single_exp_decay(np.arange(nsteps), popt[0], popt[1], popt[2])
        # print('best-fit a0 = ', popt[0], '+/-', np.sqrt(pcov[0][0]))
        # print('best-fit a1 = ', popt[1], '+/-', np.sqrt(pcov[1][1]))
        print('best-fit tau1 = ', popt[2], '+/-', np.sqrt(pcov[2][2]))
    else:
        v0 = [0.0, 0.9, 0.1, 4000., 200.0]  # Initial guess [a0, a1,a2, tau1, tau2] for a0 + a1*exp(-(x/tau1)) + a2*exp(-(x/tau2))
        popt, pcov = curve_fit(double_exp_decay, np.arange(nsteps), ac, p0=v0, maxfev=10000)  # ignore last bin, which has 0 counts
        yFit_data = double_exp_decay(np.arange(nsteps), popt[0], popt[1], popt[2], popt[3], popt[4])
        # print('best-fit a0 = ', popt[0], '+/-', np.sqrt(pcov[0][0]))
        # print('best-fit a1 = ', popt[1], '+/-', np.sqrt(pcov[1][1]))
        # print('best-fit a2 = ', popt[2], '+/-', np.sqrt(pcov[2][2]))
        print('best-fit tau1 = ', popt[3], '+/-', np.sqrt(pcov[3][3]))
        print('best-fit tau2 = ', popt[4], '+/-', np.sqrt(pcov[4][4]))

    return yFit_data
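
Note: `single_exp_decay` and `double_exp_decay` are not shown in this example. A minimal sketch consistent with the initial-guess comments above (a0 + a1*exp(-(x/tau1)), plus a second term for the double fit) might look like:

import numpy as np

def single_exp_decay(x, a0, a1, tau1):
    # a0 + a1*exp(-(x/tau1)), matching the p0 comment above
    return a0 + a1 * np.exp(-(x / tau1))

def double_exp_decay(x, a0, a1, a2, tau1, tau2):
    # a0 + a1*exp(-(x/tau1)) + a2*exp(-(x/tau2))
    return a0 + a1 * np.exp(-(x / tau1)) + a2 * np.exp(-(x / tau2))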
Example #2
def waterbulk(input, zcoord, func):
    x = input[0]
    densmean = np.zeros(200)
    i = 0
    start = 0
    for slice in range(1, 200):
        ymean = input[slice]
        At = guess(ymean)
        popt, pcov = curve_fit(func, x, ymean, [At, 2, 1], maxfev=10000)
        k = func(0, *popt)
        densmean[slice] = k
        if k > 800:
            start = slice - i
            i = i + 1

    end = start + i
    densmean2 = np.zeros(i + 1)
    i2 = 0
    for slice2 in range(0, 200):
        ymean = input[slice2]
        At = guess(ymean)
        popt, pcov = curve_fit(func, x, ymean, [At, 2, 1], maxfev=10000)
        k = func(0, *popt)
        if k > 800:
            densmean2[i2] = k
            i2 = i2 + 1
    return np.median(densmean2)
Example #3
def get_data(local_df, date_list):

    for i in date_list:

        # Slice data from the complete dataframe
        sub_df=local_df.loc[i-dt.timedelta(days=avg_window_noct/2)+dt.timedelta(hours=t_del):
                            i+dt.timedelta(days=avg_window_noct/2)-dt.timedelta(hours=t_del)].dropna(axis=0,how='any')
        
        # If there are too few data, or the temperature spread is below temp_spread, skip optimisation
        if len(sub_df) >= min_records and sub_df[tempName].max() - sub_df[tempName].min() >= temp_spread:
            
            global Eo
            Eo=params_df['Eo'].loc[i]
            
            # Try optimisation - if causes error return nan array    
            if nocturnal_fit==True:                  
                try:
                    params_df['rb_noct'][i]=curve_fit(TRF_rb,sub_df[tempName],sub_df[CfluxName],p0=1)[0]
                except RuntimeError:
                    params_df['rb_noct'][i]=np.nan
            else:
                try:
                    a=curve_fit(LRF,sub_df[[radName,tempName,VPDName]],sub_df[CfluxName],p0=[-0.1,-10,1,1])[0]
                except RuntimeError:
                    a=[np.nan,np.nan,np.nan,np.nan]
                params_df['alpha'][i]=a[0]
                params_df['Aopt'][i]=a[1]
                params_df['k'][i]=a[2]
                params_df['rb_day'][i]=a[3]                        
Example #4
def fit_multipeak(idata, npeak = 1, pos = None, wid = 3., ptype = 'Gaussian'):
    if pos is None:
        pos = find_peaks(idata, npeak)
    if len(pos) < npeak:
        raise ValueError('Must have a position estimate for each peak')
    else:
        npeak = [len(pos[x]) for x in pos]
    x_data = np.array(range(len(idata)))
    f = interp1d(x_data, idata, kind='cubic')
    
    amps = f(pos['pos']), f(pos['neg'])
    med = np.median(idata)
    
    #split into positive and negative so that we can differentiate between
    #the two original images
    pmodel = multi_peak_model(ptype, len(amps[0]))
    pinit = [np.array([a, pos['pos'][i], wid]) for i, a in enumerate(amps[0])]
    pdata = np.clip(idata, a_min=med, a_max=np.nanmax(idata))
    p_fit, p_tmp = curve_fit(pmodel, x_data, pdata, pinit)
    
    nmodel = multi_peak_model(ptype, len(amps[1]))
    ninit = [np.array([a, pos['neg'][i], wid]) for i, a in enumerate(amps[1])]
    ndata = np.clip(idata, a_max=med, a_min=np.nanmin(idata))
    n_fit, n_tmp = curve_fit(nmodel, x_data, ndata, ninit)
    
    return x_data, build_composite(p_fit, ptype), build_composite(n_fit, ptype)
Example #5
def add_fits_to_plot(ax, beta_rms,mdots):
	beta_cont= np.logspace(-3,1,num=1000)
	p0 = [19.8,1.]
	opt, pcov = curve_fit(lee_parallel,beta_rms,mdots,p0=p0)
	lab=r'$\dot{M}_{\parallel}: \beta_{ch}= %.1f, n = %.1f$' % (opt[0],opt[1])
	#ax.plot(beta_cont,np.exp(lee_parallel(beta_cont,opt[0],opt[1])),'b-',lw=2,label=lab)
	#
	p0 = [19.8]
	opt, pcov = curve_fit(lee_parallel_neq100,beta_rms,mdots,p0=p0)
	lab=r'$\dot{M}_{\parallel}: \beta_{ch}= %.1f, n \equiv %.1f$' % (opt[0],100)
	#ax.plot(beta_cont,np.exp(lee_parallel_neq100(beta_cont,opt[0])),'r-',lw=2,label=lab)
	#
	p0 = [19.8]
	opt, pcov = curve_fit(lee_parallel_neq8,beta_rms,mdots,p0=p0)
	#lab=r'$\dot{M}_{\parallel}: \beta_{ch}= %.2f, n \equiv %d$' % (opt[0],8)
	lab=r'$\dot{M}_{\rm{Magn.}}: \beta_{ch}= %.1f, n \equiv %d$' % (opt[0],8)
	ax.plot(beta_cont,np.exp(lee_parallel_neq8(beta_cont,opt[0])),'b-',lw=4,label=lab)
	#
	lab=r'$\dot{M}_{\parallel}: \beta_{ch}=19.8, n=1$ (Lee 2014)'
	#ax.plot(beta_cont,np.exp(lee_parallel(beta_cont,19.8,1.)),'g-',lw=2,label=lab)
	#p0 = [19.8]
	#opt, pcov = curve_fit(lee_parallel_neq100,beta_rms,mu,p0=p0)
	#lab=r'$\beta_{ch}= %.1f$' % (opt[0])
	#ax[0].plot(beta_cont,lee_parallel_neq100(beta_cont,opt[0]),'b-',lw=2,label=lab)
	#	p0 = [1,19.8,
	#	p0 = [1,19.8,1] 
	#	for i in range(3):
	#		fit = leastsq(residuals_par, p0, args=(log_mean, log_beta))
	#		p0=lsq_mean[0]
	#		plt.plot(log_beta, lee_parallel(log_beta,lsq_mean[0][0],lsq_mean[0][1],lsq_mean[0][2]),c='b',ls='-',label=r'$\parallel$ mean'
	p0 = [11.1]
	opt, pcov = curve_fit(magnetic_bh_neq8,beta_rms,mdots,p0=p0)
	lab=r'$\dot{M}_{\rm{fit,bh}}: \beta_{ch}= %.1f, n= 8$' % (opt[0])
	ax.plot(beta_cont,np.exp(magnetic_bh_neq8(beta_cont,opt[0])),'r-',lw=4,label=lab)
Example #6
def fit_m2(recondata, pguess, n, s, bounds=None):
    """
    Fits the lunar equation based on lunar local time for each longitude to
    the reconstructed data in order to extract its amplitude and phase for
    comparison to the originals.

    :param recondata: Reconstructed lunar tide data binned by LLT, format [[
                      llt, longitude, tide]_0 ...[llt, longitude, tide]_n]
    :param pguess: Initial guess for parameters in format [amplitude, phase,
                    background]
    :param bounds: Bounds for amplitude in format [[low A, low φ], [high A,
                                                                    high φ]]
    :param s: Spatial frequency of the tidal wave.
    :param n: Temporal frequency of the tidal wave. 2 for semidiurnal.
    :return: list containing the fit values for amplitude, phase and offset
    """
    from scipy.optimize import curve_fit

    # Function that gets fit - nested to allow additional arguments
    def fit_lunar(n, s, L):
        def real_fitter(llt, A, P, C):
            return A * np.cos((2*pi*n / 24) * llt + (s - n)*L - P) + C
        return real_fitter

    if bounds is None:
        popt, pcov = curve_fit(fit_lunar(n, s, recondata[:, 1]), recondata[:, 0],
                               recondata[:, 2], pguess)
    else:
        popt, pcov = curve_fit(fit_lunar(n, s, recondata[:, 1]),
                               recondata[:, 0], recondata[:, 2], pguess,
                               bounds=bounds)

    return [popt[0], popt[1], popt[2]]
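
A quick synthetic check of `fit_m2` (hypothetical values; `recondata` columns are [llt, longitude, tide], as the docstring states, and `np`/`pi` are assumed available at module level):

import numpy as np
from numpy import pi

# build synthetic semidiurnal (n=2, s=2) tide data and recover its parameters
llt = np.tile(np.arange(0, 24, 1.0), 4)
lon = np.repeat([0.0, 0.5, 1.0, 1.5], 24)
tide = 3.0 * np.cos((2*pi*2/24) * llt + (2 - 2)*lon - 1.2) + 0.1
recondata = np.column_stack([llt, lon, tide])
amp, phase, offset = fit_m2(recondata, pguess=[1.0, 0.0, 0.0], n=2, s=2)
# expect amp ~ 3.0, phase ~ 1.2, offset ~ 0.1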
Example #7
def fullPeriodFit2plot(trdata1,trdata2,resids=False,planets=3):
	"""If the option 'resids' is specified, plot the redisuals of the model fitting procedure.  
	Otherwise show the TTVs plus the best-fit sinusoidal model"""
	n,t =  trdata1.T[:2]
	n1,t1 = trdata2.T[:2]
	per0,per10 = map( lambda x: linearfit(x[0],x[1])[1], [[n,t],[n1,t1]] )
	mdl = lambda x,p,t0,a,b,w: x * p + t0 + a * sin(w * p * x) + b * cos(w * p * x)
	j =2 +argmin([abs((j-1)*per10 / ( j*per0 ) - 1.) for j in range(2,5)])
	w0 = 2. * pi * ( j / per10 - (j-1) / per0 )

	popt, pcov = curve_fit(mdl, n, t, p0=[per0,t[0], 0.05, 0.05, w0 ] )
	popt1, pcov1 = curve_fit(mdl, n1, t1, p0=[per10,t1[0], 0.05, 0.05, w0 ] )
	if resids:
		errs,errs1 = trdata1[:,2],trdata2[:,2]
		if planets==1:
			plt.errorbar(t,t - mdl(n,popt[0],popt[1],popt[2],popt[3],popt[4]),yerr=errs,fmt='ks')
		elif planets==2:
			plt.errorbar(t1,t1 - mdl(n1,popt1[0],popt1[1],popt1[2],popt1[3],popt1[4]) ,yerr = errs1,fmt='rs')
		else:
			plt.errorbar(t,t - mdl(n,popt[0],popt[1],popt[2],popt[3],popt[4]),yerr=errs,fmt='ks')
			plt.errorbar(t1,t1 - mdl(n1,popt1[0],popt1[1],popt1[2],popt1[3],popt1[4]) ,yerr = errs1,fmt='rs')
	
	else:
		plot(n,mdl(n,popt[0],popt[1],popt[2],popt[3],popt[4])- popt[0]*n -popt[1] )
		plot(n,t - popt[0]*n - popt[1] )
		plot(n1,mdl(n1,popt1[0],popt1[1],popt1[2],popt1[3],popt1[4])- popt1[0]*n1 -popt1[1] )
		plot(n1,t1 - popt1[0]*n1 - popt1[1] )
	plt.show()
Example #8
def myfit(x, y, betagamma, qL, orientation, T, yerr=None, krange=[]):
    if len(krange) != 0:
        # trim to given range
        booleanrange = (x >= krange[0]) & (x <= krange[1])
        x = x[booleanrange]
        y = y[booleanrange]
    if yerr is None:
        popt, pcov = curve_fit(fun, x, y, sigma=None)
    else:
        if len(krange) != 0:
            yerr = yerr[booleanrange]
        popt, pcov = curve_fit(fun, x, y, sigma=yerr, absolute_sigma=True)
    x = linspace(x[0], x[-1], 1000)
    fit = fun(x, *popt)
    perr = sqrt(diag(pcov))
    A = array([popt[0], perr[0]])/qL**2
    B = array([popt[1], perr[1]])/qL*orientation
    C = array([popt[2], perr[2]])
    eps, bet, alp, gam = twissfromparabola(A, B, C, T)
    epsn = betagamma*eps*1e6
    eps *= 1e9
    string = (r'$\beta=%.2f \pm %.2f$''\n'
              r'$\alpha=%.2f \pm %.2f$''\n'
              r'$\gamma=%.2f \pm %.2f$''\n'
              r'$\epsilon=(%.2f \pm %.2f)\pi\, nm\, rad$''\n'
              r'$\epsilon^*=(%.2f \pm %.2f)\pi\, \mu m\, rad$'
              % (bet[0], bet[1], alp[0], alp[1], gam[0], gam[1],
                 eps[0], eps[1], epsn[0], epsn[1]))
    return x, fit, string
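
`fun` is defined elsewhere; given that `twissfromparabola` consumes parabola coefficients A, B, C, a plausible sketch (an assumption, not the original definition) is a simple quadratic:

def fun(x, a, b, c):
    # parabola sigma^2(k) = a*k^2 + b*k + c, whose scaled coefficients
    # twissfromparabola() converts to Twiss parameters above
    return a * x**2 + b * x + c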
Example #9
def fit_poly(x1, y1, xref, yref, num_free_param , p0x = None, p0y=None, minim=True, weightx=None, weighty =None, leg=False):
    '''
    Assumes input is 2 matched starlists (x1, y1, x2, y2)
    free_param is number of free parameters to be used in the fit
    returns coefficients for best fit polynomial in both x and y
    '''

    if p0x is None:
        p0x = np.zeros(num_free_param)
    if p0y is None:
        p0y = np.zeros(num_free_param)


    if not minim:
        print('weight of first point ', weightx[0], weighty[0])
        c_x, cov_x = curve_fit(poly, np.array([x1,y1]), xref, p0=p0x, sigma=weightx, absolute_sigma=True)
        print('weight of first point ', weightx[0], weighty[0])
        c_y, cov_y = curve_fit(poly, np.array([x1,y1]), yref, p0=p0y, sigma=weighty, absolute_sigma=True)
    else:
        for i in range(len(p0x)):
            p0x[i] = p0x[i]# + i * .01
            p0y[i] = p0y[i]# + i * .01
        resx = minimize(poly_min,p0x, args=(x1,y1, xref, weightx, leg), method='CG')
        resy = minimize(poly_min,p0y, args=(x1,y1, yref, weighty, leg), method='CG')
        c_x = resx.x
        c_y = resy.x
    print(c_y, c_x)
    return c_x, c_y  
Example #10
    def fit(self, xvals, yvals, evals=None, guess=(), handle_nans=True, **kwargs):
        """
    Use xvals and yvals +/- evals to fit params with initial values p0.

    evals == None means don't use errorbars.
    guess == () means guess all 1.0 for the parameters (usually bad!)
    kwargs passed to curve_fit()
    handle_nans automatically drops tuples that have nan in any of xvals, yvals,
      or evals.
    """
        if handle_nans:
            drop = np.isnan(xvals)
            drop = drop | np.isnan(yvals)
            if evals is not None:
                drop = drop | np.isnan(evals)
            xvals = np.array(xvals)[~drop]
            yvals = np.array(yvals)[~drop]
            if evals is not None:
                evals = np.array(evals)[~drop]
        if guess == ():
            guess = self._set_default_parms(xvals, yvals, evals)
        if (evals is None) or (np.isnan(evals).any()):
            fit = curve_fit(self.form, xvals, yvals, p0=guess, **kwargs)
        else:
            fit = curve_fit(self.form, xvals, yvals, sigma=evals, absolute_sigma=True, p0=guess, **kwargs)
        self.parm = np.array(guess)
        self.perr = np.array(guess)
        for pi, p in enumerate(guess):
            self.parm[pi] = fit[0][pi]
            self.perr[pi] = fit[1][pi][pi] ** 0.5
        self.cov = fit[1]
        if len(self.parm) == len(self.pnms):
            self.pmap = dict(zip(self.pnms, self.parm))
            self.emap = dict(zip(self.pnms, self.perr))
Example #11
def threeGaussFit(TR):
  print(len(TR.peak))
 
  if len(TR.peak)>0:
    if len(TR.peak)==1:
      sig=[TR.radius[0]]
      popt,pcov=curve_fit(oneGauss2,TR.x,TR.n,p0=[TR.peak[0],TR.amp[0],sig[0]])
      TR.popt.append(popt[0])
      TR.popt.append(popt[1])
      TR.popt.append(popt[2])
      TR.n=TR.n-nGauss(TR.x,popt[0],popt[1],popt[2])

    elif len(TR.peak)==2:
      sig=[TR.radius[0],TR.radius[1]]
      popt,pcov=curve_fit(nGauss,TR.x,TR.n,p0=[TR.peak[0],TR.amp[0],sig[0],TR.peak[1],TR.amp[1],sig[1]])
      for i in range(6):
        TR.popt.append(popt[i])
      TR.n=TR.n-nGauss(TR.x,popt[0],popt[1],popt[2],popt[3],popt[4],popt[5])

    else:
      sig=[TR.radius[0],TR.radius[1],TR.radius[2]]
      popt,pcov=curve_fit(nGauss,TR.x,TR.n,p0=[TR.peak[0],TR.amp[0],sig[0],TR.peak[1],TR.amp[1],sig[1],TR.peak[2],TR.amp[2],sig[2]])
      for i in range(9):
        TR.popt.append(popt[i])
  
      pb.plot(TR.x,nGauss(TR.x,popt[0],popt[1],popt[2],popt[3],popt[4],popt[5],popt[6],popt[7],popt[8]),color=col4,linewidth=1)
      pb.show()
      TR.n=TR.n-nGauss(TR.x,popt[0],popt[1],popt[2],popt[3],popt[4],popt[5],popt[6],popt[7],popt[8])

  TR.sens=0.02*max(TR.y)
Example #12
def HistFit(XInt):
    # Calculate histogram
    hist, bin_edges = numpy.histogram(XInt, bins = 100, density=False)
    datay = hist
    global bin_centers
    bin_centers = (bin_edges[:-1] + bin_edges[1:])/2
    datax = bin_centers
    # define your function:
    def f(x, height, mean, sd):
        return height*numpy.exp(-(x-mean)**2/(2.*sd**2))
    global mean, sd, CV
    mean = numpy.mean(XInt, axis=0) 
    sd = numpy.std(XInt, axis=0)
    CV = sd/mean
    print(mean, sd, CV)
    height = 40000   
    p_init = numpy.array([height, mean, sd])
    # fit! (given that data is an array with the data to fit)
    coeff, var_matrix = optimize.curve_fit(f, bin_centers, hist, p_init)
    print("this is the fit")
    print(coeff)
    mean = coeff[1]
    sd = coeff[2]
    print(mean, CV)
    hist_fit = f(bin_centers, *coeff)
    return hist_fit
Example #13
def main(angles_org, firing_rates_org, initial_est, fig_title=''):

    # Plot the original data
    plt.figure()
    plt.title('Rotation Tuning ' + fig_title)
    plt.xlabel('Angle(Deg)')
    plt.ylabel('Normalized Firing Rate')
    plt.scatter(angles_org, firing_rates_org, label='Original Data')

    angles_arr = np.arange(-180, 180, step=1)

    # -------------------------------------------------------------------------------------------
    # Single Gaussian Curve Fit
    # -------------------------------------------------------------------------------------------
    params_fit, params_cov_mat = curve_fit(
        single_gaussian,
        angles_org,
        firing_rates_org,
        p0=initial_est[0, :])

    # Standard deviation of fit parameters:
    # REF: (1) http://stackoverflow.com/questions/14581358/getting-standard-errors-on-fitted-
    # parameters-using-the-optimize-leastsq-method-i
    # (2) http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
    params_err_std_dev = np.sqrt(np.diag(params_cov_mat))

    plt.plot(
        angles_arr,
        single_gaussian(angles_arr, params_fit[0], params_fit[1], params_fit[2]),
        label=r'$1\ Gaussian:\ \mu_1=%0.2f,\ \sigma_1=%0.2f,\ Amp_1=%0.2f$'
              % (params_fit[0], params_fit[1], params_fit[2]))

    print ("1 Gaussian Fit - standard deviation of errors in parameters:" +
           "\n\tmu_1=%0.4f, sigma_1=%0.4f, Amp_1=%0.4f"
           % (params_err_std_dev[0], params_err_std_dev[1], params_err_std_dev[2]))

    # -------------------------------------------------------------------------------------------
    # Mirror Symmetric Fit
    # -------------------------------------------------------------------------------------------
    params_fit, params_cov_mat = curve_fit(
        pseudo_symmetric_gaussian,
        angles_org,
        firing_rates_org,
        p0=initial_est[0, :])

    # Standard deviation of fit parameters:
    # REF: (1) http://stackoverflow.com/questions/14581358/getting-standard-errors-on-fitted-
    # parameters-using-the-optimize-leastsq-method-i
    # (2) http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
    params_err_std_dev = np.sqrt(np.diag(params_cov_mat))

    plt.plot(
        angles_arr,
        pseudo_symmetric_gaussian(angles_arr, params_fit[0], params_fit[1], params_fit[2]),
        label=r'$ Pseudo Sym Gaussian:\ \mu_1=%0.2f,\ \sigma_1=%0.2f,\ Amp_1=%0.2f$'
              % (params_fit[0], params_fit[1], params_fit[2]))

    print ("Pseudo Sym - standard deviation of errors in parameters:" +
           "\n\tmu_1=%0.4f, sigma_1=%0.4f, Amp_1=%0.4f"
           % (params_err_std_dev[0], params_err_std_dev[1], params_err_std_dev[2]))
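
Neither tuning-curve model is shown in this example. Based on the plot labels (mu, sigma, Amp) a minimal sketch might be (the mirror-symmetric form is an assumption):

import numpy as np

def single_gaussian(angle, mu, sigma, amp):
    # one Gaussian bump centered at mu; parameters ordered as in the labels
    return amp * np.exp(-(angle - mu)**2 / (2 * sigma**2))

def pseudo_symmetric_gaussian(angle, mu, sigma, amp):
    # assumption: a mirror-symmetric variant with a second bump at -mu
    return amp * (np.exp(-(angle - mu)**2 / (2 * sigma**2)) +
                  np.exp(-(angle + mu)**2 / (2 * sigma**2)))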
Example #14
def double_diode_fit_plot(df, parameters=False):
    fig, ax = plt.subplots()
    if isinstance(df,pd.DataFrame):
        for index, row in df.iterrows():
            x = np.linspace(0.4,1,num=61)
            popt, pcov = curve_fit(double_diode_model,x,row.current_density[140:201],p0=(1e-10,1e-12))
            curve_y = double_diode_model(x, popt[0],popt[1])
            ax.plot(df.voltage, df.current_density)
            j01_string = "{0:.2e}".format(popt[0])
            j02_string = "{0:.2e}".format(popt[1])
            label = "$J_{01} = $" + j01_string +  " $A \ cm^{-2}$" +"\n"+ "$J_{02} = $" +j02_string+ " $A \ cm^{-2}$"
            ax.plot(x, curve_y, '--', label = label)
            ax.legend()
            ax.set_xlabel("Voltage (V)")
            ax.set_ylabel("Current Density ($A \ cm^{-2}$)")
        if parameters:
            return fig, ax, J02_perimeter, J02_bulk, S0LS
        else:
            return fig, ax
    else:
        x = np.linspace(0.4,1,num=61)
        popt, pcov = curve_fit(double_diode_model,x,df.current_density[140:201],p0=(1e-10,1e-12))
        curve_y = double_diode_model(x, popt[0],popt[1])
        ax.plot(df.voltage[140:201], df.current_density[140:201])
        j01_string = "{0:.2e}".format(popt[0])
        j02_string = "{0:.2e}".format(popt[1])
        label = "$J_{01} = $" + j01_string +  " $A \ cm^{-2}$" +"\n"+ "$J_{02} = $" +j02_string+ " $A \ cm^{-2}$"
        ax.plot(x, curve_y, '--', label = label)
        ax.legend()
        ax.set_xlabel("Voltage (V)")
        ax.set_ylabel("Current Density ($A \ cm^{-2}$)")
        if parameters:
            return fig, ax, J02_perimeter, J02_bulk, S0LS
        else:
            return fig, ax
Example #15
def main():

    # calculate least squares beta coefficient using ls()
    beta_old = ls(variables['body'], variables['brain'])

    # calculate least squares beta coefficient using curve_fit()
    # reshape to go from (65, 1) to (65,)
    beta_new, vcov = curve_fit(linear, abrain, abody)

    print('Week 5 model: bo = %.4f * br + %.4f' %
          (beta_old[1][0], beta_old[0][0]))

    print('SciPy model: bo = %.4f * br + %.4f\n' %
          (beta_new[0], beta_new[1]))

    imports = 'from __main__ import curve_fit, linear, abrain, abody, ' +  \
                'brain, body, ls, gaussian'
    times = 1000
    scipy_implementation = timeit('curve_fit(linear, abrain, abody)', setup=imports,
                      number=times)
    my_implementation = timeit('ls(body, brain)', setup=imports,
                   number=times)
    gaussian_implementation = timeit('curve_fit(gaussian, abrain, abody)', setup=imports,
                                    number=times)

    for result in ('my_implementation', 'scipy_implementation', 'gaussian_implementation'):
        print('Avg execution time for %s (%i reps):\n%s' %
              (result, times, eval(result)))

    print('\nGaussian:\nscaling var = %.4f\nmu = %.4f\nsigma = %.4f' %
          tuple(curve_fit(gaussian, abrain, abody)[0]))
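
The `linear` and `gaussian` models are defined elsewhere; from the printout formats above ('bo = slope * br + intercept', 'scaling var / mu / sigma') a sketch consistent with this usage is:

import numpy as np

def linear(x, m, b):
    # bo = m * br + b, matching the 'SciPy model' printout
    return m * x + b

def gaussian(x, scale, mu, sigma):
    # three parameters, matching the 'scaling var / mu / sigma' printout
    return scale * np.exp(-(x - mu)**2 / (2 * sigma**2))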
Example #16
def plot_p_output(n):
  Ts,rhos,f1,f2,vdh = np.loadtxt(OUTNAME,unpack=True)
  plot_mrho = []
  plot_mhoef = []
  plot_mcalc = []
  nmostafa = 400
  sub_plot = 221
  #fn = lambda xs:(lambda a,b,c:[a*xs[i]**2+b*xs[i]+c for i in xrange(len(xs))])
  #quad = fn(rhos)
  quad = lambda a,b,c:[(rhos[i]**2)*(3*a*rhos[i]**2+2*b*rhos[i]+c)\
                       for i in range(len(rhos))]

  for i in range(0,len(rhos),n):
    s = slice(i,i+n)
    mslice = slice((i//n)*nmostafa,((i//n)+1)*nmostafa)
    plt.subplot(sub_plot)
    popt1,_ = curve_fit(cubic,rhos[s],f1[s],p0=[1,1,1,1])
    popt2,_ = curve_fit(cubic,rhos[s],f2[s],p0=[1,1,1,1])
    poptvdh,_ = curve_fit(cubic,rhos[s],Ts[i]*vdh[s],p0=[1,1,1,1])
    p1, = plt.plot(rhos[s],quad(popt1[0],popt1[1],popt1[2])[s],\
                   marker = 'o', linewidth = 3)
    p2, = plt.plot(rhos[s],quad(popt2[0],popt2[1],popt2[2])[s],\
                   marker = '>', linewidth = 3, linestyle = '-.')
    p3, = plt.plot(rhos[s],quad(poptvdh[0],poptvdh[1],poptvdh[2])[s],\
                   marker = 'd', linewidth = 3, linestyle = ':')
    plt.xlabel('$\\rho$',fontsize = 20)
    plt.ylabel('P',fontsize = 15)
    plt.title('P vs $\\rho$ for T = '+str(Ts[i]))
    plt.legend([p1,p2,p3],['$1^{st}$ order','$2^{nd}$ order','Van der Hoef'],\
               loc = 2)
    sub_plot += 1
  plt.show()
  return
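
`cubic` (shared with the next example) is not shown; it takes four coefficients (p0=[1,1,1,1]), and since the `quad` lambda evaluates rho^2 times the derivative 3a*rho^2 + 2b*rho + c, it is presumably an ordinary cubic in rho:

def cubic(x, a, b, c, d):
    # f(rho) = a*rho^3 + b*rho^2 + c*rho + d; the pressure plotted above is
    # P = rho^2 * df/drho, which is what the quad lambda computes
    return a * x**3 + b * x**2 + c * x + d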
Example #17
def plot_subline(n):
  Ts,rhos,f1,f2,fhoef = np.loadtxt(OUTNAME,unpack=True)
  plot_mrho = []
  plot_mhoef = []
  plot_mcalc = []
  subd1 = []
  subd2 = []
  tpts = []
  quad = lambda a,b,c:[(rhos[i]**2)*(3*a*rhos[i]**2+2*b*rhos[i]+c)\
                       for i in range(len(rhos))]
  quad_psolve = lambda a,b,c: (-b+np.sqrt(b*b-4*a*c))/(2*a)
  
  for i in range(0,len(rhos),n):
    s = slice(i,i+n)
    popt1,_ = curve_fit(cubic,rhos[s],f1[s],p0=[1,1,1,1])
    popt2,_ = curve_fit(cubic,rhos[s],f2[s],p0=[1,1,1,1])
    subd1 += [quad_psolve(popt1[0]*rhos[i]**2*3,popt1[1]*rhos[i]**2*2,\
                          popt1[2]*rhos[i]**2)]
    subd2 += [quad_psolve(popt2[0]*rhos[i]**2*3,popt2[1]*rhos[i]**2*2,\
                          popt2[2]*rhos[i]**2)]
    tpts += [Ts[i]]
    print(tpts[-1],'\t',subd1[-1],'\t',subd2[-1])

  p1, = plt.plot(tpts,subd1,linewidth=3,marker='o')
  p2, = plt.plot(tpts,subd2,linewidth=3,linestyle='-.',marker='>')
  plt.xlabel('$T(K)$',fontsize=20)
  plt.ylabel('$\\rho(g/cm^3)$',fontsize=20)
  plt.title("Sublimation Density vs. Temperature")
  plt.legend([p1,p2],['$1^{st}$ order','$2^{nd}$ order'],loc=1)
  plt.show()
  return
Example #18
def fit_func(func, x, y, yerr, xlong, ylong):
    start = time.time()
    p, cov = curve_fit(func, x, y, p0 = initial_guess(func,x,y), sigma = yerr,
            maxfev = 1000000)
#    print(p)
    if func == func_power:
        pi = p
        p, cov = curve_fit(func, x, y, p0 = initial_guess(func, x+p[2], y),
                sigma = yerr, maxfev = 1000000)
        while (np.any((pi/p-1)>0.001)):
            pi=p
            p, cov = curve_fit(func, x, y, p0 = initial_guess(func, x+p[2], y),
                    sigma = yerr, maxfev = 1000000)
    perr = np.sqrt(np.diag(cov))

    chi2red, pval = calculate_stats(x, y, yerr, p, perr, func)

    yfit = func(xlong,*p)
    resid = ylong-yfit
    p = unumpy.uarray(p,perr)
    extrapolate_val = func_err_prop(func,extrapolate_times,p)
    print(extrapolate_val)

    end = time.time()
    elapsed = end-start

    return (p, yfit, resid, extrapolate_val, chi2red, pval, elapsed) 
Example #19
def shift(ep):
    ids,x1,y1,mk,mj,x2,y2 = np.genfromtxt(ep,unpack=True)

    local_mask = np.in1d(ids,bid)
    lid,lx1,ly1,lx2,ly2  = np.transpose([ids,x1,y1,x2,y2])[local_mask].T

    loc_xy = np.transpose([lx2,ly2])
    nbrs   = NN(n_neighbors=vecinos, algorithm='auto').fit(loc_xy)

    coo_xy = np.transpose([x2,y2])
    dist, idx = nbrs.kneighbors(coo_xy)
    idx  = idx[:,1:]
    dist = dist[:,1:]

    ctx = np.zeros(x1.size)
    cty = np.zeros(y1.size)

    for i in range(x1.size):
        star_locales = loc_xy[idx[i]]
        ep1_x = lx1[idx[i]]
        ep1_y = ly1[idx[i]]

        poptx, pcovx = curve_fit(linear,star_locales.T,ep1_x)
        popty, pcovy = curve_fit(linear,star_locales.T,ep1_y)

        ctx[i] += linear([x2[i],y2[i]],*poptx)
        cty[i] += linear([x2[i],y2[i]],*popty)

    shift_x = x1 - ctx
    shift_y = y1 - cty

    hdr  = 'ID X Y MAG_K MAG_J PMX PMY'
    fmt  = '%d %.3f %.3f %.3f %.3f %f %f'
    data = np.transpose([ids,x1,y1,mk,mj,shift_x,shift_y])
    np.savetxt('./%s/%s' % (pm_folder,ep.split('/')[-1].replace('.mfma','.pm')), data, header=hdr, fmt=fmt)
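
Here `linear` is fit against the transposed (x, y) neighbour coordinates, so it presumably maps a coordinate pair to a local plane; a sketch under that assumption:

def linear(xy, a, b, c):
    # local linear (plane) transform: value = a*x + b*y + c,
    # called as linear([x2[i], y2[i]], *popt) above
    x, y = xy
    return a * x + b * y + c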
Example #20
def fitmagbin(data, mbin, cbins, iteration):
	import numpy as np
	from scipy.optimize import curve_fit
	# N, B = np.histogram(data[ : , 2], bins = cbins) # use for base case 
	N, B = np.histogram(data[ : , 2], bins = cbins, weights = data[ : , 3]) # use for morphology
	# print np.sum(np.isnan(N)) # no nans show up
	E = np.sqrt(N)
	E = np.where(E > 0., E, 2.)
	# print E
	bincenters = 0.5 * (B[1:] + B[:-1])

	if iteration == 0:
		opt, cov = curve_fit(func0, bincenters, N, p0 = g_guess[mbin, 0: ], sigma = E, maxfev = 100000)
	if iteration == 1:
		opt, cov = curve_fit(func1, bincenters, N, p0 = g_guess[mbin, 2: ], sigma = E, maxfev = 100000)
	if iteration == 2:
		opt, cov = curve_fit(func2, bincenters, N, p0 = g_guess[mbin, 4: ], sigma = E, maxfev = 100000)

	err = np.sqrt(np.diagonal(cov))
	err = np.absolute(err)
	ERR[iteration, mbin, 2 * iteration : ] = err
	opt = np.absolute(opt)
	OPT[iteration, mbin, 2 * iteration : ] = opt

	print('fit values')
	fitdata = np.column_stack((opt, err))
	print(fitdata)
	return N, E, bincenters
Example #21
def datafit(xdata,ydata,fit_type):
    ydata = np.power(10.0,ydata/10.0)
    length = len(xdata)
    center,minimum = min(enumerate(ydata),key = operator.itemgetter(1))
    low_bound = max(center-length//10,0)
    up_bound = min(center+length//10,len(ydata))
    
    xdata1 = xdata[low_bound:up_bound]
    ydata1 = ydata[low_bound:up_bound]
    
    if (fit_type == 'ratio'):

        popt, pcov = curve_fit(ratio_fitfunc, xdata1, ydata1)
        best_value = (2*popt[0]*popt[1] - popt[3])/(2*popt[0])
        ch2_amp = AWG.get_ch2amp()
        qt.msleep(0.1)
        AWG.set_ch1amp(ch2_amp*best_value)
        qt.msleep(0.1)        
 
    elif (fit_type == 'skew'):
       
        popt, pcov = curve_fit(skew_fitfunc, xdata1, ydata1)
        best_value = popt[2]
        AWG.set_ch1skew(best_value)
        qt.msleep(0.1)
        
    elif (fit_type == 'offset'):
        
        popt, pcov = curve_fit(ratio_fitfunc,xdata1,ydata1)
        best_value = (2*popt[0]*popt[1] - popt[3])/(2*popt[0])
        
        
        
    return best_value
Example #22
def main():
    parser = OptionParser(description='Fitting to a noisy data generated by a known function')
    parser.add_option("--npoints", type="int",   help="number of data points") 
    parser.add_option("--low",     type="float", help="smallest data point") 
    parser.add_option("--high",    type="float", help="highest data point") 
    parser.add_option("--sigma",   type="float", help="std of noise") 
    (options, args) = parser.parse_args() 

    pl.figure(1,(7,6))
    ax = pl.subplot(1,1,1)

    pl.connect('key_press_event',kevent.press)
    
    
    sigma = options.sigma    
    Ls   = np.append(np.linspace(options.low,options.high,options.npoints),46)
    nLs  = np.linspace(min(Ls),max(Ls),100)
    Mis  = HalfLog(Ls,.5,0.5)
    errs = np.random.normal(0,sigma, len(Mis))
    Mis  = Mis+errs
    pl.errorbar(Ls,Mis,errs,ls='',marker='s',color='b')
    print(sigma/Mis)

    coeff, var_matrix = curve_fit(FreeLog,Ls,Mis,(1.0,1.0,1.0))
    err = np.sqrt(np.diagonal(var_matrix))
    dof     = len(Ls) - len(coeff)
    chisq   = sum(((Mis-FreeLog(Ls,coeff[0],coeff[1],coeff[2]))/sigma)**2)
    cdf     = special.chdtrc(dof,chisq)
    print('Free:  a = %0.2f(%0.2f); b = %0.2f(%0.2f); c = %0.2f(%0.2f); p-value = %0.2f ' %(coeff[0],err[0],coeff[1],err[1],coeff[2],err[2],cdf))
    pl.plot(nLs,FreeLog(nLs,coeff[0],coeff[1],coeff[2]),label='Free',color='y')

    coeff, var_matrix = curve_fit(ZeroLog,Ls,Mis,(1.0,1.0))
    err = np.sqrt(np.diagonal(var_matrix))
    dof     = len(Ls) - len(coeff)
    chisq   = sum(((Mis-ZeroLog(Ls,coeff[0],coeff[1]))/sigma)**2)
    cdf     = special.chdtrc(dof,chisq)
    print('Zero:  a = %0.2f(%0.2f);                 c = %0.2f(%0.2f); p-value = %0.2f' %(coeff[0],err[0],coeff[1],err[1],cdf))
    pl.plot(nLs,ZeroLog(nLs,coeff[0],coeff[1]),label='Zero',color='g')
    pl.tight_layout()

    coeff, var_matrix = curve_fit(HalfLog,Ls,Mis,(1.0,1.0))
    err = np.sqrt(np.diagonal(var_matrix))
    dof     = len(Ls) - len(coeff)
    chisq   = sum(((Mis-HalfLog(Ls,coeff[0],coeff[1]))/sigma)**2)
    cdf     = special.chdtrc(dof,chisq)
    print('Half:  a = %0.2f(%0.2f);                 c = %0.2f(%0.2f); p-value = %0.2f' %(coeff[0],err[0],coeff[1],err[1],cdf))
    pl.plot(nLs,HalfLog(nLs,coeff[0],coeff[1]),label='Half',color='b')
    pl.tight_layout()

    coeff, var_matrix = curve_fit(OneLog,Ls,Mis,(1.0,1.0))
    err = np.sqrt(np.diagonal(var_matrix))
    dof     = len(Ls) - len(coeff)
    chisq   = sum(((Mis-OneLog(Ls,coeff[0],coeff[1]))/sigma)**2)
    cdf     = special.chdtrc(dof,chisq)
    print('Unity: a = %0.2f(%0.2f);                 c = %0.2f(%0.2f); p-value = %0.2f' %(coeff[0],err[0],coeff[1],err[1],cdf))
    pl.plot(nLs,OneLog(nLs,coeff[0],coeff[1]),label='Unity',color='r')
    pl.tight_layout()

    pl.legend()
    pl.show()
Example #23
def psf_lineplot(stack, cut_range = 3.5, z_step = 0.5, r_step=0.097):
    """
    cut_range: where to cut off
    axis:    0 --- z-direction
            1 --- y-direction
            2 --- x-direction
    z_step: step in z-direction
    r_step: step in x and y direction
    plot all the three directions
    and fit to Gaussian to give the FWHM
    """
    figv = plt.figure(figsize=(6,4))
    ax = figv.add_subplot(1,1,1)
    ax.set_xlim([-cut_range,cut_range])
    nz, ny, nx = stack.shape
    cz, cy, cx = np.unravel_index(np.argmax(stack), (nz,ny,nx))

    FWHM = np.zeros(3)
    # plot along z-direction
    psf_z = stack[:, cy, cx]
    coord_z = (np.arange(nz)-nz*0.5)*z_step
    b = np.mean((psf_z[0],psf_z[-1]))
    a = psf_z.max() - b
    w0 = 3.00
    pz_0 = (a,0,w0,b)
    popt = optimize.curve_fit(gaussian, coord_z, psf_z, pz_0)[0]
    FWHM[0] = np.sqrt(popt[2]*0.5)* 2.355
    ax.plot(coord_z-popt[1], psf_z, '-ob', linewidth = 2, label = 'z')


    # plot along y-direction
    psf_y = stack[cz,:, cx]
    coord_y = (np.arange(ny)-ny*0.5)*r_step
    b = np.mean((psf_y[0],psf_y[-1]))
    a = psf_y.max() - b
    w0 = 0.50
    py_0 = (a,0,w0,b)
    popt = optimize.curve_fit(gaussian, coord_y, psf_y, py_0)[0]
    FWHM[1] = np.sqrt(popt[2]*0.5)* 2.355
    ax.plot(coord_y-popt[1], psf_y, '->g', linewidth = 2, label = 'y')


    # plot along x-direction
    psf_x = stack[cz, cy, :]
    coord_x = (np.arange(nx)-nx*0.5)*r_step
    b = np.mean((psf_x[0],psf_x[-1]))
    a = psf_x.max() - b
    w0 = 0.50
    px_0 = (a,0,w0,b)
    popt = optimize.curve_fit(gaussian, coord_x, psf_x, px_0)[0]
    FWHM[2] = np.sqrt(popt[2]*0.5)* 2.355
    ax.plot(coord_x-popt[1], psf_x, '-xr', linewidth = 2, label = 'x')

    ax.legend(['z', 'y', 'x'])
    ax.set_xlabel('distance (micron)')



    plt.tight_layout()
    return figv, FWHM
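
`gaussian` is defined elsewhere; the FWHM conversion sqrt(popt[2]*0.5)*2.355 implies the third parameter is 2*sigma^2, so a consistent sketch (an assumption) is:

import numpy as np

def gaussian(x, a, x0, w, b):
    # a*exp(-(x-x0)^2 / w) + b with w = 2*sigma^2, so that
    # FWHM = 2.355*sigma = 2.355*sqrt(w/2) as computed above
    return a * np.exp(-(x - x0)**2 / w) + b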
Example #25
def twoDParbolicFit(data):
    """parabolic fit the function z = Amp * max(0, 1 - a*(x-x0)^2 - b*(y-y0)^2) +offset). 
    Since we only need center and width, this function returns center, width, amplitude and offset.
    """
    def oneDParabolic(x, centerX, Amp, a, offset):
        #print  - Amp *  ((x-centerX)**2) + C
        #print offset
        #return - Amp *  ((x-centerX)**2) + C * offset
        out = Amp * np.maximum(1 - a * (x - centerX)**2, 0)**2 + offset

        return out
    #def oneDGaussian(x, centerX,sDevX,Amp,yOffset):
    #    return Amp*np.exp(-0.5*((x-centerX)/sDevX)**2)+yOffset
    ### Mask out only the area of interest then integrate the data long each axis
    



    xSlice = np.sum(data,0)    
    ySlice = np.sum(data,1)
        
    ### Initial guesses for 1d fits
    xOff = np.nanmin(xSlice)
    maxX = np.nanmax(xSlice)-xOff
    x0 = np.argmax(xSlice)
    
    yOff = np.nanmin(ySlice)
    maxY = np.nanmax(ySlice)-yOff
    y0 = np.argmax(ySlice)

    lengthX = 0
    for i in range(len(xSlice)):
        if xSlice[i] - xOff > 0.2 * maxX:
            lengthX += 1
 
    lengthY = 0
    for i in range(len(ySlice)):
        if ySlice[i] - yOff > 0.2 * maxY:
            lengthY += 1  
    
    aX = 4./lengthX**2
    aY = 4./lengthY**2
    AmpX = 4./3. * maxX/sqrt(aY)
    AmpY = 4./3. * maxY/sqrt(aX)

            
    ### 1d fits
    xVals, yCovar = curve_fit(oneDParabolic,range(len(xSlice)),xSlice,p0=(x0, AmpX, aX, xOff))
    x0 = xVals[0]
    widthX = sqrt(1/xVals[2])
    
    yVals, yCovar = curve_fit(oneDParabolic,range(len(ySlice)),ySlice,p0=(y0, AmpY, aY, yOff))
    y0 = yVals[0]
    widthY = sqrt(1/yVals[2])

    Amp = sqrt(xVals[1] * yVals[1] * 9./16. * sqrt(xVals[2] * yVals[2]) )
    
    offset = 0.5 * (xVals[3]/(np.shape(data)[1]) + yVals[3]/(np.shape(data)[0]))
    
    return [[x0, y0], [widthX, widthY], Amp, offset]
Example #26
def gauss_fit(xdata, ydata, withoffset=True, trim=None, guess_z=None):
    """Utility function for fitting single variable gaussian data"""
    # estimate the offset
    offset = ydata.min()
    ydata_corr = ydata - offset
    # make parameter guesses if none given
    if guess_z is None:
        x0 = nmoment(xdata, ydata_corr, 0, 1)
    else:
        x0 = guess_z
    sigma_x = np.sqrt(nmoment(xdata, ydata_corr, x0, 2))
    p0 = np.array([ydata_corr.max(), x0, sigma_x, offset])
    # trim data if requested
    if trim is not None:
        args = abs(xdata - x0) < trim * sigma_x
        xdata = xdata[args]
        ydata = ydata[args]
    # do actual fitting
    try:
        if withoffset:
            popt, pcov = curve_fit(gauss, xdata, ydata, p0=p0)
        else:
            popt, pcov = curve_fit(gauss_no_offset, xdata, ydata, p0=p0[:3])
            popt = np.insert(popt, 3, offset)
    except RuntimeError:
        popt = p0 * np.nan
    # return result
    return popt
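
For reference, `gauss` and `gauss_no_offset` are not shown here; the p0 ordering [amplitude, x0, sigma, offset] suggests a sketch like:

import numpy as np

def gauss(x, amp, x0, sigma_x, offset):
    # amplitude, center, width, constant offset - the p0 ordering above
    return amp * np.exp(-(x - x0)**2 / (2 * sigma_x**2)) + offset

def gauss_no_offset(x, amp, x0, sigma_x):
    # same model with the offset pinned to zero
    return amp * np.exp(-(x - x0)**2 / (2 * sigma_x**2))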
Example #27
def draw_fit(rl, pct):
    """Draw sigmoid for psychometric

    rl: x values
    pct: y values

    Fxn draws the curve
    """
    def sig(x, A, x0, k, y0):
        return A / (1 + np.exp(-k*(x-x0))) + y0
    def sig2(x, x0, k):
        return 1. / (1+np.exp(-k*(x-x0)))

    pl.xlabel('R-L stimuli')
    pl.ylabel('p(choose R)')
    pl.xlim([rl.min()-1, rl.max()+1])
    pl.ylim([-0.05, 1.05])

    popt,pcov = curve_fit(sig, rl, pct) # stretch and yshift are free params
    popt2,pcov2 = curve_fit(sig2, rl, pct) # stretch and yshift are fixed
    x = np.linspace(rl.min(), rl.max(), 200)
    y = sig(x, *popt)
    y2 = sig2(x, *popt2)
    pl.vlines(0,0,1,linestyles='--')
    pl.hlines(0.5,rl.min(),rl.max(),linestyles='--')
    pl.plot(x,y)
    #pl.plot(x,y2)
    return popt
Example #28
def fitParam(paths,identifier): #paths should be ordered in theta
  x0 = defaultdict(list)
  for path in paths:
    res, df = parametrizeBias([path],level=0)
    x0['a'].append(res.x[0])
    x0['b'].append(np.abs(res.x[1]))
    x0['theta'].append(int(np.unique(degrees_(df['theta']).apply(round))))
  x0 = pd.DataFrame(x0)
  colors = plt.cm.Set1(np.linspace(0, 1, 2))
  fig = plt.figure(figsize=(8,5))
  ax = fig.add_subplot(111)
  ax.scatter(np.array(x0['theta']),np.array(x0['a']),marker='s',s=30,c=colors[0],lw=0)
  ax.scatter(np.array(x0['theta']),np.array(x0['b']),marker='o',s=30,c=colors[1],lw=0)
  ths = np.linspace(x0['theta'].min(),x0['theta'].max(),100) 
  a0, acov_lin = curve_fit(a,np.array(x0['theta'])*np.pi/180.,x0['a']) 
  b0, bcov_quad= curve_fit(b,np.array(x0['theta'])[1:]*np.pi/180.,np.array(x0['b'])[1:]) 
  ax.plot(ths,a(np.array(ths)*np.pi/180.,*a0),lw=2,label = "$a$",c=colors[0])
  ax.plot(ths,b(np.array(ths)*np.pi/180.,*b0),lw=2,label = "$b$",c=colors[1])
  customAx(ax,xlabel=r'$\theta/^{\circ}$',ylabel="$p$",xlim=[x0['theta'].min()-1,x0['theta'].max()+1],legloc=2,ncol=1,bbox=None,
    fontS=18,frameon=False,xscale_log=False,yscale_log=False,ticklabelsize=18,labelsize=22,legend=True,
    handleL=1,colSp=.2,labelSp=0.001,handleTp=.1,borderP=.2,yaxis_labelpad=5)    
  ax.minorticks_on()
  figpath = 'pics/RecEff'
  figname = 'fitbiasparam_theta_%(identifier)s.pdf'%locals()
  print('save fig', figname)
  fig.savefig('%(figpath)s/%(figname)s'%locals(),bbox_inches='tight')
  return x0
Example #29
def cruvefit(std, window, peak, stdamp, popendiff):
    fitpeak = np.array([])
    fitpopendiff = np.array([])
    fitstdamp = np.array([])
    fitwindow = np.array([])
    failedattemp = 'interval:' + str(window)
    failedattempcount = 0
    for tpeak in range(len(peak)):
        xdata = np.arange(peak[tpeak]-(window-1)*precentage, peak[tpeak]+(window-1)*precentage+1)
        ydata = std[peak[tpeak]-(window-1)*precentage: peak[tpeak]+(window-1)*precentage+1]
        guess = [peak[tpeak], popendiff[tpeak]]
        params, params_covariance = optimize.curve_fit(funstd, xdata, ydata, guess)
        # round the fitted peak position before using it as an index
        if funstd(params[0], params[0], params[1]) < (0.5*std[int(round(params[0]))]):
            print('Curve fit based on Popen solved from standard deviation peak failed.')
            print('Peak time', peak[tpeak], 'Retry for maximum of ten times.')
            count = 1
            while count < 11:
                params, params_covariance = optimize.curve_fit(funstd, xdata, ydata, guess)
                if funstd(params[0], params[0], params[1]) < (0.5*std[int(round(params[0]))]):
                    count += 1
                else:
                    print('Retrying:', str(count), 'of 10', 'success')
                    count += 100

            if count == 11:
                print(peak[tpeak], 'Curve fitting failed', 'Using initial guess instead')
                failedattemp += ' ' + str(peak[tpeak])
                failedattempcount += 1
                params[0] = peak[tpeak]
                params[1] = popendiff[tpeak]
        fitpeak = np.append(fitpeak, params[0])
        fitpopendiff = np.append(fitpopendiff, params[1])
        fitstdamp = np.append(fitstdamp, funstd(params[0], params[0], params[1]))
    failedattemp += ' ' + str(failedattempcount) + ' out of ' + str(len(fitpeak)) + ' failed'
    return fitpeak, fitpopendiff, fitstdamp, failedattemp
Example #30
def bootstrap(data, H_0):
    '''
    Bootstrap simulation to find the
    95% confidence interval.
    '''
    N, N1 = data.shape
    N_boot = 10000
    H = np.zeros(N_boot)
    for i in range(N_boot):
        s = np.random.randint(low=0, high=N, size=N)
        fake_data = data[s][s]
        v = fake_data[:, 0]
        d = fake_data[:, 1]
        a_optimo1, a_covarianza1 = curve_fit(funcion_a_minimizar1,
                                               d, v, 2)
        a_optimo2, a_covarianza2 = curve_fit(funcion_a_minimizar2,
                                               v, d, 2)
        a_promedio = (a_optimo2 + a_optimo1) / 2
        H[i] = a_promedio
    fig2, ax2 = plt.subplots()
    plt.hist(H, bins=30)
    plt.axvline(H_0, color='r', label="obtained H0 value")
    plt.legend(loc=2)
    ax2.set_title("Bootstrap simulation")
    ax2.set_xlabel("H [km/s / Mpc]")
    ax2.set_ylabel("frequency")
    plt.savefig("bootstrap_2.jpg")
    H = np.sort(H)
    limite_bajo = H[int(N_boot * 0.025)]
    limite_alto = H[int(N_boot * 0.975)]
    print "El intervalo de confianza al 95% es: [{}:{}]".format(limite_bajo,
                                                                limite_alto)
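
The two model functions are defined elsewhere; for a Hubble-law fit (v = H*d) averaged over both regression directions, a sketch consistent with averaging the two optima is:

def funcion_a_minimizar1(d, H):
    # velocity as a function of distance: v = H * d
    return H * d

def funcion_a_minimizar2(v, H):
    # distance as a function of velocity, same parameter H: d = v / H
    return v / H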
Example #31
    def bias_corrector_sigma(self,
                             model_func,
                             b,
                             rice_sig,
                             sigma,
                             ind_break,
                             guess,
                             bound,
                             weight,
                             sigdiff_break=.02,
                             N_break=100):
        """
    Algorithm Implementation without sigma estimation
    Run Bias Correction
    model_func: underlying model function
    b: array of b-factors (b=gamma^2*g^2*delta^2*(Delta-delta/3)) [s/mm^2]
    rice_sig: signal to be corrected (Rician distributed)
    guess: initial guess for the fit function (must be set; not None)
    bound: bounds for the fit
    weight: use weighting of datapoints (rician)
    num_sigmaes_avrg: "number of freedom" for sigmaes calculation
    """
        #array for corrected signal
        rice_sig_corr = np.zeros_like(rice_sig)
        #set dimensions and number of fitting parameters
        num_dp = np.prod(b.shape)  #number of datapoints in signal
        if ind_break is None:
            ind_break = int(num_dp / 2.)  # index for the break criterion
        num_fp = len(guess)  #number of fitting parameters
        #effective number of datapoints for sigmaes calculation
        #list for run parameters
        para_run_arr = np.zeros((N_break, len(guess) + 1))  # sized by the N_break argument used in the loop below
        #saving fitting errors (keep as list for the moment)
        para_run_err = []

        #STEP 1
        #fit exp and obtain ŝ_0 (fit_sig) and RMSE
        res = so.curve_fit(
            model_func,
            b,
            rice_sig,  # fit the measured signal; rice_sig_corr is still all zeros here
            guess,
            bounds=bound,
            method=method_grad,
        )
        if res is False:
            if self.debug:
                print('Problem with Fit!')
            return None
        fit_sig = model_func(b, *res[0])  # FIX parameter input
        #save parameters of first fit and estimation
        para_run_arr[0, :-1] = res[0]
        para_run_arr[0, -1] = sigma
        para_run_err.append(res[1])

        for i in np.arange(1, N_break):
            #STEP 3 + 4
            #Estimate Rician bias and calculate corrected signal
            rice_sig_corr = rice_sig - (self.rice_mean_high(fit_sig, sigma) -
                                        fit_sig)
            #STEP 5
            #fit exp
            res = so.curve_fit(
                model_func,
                b,
                rice_sig_corr,
                guess,
                bounds=bound,
                method=method_grad,
            )
            if res is False:
                if self.debug:
                    print('Problem with Fit!')
                return None
            fit_sig_bf = np.copy(fit_sig)
            fit_sig = model_func(b, *res[0])
            #step 6
            para_run_arr[i, :-1] = res[0]
            para_run_arr[i, -1] = sigma
            para_run_err.append(res[1])
            #step 8 stopping crit
            if np.max(
                    np.abs(fit_sig[ind_break] - fit_sig_bf[ind_break]) /
                    fit_sig[ind_break]) < sigdiff_break:
                break
        return [para_run_arr[:i + 1], para_run_err, i]
Example #32
	FWHM	= np.zeros(N)
	V_span	= np.zeros(N)
	BIS 	= np.zeros(N)


	v_CCF 	= np.linspace(-20, 20, 401)
	idx0 	= (v_CCF >= -10) & (v_CCF <= 10)
	v_ccf  	= v_CCF[idx0]

	for i in range(N):
		filename	= DIR + '/fits/CCF' + str(i) + '.dat'
		# filename	= DIR + '/CCF_noise' + '%d' %SN[n] + '/' + str(i) + '.dat'
		CCF 		= 1 - np.loadtxt(filename)
		ccf 		= CCF[idx0]
		# popt, pcov  = curve_fit(gaussian, v_ccf, ccf)
		popt, pcov  = curve_fit(gaussian, v_CCF, CCF)
		popt0 		= popt 								# Keep a snapshot
		RV[i]   	= popt[1]    						# km/s
		FWHM[i]		= popt[2] * math.sqrt(8*math.log(2))
		sigma 		= FWHM[i]/2 						# performs better than sigma = popt[2]
		# sigma 		= popt[2]

		###################
		# Calculate Vspan #
		###################
		# σ is the width of the CCF
		# I take σ as the FWHM

		# oversample v_CCF
		from scipy.interpolate import interp1d
		f 		= interp1d(v_CCF, CCF, kind='cubic')
Example #33
    dates = dates[i0:]

    if province != 'Hubei':
        maxt = 12
    else:
        maxt = 19
    i1 = np.where(t <= maxt)[0][-1]
    _t = t[:i1 + 1]
    _cases = cases[:i1 + 1]
    print(t, cases)

    if len(t) < 8:
        continue

    f = lambda x, mu, B: B * x**mu
    p, _ = curve_fit(f, _t, _cases, [1.9, 4.5])

    print(p)

    tt = np.logspace(np.log(t[0]), np.log(t[-1]), base=np.exp(1))
    tt_dates = np.array((tt - 1) * 24 * 3600, np.timedelta64) + dates[0]
    print(dates, tt_dates)

    pl.sca(ax[i])

    pl.plot_date(dates,
                 cases,
                 marker=markers[i],
                 c=colors[i],
                 label=titlemap[province],
                 mfc='None')
Example #34
def fitUtilityFunction(X,Y,func):
    from scipy.optimize import curve_fit
    curve_opt, curve_cov = curve_fit(func,X,Y,
                                     maxfev=20000) # fit the observed data, try 20,000 iterations
    # print(curve_opt)
    return curve_opt
Example #35
    def func(params, x):
        a, b, c = params
        return a*np.exp(-b*x) + c

    def error(params, x, y):
        return y - func(params, x)

    def error2(params, x, y):
        return (y - func(params, x))**2




    x = np.linspace(0,4,50)
    params = np.array([2.5, 1.3, 0.5])
    y0 = func(params, x)
    y = y0 + 0.2*np.random.normal(size=len(x))

    res = optimize.leastsq(error, params, args=(x, y), full_output=True)
##    r, R, c = getjaccov(res[1:], 3)

    mod = Myfunc(y, x)
    resmy = mod.fit(nparams=3)

    cf_params, cf_pcov = optimize.curve_fit(func0, x, y)
    cf_bse = np.sqrt(np.diag(cf_pcov))
    print(res[0])
    print(cf_params)
    print(resmy.params)
    print(cf_bse)
    print(resmy.bse)
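
The snippet compares `optimize.leastsq` (which uses the `func(params, x)` convention) against `optimize.curve_fit`, which needs the model in `func0(x, *params)` form. `func0` isn't shown; a sketch consistent with that calling convention:

def func0(x, a, b, c):
    # same exponential model as func(), but with curve_fit's
    # (x, param1, param2, ...) calling convention
    return a*np.exp(-b*x) + c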
Example #36
plt.savefig('build/magnetfeld.pdf')

makeTable([z[:10], B[:10]], r'{$z/\si{\milli\metre}$} & {$B/\si{\milli\tesla}$}', 'magnetfeld', ['S[table-format=2.0]', 'S[table-format=3.0]'], ["%2.0f", "%3.0f"])
makeTable([z[9:], B[9:]], r'{$z/\si{\milli\metre}$} & {$B/\si{\milli\tesla}$}', 'magnetfeld2', ['S[table-format=2.0]', 'S[table-format=3.0]'], ["%2.0f", "%3.0f"])

# reinprobe (pure sample)
print('reinprobe')
l,t1,t2 = np.genfromtxt('scripts/reinprobe.txt',unpack=True) # micrometer, degree
tr = (t1-t2)/2
tr = 2*np.pi*tr/360 # rad
tr = tr/5.11*10**3 #rad/m

def hyperbel(l,a):
	return a/l**2

params, covariance_matrix = curve_fit(hyperbel,l,tr)
errors = np.sqrt(np.diag(covariance_matrix))

x = np.linspace(0.9,2.7,1000)
plt.cla()
plt.clf()
plt.plot(l**2,tr,'rx',label=r'measured values')
plt.plot(x**2,hyperbel(x,*params),'b-',label=r'fit curve')
plt.xlabel(r'$\lambda^2/\si{\micro\metre\squared}$')
plt.ylabel(r'$\Delta\theta/\si{\text{rad}\per\metre}$')
plt.xlim(0.9,7.2)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.legend(loc='best')
plt.savefig('build/reinprobe.pdf')

print('a=',unp.uarray(params[0],errors[0]))
Example #37
def smooth(wvl, flux, cut_vel, unc_arr=False):
    """
    Smoothes spectra by separating SN features from noise in Fourier space.
    Python implementation of the IDL smoothing technique introduced by Liu et al. 2016
    (https://github.com/nyusngroup/SESNspectraLib/blob/master/SNspecFFTsmooth.pro)

    Parameters
    ----------
    wvl : np.array
        wavelength array
    flux : np.array
        flux array
    cut_vel : float
        velocity cut for SN features. Recommended 1000 km/s for non IcBL spectra
        and 3000 km/s for IcBL spectra.
    unc_arr : Boolean
        Calculates uncertainty array if True.

    Returns
    -------
    w_smoothed : np.array
        wavelength array for smoothed fluxes
    f_smoothed : np.array
        smoothed flux array
    sepvel : float
        velocity for separating SN features.

    """
    c_kms = 299792.47  # speed of light in km/s
    vel_toolarge = 100000  # km/s
    width = 100

    wvl_ln = np.log(wvl)
    num = wvl_ln.shape[0]
    binsize = wvl_ln[-1] - wvl_ln[-2]
    f_bin, wln_bin = binspec(wvl_ln, flux, min(wvl_ln), max(wvl_ln), binsize)
    fbin_ft = np.fft.fft(f_bin)  #*len(f_bin)
    freq = np.fft.fftfreq(num)
    num_upper = np.max(np.where(1.0 / freq[1:] * c_kms * binsize > cut_vel))
    num_lower = np.max(
        np.where(1.0 / freq[1:] * c_kms * binsize > vel_toolarge))
    mag_avg = np.mean(np.abs(fbin_ft[num_lower:num_upper + 1]))
    powerlaw = lambda x, amp, exp: amp * x**exp
    num_bin = len(f_bin)
    xdat = freq[num_lower:num_upper]
    ydat = np.abs(fbin_ft[num_lower:num_upper])
    nonzero_mask = xdat != 0
    slope, intercept, _, _, _ = st.linregress(np.log(xdat[nonzero_mask]),
                                              np.log(ydat[nonzero_mask]))
    exp_guess = slope
    amp_guess = np.exp(intercept)

    #do powerlaw fit
    xdat = freq[num_lower:int(num_bin / 2)]
    ydat = np.abs(fbin_ft[num_lower:int(num_bin / 2)])
    #exclude data where x=0 because this can cause 1/0 errors if exp < 0
    finite_mask = np.logical_not(xdat == 0)
    finite_mask = np.logical_and(finite_mask, np.isfinite(ydat))
    ampfit, expfit = opt.curve_fit(powerlaw,
                                   xdat[finite_mask],
                                   ydat[finite_mask],
                                   p0=[amp_guess, exp_guess])[0]

    #find intersection of average fbin_ft magnitude and powerlaw fit to calculate separation
    #velocity between signal and noise.
    intersect_x = np.power((mag_avg / ampfit), 1.0 / expfit)
    sep_vel = 1.0 / intersect_x * c_kms * binsize

    #filter out frequencies with velocities higher than sep_vel
    smooth_fbin_ft = np.array([fbin_ft[ind] if np.abs(freq[ind])<np.abs(intersect_x) else 0 \
                               for ind in range(len(freq))])#/len(f_bin)
    #inverse fft on smoothed fluxes
    smooth_fbin_ft_inv = np.real(np.fft.ifft(smooth_fbin_ft))

    #interpolate smoothed fluxes back onto original wavelengths
    w_smoothed = np.exp(wln_bin)
    f_smoothed = np.interp(wvl, w_smoothed, smooth_fbin_ft_inv)

    if unc_arr:
        f_resi = flux - f_smoothed
        num = len(f_resi)
        bin_size = int(np.floor(
            width / (wvl[1] - wvl[0])))  # window width in number of bins
        bin_rad = int(np.floor(bin_size / 2))
        f_std = np.zeros(num)
        start_ind = bin_rad
        end_ind = num - bin_rad
        for j in np.arange(start_ind, end_ind):
            f_std[j] = np.std(f_resi[j - bin_rad:j + bin_rad + 1])
        for j in np.arange(1, bin_rad):
            f_std[j] = np.std(f_resi[0:2 * j + 1])
        for j in np.arange(end_ind, num - 1):
            f_std[j] = np.std(f_resi[2 * j - num + 1:])
        f_std[0] = np.abs(f_resi[0])
        f_std[-1] = np.abs(f_resi[-1])
        return w_smoothed, f_smoothed, sep_vel, f_std

    return w_smoothed, f_smoothed, sep_vel
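
`binspec` belongs to the original codebase and is not shown; a minimal sketch that rebins the spectrum onto a uniform log-wavelength grid (linear interpolation here is an assumption) would be:

import numpy as np

def binspec(x, y, x_min, x_max, binsize):
    # resample (x, y) onto a uniform grid from x_min to x_max with the
    # given bin size; returns the rebinned fluxes and their grid
    x_bin = np.arange(x_min, x_max + binsize, binsize)
    y_bin = np.interp(x_bin, x, y)
    return y_bin, x_bin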
Example #38
def main():
	# print(Strukturamplitude(gitter = 'SC'))
	# print(Strukturamplitude(gitter = 'FCC'))
	# print(Strukturamplitude(gitter = 'BCC'))
	gitter_moegl = ['SC','FCC','BCC','Diamant']
	Xi2_best = ['SC', 10]

	####################################################################################################
	# Metal
	####################################################################################################
	print('\n#################### Analysis for metal ####################\n')
	# print('\n')

	# arc-length formula: b = alpha*r*pi/180
	# theta_4 = radius_m * 180 / (np.pi * Radius_k)
	theta_4_temp = radius_m * 180 / (np.pi * Radius_k) # in degrees!
	theta_4_rueck = (radius_m_rueck * 180 / (np.pi * Radius_k)) #+ 180 # in degrees!
	theta_4 = np.concatenate((theta_4_temp, theta_4_rueck), axis = 0)
	print('theta_4:', theta_4)
	theta = theta_4 * np.pi / (4 * 180) # in radians
	theta = np.sort(theta)
	theta_sin = np.sin(theta)

	# error propagation due to the inaccuracy of the ruler
	radius_metall_temp = unp.uarray(radius_m, Fehler_radius)
	radius_metall_rueck = unp.uarray(radius_m_rueck, Fehler_radius)
	radius_metall = np.concatenate((radius_metall_temp, radius_metall_rueck), axis = 0)
	Radius_kamera = ufloat(Radius_k, 0.0)
	theta_4_unp = radius_metall * 180 / (np.pi * Radius_kamera)
	theta_unp = theta_4_unp * np.pi / (4 * 180) # in radians
	theta_unp = np.sort(theta_unp)

	print('Theta with errors: ', theta_unp)
	
	netzebenenabstand_metall = bragg(lambda_1, theta)

	for gitter in gitter_moegl:
		infos = Strukturamplitude(gitter = gitter)
		reflexe = infos[0]
		m = infos[1]
	
		verhaeltnisse = findStructure(m, netzebenenabstand_metall)
		verhaeltnis_m = verhaeltnisse[0]
		verhaeltnis_d = verhaeltnisse[1]
	
		print('Ratios for the m values: ', verhaeltnis_m)
		print('Ratios for the d values: ', verhaeltnis_d)
		print('Xi^2 deviation for the ' + gitter + ' structure: ', abweichung(verhaeltnis_m, verhaeltnis_d))

		if abweichung(verhaeltnis_m, verhaeltnis_d) < Xi2_best[1]:
			Xi2_best = [gitter, abweichung(verhaeltnis_m, verhaeltnis_d)]

	print('Structure with the smallest deviation: ', Xi2_best[0])
	print('Xi^2 deviation: ', Xi2_best[1])
		
	m = Strukturamplitude(gitter = Xi2_best[0])[1]
	a = np.array(gitterkonstanteBragg(m, netzebenenabstand_metall))

	print('Lattice constants for the ' + Xi2_best[0] + ' structure: ', a)

	# linear fit of a against cos^2
	params, cov = curve_fit(lin,np.cos(theta)**2,a)
	# params, cov = curve_fit(lin,np.cos(theta),a)
	err = np.sqrt(np.diag(cov))

	a_extrp = ufloat(params[1], err[1])

	# systematic error: absorption of the X-rays

	DeltaA = (radius_rohr / (2 * Radius_k)) * (1 - Radius_k / Abstand_FP) * (np.cos(theta)**2 / theta) * a
	# cos2Theta = []
	# for i in range(0,len(theta)):
	# 	cos2Theta.append(ufloat(theta[i], Fehler_Theta[i]))
	cos2Theta = unp.cos(theta_unp)**2

	a_mitFehler = unp.uarray(a, DeltaA)
	print('Lattice constants with the absorption error: ', a_mitFehler)

	print('Extrapolated lattice constant: ', a_extrp)

	cos2Theta_fit = np.linspace(0, 1)

	####### from here on: the plot of a against cos^2 ########
	# plt.plot(np.cos(theta)**2, a, 'x', label = 'Data')
	plt.errorbar(np.cos(theta)**2, a, xerr=stds(cos2Theta), yerr=DeltaA, fmt='x', label = 'Data')
	# plt.plot(np.cos(theta), a, 'x', label = 'Data')
	plt.plot(cos2Theta_fit, lin(cos2Theta_fit, *params), label = 'Fit')
	plt.legend(loc = 'best')
	plt.xlabel('cos$^2(\Theta)$')
	plt.ylabel('$a$ in Angstrom')
	# plt.xlim(0.6,1)
	# plt.ylim(4.8e-10,5.8e-10)
	plt.tight_layout()
	plt.grid()
	plt.savefig('Plots/Metall_Fit.pdf')
	plt.close()

	####################################################################################################
	# Salt
	####################################################################################################
	print('\n#################### Analysis for salt ####################\n')

	gitter_moegl_Salz = ['Zinkblende', 'Steinsalz', 'Caesiumchlorid', 'Fluorit']
	Xi2_best_Salz = ['Zinkblende', 10, 'CuCl']

	ion_zink = [['Cu', 'Cl'], ['Cs', 'J'], ['Cu', 'J']]

	# arc-length formula: b = alpha * r * pi / 180
	theta_4_temp = radius_s * 180 / (np.pi * Radius_k)  # in degrees!
	theta_4_rueck = (radius_s_rueck * 180 / (np.pi * Radius_k))  # + 180 # in degrees!
	theta_4 = np.concatenate((theta_4_temp, theta_4_rueck), axis = 0)
	print('4*Theta in degrees: ', theta_4)
	theta = theta_4 * np.pi / (4 * 180)  # in radians
	theta = np.sort(theta)
	theta_sin = np.sin(theta)

	# error propagation due to the limited precision of the ruler
	radius_salz_temp = unp.uarray(radius_s, Fehler_radius)
	radius_salz_rueck = unp.uarray(radius_s_rueck, Fehler_radius)
	radius_salz = np.concatenate((radius_salz_temp, radius_salz_rueck), axis = 0)
	print('Salt radii with uncertainty: ', radius_salz)
	Radius_kamera = ufloat(Radius_k, 0.0)
	theta_4_unp = radius_salz * 180 / (np.pi * Radius_kamera)
	theta_unp = theta_4_unp * np.pi / (4 * 180)  # in radians
	theta_unp = np.sort(theta_unp)

	print('Theta with uncertainty: ', theta_unp)
	
	netzebenenabstand_salz = bragg(lambda_1, theta)

	for gitter in gitter_moegl_Salz:
		# reflexe = []
		# m = []
		# if gitter == 'Zinkblende':
		# 	for ion in ion_zink:
		# 		temp_Salz = Strukturamplitude_Salz(formfaktor(, lambda_1,ion[0]), formfaktor(ion[1]), 'Zinkblende') # how to handle theta????
		# 		reflexe = temp_Salz[0]
		# 		m = temp_Salz[1]
		# 		verhaeltnisse = findStructure(m, netzebenenabstand_salz)
		# 		verhaeltnis_m = verhaeltnisse[0]
		# 		verhaeltnis_d = verhaeltnisse[1]

		# 		print('Ratios for the m values: ', verhaeltnis_m)
		# 		print('Ratios for the d values: ', verhaeltnis_d)
		# 		print('Deviation Xi^2 for the ' + gitter + ' structure with ' + ion[0] + ion[1] + ': ', abweichung(verhaeltnis_m, verhaeltnis_d))

		# 		if abweichung(verhaeltnis_m, verhaeltnis_d) < Xi2_best_Salz[1]:
		# 			Xi2_best_Salz = [gitter, abweichung(verhaeltnis_m, verhaeltnis_d), ion[0] + ion[1]]
		# elif gitter == 'Steinsalz':
		# 	temp_Salz = Strukturamplitude_Salz(ion[0], ion[1], 'Zinkblende')
		# 	reflexe = temp_Salz[0]
		# 	m = temp_Salz[1]
		# 	verhaeltnisse = findStructure(m, netzebenenabstand_salz)
		# 	verhaeltnis_m = verhaeltnisse[0]
		# 	verhaeltnis_d = verhaeltnisse[1]

		# 	print('Ratios for the m values: ', verhaeltnis_m)
		# 	print('Ratios for the d values: ', verhaeltnis_d)
		# 	print('Deviation Xi^2 for the ' + gitter + ' structure with ' + ion[0] + ion[1] + ': ', abweichung(verhaeltnis_m, verhaeltnis_d))

		# 	if abweichung(verhaeltnis_m, verhaeltnis_d) < Xi2_best_Salz[1]:
		# 		Xi2_best_Salz = [gitter, abweichung(verhaeltnis_m, verhaeltnis_d), ion[0] + ion[1]]

		infos = Strukturamplitude_Salz(f1 = 1, f2 = 2, gitter = gitter)  # placeholder form factors f1, f2
		reflexe = infos[0]
		m = infos[1]
	
		verhaeltnisse = findStructure(m, netzebenenabstand_salz)
		verhaeltnis_m = verhaeltnisse[0]
		verhaeltnis_d = verhaeltnisse[1]
	
		Xi2 = abweichung(verhaeltnis_m, verhaeltnis_d)
		print('Ratios for the m values: ', verhaeltnis_m)
		print('Ratios for the d values: ', verhaeltnis_d)
		print('Deviation Xi^2 for the ' + gitter + ' structure: ', Xi2)

		if Xi2 < Xi2_best_Salz[1]:
			Xi2_best_Salz = [gitter, Xi2]

	print('Structure with the smallest deviation: ', Xi2_best_Salz[0])
	print('Deviation Xi^2: ', Xi2_best_Salz[1])
		
	m = Strukturamplitude_Salz(gitter = Xi2_best_Salz[0])[1]
	a = np.array(gitterkonstanteBragg(m, netzebenenabstand_salz))

	print('Lattice constants for the ' + Xi2_best_Salz[0] + ' structure: ', a)

	# linear fit of a against cos^2
	params, cov = curve_fit(lin,np.cos(theta)**2,a)
	# params, cov = curve_fit(lin,np.cos(theta),a)
	err = np.sqrt(np.diag(cov))

	a_extrp = ufloat(params[1], err[1])

	# systematic error: absorption of the X-rays

	DeltaA = (radius_rohr / (2 * Radius_k)) * (1 - Radius_k / Abstand_FP) * (np.cos(theta)**2 / theta) * a
	cos2Theta = unp.cos(theta_unp)**2

	a_mitFehler = unp.uarray(a, DeltaA)
	print('Lattice constants with absorption uncertainty: ', a_mitFehler)

	print('Extrapolated lattice constant: ', a_extrp)

	cos2Theta_fit = np.linspace(0, 1)

	####### a against cos^2 plot from here on ########
	plt.errorbar(np.cos(theta)**2, a, xerr=stds(cos2Theta), yerr=DeltaA, fmt='x', label = 'Data')
	plt.plot(cos2Theta_fit, lin(cos2Theta_fit, *params), label = 'Fit')
	plt.legend(loc = 'best')
	plt.xlabel(r'$\cos^2(\Theta)$')
	plt.ylabel(r'$a$ in Ångström')
	# plt.xlim(0.5,1)
	# plt.ylim(4.8e-10,5.8e-10)
	plt.tight_layout()
	plt.grid()
	plt.savefig('Plots/Salz_Fit.pdf')
	plt.close()
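
The Debye-Scherrer reduction above compresses into a few lines: the measured ring radius on the film gives the Bragg angle via the arc-length relation (the film sees 4*theta), Bragg's law gives the lattice-plane spacing, and a linear fit of a against cos^2(theta) extrapolates the absorption systematic away. A minimal, self-contained sketch of that pipeline; the wavelength, camera radius, ring radii, and reflex orders below are all placeholder values, not the experiment's:

import numpy as np
from scipy.optimize import curve_fit

lambda_1 = 1.5417e-10   # Cu K-alpha wavelength in m (assumed)
Radius_k = 0.0573       # camera radius in m (placeholder)
radius_m = np.array([0.05, 0.08, 0.10, 0.12])  # ring radii on the film in m (made up)

# arc length b = (4*theta) * R  =>  theta in radians
theta = radius_m / (4 * Radius_k)

# Bragg's law: lambda = 2 d sin(theta)  =>  d = lambda / (2 sin(theta))
d = lambda_1 / (2 * np.sin(theta))

# with m = h^2 + k^2 + l^2 the cubic lattice constant is a = d * sqrt(m)
m = np.array([3, 4, 8, 11])  # assumed reflex orders
a = d * np.sqrt(m)

# extrapolate a against cos^2(theta); the intercept is the corrected constant
lin = lambda x, s, b: s * x + b
params, cov = curve_fit(lin, np.cos(theta)**2, a)
print('a_extrapolated =', params[1], '+/-', np.sqrt(np.diag(cov))[1])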
Example #39
0
    def approximate_data(
            self,
            include_countries=('All', ),
            exclude_countries=('All', ),
            include_province=('All', ),
            exclude_province=('All', ),
            quality=('good', 'ok'),
    ):
        if not len(self.df_cases) or not len(self.df_deaths):
            raise ValueError(
                'Data has not been loaded properly. Call load_data_jh() method first.'
            )

        data = []  # type: List[DataEstimate]

        for i, deaths_row in self.df_deaths.iterrows():
            try:
                # row = df.iloc[0]    # Get one row of data.
                row_province = deaths_row[0]
                row_country = deaths_row[1]

                # Filter by country.
                if 'All' in include_countries:
                    if row_country in exclude_countries:
                        continue
                elif row_country not in include_countries:
                    continue

                # Filter by region.
                if 'All' in include_province:
                    if row_province in exclude_province:
                        continue
                elif row_province not in include_province:
                    continue

                deaths_row = deaths_row.iloc[
                    4:]  # Cut meta data at the beginning.

                row_days = deaths_row.index.to_series(
                )  # Split dates info a separate series.
                row_days = row_days.apply(
                    jh_date_to_dofy)  # Convert dates into days of the year.

                # Get real cases in the same format.
                if row_province:
                    cases = self.df_cases.loc[
                        (self.df_cases['Province/State'] == row_province)
                        & (self.df_cases['Country/Region'] == row_country
                           )  # Match the same country and province.
                    ].iloc[0].iloc[
                        4:]  # iloc[0] takes the first matching row; iloc[4:] cuts the unused meta data from the beginning.
                else:
                    cases = self.df_cases.loc[
                        self.df_cases['Country/Region'] ==
                        row_country  # Match the same country.
                    ].iloc[0].iloc[
                        4:]  # iloc[0] takes the first matching row; iloc[4:] cuts the unused meta data from the beginning.

                # If we have fewer than 20 deaths, too little recent death data,
                # or a reported death rate above 10%, we cannot really estimate anything.
                if (deaths_row.tail(1)[0] < 20 or deaths_row.tail(7)[4] < 1
                        or (deaths_row.sum() / cases.sum() * 100) > 10):
                    continue

                population = 500000
                if row_country == 'US' and row_province in self.states_population[
                        'state'].tolist():
                    population = int(self.states_population.loc[
                        self.states_population['state'] == row_province]
                                     ['population'])

                # Find the model fit.
                fit = curve_fit(
                    self.logistic_model,
                    list(
                        row_days.iloc[deaths_row.to_numpy().nonzero()[0][0]:]),
                    list(deaths_row.iloc[deaths_row.to_numpy().nonzero(
                    )[0][0]:]),
                    # p0=[2, 10, 200000],
                    bounds=(
                        (1, 1, 1e4),
                        (200, 1000, 3e7),
                    ),
                    check_finite=True,
                    maxfev=population,
                )
                fit_a = fit[0][0]
                error_a = np.sqrt(fit[1][0][0])
                fit_peak_days = fit[0][1]
                error_peak_days = np.sqrt(fit[1][1][1])
                fit_c = fit[0][2]
                error_cases = np.sqrt(fit[1][2][2])

                # Make an estimated row with day # as values and dates as indexes.
                cases_approximation = pd.Series(row_days, index=deaths_row.index)\
                    .apply(
                        lambda x: int(self.logistic_model((x+self.avg_days_to_death), fit_a, fit_peak_days, fit_c)/self.mortality_rate*100)
                    )

                # Just a quick estimate.
                tail_cases_doubling_2d = float(
                    deaths_row.tail(1)) / self.mortality_rate * 100 * 2**(
                        self.avg_days_to_death / 2)
                tail_cases_doubling_4d = float(
                    deaths_row.tail(1)) / self.mortality_rate * 100 * 2**(
                        self.avg_days_to_death / 4)
                tail_cases_doubling_6d = float(
                    deaths_row.tail(1)) / self.mortality_rate * 100 * 2**(
                        self.avg_days_to_death / 6)

                # Estimated cases as of today.
                tail_cases_approximation = int(cases_approximation.tail(1))

                # If the estimation shows that there are more cases than an estimation with 2-day doubling rate, it's likely bad.
                if error_a > 3 or error_peak_days > 150 or error_cases > population * 0.01 or tail_cases_approximation > tail_cases_doubling_2d:
                    estimate = 'bad'
                    # Skip cases whose quality was not requested.
                    if 'bad' not in quality:
                        continue
                elif error_a > 1 or error_peak_days > 50 or error_cases > population * 0.001:
                    # Skip cases whose quality was not requested.
                    if 'ok' not in quality:
                        continue
                    estimate = 'ok'
                else:
                    # Skip cases whose quality was not requested.
                    if 'good' not in quality:
                        continue
                    estimate = 'good'

                data.append(
                    DataEstimate(
                        id=i,
                        province=row_province,
                        country=row_country,
                        cases_approximation=cases_approximation,
                        cases=cases,
                        estimate=estimate,
                        error_cases=error_cases,
                        error_peak=error_peak_days,
                        peak_day=fit_peak_days,
                    ))

            except IndexError:
                # Some states don't have any data yet, so we just skip them.
                continue

        # Sort data by fitment.
        data = sorted(data, key=lambda x: (x.estimate_order, x.error_cases))

        return data
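
The core of the estimator above is a bounded logistic fit whose parameter errors come off the covariance diagonal. A minimal sketch on synthetic data; the exact functional form of logistic_model is an assumption inferred from the parameter bounds, not taken from the class itself:

import numpy as np
from scipy.optimize import curve_fit

def logistic_model(day, speed, peak_day, total):
    # generic logistic curve; assumed to match the shape fitted above
    return total / (1 + np.exp(-(day - peak_day) / speed))

days = np.arange(40, 90)
deaths = logistic_model(days, 6.0, 75.0, 40000.0) + np.random.normal(0, 50, days.size)

fit = curve_fit(logistic_model, days, deaths,
                p0=[5, 60, 1e5],  # starting point inside the bounds
                bounds=((1, 1, 1e4), (200, 1000, 3e7)),
                maxfev=10000)
errors = np.sqrt(np.diag(fit[1]))  # 1-sigma errors from the covariance diagonal
print(fit[0], errors)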
Example #40
0
    def bias_corrector(self,
                       model_func,
                       b,
                       rice_sig,
                       guess,
                       bound,
                       res_func=None,
                       weight=None,
                       num_sigmaes_avrg=0,
                       N_avrg=1,
                       sigdiff_break=.02,
                       N_break=100):
        """
    Algorithm Implementation
    Run Bias Correction
    model_func: underlying model function
    b: array of b-factors (b=gamma^2*g^2*delta^2*(Delta-delta/3)) [s/mm^2]
    rice_sig: signal to be corrected (Rician distributed)
    guess: initial guess for the fit funtion (must be set; not None)
    bound: bounds for the fit
    weight: use weighting of datapoints (rician)
    num_sigmaes_avrg: "number of freedom" for sigmaes calculation
    N_avrg: factor for actual sigma if signal is averaged
    """
        if res_func is None:

            def res_func(SNR):
                return self.alpha_func(SNR, N=N_avrg)

        rice_sig_corr = np.zeros_like(rice_sig)
        #set dimensions and number of fitting parameters
        num_dp = rice_sig.shape[0]
        num_fp = len(guess)  #number of fitting parameters
        #effective number of datapoints for sigmaes calculation
        num_siges = num_dp - num_sigmaes_avrg
        #list for run parameters
        para_run_arr = np.zeros((N_break, len(guess) + 1))
        #saving fitting errors (keep as list for the moment)
        para_run_err = []

        #STEP 1
        #fit exp and obtain ŝ_0 (fit_sig) and RMSE
        res = so.curve_fit(model_func,
                           b,
                           rice_sig,
                           guess,
                           bounds=bound,
                           method=method_grad)
        if res is False:
            if self.debug:
                print('Problem with Fit!')
            return None
        fit_sig = model_func(b, *res[0])  # FIX parameter input
        RMSE = np.sqrt(np.sum((fit_sig - rice_sig)**2) / (num_dp - num_fp))
        #STEP 2
        #get RMSE
        sigma_corr_fac_RMSE = np.sqrt(N_avrg)
        sigma_es = RMSE * sigma_corr_fac_RMSE
        #save parameters of first fit and estimation
        para_run_arr[0, :-1] = res[0]
        para_run_arr[0, -1] = sigma_es
        para_run_err.append(res[1])
        for i in np.arange(1, N_break):
            #STEP 3 + 4
            #Estimate Rician bias and calculate corrected signal
            rice_sig_corr = rice_sig - (
                self.rice_mean_high(fit_sig, sigma_es) - fit_sig)
            #STEP 5
            res = so.curve_fit(model_func,
                               b,
                               rice_sig_corr,
                               guess,
                               bounds=bound,
                               method=method_grad)
            if res is False:
                if self.debug:
                    print('Problem with Fit!')
                return None
            fit_sig = model_func(b, *res[0])
            #step 6+7
            #get new alpha and estimate sigma
            SNR_es = fit_sig / sigma_es
            alpha = res_func(SNR_es)
            sigma_es_bf = np.copy(sigma_es)
            sigma_es = np.sum(np.abs(fit_sig - rice_sig) / alpha) / num_siges
            para_run_arr[i, :-1] = res[0]
            para_run_arr[i, -1] = sigma_es
            para_run_err.append(res[1])
            #step 8 stopping crit
            if np.abs(sigma_es - sigma_es_bf) / sigma_es < sigdiff_break:
                break
        return [para_run_arr[:i + 1], para_run_err, i]
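
The loop above alternates fitting and noise estimation; the key step is subtracting the predicted Rician bias from the measured signal before refitting. A stripped-down sketch of one such iteration, assuming the common high-SNR approximation E[M] = sqrt(A^2 + sigma^2) for rice_mean_high (the class's exact form is not shown here):

import numpy as np
import scipy.optimize as so

def rice_mean_high(s, sigma):
    # high-SNR approximation of the Rician mean (assumption, not the class's exact form)
    return np.sqrt(s**2 + sigma**2)

def model_func(b, s0, adc):
    return s0 * np.exp(-b * adc)

b = np.linspace(0, 3000, 30)              # b-values in s/mm^2
true = model_func(b, 1.0, 1e-3)
sigma_true = 0.05
rice_sig = np.abs(true + np.random.normal(0, sigma_true, b.size)
                  + 1j * np.random.normal(0, sigma_true, b.size))  # Rician magnitude data

res = so.curve_fit(model_func, b, rice_sig, p0=[1.0, 1e-3])
fit_sig = model_func(b, *res[0])
sigma_es = np.sqrt(np.sum((fit_sig - rice_sig)**2) / (b.size - 2))  # RMSE

# one bias-correction step: remove the estimated Rician offset, then refit
rice_sig_corr = rice_sig - (rice_mean_high(fit_sig, sigma_es) - fit_sig)
res2 = so.curve_fit(model_func, b, rice_sig_corr, p0=res[0])
print(res[0], '->', res2[0])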
        def get_decay_feats(peak_xt, plot_me=False):
            '''
            Using the locations for each peak, extracts features for the model. It calculates when the peak returns to
            baseline and uses that point together with the peak to compute an exponential fit, which yields tau and the
            AUC. It also returns the time domain for each peak and the percent of peak height used to decide when the
            trace has returned to baseline.

            :param peak_xt: Array of current values surrounding the peak in question.
            :param plot_me: When True, plots the exponential fit of each peak down to the calculated baseline.
            :return:
            tau: The negative inverse of the damping coefficient of the exponential fit (-1/b)
            r2_decay: The r2 value for the exponential fit
            auc: Area under the curve of the fitted exponential equation. NOT the AUC for the whole peak,
            just after the peak.
            perc: The percent peak height used to find the baseline.
            '''

            # Sets starting values for determining a return to baseline
            val = 1000
            perc = 0.001

            # Finds the return-to-baseline index for the peak. If the initial range is unsuccessful, it expands the
            # search window and doubles the allowed percent-of-peak threshold, repeating until a minimum is found.
            y_mins = []
            while len(y_mins) == 0:
                search_area = list(trace_s[peak_xt:peak_xt + val])
                # print(search_area)
                y_mins = [(abs(x), search_area.index(x)) for x in search_area
                          if x <= peak_y[1] * perc]
                # print(y_mins)
                perc = 2 * perc
                val = val + int(np.round(val * 1.1))
            perc = (perc / 2)
            y_min = y_mins[0]

            def exp_func(x, a, b):
                '''
                Exponential function for the fit.
                :param x: Input variable
                :param a: Amplitude coefficient (kept free for flexibility, not interpreted directly)
                :param b: Damping coefficient
                :return: Function output.
                '''
                return a * np.exp(b * x)

            # print(y_min)

            # Grabs the appropriate portion of the trace for the exponential fit for a particular peak.
            ydata = search_area[:y_min[1]]
            xdata = [ydata.index(x) for x in ydata]

            # Performs the exponential fit.
            initial_guess = [-0.1, -0.1]
            popt, pcov = curve_fit(exp_func, xdata, ydata, initial_guess)

            # Calculates tau
            tau = -1 / popt[1]

            # Calculates r2 for exponential fit
            xFit = np.arange(0, len(ydata), 1)
            residuals = ydata - exp_func(xFit, *popt)
            ss_res = np.sum(residuals**2)
            ss_tot = np.sum((ydata - np.mean(ydata))**2)
            r2_decay = 1 - (ss_res / ss_tot)

            # Fitted model for getting AUC
            def func(x):
                return popt[0] * np.exp(popt[1] * x)

            # Calculates AUC
            auc = integrate.quad(func, 0, len(ydata))[0]

            # Plots the fit and decay if plot_me is true.
            if plot_me == True:
                plt.plot(xdata, ydata, 'b-', label='Peak Data')
                plt.plot(xFit,
                         exp_func(xFit, *popt),
                         'g--',
                         label='Fitted Curve')
                plt.title(peak_xt)
                plt.legend()
                plt.show()

            return tau, r2_decay, auc, perc
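
Condensed, the feature extraction above is: fit a*exp(b*x) to the post-peak trace, read off tau = -1/b, score the fit with R^2, and integrate the fitted curve for the AUC. A minimal sketch with synthetic decay data (all values made up):

import numpy as np
from scipy import integrate
from scipy.optimize import curve_fit

def exp_func(x, a, b):
    return a * np.exp(b * x)

xdata = np.arange(200)
ydata = exp_func(xdata, -0.5, -0.02) + np.random.normal(0, 0.01, 200)

popt, pcov = curve_fit(exp_func, xdata, ydata, p0=[-0.1, -0.1], maxfev=10000)
tau = -1 / popt[1]  # decay time constant

residuals = ydata - exp_func(xdata, *popt)
r2_decay = 1 - np.sum(residuals**2) / np.sum((ydata - np.mean(ydata))**2)

auc = integrate.quad(lambda x: exp_func(x, *popt), 0, len(ydata))[0]
print(tau, r2_decay, auc)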
Example #42
0
def fit2gauss(lam,
              y,
              yerr,
              min_tot=100.0,
              chi_thr=10.0,
              base=0.0,
              fit_indy=False,
              verbose=False):

    # min_tot = minimum intensity to try
    # chi_thr = Chi^2 threshold
    # base = base level subtracted when computing moments
    # fit_indy = option to truncate data array pre/post 0th moment calculation

    # ==== compute moments

    d = dict()

    yt = (y - base)  #> 0.0 #    a truncated version for moments

    if fit_indy == True:
        m0 = np.sum(yt)  #> 1.0 #    prevent problems with division
        m0 = np.maximum(m0, 1.0)
        yt[yt < 0.0] = 0.0
    else:
        yt[yt < 0.0] = 0.0  # Dana's does this first.
        m0 = np.sum(yt)  #> 1.0 #    prevent problems with division
        m0 = np.maximum(m0, 1.0)

    m1 = np.sum(yt * lam) / m0
    m2 = np.sum(yt * (lam - m1)**2) / m0
    m3 = np.sum(yt * (lam - m1)**3) / m0

    moms = [m0, m1, m2]  #  pack for return

    # if first moment is too small (nothing to fit)
    if (m0 < min_tot):
        #print('m0 = ',m0)
        chi1g = -1.0
        a1g = [1.0, 1402.77, 1.0]
        a2g = [1.0, 1402.77, 1.0, 1.0, 1402.77, 1.0]
        d['moms'] = moms
        d['chi1g'] = chi1g
        d['chi2g'] = 100000
        d['a2g'] = a2g
        d['a1g'] = a1g

        y1g = single_gauss_func_noder(lam, *a1g)
        pars_1 = a2g[0:3]
        pars_2 = a2g[3:6]
        y2a = single_gauss_func_noder(lam, *pars_1)
        y2b = single_gauss_func_noder(lam, *pars_2)
        d['y1g'] = y1g
        d['y2a'] = y2a
        d['y2b'] = y2b
        if verbose == True: print('Nothing to fit.. ejecting!')
        return d

    # ===== do 1-Gaussian fit
    # estimate parameters for 1-Gaussian fit
    sd = np.sqrt(m2)
    dx = lam[1] - lam[0]
    a0 = [dx * m0 / (np.sqrt(2 * np.pi) * sd), m1,
          sd]  #  estimate of 1-Gaussian parameters

    # perform fit
    a1g, a1cov = curve_fit(single_gauss_func_noder,
                           lam,
                           y,
                           p0=a0,
                           maxfev=110000)  #, bounds=(0, np.inf))
    y1g = single_gauss_func_noder(lam, *a1g)

    #calculate chi_square
    y_modelone = single_gauss_func_noder(lam, *a1g)
    X2one = np.sum(((y_modelone - y) / yerr)**2)
    chi1g = X2one / (len(y) - 3)  # reduced chi^2

    # ==== do double-Gaussian fit
    # estimate parameters
    a0_2 = est_params([m0, m1, m2, m3], dx=dx)
    if verbose == True: print('est params = ', a0_2)

    # ==========================================================================
    # new routine to find peaks for initial parameters (not really ever used) -------------------------------
    #spec_sm = savgol_filter(y, 3, 1) #smooth to make local peak finding more accurate
    peaks, _ = find_peaks(y)

    pos_peaks = lam[peaks]
    spec_peaks = y[peaks]
    iis = np.where(spec_peaks > 0.2 * np.max(y))
    iis = iis[0]

    if (len(iis) > 2) and (verbose == True):
        print('!!!! - more than two peaks found')  # as a precaution

    if len(iis) < 2:  # redo filtering to see if we can get two peaks; if not, we'll call it a single Gaussian.
        if verbose == True: print('single peak found')
        spec_sm = savgol_filter(y, 3, 1)  #15->3?
        peaks, _ = find_peaks(spec_sm)
        pos_peaks = lam[peaks]
        spec_peaks = spec_sm[peaks]
        iis = np.where(spec_peaks > 0.05 * np.max(spec_sm))
        iis = iis[0]

        if len(iis) == 1:  # two peaks not found via find_peaks(); create an artificial peak for the fit process.
            if verbose == True: print('only one peak still')
            spec_val = 0.5 * np.max(spec_sm)
            spec_peaks = np.append(spec_peaks, spec_val)
            spec_pos = pos_peaks[iis] + 0.25
            pos_peaks = np.append(pos_peaks, spec_pos)
            iis = np.append(iis, iis[-1] + 1)  # add one more index for fitting purposes (need two).

    amp_peaks = spec_peaks[iis]  # amplitude and position of peaks
    pos_peaks = pos_peaks[iis]
    # update existing estimate of the fit parameters
    #a0_2[0],a0_2[1],a0_2[3],a0_2[4] = amp_peaks[0],pos_peaks[0],amp_peaks[1],pos_peaks[1]
    # ------------------------------------------------------------------------------------------
    # ==========================================================================

    #if verbose==True: print('new init params = ', a0_2)
    upper_bound = [np.inf, 1404, np.inf, np.inf, 1404, np.inf]
    lower_bound = [500, 1402, 0, 500, 0, 0]
    # NOTE: the bounds above are defined but not passed to curve_fit below.
    a2g, a2cov = curve_fit(double_gauss_func_noder,
                           lam,
                           y,
                           p0=a0_2,
                           maxfev=110000,
                           absolute_sigma=True)

    # individual gaussians of double fit
    pars_1 = a2g[0:3]
    pars_2 = a2g[3:6]
    y2a = single_gauss_func_noder(lam, *pars_1)
    y2b = single_gauss_func_noder(lam, *pars_2)

    # calculate chi^2
    #y_modeltwo = double_gauss_func_noder(lam_s, *a2g)
    y2g = y2a + y2b
    y_modeltwo = y2g
    X2two = np.sum(((y_modeltwo - y) / yerr)**2)
    chi2g = X2two / (len(y) - 6)  # reduced chi^2

    if verbose == True:
        print('a2g =', a2g)
        print('a1g[0] =', a1g[0])
        print('chi2g = ', chi2g)

    # method for selecting the single Gaussian over the double Gaussian:
    # if the double fit is WORSE than the single-Gaussian fit, or if the amplitude of the second Gaussian is negligible

    small_amp = np.minimum(a2g[0], a2g[3])
    lrg_amp = np.maximum(a2g[0], a2g[3])
    lrg_vel = np.maximum(np.abs(a2g[1]), np.abs(a2g[4]))

    #if(chi1g<chi2g) or (small_amp<100) or (lrg_amp<100) or (chi1g<chi_thr) :
    #if (small_amp<0.1*lrg_amp) or (lrg_amp<0.1*small_amp) or (chi1g<chi_thr) :
    if (small_amp < 100) or (lrg_amp < 100) or (chi1g < chi_thr):
        a2g = np.concatenate((a1g, a1g))  #  return copies of single fit params
        a2g[3] = 0.0  #  but zero amplitude
        y2a = y1g
        y2b = 0.0 * y1g
        chi2g = -1.0  #  flag that no fit was attempted

    if verbose == True:
        print('a2g = ', a2g)
        print('chi1g = ', chi1g)
        print('chi2g = ', chi2g)

    d['moms'] = moms
    d['a1g'] = a1g
    d['y1g'] = y1g
    d['a2g'] = a2g
    d['y2a'] = y2a
    d['y2b'] = y2b
    d['chi2g'] = chi2g
    d['chi1g'] = chi1g

    return d
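
The selection logic above boils down to comparing reduced chi^2 values, with dof = N minus the number of fit parameters (3 for one Gaussian, 6 for two). A compact sketch of the one- versus two-Gaussian decision; gauss1 and gauss2 are hypothetical stand-ins for single_gauss_func_noder and double_gauss_func_noder:

import numpy as np
from scipy.optimize import curve_fit

def gauss1(x, a, mu, sd):
    return a * np.exp(-(x - mu)**2 / (2 * sd**2))

def gauss2(x, a1, mu1, sd1, a2, mu2, sd2):
    return gauss1(x, a1, mu1, sd1) + gauss1(x, a2, mu2, sd2)

lam = np.linspace(1400, 1406, 200)
yerr = 0.05 * np.ones_like(lam)
y = gauss2(lam, 1.0, 1402.5, 0.3, 0.6, 1403.5, 0.4) + np.random.normal(0, 0.05, lam.size)

a1g, _ = curve_fit(gauss1, lam, y, p0=[1, 1402.8, 0.5])
a2g, _ = curve_fit(gauss2, lam, y, p0=[1, 1402.5, 0.5, 0.5, 1403.5, 0.5], maxfev=110000)

chi1g = np.sum(((gauss1(lam, *a1g) - y) / yerr)**2) / (len(y) - 3)
chi2g = np.sum(((gauss2(lam, *a2g) - y) / yerr)**2) / (len(y) - 6)
print('reduced chi^2: 1G =', chi1g, ' 2G =', chi2g)  # prefer 2G only if clearly smaller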
Example #43
0
# plot the projection of the rotated cropped image
f1_ax5 = fig.add_subplot(gs[2, 0])

print('--> fitting the projections')
min_fit = 0
# define x array: min_fit should be kept as 0 since this is only used to get an array length
xd = np.arange(min_fit, min_fit + len(proj[0][min_fit:]))
edge = np.zeros(evt_data)

# plot the data
plt.plot(xd, proj[show][min_fit:])

# execute the fit on the right part of the data
for i in range(evt_data):
    popt, pcov = curve_fit(
        error_function, xd,
        proj[i][min_fit:])  #, bounds=([0, 0, 0, -10000], [5, 10, 1000, 0]))
    print(popt)
    edge[i] = popt[2]
    print(i, edge[i])
    # plot the fit and the center extracted from the fit at the 'show' position
    if i == show:
        plt.plot(xd, error_function(xd, popt[0], popt[1], popt[2], popt[3]))
        plt.axvline(x=popt[2])

# plot the histogram of the edge position
print('--> plotting the histogram of the edge position')

# normalise so the Gaussian fits properly, with the area under the Gaussian = 1
f1_ax6 = fig.add_subplot(gs[2, 1])
n, bins, patches = plt.hist(edge,
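
The snippet above ends mid-call, but the pattern is clear: fit an error-function step to each projection and take the fitted centre popt[2] as the edge position. Since error_function itself isn't shown, here is a minimal sketch with an assumed erf-based step model and synthetic data:

import numpy as np
from scipy.optimize import curve_fit
from scipy.special import erf

def error_function(x, amp, sigma, center, offset):
    # assumed erf step: smooth transition of width sigma at `center`
    return amp * erf((x - center) / (sigma * np.sqrt(2))) + offset

xd = np.arange(100)
profile = error_function(xd, 2.0, 3.0, 42.0, 1.0) + np.random.normal(0, 0.05, xd.size)

popt, pcov = curve_fit(error_function, xd, profile, p0=[1, 1, 50, 0])
print('edge position =', popt[2])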
Example #44
0
def residuals(composition, color='grey', norm=7.0, overplot=False):
    dbase = ['Bacteria', 'Bacteria_draft', 'ResFinder', 'HumanMicrobiome',
             'Plasmid', 'Virus', 'Unmapped',
             'Plant', 'Invertebrates', 'Mitochondrion', 'Vertebrates_mammals',
             'Vertebrates_other', 'Human']

    for f in [0, 2]:
        fig = plt.figure(59+f)
        if not overplot:
            plt.clf()
        sns.set(style="darkgrid")
        sns.set(font_scale=1.05)
        # Vector components are [x_pos, y_pos, width, height]
        axt = [0.12, 0.84, 0.84, 0.13]
        axm = [0.12, 0.10, 0.84, 0.72]

        axis = (
            fig.add_axes(axt, frame_on=True),
            fig.add_axes(axm, frame_on=True))

        axis[0].set_yticks([])
        axis[0].set_ylim(-.5, .5)
        axis[0].set_xlim(5.6, 8.3)
        axis[1].set_xlim(5.6, 8.3)
        axis[1].set_xlabel('Log(Total reads)')
        axis[1].set_ylabel('Log(Reads)')

        dat_x = np.log10(composition.sum())  # loc['notPhiX'])
        if f == 0:
            dat_y = np.log10(composition.loc['Bacteria_agg'])  # +composition.loc['Bacteria_draft'])
        else:
            dat_y = np.log10(composition.loc[dbase[f]])
        mean_x = np.mean(dat_x)
        mean_y = np.mean(dat_y)

        xvar = np.array([min(dat_x), max(dat_x)])

        popt, pcov = so.curve_fit(gfunc.line, dat_x, dat_y)
        gplots.plot_fits(gfunc.line, axis[1], xvar, popt, pcov, color=color)
        # Freedman-Diaconis bin width (with an ad-hoc -0.03 tweak)
        bin_width = 2.*ss.iqr(dat_x)/pow(len(dat_x), 1./3.) - 0.03
        nbins = int(np.ceil((max(dat_x)-min(dat_x))/bin_width))

        qua1 = []
        qua3 = []
        mi = []
        ma = []
        for i in range(nbins):
            subset = [idx for idx, j in enumerate(dat_x) if j > (
                min(dat_x)+i*bin_width) and j <= (min(dat_x)+(i+1)*bin_width)]

            mdn = np.median(dat_y[subset]-dat_x[subset]+mean_x-mean_y)

            if len(subset) > 1:
                qua1.append(np.median([k for k in dat_y[subset] -
                                       dat_x[subset]+mean_x-mean_y if k < mdn]))
                qua3.append(np.median([k for k in dat_y[subset] -
                                       dat_x[subset]+mean_x-mean_y if k > mdn]))
            else:
                qua1.append(0)
                qua3.append(0)
            mi.append(-np.min(dat_y[subset]-dat_x[subset]+mean_x-mean_y))
            ma.append(np.max(dat_y[subset]-dat_x[subset]+mean_x-mean_y))

        bins = np.linspace(min(dat_x), max(dat_x), nbins)
        xvar = np.linspace(min(dat_x), max(dat_x), 500)
        spl1 = ip.interp1d(bins, qua1, kind='cubic')
        spl2 = ip.interp1d(bins, qua3, kind='cubic')

        axis[1].scatter(dat_x, dat_y, alpha=0.7, s=15, color=color)
        axis[1].plot(xvar, (xvar-mean_x)+mean_y, '--', color='black', lw=2, alpha=0.5)

        axis[0].errorbar(bins, np.zeros(len(bins)), yerr=[mi, ma], alpha=0.7,
                         capsize=4., capthick=2.0, markersize=6., markeredgewidth=0.5,
                         fmt='')

        axis[0].plot(xvar, spl1(xvar), color=color)
        axis[0].plot(xvar, spl2(xvar), color=color)
        axis[0].fill_between(xvar, spl1(xvar), spl2(xvar), color=color, alpha=0.4)
        axis[0].plot([min(xvar), max(xvar)], [0, 0], '--', color='black')

        for i, name in enumerate(composition.columns):
            axis[1].annotate(name, (dat_x[i], dat_y[i]))

        # axis[1].annotate()
        if not overplot:
            axis[1].annotate(dbase[f], (6.1, axis[1].get_ylim()[1]
                                        - .1*(axis[1].get_ylim()[1]
                                              - axis[1].get_ylim()[0])))
Example #45
0
	# calculate Creutz ratio
	chi[r], d_chi[r] = jackknife_creutz(w00, w01, w11, 20)
	print("chi(%d,%d) = %.12f (%.12f)" % (r+1, r+1, chi[r], d_chi[r]))
	
cursor.close()
con.close()

# best fit for string tension
print("\nFitting string tension and plotting...")

def sigma_fit(R, sqrt_sigma, B):
	return sqrt_sigma**2.0 + B / R**2.0
# 	return sqrt_sigma**2 + B * (1 / R**2 + 1 / (L - R)**2)
# 	return sqrt_sigma**2 + B * (L / (R * (L - R)))**2

popt, pcov = opt.curve_fit(sigma_fit, R[R_min-1:R_max], chi[R_min-1:R_max], [0.05, 1.0], sigma=d_chi[R_min-1:R_max])
sqrt_sigma = popt[0]
d_sqrt_sigma = np.sqrt(pcov[0][0])
B = popt[1]
d_B = np.sqrt(pcov[1][1])
print("\nchi(R,R) = sigma + B / R^2")
print("B = %.12f (%.12f)" % (B, d_B))
print("sigma = %.12f (%.12f)" % (sqrt_sigma**2, 2 * np.abs(sqrt_sigma) * d_sqrt_sigma))
print("sqrt(sigma) = %.12f (%.12f)" % (np.abs(sqrt_sigma), d_sqrt_sigma))

# calculate best fit curves
R_A = np.linspace(0.001, R_half, 1000)
_R2_A = np.zeros(len(R_A))
chi_A = np.zeros(len(R_A))
for r in range(0, len(R_A)):
	chi_A[r] = sigma_fit(R_A[r], sqrt_sigma, B)
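
Two details of the string-tension fit above are worth isolating: passing the jackknife errors as sigma weights the fit, and because the fit parameter is sqrt(sigma), its error propagates to sigma as d_sigma = 2*|sqrt_sigma|*d_sqrt_sigma. A minimal sketch with made-up data; absolute_sigma=True (not used above) treats the errors as absolute rather than relative weights:

import numpy as np
import scipy.optimize as opt

def sigma_fit(R, sqrt_sigma, B):
    return sqrt_sigma**2.0 + B / R**2.0

R = np.arange(1, 9, dtype=float)
chi = sigma_fit(R, 0.22, 0.9) + np.random.normal(0, 0.005, R.size)
d_chi = 0.005 * np.ones_like(R)

popt, pcov = opt.curve_fit(sigma_fit, R, chi, [0.05, 1.0],
                           sigma=d_chi, absolute_sigma=True)
sqrt_sigma, d_sqrt_sigma = popt[0], np.sqrt(pcov[0][0])
# error propagation: sigma = (sqrt_sigma)^2  =>  d_sigma = 2*|sqrt_sigma|*d_sqrt_sigma
print('sigma =', sqrt_sigma**2, '+/-', 2 * np.abs(sqrt_sigma) * d_sqrt_sigma)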
def fit_the_values(country_, y_values , total_days, graph, output):
    """
    We are going to fit the values

    """

    try:
        base_value = y_values[0]
        # some preparations
        number_of_y_values = len(y_values)
        global TOTAL_DAYS_IN_GRAPH
        TOTAL_DAYS_IN_GRAPH = total_days  # number of total days
        x_values = np.linspace(start=0, stop=number_of_y_values - 1, num=number_of_y_values)

        x_values_extra = np.linspace(
            start=0, stop=TOTAL_DAYS_IN_GRAPH - 1, num=TOTAL_DAYS_IN_GRAPH
        )


        popt_d, pcov_d = curve_fit(
            f=derivate,
            xdata=x_values,
            ydata=y_values,
            #p0=[0, 0, 0],
            p0 = [26660, 9, 0.03, base_value],  # IC BEDDEN MAART APRIL
            bounds=(-np.inf, np.inf),
            maxfev=10000,
        )
        residuals = y_values - derivate(x_values, *popt_d)
        ss_res = np.sum(residuals**2)
        ss_tot = np.sum((y_values - np.mean(y_values))**2)
        r_squared = round(  1 - (ss_res / ss_tot),4)
        l = (f"polynome fit: a=%5.3f, b=%5.3f, c=%5.3f, d=%5.3f / r2 = {r_squared}" % tuple(popt_d))

        a,b,c,d = popt_d
        deriv_0 = derivate(0, a,b,c,d)
        deriv_last = derivate(number_of_y_values,a,b,c,d)
        r_0_last_formula = (deriv_last/deriv_0)**(4/number_of_y_values)
        #r_0_last_cases = (y_values[number_of_y_values-1]/ y_values[0])**(4/number_of_y_values)
        r_total = sum(
            (derivate(i, a, b, c, d) / derivate(i - 1, a, b, c, d)) ** (4 / 1)
            for i in range(number_of_y_values - 5, number_of_y_values)
        )

        #r_avg_formula = r_total/(number_of_y_values-1)
        r_avg_formula = r_total/6
        r_cases_list = []


        if output == True:
            #st.write (f"{country_} : {x_values} / {y_values}/ base {base_value} /  a {a} / b {b} / c {c} / d {d} / r2 {r_squared}")
            st.write (f"{country_} : / base {base_value} /  a {a} / b {b} / c {c} / d {d} / r2 {r_squared}")

            #st.write (f"Number of Y values {number_of_y_values}")


            #st.write (f"Number of R-values {len(r_cases_list)}")
            st.write (f"R from average values from formula day by day {r_avg_formula} (purple)")

            #st.write (f"R from average values from cases day by day {r_cases_avg} (yellow)")

            st.write (f"R getal from formula from day 0 to day {number_of_y_values}: {r_0_last_formula} (red)")
            #st.write (f"R getal from cases from day 0 to day {number_of_y_values}: {r_0_last_cases} (orange)")


        if graph == True:
            with _lock:
                fig1x = plt.figure()
                plt.plot(
                        x_values_extra,
                        derivate(x_values_extra, *popt_d),
                        "g-",
                        label=l
                    )

                plt.plot(
                            x_values,
                            cases_from_r(x_values, r_0_last_formula,deriv_0),
                            "r-",
                            label=(f"cases_from_r_0_last_formula ({round(r_0_last_formula,2)})")
                        )
                # plt.plot(
                #             x_values,
                #             cases_from_r(x_values, r_0_last_cases,deriv_0),
                #             "orange",
                #             label=(f"cases_from_r_0_last_cases ({round(r_0_last_cases,2)})")
                #         )

                plt.plot(
                            x_values,
                            cases_from_r(x_values, r_avg_formula,deriv_0),
                            "purple",
                            label=(f"cases_from_r_avg_formula  ({round(r_avg_formula,2)})")
                        )
                # plt.plot(
                #             x_values,
                #             cases_from_r(x_values, r_cases_avg,deriv_0),
                #             "yellow",
                #             label=(f"cases_from_r_avg_cases ({round(r_cases_avg,2)})")
                #         )
                plt.scatter(x_values, y_values, s=20, color="#00b3b3", label="Data")

                plt.legend()
                plt.title(f"{country_} / curve_fit (scipy)")
                #plt.ylim(bottom=0)
                plt.xlabel(f"Days from {from_}")


                st.pyplot(fig1x)


    except RuntimeError as e:
        #str_e = str(e)
        #st.info(f"Could not find derivate fit :\n{str_e}")
        pass
    try:
        r_avg_formula  # raises NameError if the fit failed before it was assigned
    except NameError:
        r_avg_formula = None

    return r_avg_formula
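
A pattern this function shares with several examples here: curve_fit raises RuntimeError when maxfev is exhausted without convergence, so fits that may fail get wrapped in try/except and the caller receives a sentinel instead. A minimal sketch of that guard (function and variable names are illustrative):

import numpy as np
from scipy.optimize import curve_fit

def fit_or_none(f, x, y, p0):
    """Return fitted parameters, or None if the fit does not converge."""
    try:
        popt, _ = curve_fit(f, x, y, p0=p0, maxfev=10000)
        return popt
    except RuntimeError:
        return None

x = np.linspace(0, 10, 50)
y = 3.0 * x + 1.0 + np.random.normal(0, 0.1, x.size)
print(fit_or_none(lambda x, a, b: a * x + b, x, y, p0=[1.0, 0.0]))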
Example #47
0
#psi0=np.array([0+1j*0.0,1.0+1j*0.0,0.0+1j*0.0])


def constrainedPropagateHam(k, epsilon, U, n, S, m0):
    return lambda t, omega, delta: np.array(
        propagateRLHamiltonian2(
            t, k, omega, delta, epsilon, U, n, S, m0, rampOnt=rampOnt))


HofT = constrainedPropagateHam(k, epsilon, U, n, S, m0)
##print H(1.0,4.0,0.0,0.0)
#a=np.array(tRecoils)
#b=np.array(frac0)

popt, pcov = optimize.curve_fit(HofT,
                                tRecoils[:maxInd],
                                fractions.flatten(),
                                p0=(omegaGuess, deltaGuess))
print popt, np.sqrt(np.diag(pcov))
#popt=(0.6,0.08)
tForFit = np.linspace(np.min(tRecoils), np.max(tRecoils), 100)
pops_fitted = HofT(tForFit, *popt)
#sort=np.argsort(tRecoils)
#tSorted=tRecoils[sort]

pops_fitted = pops_fitted.reshape(tForFit.size, 2 * S + 1).transpose()

figure = plt.figure()
panel = figure.add_subplot(1, 1, 1)
panel.set_title(r'$\Omega$ = ' + str(np.round(popt[0], 2)) +
                r' $E_L/h$,$\delta$ = ' + str(np.round(popt[1], 3)) +
                r' $E_L/h$')  #, U= '+str(np.round(popt[2],3))+r'$E_L$')
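
Example #47 fits a model whose output is a whole array of state populations by flattening it (fractions.flatten()); curve_fit only requires that the model's return matches ydata's shape, not that ydata and xdata have the same length. A minimal sketch of the flattening trick with a toy two-component model (names and values are illustrative):

import numpy as np
from scipy import optimize

def two_pops(t, omega, delta):
    # toy model returning two populations per time point, flattened for curve_fit
    p_up = 0.5 * (1 + np.cos(omega * t + delta))
    return np.column_stack((p_up, 1 - p_up)).flatten()

t = np.linspace(0, 5, 40)
data = two_pops(t, 2.0, 0.3) + np.random.normal(0, 0.01, 2 * t.size)

popt, pcov = optimize.curve_fit(two_pops, t, data, p0=(1.8, 0.0))
print(popt, np.sqrt(np.diag(pcov)))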
Example #48
0
global NTs
NTs = ([BOrd, BOrd + ROrd])
pT = np.zeros(BOrd + ROrd)
#Have a run index after which temperature rises
tendCut = 3570
tstartCut = 170
#Find that index in the data arrays
place = np.where(runsTemp == tendCut)[0]
print(runsTemp)
#Keep only data before the cutoff index
pT[BOrd] = np.mean(tempsTemp[tstartCut:place[0]])
#Stack the runs and B data so that it can be passed as one variable to the fit function
RandB = np.vstack((runsTemp[tstartCut:place[0]], BsTemp[tstartCut:place[0]])).T
#Call the fit function. The lambda wrapper gives curve_fit a function whose
#parameter count is fixed by p0, since wrapT takes a variable-length *pT.
popt, pcov = curve_fit(lambda RandB, *pT: wrapT(RandB, *pT), RandB, tempsTemp[tstartCut:place[0]], p0=pT)
#if you want to see fit results for temperature, uncomment here
#    print(popt)
#    print(popt/np.sqrt(np.diag(pcov)))
    
#In order to get the true temperature, we need to subtract off the magnetic 
#field part. So define a parameter array where the run-dependent fit parts are 0
#and the magnetic field dependent parts are the results of the fit
pSub = np.zeros(len(pT))
pSub[:BOrd] = popt[:BOrd]

for h in range(0, len(fname)):

    Fdat1 = np.loadtxt(fname[h], delimiter="\t")
    F1 = np.loadtxt(f[h], delimiter="\t")
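
The lambda trick in Example #48 exists because curve_fit infers the number of parameters from the function signature; a *args model has no fixed arity, so p0 must establish it. A minimal sketch fitting a polynomial whose order is chosen at run time (names are illustrative):

import numpy as np
from scipy.optimize import curve_fit

def poly_model(x, *coeffs):
    # polynomial with a run-time-chosen number of coefficients
    return sum(c * x**k for k, c in enumerate(coeffs))

x = np.linspace(0, 5, 60)
y = 1.0 + 0.5 * x - 0.2 * x**2 + np.random.normal(0, 0.05, x.size)

order = 3  # number of coefficients, chosen at run time
p0 = np.ones(order)
popt, pcov = curve_fit(lambda x, *p: poly_model(x, *p), x, y, p0=p0)
print(popt)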
Example #49
0
        species_summary = [r]
        for m in methods:
            for l in losses:
                for b in constraints:
                    # Some text to show what we're up to

                    cons = constraint_labels[constraints.index(b)]
                    notice = f"Attempting {SP} {m}/{l} {cons}"
                    pad = 50 - len(notice)
                    if __name__ == "__main__":
                        print(f"{notice}{pad*' '}", end="")

                    try:  # we need to _try_ this so if it fails we can keep going
                        fittedParameters, pcov = curve_fit(combinedFunction,
                                                           comboX,
                                                           comboY,
                                                           bounds=b,
                                                           method=m,
                                                           loss=l)
                        run_name = f"{SP} {m}_{l} {cons}"  # Add a label

                        # take the fitted parameters and associate them correctly
                        forest_start, soil_start, B, phi_ab, k, v, phi_ba, phi_bs, phi_sa = (
                            fittedParameters)

                        # re-run the growth model to get explicit results
                        biomass_result = growth(
                            x,
                            forest_start,
                            soil_start,
                            B,
                            phi_ab,
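
The call above is truncated, but the pattern it uses is worth isolating: combinedFunction fits two datasets at once by concatenating their x and y arrays and dispatching internally. A minimal sketch of that simultaneous fit with two curves sharing one parameter; the model and values here are illustrative, not the example's:

import numpy as np
from scipy.optimize import curve_fit

def combined_model(comboX, k, a1, a2):
    # first half of comboX belongs to curve 1, second half to curve 2;
    # both share the rate k but have their own amplitudes
    n = len(comboX) // 2
    x1, x2 = comboX[:n], comboX[n:]
    return np.concatenate((a1 * np.exp(-k * x1), a2 * np.exp(-k * x2)))

x = np.linspace(0, 4, 30)
y1 = 2.0 * np.exp(-0.7 * x) + np.random.normal(0, 0.02, x.size)
y2 = 0.8 * np.exp(-0.7 * x) + np.random.normal(0, 0.02, x.size)

comboX, comboY = np.concatenate((x, x)), np.concatenate((y1, y2))
popt, pcov = curve_fit(combined_model, comboX, comboY, p0=[1, 1, 1])
print(popt)  # shared k, then the two amplitudes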
def k_effect_figures(fit=False):

    data_dirs = sorted(['data/'+pth for pth in
                        next(os.walk("data/"))[1]])

    df_all = []

    for dpath in data_dirs:

        print('Loading ', dpath)

        try:

            with open(dpath+'/namespace.p', 'rb') as pfile:
                nsp=pickle.load(pfile)

            with open(dpath+'/synsrv_prb_equal.p', 'rb') as pfile:
                synsrv_prbs=np.array(pickle.load(pfile))

            for synsrv in synsrv_prbs:
                concat = {**nsp, **synsrv}
                df_all.append(concat)

        except FileNotFoundError:
            
            print(dpath[-4:], "reports: No namespace or " +\
                              "synsrv_prb data. Skipping.")



    all_Npool = np.unique([df['Npool'] for df in df_all])
    all_k = np.unique([df['k'] for df in df_all])

    for npool in all_Npool:
        
        fig, ax = pl.subplots()
        fig.set_size_inches(5.2,3)

        for df in df_all:

            # if df['Npool']==npool and df['k'] in [7, 12, 17, 27]:
            if df['Npool']==npool and df['k'] in [10, 25, 100, 250, 1000]:

                label = "$k = "+str(df['k'])+"$"

                if fit:

                    N = int(0.6*len(df['dts']))
                    
                    prm, prm_cov = optimize.curve_fit(powerlaw_func_s,
                                                      df['dts'][N:],
                                                      df['synsrv_prb'][N:],
                                                      p0=[0.5, 0.5])

                    xs = np.arange(df['dts'][0],
                                   df['dts'][-1],
                                   1)
                    
                    bl, = ax.plot(xs, 
                                  powerlaw_func_s(xs,*prm),
                                  linestyle='-', alpha=0.55)

                    label = '$\gamma = %.4f$, s=%.3f \n' %(prm[0], prm[1]) + \
                             label + ', $N_{\mathrm{cut}} = %d$' %N 

                    ax.plot(df['dts'], df['synsrv_prb'], '.', #  'o',
                            # markeredgewidth=1,
                            # markerfacecolor='None',
                            color=bl.get_color(),
                            label=label)

                    ax_ll = 0.9*df['dts'][1]
                    
                else:
                    print(df['synsrv_prb'])
                    ax.plot(df['dts'], df['synsrv_prb'], '.', # 'o',
                            # markeredgewidth=1,
                            # markerfacecolor='None',
                            label=label)
                    ax_ll = 0.9*df['dts'][1]

        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.yaxis.set_ticks_position('left')
        ax.xaxis.set_ticks_position('bottom')

        ax.set_xlabel('simulation steps')
        ax.set_ylabel('survival probability')

        # ---------------------------------------------- 

        directory = 'figures/k_effect_equal_linear_tail-fit/' 

        if not os.path.exists(directory):
            os.makedirs(directory)

        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])

        ax.legend(frameon=False, prop={'size': 7}, loc='center left',
                  labelspacing=1.15, borderpad=1.25, bbox_to_anchor=(1, 0.5))


        fname = "k_effect_Npool%d_linear" %(npool)

        fig.savefig(directory+'/'+fname+'.png', dpi=300,
                    bbox_inches='tight')


        # ---------------------------------------------- 

        directory = 'figures/k_effect_equal_tail-fit/' 

        if not os.path.exists(directory):
            os.makedirs(directory)
        
        ax.set_xscale('log')
        ax.set_yscale('log')

        fname = "k_effect_Npool%d" %(npool)

        fig.savefig(directory+'/'+fname+'.png', dpi=300,
                    bbox_inches='tight')

        # ---------------------------------------------- 

        directory = 'figures/k_effect_equal_trimmed_tail-fit/' 

        if not os.path.exists(directory):
            os.makedirs(directory)
        
        ax.set_xlim(left=ax_ll)

        fname = "k_effect_Npool%d_trimmed" %(npool)

        fig.savefig(directory+'/'+fname+'.png', dpi=300,
                    bbox_inches='tight')



        pl.close(fig)
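
The tail fit above assumes a power-law survival curve; powerlaw_func_s isn't shown, so a plausible two-parameter form s*t^(-gamma) is assumed here. A minimal sketch of fitting only the last 40% of the samples, as the code does with N = int(0.6*len(...)):

import numpy as np
from scipy import optimize

def powerlaw_func_s(t, gamma, s):
    # assumed form of the survival law: prefactor s, exponent gamma
    return s * t**(-gamma)

dts = np.arange(1, 2001, dtype=float)
synsrv_prb = powerlaw_func_s(dts, 0.3, 0.8) * np.random.normal(1, 0.02, dts.size)

N = int(0.6 * len(dts))  # discard the early transient, keep the tail
prm, prm_cov = optimize.curve_fit(powerlaw_func_s, dts[N:], synsrv_prb[N:],
                                  p0=[0.5, 0.5])
print('gamma = %.4f, s = %.3f' % tuple(prm))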
Example #51
0
# because their std is also = 0, thus making the weight infinite
v1 = df1.v

mode = 0
if mode == 0:
    mask = (v1 < 3) & (v1 > -3) & (df1.f != 0)
elif mode == 1:
    mask = (v1 < 3) & (v1 > 0) & (df1.f != 0)
elif mode == -1:
    mask = (v1 < 0) & (v1 > -3) & (df1.f != 0)

v1 = v1[mask]
f1 = np.array(df1.f)[mask]
err1 = np.array(df1.err)[mask]

popt1, pcov1 = curve_fit(linear, v1, f1)#, sigma=err1, absolute_sigma=True)
ax.plot(v1, linear(v1, *popt1), color="red", ls="-", label="Best fit")

ax.plot(df1.v, linear(df1.v, slope, popt1[1]), color="blue", ls="--", label="Literature")
ax.fill_between(df1.v, linear(df1.v, slope - slope_err, popt1[1]), linear(df1.v, slope + slope_err, popt1[1]),
                color="blue", alpha=0.35)

ax.axvline(omega, color='orange', label="Lock-in freq.")
ax.axvline(-omega, color='orange')

ax.errorbar(df1.v, df1.f, yerr=df1.err, color="red", label="LabView Data", fmt='o', markeredgecolor="black",
            ecolor='black', capthick=2, capsize=2, elinewidth=1, markeredgewidth=0.5, ms=3)
if mode == 0:
    d = {"v" : v1,
         "f1" : f1,
         "f1_err": err1
Example #52
0
def main():
    """Main function."""

    # Create logger.
    log = logging.getLogger()

    # Load data
    simulate = False
    data = np.loadtxt(os.path.join('data', 'angleAndBeta_Bluejet_Redjet.txt'))
    data = data[data[:, 0].argsort()]
    date = data[:, 0]
    times = date - date[0]
    beta = data[:, 1]
    raw_values = beta
    values = beta - np.mean(beta)

    # Filter the data in various ways.
    limit_times = False
    remove_times = False
    remove_large_values = False
    if remove_large_values:
        std_ = np.std(values)
        print('STD:', std_)
        idx = np.logical_and(values < std_ * 2, values > -std_ * 2)
        times = times[idx]
        raw_values = raw_values[idx]
        values = values[idx]
        date = date[idx]
    if limit_times:
        # time_limit = [600, 2000]
        time_limit = [900, 1600]
        idx_min = np.argmax(times >= time_limit[0])
        idx_max = np.argmax(times >= time_limit[1])
        if idx_max == 0:
            idx_max = times.size
        times = times[idx_min:idx_max]
        values = values[idx_min:idx_max]
        raw_values = raw_values[idx_min:idx_max]
        date = date[idx_min:idx_max]
    if remove_times:
        delete_range = [900, 1300]
        idx_min = np.argmax(times >= delete_range[0])
        idx_max = np.argmax(times >= delete_range[1])
        if idx_max == 0:
            idx_max = times.size
        times = np.delete(times, range(idx_min, idx_max + 1))
        values = np.delete(values, range(idx_min, idx_max + 1))
        raw_values = np.delete(raw_values, range(idx_min, idx_max + 1))
        date = np.delete(date, range(idx_min, idx_max + 1))

    values -= np.mean(values)
    times -= times[0]

    # Simulate signal using time sampling from data set
    if simulate:
        sim_values = np.zeros_like(times)
        signals = [
            dict(amp=0.03, freq=1 / 13.0865, phase=0.0),
            dict(amp=0.04, freq=1 / 11.2384, phase=0.0),
            # dict(amp=0.1, freq=0.0769, phase=0.0),
        ]
        log.debug('- Adding signals:')
        for i, s in enumerate(signals):
            log.debug('  [%02i] amp=%f, freq=%f, phase=%f', i, s['amp'],
                      s['freq'], s['phase'])
            arg = (2 * math.pi * times * s['freq']) + (s['phase'] * math.pi)
            sim_values += s['amp'] * np.cos(arg)
    else:
        signals = None

    log.info('')
    min_frequency = 0
    # IMPORTANT: CLEAN is *very* sensitive to the position of the peak; with a
    # poor PSF the spectrum oversampling has to be very large in order not to
    # miss the peak position entirely.
    max_frequency = 0.2  # days^-1
    freq_inc = 5e-6  # days^-1
    freq_range = max_frequency - min_frequency
    num_freqs = math.ceil(freq_range / freq_inc)
    log.info('* No. (positive) frequencies = %i', num_freqs)

    if simulate:
        freqs, sim_amps, freqs_psf, sim_psf = dft(num_freqs, freq_inc, times,
                                                  sim_values)
        sim_clean_components, sim_residual = clean(sim_amps,
                                                   sim_psf,
                                                   gain=0.1,
                                                   num_iter=1000,
                                                   freqs=freqs,
                                                   f0=1 / 180)
        sim_clean_spectrum, sim_restored_spectrum = \
            make_clean_spectrum(sim_psf, sim_clean_components, sim_residual)
    else:
        freqs, amps, freqs_psf, psf = dft(num_freqs, freq_inc, times, values)
        clean_components, residual = clean(amps,
                                           psf,
                                           gain=0.1,
                                           num_iter=1000,
                                           freqs=freqs,
                                           f0=1 / 180)
        clean_spectrum, restored_spectrum = \
            make_clean_spectrum(psf, clean_components, residual)

    # Plotting
    amp_type = 'Beta'
    c = freqs.size // 2
    fig, ax = plt.subplots(nrows=4, sharex=False, figsize=(10, 8))
    fig.subplots_adjust(left=0.08,
                        right=0.97,
                        bottom=0.08,
                        top=0.95,
                        hspace=0.4,
                        wspace=0.0)
    if simulate:
        plot_times(ax[0],
                   times,
                   sim_values,
                   y_label=amp_type,
                   title='Simulated signal',
                   xlabel=r'$\Delta$ Date [days]')
        plot_psf(ax[1], freqs_psf, sim_psf, title='PSF')
        plot_dirty(ax[2],
                   freqs,
                   sim_amps,
                   signals,
                   title='Dirty spectrum (simulated)')
        plot_clean_spectrum(ax[3],
                            freqs,
                            sim_restored_spectrum,
                            sim_clean_spectrum,
                            signals,
                            signal_amps=True,
                            title='CLEAN spectrum (simulated)')
    else:
        plot_times(ax[0],
                   times,
                   values,
                   y_label=amp_type,
                   title='Time series',
                   xlabel='Date [days]')
        plot_psf(ax[1], freqs_psf, psf, title='PSF')
        plot_dirty(ax[2], freqs, amps, signals, title='Dirty spectrum.')
        plot_clean_spectrum(ax[3],
                            freqs,
                            residual,
                            clean_spectrum,
                            signals,
                            signal_amps=False,
                            title='CLEAN spectrum')

        # initial guess at fit parameters
        p0 = [0.0055, 0.076, 0.0006]
        p1 = [0.0085, 0.088, 0.0006]
        # p1 = [0.015, 0.0885, 0.0003]

        c = freqs.size // 2
        full_search_range = True
        search_range = 10
        try:
            if full_search_range:
                coeff0, _ = curve_fit(gauss,
                                      freqs[c:],
                                      np.abs(clean_spectrum[c:]),
                                      p0=p0)
                coeff1, _ = curve_fit(gauss,
                                      freqs[c:],
                                      np.abs(restored_spectrum[c:]),
                                      p0=p0)
            else:
                idx0 = np.argmax(freqs >= p0[1] - search_range * p0[2])
                idx1 = np.argmax(freqs >= p0[1] + search_range * p0[2])
                coeff0, _ = curve_fit(gauss,
                                      freqs[idx0:idx1 + 1],
                                      np.abs(clean_spectrum[idx0:idx1 + 1]),
                                      p0=p0)
                coeff1, _ = curve_fit(gauss,
                                      freqs[idx0:idx1 + 1],
                                      np.abs(restored_spectrum[idx0:idx1 + 1]),
                                      p0=p0)

            if abs(1 / coeff1[1] - 1 / p0[1]) < 2 and coeff1[2] / p0[2] < 4:
                if coeff1[0] > np.std(np.abs(restored_spectrum)) * 2:
                    x_fit = np.linspace(coeff1[1] - search_range * coeff1[2],
                                        coeff1[1] + search_range * coeff1[2],
                                        1000)
                    delta_p = (1 / (coeff1[1] - coeff1[2]) - 1 /
                               (coeff1[1] + coeff1[2]))
                    peak_fit = gauss(x_fit, *coeff1)
                    ax[3].plot(x_fit,
                               peak_fit,
                               'r-',
                               label=r'Fit: %.4f $\pm$ %.1f days' %
                               (1 / coeff1[1], delta_p / 2))
            elif abs(1 / coeff0[1] - 1 / p0[1]) < 2 and coeff0[2] / p0[2] < 10:
                if coeff0[0] > np.std(np.abs(restored_spectrum)) * 2:
                    x_fit = np.linspace(coeff0[1] - 5 * coeff0[2],
                                        coeff0[1] + 5 * coeff0[2], 1000)
                    peak_fit = gauss(x_fit, *coeff0)
                    delta_p = (1 / (coeff0[1] - coeff0[2]) - 1 /
                               (coeff0[1] + coeff0[2]))
                    ax[3].plot(x_fit,
                               peak_fit,
                               'r-',
                               label=r'Fit: %.4f $\pm$ %.1f days' %
                               (1 / coeff0[1], delta_p / 2))
        except RuntimeError:
            pass

        try:
            if full_search_range:
                coeff2, _ = curve_fit(gauss,
                                      freqs[c:],
                                      np.abs(clean_spectrum[c:]),
                                      p0=p1)
                coeff3, _ = curve_fit(gauss,
                                      freqs[c:],
                                      np.abs(restored_spectrum[c:]),
                                      p0=p1)
            else:
                idx0 = np.argmax(freqs >= p1[1] - search_range * p1[2])
                idx1 = np.argmax(freqs >= p1[1] + search_range * p1[2])
                coeff2, _ = curve_fit(gauss,
                                      freqs[idx0:idx1 + 1],
                                      np.abs(clean_spectrum[idx0:idx1 + 1]),
                                      p0=p1)
                coeff3, _ = curve_fit(gauss,
                                      freqs[idx0:idx1 + 1],
                                      np.abs(restored_spectrum[idx0:idx1 + 1]),
                                      p0=p1)

            if abs(1 / coeff3[1] - 1 / p1[1]) < 2 and coeff3[2] / p1[2] < 10:
                x_fit = np.linspace(coeff3[1] - search_range * coeff3[2],
                                    coeff3[1] + search_range * coeff3[2], 1000)
                peak_fit = gauss(x_fit, *coeff3)
                delta_p = (1 / (coeff3[1] - coeff3[2]) - 1 /
                           (coeff3[1] + coeff3[2]))
                ax[3].plot(x_fit,
                           peak_fit,
                           'g-',
                           label=r'Fit: %.4f $\pm$ %.1f days' %
                           (1 / coeff3[1], delta_p / 2))
            elif abs(1 / coeff2[1] - 1 / p1[1]) < 2 and coeff2[2] / p1[2] < 5:
                x_fit = np.linspace(coeff2[1] - 5 * coeff2[2],
                                    coeff2[1] + 5 * coeff2[2], 1000)
                peak_fit = gauss(x_fit, *coeff2)
                delta_p = (1 / (coeff2[1] - coeff2[2]) - 1 /
                           (coeff2[1] + coeff2[2]))
                ax[3].plot(x_fit,
                           peak_fit,
                           'g-',
                           label=r'Fit: %.4f $\pm$ %.1f days' %
                           (1 / coeff2[1], delta_p / 2))
        except RuntimeError:
            pass

    ax[3].legend(loc='best', fontsize='small')
    amp_limit_str = '2std' if remove_large_values else 'all'
    if simulate:
        amp_type = 'simulated_%s' % amp_type
    if limit_times:
        plt.savefig('%s_times_%03.0f-%03.0f_amps_%s.png' %
                    (amp_type, time_limit[0], time_limit[1], amp_limit_str))
    else:
        plt.savefig('%s_times_all_amps_%s.png' % (amp_type, amp_limit_str))

    plt.show()
    plt.close()
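The helper gauss is not defined in this example. Given the three-element initial guesses and the use of coeff[1] as a centre frequency and coeff[2] as a width, a minimal sketch consistent with those calls (the exact form is an assumption) is:

import numpy as np

def gauss(x, a, x0, sigma):
    # Gaussian peak: amplitude a, centre x0, width sigma (all assumed)
    return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))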
Example #53
0
def plot_curve(self, pos, tit, path):
    max_degree = 3

    fig = plt.figure()
    ax = plt.subplot(111)
    #ax =  fig.add_axes([0.1, 0.2, 0.4, 0.4])
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.6, box.height])

    maxX = 0
    minX = 100000000

    for n in xrange(self.number_table):
        if (self.graphList[n] != None):
            l = self.graphList[n].centrality[pos]

            if (min(l) < minX):
                minX = min(l)

            if (max(l) > maxX):
                maxX = max(l)

    nbin = 50
    # evenly spaced bin edges from minX to maxX (nbin bins, nbin + 1 edges)
    vec = np.linspace(minX, maxX, nbin + 1)

    for n in xrange(self.number_table):
        if (self.graphList[n] != None):
            l = self.graphList[n].centrality[pos]

            #ax.hist(l, bins=np.arange(minX, maxX, 10))
            hist, bins = np.histogram(l, bins=vec)
            width = 1 * (bins[1] - bins[0])
            center = (bins[:-1] + bins[1:]) / 2
            ax.bar(center,
                   hist,
                   align='center',
                   alpha=0.4,
                   width=width,
                   color=colors[n])

            x = np.array(center)
            y = np.array(hist)

            #print np.array(x)
            #print np.array(y)

            # m = dict((i,l.count(i)) for i in l)

            # y = np.array(m.values())
            # x = np.array(m.keys())

            # x = ar(range(10)) #gauss points
            # y = ar([0,1,2,3,4,5,4,3,2,1])

            x_new = np.linspace(x[0], x[-1], num=len(x) * 10)

            # polynomial fit: try degrees 0..max_degree and keep the degree
            # with the smallest residual
            best = 0
            method = 0  # ensures method is defined even if every fit fails
            min_ = float('inf')
            for i in xrange(max_degree + 1):
                coefs, val = poly.polyfit(x, y, i, full=True)
                # val[0] is the residual array from polyfit; it is empty for
                # an exact fit, so test its length before comparing values
                if (len(val[0]) == 0 and min_ != 0):
                    best = i
                    method = 1
                    min_ = 0
                elif (val[0] < 1e-15 and min_ != 0):
                    best = i
                    method = 1
                    min_ = 0
                elif (val[0] < min_ and coefs[0] != 0):
                    best = i
                    method = 1
                    min_ = val[0]

            #print "rect" + str(min_)

            # power law
            try:
                popt, pcov = curve_fit(power_law, x, y, maxfev=1000000)
                residuals = y - power_law(x, *popt)
                fres = sum((residuals)**2)
                if (fres < min_):
                    method = 2
                    min_ = fres
            except Exception as e:
                print "No power-law: " + str(e)

            #print "pl " + str(fres)
            try:
                # exponential
                popt1, pcov = curve_fit(exponential, x, y, maxfev=1000000)
                residuals = y - exponential(x, *popt1)
                fres = sum((residuals)**2)
                if (fres < min_):
                    method = 3
                    min_ = fres
            except:
                print "no exponential"

            #print "exp " + str(fres)

            #gaussian
            try:
                len_ = len(x)  #the number of data
                mean = sum(x * y) / len_  #note this correction
                sigma = sum(y * (x - mean)**2) / len_
                popt2, pcov = curve_fit(gauss_function, x, y, maxfev=1000000)
                residuals = y - gauss_function(x, *popt2)
                fres = sum((residuals)**2)
                if (fres < min_):
                    method = 4
                #print fres
                #print min_
                #print(fres < min_)
            except:
                print "no gaussian"

            #print "gaus " + str(fres)

            #hist, bins = ax.hist(x)

            #ax.plot(x, y, colors[n]+ 'o', label= self.graphList[n].name +" data")

            #print "meth: " + str(method)

            if (method == 1):
                coefs = poly.polyfit(x, y, best)
                coefs = coefs[::-1]
                f = np.poly1d(coefs)
                ax.plot(x_new,
                        f(x_new),
                        colors[n],
                        label=self.graphList[n].name + "\n" +
                        func_print(method, coefs))
            elif (method == 2):
                ax.plot(x_new,
                        power_law(x_new, *popt),
                        colors[n],
                        label=self.graphList[n].name + "\n" +
                        func_print(method, popt))
            elif (method == 3):
                ax.plot(x_new,
                        exponential(x_new, *popt1),
                        colors[n],
                        label=self.graphList[n].name + "\n" +
                        func_print(method, popt1))
            elif (method == 4):
                ax.plot(x_new,
                        gauss_function(x_new, *popt2),
                        colors[n],
                        label=self.graphList[n].name + "\n" +
                        func_print(method, popt2))

            print method
    ax.set_xlabel('Value')
    ax.set_ylabel('Frequency')

    # box = ax.get_position()
    # ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    # ax.legend(loc='center left', bbox_to_anchor=(1, 1))
    #ax.axis([-0.05, 1.05, -0.05, 1.05])
    #ax.legend(loc='center left', bbox_to_anchor=(0.78, 0.95), prop={'size':9})
    ax.legend(loc='center left',
              bbox_to_anchor=(1.05, 0.60),
              prop={'size': 10})
    #ax.legend()

    plt.title(tit)
    plt.savefig(path + ".png", format='png', dpi=400)

    return plt
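plot_curve calls three model helpers (power_law, exponential and gauss_function) that are defined elsewhere. Minimal sketches consistent with how they are invoked above (the exact functional forms are assumptions) might be:

import numpy as np

def power_law(x, a, b):
    # y = a * x**b
    return a * np.power(x, b)

def exponential(x, a, b):
    # y = a * exp(b * x)
    return a * np.exp(b * x)

def gauss_function(x, a, x0, sigma):
    # Gaussian peak with amplitude a, centre x0 and width sigma
    return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))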
Example #54
0
    def fit(self):
        if self.data is None:
            return None
        self.update_data()

        xl = int(self.get_channel('A', int(self.input_a0.text())))
        xr = int(self.get_channel('A', int(self.input_a1.text())))
        if xl < self.ch_range[0]:
            xl = self.ch_range[0]
        if xr >= self.ch_range[1]:
            xr = self.ch_range[1] - 1

        yl = int(self.get_channel('B', int(self.input_a0.text())))
        yr = int(self.get_channel('B', int(self.input_a1.text())))
        if yl < self.ch_range[0]:
            yl = self.ch_range[0]
        if yr >= self.ch_range[1]:
            yr = self.ch_range[1] - 1

        df = pandas.DataFrame(self.data, columns=['A', 'B'])
        good = df[(df.A >= xl) & (df.A <= xr) & (df.B >= yl) & (df.B <= yr)]

        if self.combo_fit.currentText() == 'A':
            bins, edges = numpy.histogram(good.A,
                                          range=self.ch_range,
                                          bins=self.ch_bins)
            col = 0
            row = 1
        elif self.combo_fit.currentText() == 'B':
            bins, edges = numpy.histogram(good.B,
                                          range=self.ch_range,
                                          bins=self.ch_bins)
            xl = yl
            xr = yr
            col = 1
            row = 0
        elif self.combo_fit.currentText() == 'dt':
            bins, edges = numpy.histogram(good.B - good.A,
                                          range=(-100, 100),
                                          bins=200)
            xl = 0
            xr = 199
            col = 0
            row = 0

        x0 = numpy.argmax(bins[xl:xr]) + edges[xl]
        s = (xr - xl) / 3
        a1 = (bins[xr] - bins[xl]) / (edges[xr] - edges[xl])
        a0 = bins[xl] - edges[xl] * a1

        A = (sum(bins[xl:xr]) - a1 / 2 * (edges[xr]**2 - edges[xl]**2) - a0 *
             (edges[xr] - edges[xl]))
        try:
            popt, pcov = curve_fit(self.peak,
                                   edges[xl:xr],
                                   bins[xl:xr],
                                   p0=[A, x0, s, a1, a0])
        except RuntimeError:
            self.input_fit.setPlainText('Fit error')
            return None

        self.axes[col][row].plot(
            edges[xl:xr] * self.calib[self.combo_fit.currentText()][1] +
            self.calib[self.combo_fit.currentText()][0],
            self.peak(edges[xl:xr], *popt), '--')

        perr = numpy.sqrt(numpy.diag(pcov))

        # same parameter order as the p0 passed to curve_fit above
        params = ['A', 'x0', 's', 'a1', 'a0']
        text = ''
        calib = self.calib[self.combo_fit.currentText()]
        for i in [1, 2]:
            # the calibration offset shifts the value; the uncertainty is
            # only scaled
            text += '{} = {:.1f} +/- {:.2f}\n'.format(
                params[i],
                popt[i] * calib[1] + calib[0],
                perr[i] * calib[1])
        for i in [0, 3, 4]:
            text += '{} = {:.1f} +/- {:.2f}\n'.format(params[i], popt[i],
                                                      perr[i])
        self.input_fit.setPlainText(text)

        self.figure.canvas.draw()
        self.figure.canvas.flush_events()
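The model self.peak is not included in the example. Since the initial guess is [A, x0, s, a1, a0], with A estimated as a background-subtracted area, a plausible sketch (an assumption, not the original) is an area-normalised Gaussian on a linear background:

import numpy

def peak(x, A, x0, s, a1, a0):
    # Gaussian of area A, centre x0, width s, plus linear background
    return (A / (s * numpy.sqrt(2 * numpy.pi)) *
            numpy.exp(-(x - x0) ** 2 / (2 * s ** 2)) + a1 * x + a0)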
Example #55
0
    def fit(xs, ys):
        def f(x, a, b):
            return a * x + b

        params, _ = optimize.curve_fit(f, xs, ys)
        return params
Example #56
0
set_dir = ["8xset1/", "8xset2/", "8xset3/"]
compsum = 0
for i in set_dir:
    comp1 = np.loadtxt(i + 'out_125_1000.txt')
    comp2 = np.loadtxt(i + 'out_1000_8000.txt')
    comp3 = np.loadtxt(i + 'out_8000_64000.txt')
    compsum += comp1 + comp2 + comp3

np.savetxt("compsum_mono_raw.txt", compsum, fmt="%d")

compsum[compsum > 255] = 255
# `mode` belongs to Image.fromarray, not np.uint8; also avoid shadowing the
# built-in sum()
img_sum = Image.fromarray(np.uint8(compsum), mode='L')

img_sum.save('compsum_mono.jpg')

compsum_loaded = np.loadtxt('compsum_mono_raw.txt')

fit, x = bin_count(np.flip(compsum_loaded, 0), 'mono')
print(fit)
x_data = (x + 0.5) / 256.
y_data = (fit + 0.5) / 256.
plt.plot(x_data, y_data)
popt, pcov = curve_fit(func, x_data, y_data, maxfev=1000)
plt.plot(x_data,
         func(x_data, *popt),
         'r-',
         label='fit: a=%5.3f, c=%5.3f' % tuple(popt))
plt.legend()
plt.savefig('result/fit_curve_mono.png')
plt.clf()
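The model func is defined elsewhere in this script; the plot label only tells us it has two parameters, a and c. A purely hypothetical stand-in (not the original) that matches that signature is a gamma-style power curve:

def func(x, a, c):
    # hypothetical two-parameter response curve: y = a * x**c
    return a * x ** c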
Example #57
0
#get an initial guess for the fit by simply taking the maximum value of the peak
monitor3_height_guess = np.max(monitor_a_intensity)
print("monitor3_height_guess", monitor3_height_guess)
monitor3_peakcenter_guess = monitor_a_tof[np.argmax(monitor_a_intensity)]
print("monitor3_peakcenter_guess", monitor3_peakcenter_guess)
monitor3_sigma_guess = 50
print("monitor3_sigma_guess", monitor3_sigma_guess)

#fit_result = Fit(Function='name=Gaussian,Height='+str(monitor3_height_guess)+',PeakCentre='+str(monitor3_peakcenter_guess)+',Sigma='+str(monitor3_sigma_guess), InputWorkspace=monitor_a, Output=monitor_a, OutputCompositeMembers=True)

# gaussian_s_b parameters: mu, sig, scale, background
initial_guess = (monitor3_peakcenter_guess, monitor3_sigma_guess,
                 monitor3_height_guess, 1.)
popt, pcov = opt.curve_fit(gaussian_s_b,
                           monitor_a_tof_centered,
                           monitor_a_intensity,
                           p0=initial_guess)
popt_names = ['mu', 'sig', 'scale', 'background']
print("****************")
print("results of gaussian_s_b fit to elastic line in TOF")
for idx, value in enumerate(popt):
    print(popt_names[idx], value, '(initial guess:', initial_guess[idx], ')')

data_fitted = gaussian_s_b(monitor_a_tof_centered, *popt)
"""
plt.figure()
plt.plot(monitor_a_tof_centered, data_fitted)
plt.plot(monitor_a_tof_centered, monitor_a_intensity)
plt.plot(monitor_a_tof[:-1], monitor_a_intensity)
plt.show()
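gaussian_s_b itself is not shown, but the parameter comment above (mu, sig, scale, background) pins down a consistent sketch (still an assumption):

import numpy as np

def gaussian_s_b(x, mu, sig, scale, background):
    # Gaussian of height `scale` centred at mu with width sig,
    # sitting on a constant background
    return scale * np.exp(-(x - mu) ** 2 / (2 * sig ** 2)) + background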
Example #58
0
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from scipy.optimize import curve_fit
from matplotlib import pyplot as plt
from matplotlib import cm

def f(X, a,b):
    return (a/X) + b

my_data = np.genfromtxt('results.txt', delimiter=',')

param, param_cov = curve_fit(f, my_data[:,0], my_data[:,1])
ans = f(my_data[:,0], *param)

total = param[0] + param[1]
s = param[1]/total
p = param[0]/total

print('s', param[1])
print('p', param[0])
print('max speedup', 1/s)

N = my_data[:,0]


plt.plot(N, ans, '--', color='red',
         label='fit p={:.3g} s={:.3g}'.format(param[0], param[1]))
plt.scatter(N, my_data[:,1], label='data')
plt.legend()
plt.xlabel('N proc')
plt.ylabel('T [us]')
plt.title('data fit t=s+p/N')
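Because the fitted model is Amdahl's law, T(N) = s + p/N, the fitted parameters also give a predicted speedup curve T(1)/T(N). A short follow-up sketch (reusing param, N and s from above):

N_grid = np.linspace(1, N.max(), 100)
speedup = (param[0] + param[1]) / (param[0] / N_grid + param[1])
plt.figure()
plt.plot(N_grid, speedup, label='predicted speedup T(1)/T(N)')
plt.axhline(1 / s, linestyle=':', label='Amdahl limit 1/s')
plt.legend()
plt.xlabel('N proc')
plt.ylabel('speedup')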
Example #59
0
def fit_best(LC, p, maxNterms=5, output='compact', plotname=False):
    """ fit a lightcurve with a fourier model given period p
    input
        LC: 2D-array; [t,y,dy]
        p: float; the period

    output:
        array; model values for times t
    """

    t = LC[:, 0]
    y = LC[:, 1]
    dy = LC[:, 2]
    N = np.size(t)

    if N <= 4 + maxNterms * 2:
        return np.nan * np.ones(4 + maxNterms * 2)

    #
    f = make_f(p=p)
    chi2 = np.zeros(maxNterms + 1, dtype=float)  # fill in later
    Fstats = np.zeros(maxNterms + 1, dtype=float)  # fill in later
    pars = np.zeros((maxNterms + 1, (maxNterms + 1) * 2))  # fill in later

    init = [16.0, 0.001]  # the initial values for the minimiser
    for i in range(maxNterms + 1):
        # fit using scipy curve_fit; f was already built by make_f(p) above
        popt, pcov = curve_fit(
            f,  # model function
            t,
            y,
            p0=init,  # initial parameter values
            sigma=dy  # measurement uncertainties
        )

        # grow the initial guess by one harmonic (two parameters)
        init = init + [0.05] * 2

        pars[i, :2 * (i + 1)] = popt
        # make the model
        model = f(t, *popt)
        chi2[i] = np.sum(((y - model) / dy)**2)

    # calc BICs
    BIC = chi2 + np.log(N) * (2 + 2 * np.arange(maxNterms + 1, dtype=float))
    best = np.argmin(BIC)

    power = (chi2[0] - chi2[best]) / chi2[0]
    bestBIC = BIC[best]
    bestpars = pars[best, :]

    if plotname:
        for n in [0, 1]:
            plt.errorbar(t / p % 1 + n, y, dy, fmt='k.')
            plt.plot(t / p % 1 + n, f(t, *bestpars), 'r.')
        plt.ylim(plt.ylim()[::-1])
        plt.savefig(plotname)
        plt.close()

    if output == 'compact':
        # convert to amplitude and phase
        if best > 0:
            bestpars[2:2 + 2 * best] = AB2AmpPhi(bestpars[2:2 + 2 * best])

    return np.r_[power, bestBIC, bestpars]
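make_f is defined elsewhere; from the way it is used here (two base parameters plus two per harmonic, and AB2AmpPhi converting A/B pairs to amplitude and phase) a plausible sketch (an assumption) is a truncated Fourier series with an offset and linear trend:

import numpy as np

def make_f(p):
    def f(t, *pars):
        # pars = [offset, slope, A1, B1, A2, B2, ...]
        y = pars[0] + pars[1] * t
        nterms = (len(pars) - 2) // 2
        for n in range(1, nterms + 1):
            y = y + (pars[2 * n] * np.cos(2 * np.pi * n * t / p) +
                     pars[2 * n + 1] * np.sin(2 * np.pi * n * t / p))
        return y
    return f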
def norm_fit(pres, arg):
    # 1 mmHg = 0.13332236842105 kPa, so this presumably converts the
    # pressure from kPa to mmHg
    unitchange = 1 / 0.13332236842105
    norm_pres = unitchange * pres
    # rescale the argument to the unit interval [0, 1]
    norm_arg = (arg - min(arg)) / (max(arg) - min(arg))
    popt, _ = curve_fit(func, norm_arg[1:], norm_pres[1:], maxfev=50000)
    return popt