Example #1
def fit(filename, fitax, difax):
    folder = P(filename).parent
    _, data = read(filename)
    field = str(P(filename).stem).split("data_")[-1]
    if TESTING:
        popt, pcov = cf(double_debye,
                        data[:, 0],
                        data[:, 1],
                        method='trf',
                        verbose=0)
        print(field)
    else:
        popt, pcov = cf(double_debye,
                        data[:, 0],
                        data[:, 1],
                        method='trf',
                        verbose=2,
                        p0=[50, 10, 200, 10])
    leg_popt = [f"{ii:.1f}" for ii in popt]
    fitax.plot(data[:, 0],
               double_debye(data[:, 0], *popt),
               label=f'fit: {field}')
    fitax.plot(data[:, 0], data[:, 1], label=f'raw: {field}')
    fitax.legend()

    difax.plot(data[:, 0],
               data[:, 1] - double_debye(data[:, 0], *popt),
               label=f'{field}')
    difax.legend()

    return popt
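The snippet relies on read and double_debye helpers defined elsewhere in the module. A minimal driving sketch with an assumed two-relaxation model and a stub reader (the model form and file pattern are illustrative, not the original code):

import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path as P
from scipy.optimize import curve_fit as cf

TESTING = True   # module-level flag used by fit() above

def read(filename):
    # stand-in for the module's read() helper: header plus a 2-column array
    return None, np.loadtxt(filename)

def double_debye(x, A1, x1, A2, x2):
    # hypothetical two-relaxation Debye-like curve; the real double_debye may differ
    return A1 / (1 + (x / x1) ** 2) + A2 / (1 + (x / x2) ** 2)

fig_fit, fitax = plt.subplots()
fig_dif, difax = plt.subplots()
for f in sorted(P('.').glob('data_*T.dat')):   # illustrative file pattern
    fit(str(f), fitax, difax)
plt.show()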
Example #2
    def pltfit(self, data, neg):
        if neg:
            self.params_neg, cov = cf(self.linearfunc, data[1, :], data[0, :],
                                      sigma=self.sigma * np.ones(len(data[1, :])), p0=self.guess_neg, maxfev=10 ** 5)
            chi2 = self.chisquared(self.params_neg, data[1, :], data[0, :])
            dof = len(data[1, :]) - len(self.params_neg)
        else:
            self.params_pos, cov = cf(self.linearfunc, data[1, :], data[0, :],
                                      sigma=self.sigma * np.ones(len(data[1, :])), p0=self.guess_pos, maxfev=10 ** 5)
            chi2 = self.chisquared(self.params_pos, data[1, :], data[0, :])
            dof = len(data[1, :]) - len(self.params_pos)

        print("\nGoodness of fit - chi square measure:")
        print("Chi2 = {}, Chi2/dof = {}\n".format(chi2, chi2 / dof))
        cov = cov * dof / chi2
        paramserr = np.sqrt(np.diag(cov))

        print("Fit parameters:")
        param_names = ['Slope', 'Offset']
        if neg:
            for i in range(len(self.params_neg)):
                print('{} = {:.3e} +/- {:.3e}'.format(param_names[i], self.params_neg[i], paramserr[i]))
            bfieldfit = np.linspace(min(data[1, :]), max(data[1, :]), len(data[1, :]) * 10)
            rxyfit = self.linearfunc(bfieldfit, *self.params_neg)
        else:
            for i in range(len(self.params_pos)):
                print('{} = {:.3e} +/- {:.3e}'.format(param_names[i], self.params_pos[i], paramserr[i]))
            bfieldfit = np.linspace(min(data[1, :]), max(data[1, :]), len(data[1, :]) * 10)
            rxyfit = self.linearfunc(bfieldfit, *self.params_pos)

        plt.scatter(data[1, :], data[0, :], marker='.', label="measured data")
        plt.plot(bfieldfit, rxyfit, marker="", linestyle="-", linewidth=2, color="r", label=" fit")
        plt.xlabel('{} [{}]'.format(self.x_label, self.x_units))
        plt.ylabel('{} [{}]'.format(self.y_label, self.y_units))
        plt.title(self.title + " Rxx Fitted Guess")
        plt.legend(loc='best', numpoints=1)
        print('\nSaving plot 2 for ' + self.title)

        if "OutPlane" in self.DataAddress:
            if neg:
                address = self.DataAddress[:90] + "Graphs/" + self.title + "_InPlane_Linear_Fitted_Neg" + ".png"
            else:
                address = self.DataAddress[:90] + "Graphs/" + self.title + "_InPlane_Linear_Fitted_Pos" + ".png"
        elif neg:
            address = self.DataAddress[:90] + "Graphs/" + self.title + "_Linear_Fitted_Neg" + ".png"
        else:
            address = self.DataAddress[:90] + "Graphs/" + self.title + "_Linear_Fitted_Pos" + ".png"
        plt.savefig(address)
        plt.clf()
Example #3
def fitting(func,xdata,ydata,lb,ub,errors=None,Poisson=False,fixed_param=None):
    params = -999. ; chi2n = -999.

    # Check that the degrees of freedom is larger than 1
    dof = len(xdata)-len(lb)
    if (dof<=1):
        sys.exit('HOD_FIT.py: The degrees of freedom should be >1')

    # Find an adequate initial set of parameters
    ntest = 50
    chi0 = 999.
    for i in range(ntest):
        p1 = random.rand(len(lb))
        p1 = lb + p1*(ub-lb) 

        if (fixed_param is None):
            p2,cov = cf(func,xdata,ydata,p0=p1)
            model = func(xdata,*p2)
        else:
            # Fixed-parameter fitting is not implemented yet; skip this trial so
            # that p2/model are never used before being assigned.
            print('working on fixing parameters')
            #ia = fixed_param ; a = lb[ia]
            #p2,cov = cf(func+bound([a,a],),xdata,ydata,p0=p1)
            #model = func(xdata,*p2)
            continue

        if (Poisson):
            chi = abs(chi2_poisson(ydata,model)/dof -1.)
        else:
            chi = abs(chi2(ydata,model,errors)/dof -1.)

        if (chi<=chi0 or i==0): 
            chi0   = chi
            params = p2
            chi2n  = chi2(ydata,model,errors)/dof
        
    return params,chi2n
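fitting leans on chi2 and chi2_poisson helpers that are not shown. Plausible definitions, assuming per-point Gaussian errors and Poisson counting statistics respectively:

import numpy as np

def chi2(ydata, model, errors):
    # Gaussian chi-square with per-point uncertainties (assumed helper)
    return np.sum(((np.asarray(ydata) - np.asarray(model)) / np.asarray(errors)) ** 2)

def chi2_poisson(ydata, model):
    # likelihood-ratio (Cash-like) statistic for Poisson counts (assumed helper)
    y = np.asarray(ydata, dtype=float)
    m = np.asarray(model, dtype=float)
    return 2.0 * np.sum(m - y + y * np.log(np.where(y > 0, y / m, 1.0)))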
Example #4
def disparity_investigation(samplerate_name,shaping,datadate,n_box,n_delay,n_att,numhead):
    phase_time = 1/20000000000  #sets timing between phases (initial sample rate of oscilloscope)
    
    filedir = 'G:/data/watchman/'+str(datadate)+'_watchman_spe/studies/phase/'+samplerate_name+'/phase=0/phase_'+shaping+'/'
    Nloops = len(os.listdir(filedir))

    filedir = 'G:/data/watchman/'+str(datadate)+'_watchman_spe/studies/phase/' + samplerate_name + '/array_data/'
    median_array = np.loadtxt(filedir+'median_array.csv', delimiter=',')
    correction_median_array = np.loadtxt(filedir+'correction_median_array.csv', delimiter=',')

    for i in range(29,40):
        corrected_difference_list = []      #initializing lists (use python lists rather than np arrays when appending, faster)
        for j in range(Nloops):
            print(j)            #printing value to show progress
            #establishing directory and file names
            filedir = 'G:/data/watchman/'+str(datadate)+'_watchman_spe/studies/phase/'+samplerate_name+'/phase='+str(i)+'/phase_'+shaping+'/'
            filename = filedir + 'Phase--waveforms--%05d.txt' % j
            #reading in waveform values and running CFD
            (t,v,_) = rw(filename,numhead)
            t_avg,v_avg = boxcar_wf(t,v,n_box)
            v_delay = delay_wf(v_avg,n_delay)
            v_att = attenuate_wf(v_avg,n_att)
            v_sum = sum_wf(v_att,v_delay)
            t_cross,_,_ = zc_locator(t_avg,v_sum)
            t_cross = t_cross - median_array[0]     #removing linear displacement
            corrected_difference_list.append(-1*i*phase_time - (t_cross + correction_median_array[i]))  #gathering timing difference

        corrected_difference_list = np.asarray(corrected_difference_list)   #turning to numpy array for better calculation

        #calculating full mean and standard deviations
        set_mean = np.mean(corrected_difference_list)
        set_std = np.std(corrected_difference_list)
        set_mean = '%.5g' % set_mean
        set_std = '%.5g' % set_std
        histo_mean,histo_std = gauss_histogram(corrected_difference_list)   #gathering starting means/stds for fit
        corrected_difference_list = corrected_difference_list[(corrected_difference_list >= histo_mean - 4*histo_std) & (corrected_difference_list <= histo_mean + 4*histo_std)]    #cutting off outliers
        histo_data, bins_data = np.histogram(corrected_difference_list, bins = 200)
        binwidth = (bins_data[1] - bins_data[0])                    #determining bin width
        #determining bin centers
        binscenters = np.array([0.5 * (bins_data[i] + bins_data[i+1]) for i in range(len(bins_data)-1)])
        b_guess = (len(corrected_difference_list) * binwidth)   #using area approximation to guess at B value
        popt, _ = cf(fit_function,xdata = binscenters,ydata = histo_data, p0 = [b_guess,histo_mean,histo_std], maxfev = 10000)
        gauss_mean = '%s' % float('%.5g' % popt[1])
        gauss_std = '%s' % float('%.5g' % popt[2])
        #establishing 5 significant figure versions of the mean and std from curve fit
        x_values = np.linspace(popt[1] - 1.5*popt[2], popt[1] + 1.5*popt[2], 100000)    #creating 100,000 x values to map curvefit gaussian to
        fig,ax = plt.subplots()
        ax.bar(binscenters, histo_data, width=binwidth)        #plotting histogram
        ax.plot(x_values, fit_function(x_values, *popt), color='darkorange')   #plotting curve fit
        ax.set_xlabel('True Timing - Recovered Timing')
        ax.set_ylabel('Count')
        ax.set_title('Corrected Timings for Phase=%d' %i +'\nGaussian Fit Values:\nMean = '+gauss_mean+' seconds, '+set_mean+' seconds\nStandard Deviation = '+gauss_std+' seconds, '+set_std+' seconds', fontdict={'fontsize': 14})
        plt.get_current_fig_manager().window.showMaximized()
        plt.show(block = False)
        plt.pause(1)
        filedir = 'G:/data/watchman/20190724_watchman_spe/studies/disparity/'
        filename = 'Phase_%d.png' % i
        savename = filedir + filename
        fig.savefig(savename,dpi = 500)
        plt.close()
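fit_function is not shown; using len(corrected_difference_list) * binwidth as the guess for its first parameter suggests an area-scaled Gaussian, roughly of this assumed form:

import numpy as np

def fit_function(x, B, mu, sigma):
    # Gaussian whose integral is B, so B is roughly (number of entries) * (bin width)
    return B / (sigma * np.sqrt(2 * np.pi)) * np.exp(-0.5 * ((x - mu) / sigma) ** 2)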
Example #5
def fit_CAB_powerlaw(sample_epochs_index, all_index,
                     sparse_data_averages_nonan, sparse_data_std_nonan, scale):
    def plfunc(x, a, b):
        return a * np.power(1 + x, -b)

    #end

    pfit, _ = cf(plfunc, sample_epochs_index, sparse_data_averages_nonan)
    residuals = sparse_data_averages_nonan - plfunc(sample_epochs_index, *pfit)
    squared_res = np.sum(np.power(residuals, 2))
    sum_tot_squares = np.sum(
        (sparse_data_averages_nonan - np.mean(sparse_data_averages_nonan))**2)
    r_squared = 1 - (squared_res / sum_tot_squares)

    all_data = plfunc(all_index, *pfit)
    scatterplot(all_index,
                all_data,
                sample_epochs_index,
                sparse_data_averages_nonan,
                sparse_data_std_nonan,
                pfit,
                scale,
                tag='CAB',
                rsquare=r_squared)

    return pfit
Example #6
    def gaussfit(self):
        for idx, columnx in enumerate(df):
            if boxDict[columnx + str(1)].isChecked():
                pa = df[columnx].tolist()
                for idy, columny in enumerate(df):
                    variablefit = str(idx) + str(idy) + "gauss_fit"
                    if boxDict[columny + str(2)].isChecked():
                        try:
                            fitDict[variablefit][0].remove()
                        except:
                            pass
                        pb = df[columny].tolist()
                        pstart = [1., 0., 1.]
                        coeff, var_matrix = cf(functions.gauss,
                                               pa,
                                               pb,
                                               p0=pstart)
                        fitDict[variablefit] = plt.plot(
                            np.linspace(pa[0], pa[-1], 1000),
                            functions.gauss(np.linspace(pa[0], pa[-1], 1000),
                                            coeff[0], coeff[1], coeff[2]),
                            "-",
                            color="red")
                        self.textBrowser.append("Line fit: " + "a = " +
                                                str(coeff[0]) + ", mu = " +
                                                str(coeff[1]) + ", sigma = " +
                                                str(coeff[2]) + " for " +
                                                str(columnx) + " x " +
                                                str(columny))

        self.reloadpictrue()
Example #7
def process(filename):
    header, data = read(filename)

    data_array = np.array(data)
    data_dict = {
        'param': 1E6 * data_array[:, 0],
        'echo': data_array[:, 3],
    }

    popt, pcov = cf(func, data_dict['param'], data_dict['echo'])
    min_idx = np.argmin(func(data_dict['param'], *popt))
    min_time = data_dict['param'][min_idx]
    rabi_freq = 1 / (2 * min_time *
                     1E-6) / 1E6  # convert the us to s then get freq
    # print(popt)
    fig, ax = plt.subplots()
    ax.plot(data_dict['param'], data_dict['echo'], label='Raw data', c='black')
    ax.plot(data_dict['param'],
            func(data_dict['param'], *popt),
            label=f"Min time: {min_time:.2f} $\mu$s\nFreq: {popt[0]:.2f} MHz",
            c='red')
    ax.legend()
    ax.set_ylabel('Echo intensity (arb. u)')
    ax.set_yticks([])
    ax.set_xlabel('$P_1$ length ($\\mu$s)')
    ax.set_title('Rabi echo intensity vs. inversion pulse length')
    for s in ['top', 'right']:
        ax.spines[s].set_visible(False)
    plt.savefig(P(filename).parent.joinpath('fixedSourceRabi.png'), dpi=300)
    plt.show()
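func is defined elsewhere; since popt[0] is reported in MHz and the fit minimum is read off as half a Rabi period, a damped-cosine model of roughly this shape is a reasonable guess (an assumption, not the original definition):

import numpy as np

def func(t_us, f_MHz, tau_us, amp, offset):
    # hypothetical damped Rabi oscillation: time in microseconds, frequency in MHz
    return amp * np.cos(2 * np.pi * f_MHz * t_us) * np.exp(-t_us / tau_us) + offset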
Example #8
def fitGaussianX(img, m, b, bright, offset, starX, starY, starR, gX, gY, gR, hotX, hotY):
    parDist,perpDist = GT.getDistancesHorizontal(m, b, img.shape[1], img.shape[0], offset)
    parF,perpF,imgF = getTrailBand(parDist, perpDist, img, 6)
    x,y = GT.getXY(parF,perpF,m,b,offset)
    
    trailScaled = imgF/bright(parF)

    fig,axes = plt.subplots(1,2,sharex = True, sharey = True)

    axes[0].plot(perpF,trailScaled,"bo")
    axes[0].set_title("Trail Gaussian With Stars")

    mask = getValueMask(img, x, y, starX, starY, starR, gX, gY, gR, hotX, hotY, 1)
    perpF = perpF[mask]
    trailScaled = trailScaled[mask]
    

    axes[1].plot(perpF,trailScaled,"bo")
    axes[1].set_title("Trail Gaussian Without Stars")

    plt.show()


    sDev = cf(gauss, perpF, trailScaled)[0][0]


    x = np.arange(-5,5,0.1)
    
    plt.plot(perpF,trailScaled,"bo",label = "Pixel Brightnesses")
    plt.plot(x,gauss(x,sDev), label = "Gaussian Fit")
    plt.title("Gaussian Fit")
    plt.show()

    return sDev
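Only one value is read back from the fit, so gauss here is evidently a one-parameter profile; a plausible assumed form:

import numpy as np

def gauss(x, sDev):
    # unit-amplitude, zero-centred profile in perpendicular distance; the single
    # fitted parameter is the standard deviation of the trail cross-section
    return np.exp(-np.asarray(x) ** 2 / (2 * sDev ** 2))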
Example #9
        def crear_plot(self, sub_df, corr):
            y_start, y_end = min(sub_df[y][sub_df['alpha'] == alpha_max]), max(sub_df[y][sub_df['alpha'] == alpha_max])
            y_start -= 0.05 * (y_end - y_start)
            y_end += 0.05 * (y_end - y_start)

            scatter = figure(plot_height=400, plot_width=400,
                             tools='reset,box_zoom,pan,wheel_zoom,lasso_select,undo,redo',
                             sizing_mode='scale_width', output_backend="webgl", toolbar_location='above',
                             y_range=(y_start, y_end))

            hover = HoverTool(
                tooltips="""
                    <div><span style="font-size: 17px; font-weight: bold;">@municipio</span></div>
                    <div><span style="font-size: 12px;">@provincia (@autonomia), @poblacion</span></div>
                    <div><span style="font-size: 14px; font-weight: bold;">@partido</span>
                    <span style="font-size: 12px;">@porcentaje</span></div>
                    """)

            scatter.add_tools(hover)
            scatter.scatter(x=x, y=y, source=sub_df, color='color', alpha='alpha', **scatter_kwargs)
            y_range=(y_start, y_end)
            if corr:  # This is where the correlation fits are added.
                for var_subor_i in self.subordinada_obj:
                    for var_indep_i in self.indepe_obj:
                        si_df = sub_df[(sub_df[var_indepe] == var_indep_i) &
                                       (sub_df[var_subord] == var_subor_i)]

                        if len(si_df) > 1:  # Guard against single-point groups; otherwise this fails for Ceuta and Melilla
                            x_vals, y_vals = si_df[x].values, si_df[y].values

                            if corr_type == 'lineal':
                                def f(x, m, b):
                                    return m * x + b

                                m, b, r, p, err = lr(x_vals, y_vals)
                                text_label = "r² = %.2f" % r**2
                            elif corr_type == 'exp':
                                def f(x, m, b):
                                    return np.power(m * np.log10(x) + b, 10)

                                popt, pcor = cf(f, x_vals, y_vals)
                                m, b = popt
                                ss_res = np.sum((y_vals - f(x_vals, m, b)) ** 2)
                                ss_tot = np.sum((y_vals - np.mean(y_vals)) ** 2)
                                r_squared = 1 - (ss_res / ss_tot)
                                text_label = "r² = %.2f" % r_squared

                            x_arr = np.linspace(min(x_vals), max(x_vals), 100)
                            scatter.line(x_arr, [f(x_i, m, b) for x_i in x_arr], color=si_df['color'].iloc[0])
                            r_label = Label(x=1.05 * max(x_vals), y=f(max(x_vals), m, b),
                                            text=text_label, text_align='left', text_color=si_df['color'].iloc[0],
                                            render_mode='css')
                            scatter.add_layout(r_label)
                            if f(max(x_vals), m, b) > y_end: y_end = f(max(x_vals), m, b)
                            if f(max(x_vals), m, b) < y_start: y_start = f(max(x_vals), m, b)
                        else:
                            pass
                scatter.y_range = Range1d(y_start, y_end)
            return scatter
Example #10
def polyfit6(landmarks):
    a = np.array([m[0] for m in landmarks if 'contour_' in m[1]])
    a = a[a[:, 0].argsort()]
    x = a[:, 0]
    y = a[:, 1]
    cutoff = 7
    popt, pcov = cf(_x6, x[cutoff:-cutoff], y[cutoff:-cutoff])
    return list(popt)
Example #11
def butterfly_catastrophe(landmarks):
    a = np.array([m[0] for m in landmarks if 'contour_' in m[1]])
    a = a[a[:, 0].argsort()]
    x = a[:, 0]
    y = a[:, 1]
    cutoff = 7
    popt, pcov = cf(_butterfly, x[cutoff:-cutoff], y[cutoff:-cutoff])
    return list(popt)
Example #12
File: data.py  Project: jonbowr/simPyon
def gauss_fit(data,
              title='',
              plot=True,
              n_tot=[],
              weights=None,
              label='',
              norm=None):
    rng = max(data)
    if n_tot == []:
        n_tot = len(data)
    bin_e, bin_edges = np.histogram(data,
                                    bins=np.linspace(0, rng, 50),
                                    density=False,
                                    weights=weights)
    err = np.sqrt(bin_e)

    bin_mid = (bin_edges[1:] + bin_edges[:-1]) / 2
    gauss_param = cf(gauss,
                     bin_mid,
                     bin_e, (max(bin_e), np.average(bin_mid, weights=bin_e),
                             np.average(bin_mid, weights=bin_e)),
                     sigma=1 / err)[0]

    ex = np.linspace(min(data), rng, 400)
    gauss_fit = gauss(ex, gauss_param[0], gauss_param[1], gauss_param[2])

    width = abs(gauss_param[2] * 2 * np.sqrt(2 * np.log(2)))
    width_locs = gauss_param[1] + np.array((-width / 2, width / 2))

    if norm == 'peak':
        gauss_fit = gauss(ex, gauss_param[0], gauss_param[1],
                          gauss_param[2]) / max(bin_e)
        err = bin_e / max(bin_e) * np.sqrt(1 / bin_e + 1 / np.max(bin_e))
        bin_e = bin_e / max(bin_e)
    if norm is not None and not isinstance(norm, str):
        bin_e = bin_e / norm
        err = err / norm
        gauss_fit = gauss(ex, gauss_param[0], gauss_param[1],
                          gauss_param[2]) / norm
    if plot == True:
        print('\nFit parameters: ' + title)
        print('[       a                x0              sigma     ]')
        print(gauss_param)
        print('\nthroughput(%): ' + str(len(data) / n_tot))
        print('E: ' + str(gauss_param[1]))
        print('FWHM: ' + str(width))
        print('dE/E: ' + str(width / gauss_param[1]))
        print('skewness: ' + str(stats.skew(data)))
        line = plt.plot(ex, gauss_fit, alpha=.5, label=label)[0]
        plt.errorbar(bin_mid, bin_e, err, fmt='.', color=line.get_color())
        plt.axvline(width_locs[0], color=line.get_color())
        plt.axvline(width_locs[1], color=line.get_color())
        plt.ylabel('normalized count rate', fontsize=14)
        plt.xlabel('Accepted Entrance Energy (eV)', fontsize=14)
        plt.title(title)
        plt.show()
    return gauss_param, width / gauss_param[1]
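Here gauss takes (a, x0, sigma), and the reported width is the full width at half maximum, FWHM = 2*sqrt(2 ln 2)*sigma ≈ 2.355*sigma. A sketch of the assumed model:

import numpy as np

def gauss(x, a, x0, sigma):
    # amplitude a, centre x0, standard deviation sigma
    return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))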
Example #13
def extrapolate_correlation(energies, basis):
    """
    Extrapolate the correlation energy to the CBS limit.
    Args: energies and basis are same length tuple-like, with the former
    corresponding to the correlation (post-SCF) energy and basis the cardinal
    values
    Returns: the extrapolated energy, and the results of the fit.
    """
    popt, pcov = cf(linear, basis, energies, p0=[1., np.min(energies)])
    return popt[1], popt
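The helper named linear is not shown. For correlation energies the customary two-point form is E(X) = E_CBS + A*X**-3 in the cardinal number X, which matches the p0 used above and the returned popt[1]; a hedged sketch:

import numpy as np

def linear(X, A, E_cbs):
    # assumed X**-3 correlation extrapolation: "linear" in the variable X**-3
    return E_cbs + A * np.power(np.asarray(X, dtype=float), -3.0)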
Example #14
def extrapolate_SCF(energies, basis):
    # Function for extrapolating the SCF contribution
    # to the CBS limit.
    # Takes a list of SCF energies in order
    # of T,Q,5-zeta quality, and returns the CBS energy.
    popt, pcov = cf(exponential,
                    basis,
                    energies,
                    p0=[1., 1., np.min(energies)])
    return popt[2], popt
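exponential is likewise assumed to follow the usual SCF extrapolation form E(X) = E_CBS + A*exp(-B*X), with the CBS value as the last parameter (hence returning popt[2]):

import numpy as np

def exponential(X, A, B, E_cbs):
    # assumed exponential SCF extrapolation form
    return E_cbs + A * np.exp(-B * np.asarray(X, dtype=float))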
Example #15
def domass(Y, X, func = fit_log, p0 = [1, 2, 8], lim = False, loglim = False, sigma = True, absig = False, \
    abund = False, retdata = False, nonzeroy = True, ranzero = 0):
    '''Take in X(delta) & Y(loghalo) field and fit with function 'func' and starting parameter 'p0'
    '''
    xdata = X.flatten()
    ydata = Y.flatten()
    if nonzeroy:
        pos = numpy.where(ydata > 0)[0]
        if ranzero:
            posz = numpy.where(ydata == 0)[0]
            posz = numpy.random.permutation(posz)
            pos = numpy.concatenate((pos, posz[:int(ranzero * Y.size / 100)]))
        ydata = ydata[pos]
        xdata = xdata[pos]
    if abund:
        #xdata = xdata[xdata > 0].flatten()
        #ydata = ydata[ydata > 0].flatten()
        xdata = numpy.sort(xdata)[::-1]
        ydata = numpy.sort(ydata)[::-1]

    if lim:
        pos = numpy.where(ydata > (lim))[0]
        ydata = ydata[pos]
        xdata = xdata[pos]
    if loglim:
        pos = numpy.where(ydata > log10(lim))[0]
        ydata = ydata[pos]
        xdata = xdata[pos]

    if sigma:
        sigmaval = ydata.copy()
        sigmaval[sigmaval == 0] = 1
        poptall, dummy = cf(func, xdata, ydata, p0 = p0, sigma= 1/sigmaval, \
                            absolute_sigma= absig)
    else:
        poptall, dummy = cf(func, xdata, ydata, p0=p0)

    sigmass = func(X, *poptall)
    if retdata:
        return sigmass, poptall, xdata, ydata
    else:
        return sigmass, poptall
Example #16
def getfit(fn,
           guess,
           yfnm='align.csv',
           xfnm='train_data.csv',
           ykey='Align',
           xkeys=['df2', 'df3'],
           err=False,
           bound=[]):
    x, y, z = getxyz(yfnm=yfnm, xfnm=xfnm, ykey=ykey, xkeys=xkeys, err=err)
    x = x.values
    y = y.values
    z = z.values
    xy = np.row_stack([x, y])

    if bound:
        fit = cf(fn, xy, z, p0=guess, bounds=bound)
    else:
        fit = cf(fn, xy, z, p0=guess)

    return (fit[0])
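Because the two predictors are stacked into a single 2xN array, the model fn has to unpack the rows itself. A minimal sketch of such a model (the plane form and guess are illustrative):

import numpy as np

def plane(xy, a, b, c):
    # xy is the 2xN stack built above: xy[0] holds the first predictor, xy[1] the second
    return a * xy[0] + b * xy[1] + c

# illustrative call; file and column names follow the defaults of getfit()
# params = getfit(plane, guess=[1.0, 1.0, 0.0])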
Example #17
    def _min_fit(self, Id, V):
        """
        Calculates the best linear fit through the Id_saturation regime by
        iterating through several potential peaks in the second derivative
        
        """
        _residuals = np.array([])
        _fits = np.array([0, 0])

        # splines needs to be ascending
        if V[2] < V[1]:
            V = np.flip(V)
            Id = np.flip(Id)

        self.quadrant()

        if self.quad == 'I':  # top right

            Id = np.flip(Id)
            V = np.flip(-V)

        mx_d2 = self._find_peak(Id * 1000,
                                V)  # *1000 improves numerical spline accuracy

        # sometimes for very small currents run into numerical issues
        if not mx_d2:
            mx_d2 = self._find_peak(Id * 1000, V, width=15)

        # for each peak found, fits a line. Uses that to determine Vt, then residual up to that found Vt
        for m in mx_d2:
            # Id = Id - np.min(Id) # 0-offset

            fit, _ = cf(self.line_f,
                        V[:m],
                        Id[:m],
                        bounds=([-np.inf, -np.inf], [0, np.inf]))

            v_x = np.searchsorted(
                V, -fit[1] /
                fit[0])  # finds the Vt from this fit to determine residual
            _res = np.sum(
                np.array((Id[:v_x] - self.line_f(V[:v_x], fit[0], fit[1]))**2))
            _fits = np.vstack((_fits, fit))
            _residuals = np.append(_residuals, _res)

        _fits = _fits[1:, :]
        fit = _fits[np.argmin(_residuals), :]

        if self.quad == 'I':
            fit[0] *= -1

        return fit
Example #18
def fitBrightnessSDev(img, m, b, offset, starX, starY, starR, gX, gY, gR, hotX, hotY, trailSize):
    parDist,perpDist = GT.getDistancesHorizontal(m, b, img.shape[1], img.shape[0], offset)
    parDistF,perpDistF,imgF = getTrailBand(parDist, perpDist, img, trailSize)
    x,y = GT.getXY(parDistF,perpDistF,m,b,offset)

    mask = getValueMask(img, x, y, starX, starY, starR, gX, gY, gR, hotX, hotY, 1)
    par = parDistF[mask]
    perp = perpDistF[mask]
    band = imgF[mask]

    b1, b2, b3, b4, b5, sDev = cf(GT.gaussian2, [par, perp], band, bounds = ([-math.inf,-math.inf,-math.inf,-math.inf,-math.inf,0],[math.inf,math.inf,math.inf,math.inf,math.inf,trailSize]))[0]

    return [np.poly1d([b1, b2, b3, b4, b5]), sDev]
Example #19
    def FitToDoubleGaussian(self):
        """
        Fits the projected spectrum with a double gaussian
        """
        if self.ok == 1:
            # Ok if we managed to do a guess on the fit parameters
            self.x = np.arange(0, len(self.wf))
            try:
                self.fitresults, self.fitcov = cf(UXSDataPreProcessing.DoubleGaussianFunction, self.x + self.rangelim[0], self.wf, self.initparams)
                # widths always positive
                self.fitresults[2::3] = abs(self.fitresults[2::3])
            except:
                warnings.warn_explicit('Fit failed', UserWarning, 'UXS', 0)
Example #20
def fit(x, y, error):
    '''
    Fits the data you give to it to the function you have previously defined
    Arguments:
        x, data fos the x axis
        y, data for the y axis
        error, relative error for the variable in the y axis
    Return:
        popt, slope of the fit
        uncertainty for the slope
    '''
    popt, pcov = cf(function, x, y, sigma=error)
    return popt, np.sqrt(np.diag(pcov))
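A minimal usage sketch with synthetic data, assuming function is the module's straight-line model:

import numpy as np

def function(x, m, b):
    # example model; the module defines its own `function` elsewhere
    return m * x + b

x = np.linspace(0, 10, 25)
y = 3.0 * x + 1.0 + np.random.normal(0, 0.2, x.size)
popt, perr = fit(x, y, 0.2 * np.ones_like(y))
print(popt, perr)   # best-fit slope/offset and their 1-sigma uncertainties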
Example #21
File: grid_search.py  Project: Dyn0402/Misc
def main():
    streets_away = [0]
    streets_to_check = [0]
    check_time = [0]
    walk_time = [0]
    lines = 0
    drive_time_per_street = 15  # s
    walk_time_per_street = 90  # s
    for i in range(7):
        print(
            f'{streets_away[-1]} streets away, {streets_to_check[-1]} streets to check, '
            f'{streets_to_check[-1] * drive_time_per_street}s check time')
        streets_to_check.append(streets_to_check[-1] + 4 * new_lines(lines))
        check_time.append(streets_to_check[-1] * drive_time_per_street / 2 /
                          60)  # in minutes
        streets_away.append(streets_away[-1] + 1)
        walk_time.append(streets_away[-1] * walk_time_per_street /
                         60)  # in minutes
        lines += 2
    popt, pcov = cf(poly_2, streets_away, streets_to_check)
    print(popt)
    popt, pcov = cf(poly_2, streets_away, check_time)
    print(popt)
    fig, ax1 = plt.subplots()
    ax1.plot(streets_away[1:], check_time[1:], 'bo')
    x_plot = np.linspace(1, 7, 1000)
    ax1.plot(x_plot,
             poly_2(x_plot, *popt),
             'r',
             label='Average Time to Find Car')
    ax1.set_xlabel('Distance of Car from Apartment (Blocks)')
    ax1.set_ylabel('Time to Car (Minutes)')
    ax1.plot(streets_away[1:], walk_time[1:], 'g', label='Time to Walk to Car')
    plt.title('Average Time to Find Car Via Grid Search')
    ax1.grid()
    ax1.legend()
    plt.show()
    print('donzo')
Example #22
def fit(data, guess, index):
    params, cov = cf(function,
                     data[1, :],
                     data[0, :],
                     sigma=0.01 * np.ones(len(data[1, :])),
                     p0=guess,
                     maxfev=10**5)

    residuals = data[0, :] - function(data[1, :], *params)

    global Params
    Params[index] = list(params[0:2])

    return residuals
Example #23
def stellar_relation():
    x1 = 10**15 * 1.6
    x2 = 10**12 * 4

    y1 = x1 * 1.5 * 10**-3
    y2 = x2 * 3.2 * 10**-2
    line1, dummy = cf(line, numpy.array([log10(x1), log10(x2)]),
                      numpy.array([log10(y1), log10(y2)]))

    y1 = x1 * 3.5 * 10**-4
    y2 = x2 * 2.2 * 10**-2
    line2, dummy = cf(line, numpy.array([log10(x1), log10(x2)]),
                      numpy.array([log10(y1), log10(y2)]))

    #mean line
    m = (line1[0] + line2[0]) * 0.5
    c = (line1[1] + line2[1]) * 0.5

    xval = numpy.linspace(log10(x2), log10(x1))
    xline = line(xval, m, c)
    stellar_sigma = interpolate(
        xline, 0.01 + (line(xval, line1[0], line1[1]) - xline))
    return stellar_sigma, m, c
Example #24
def fit(filename, debye_T, approx=False, dims=False):
    header, data = read(filename)
    max_T = 15
    data = data[data[:, 0] < max_T]
    if not approx:
        if dims:
            popt, pcov = cf(debye_ndim, data[:,0], data[:, 1], bounds=(([55, 0, 1/2],[65,np.inf,4])))
        else:
            popt, pcov = cf(debye, data[:,0], data[:, 1], bounds=(([55, 0],[65,np.inf])))
    else:
        popt, pcov = cf(low_T_approx, data[:,0], data[:, 1], p0=[150, 10E-3])
    fig, ax = plt.subplots()
    if not approx:
        if dims:
            ax.plot(data[:,0], debye_ndim(data[:, 0], *popt),label=r"$T_D$" + f"={popt[0]:.1f} K\nA={popt[1]:.1e}\nn={popt[2]:.1f}")
        else:
            ax.plot(data[:,0], debye(data[:, 0], *popt),label=r"$T_D$" + f"={popt[0]:.1f} K\nA={popt[1]:.1e}")
    else:
        ax.plot(data[:,0], low_T_approx(data[:, 0], *popt),label=f"A={popt[0]:.3e}; B={popt[1]:.3e}")
    ax.plot(data[:,0], data[:,1], label="raw")
    ax.legend()
    plt.savefig(P(filename).parent.joinpath('fit_to_0T.png'),dpi=300) 
    # ax.set_yscale('log')
    figg, axx = plt.subplots()
    for field in [0, 1, 3, 6, 9]:
        _, data = read(f"/Users/Brad/Library/Containers/com.eltima.cloudmounter.mas/Data/.CMVolumes/Brad Price/Research/Data/2021/04/1/data_{field}T.dat")
        data = data[data[:, 0] < max_T]
        if not approx:
            if dims:
                plt.plot(data[:, 0], data[:, 1] - debye_ndim(data[:, 0], *popt), label=f"{field} T")
            else:
                plt.plot(data[:, 0], data[:, 1] - debye(data[:, 0], *popt), label=f"{field} T")
        else:
            plt.plot(data[:, 0], data[:, 1] - low_T_approx(data[:, 0], *popt), label=f"{field} T")
    axx.legend()
    plt.savefig(P(filename).parent.joinpath('all_fields_subtract_0T_fit.png'),dpi=300) 
    plt.show()
Example #25
def fit(data, guess, neg, address):
    params, cov = cf(linear,
                     data[1, :],
                     data[0, :],
                     sigma=0.01 * np.ones(len(data[1, :])),
                     p0=guess,
                     maxfev=10**5)

    logfieldfit = np.linspace(min(data[1, :]), max(data[1, :]),
                              len(data[1, :]) * 10)
    logrfit = linear(logfieldfit, *params)

    plt.scatter(data[1, :], data[0, :], marker='.', label="Measured Data")
    plt.plot(logfieldfit,
             logrfit,
             marker="",
             linestyle="-",
             linewidth=1,
             color="r",
             label="Initial Guess")
    plt.xlabel('Log(B-Field)')
    plt.ylabel('Log(Rxx)')
    if address in [-2., -1., -0.5, 0.5, 1., 2., -10.]:
        plt.title("Power {} Linearity Fitted Guess".format(address))
    elif address == 'LogLogThickness':
        plt.title('Log-Log Fitted Guess of Slope versus Thickness')
        print('\nSaving plot 2 for Slope vs. Thickness')
    else:
        plt.title(address[90:99] + " Rxx Log-Log Fitted Guess")
        print('\nSaving plot 2 for ' + address[90:99])
    plt.legend(loc='best', numpoints=1)

    if neg:
        address1 = address[:90] + "Graphs/" + address[
            90:99] + "_LogLog_Fitted_Neg" + ".png"
    elif address in [-2., -1., -0.5, 0.5, 1., 2., -10.]:
        address1 = "C:/Users/ryank/Desktop/Personal Files/Github/PythonCodes/EP3/Mobility and Carrier Density/Graphs/" \
                   "/LogLogs/Thickness/GraphFit{}.png".format(address)
    elif address == 'LogLogThickness':
        address1 = "C:/Users/ryank/Desktop/Personal Files/Github/PythonCodes/EP3/Mobility and Carrier Density/Graphs/" \
                   "LogLogThicknessFitted.png"
    else:
        address1 = address[:90] + "Graphs/LogLogs/" + address[
            90:99] + "_LogLog_Fitted_Pos" + ".png"
    plt.savefig(address1)
    plt.clf()

    return residual(data, params, neg, address)
Example #26
def curve_fit(fit_eqn, X, Y, p0=None, sigma=None, absolute_sigma=False, plot=False, on_figs=None, **kw):
    """
    Fit the two equal length arrays to a certain function Y = f(X).

    Parameters
    ----------
    X: array
    Y: array
    plot: bool
        plot figures or not. Default to False.
    on_figs: list/int
        the current figure numbers to plot to, default to new figures

    Returns
    -------
    a dict, containing
        'params': fitting parameters
        'r_squared': value for evaluating error
        'fitted_data': a dict that has 2D array of fitted data
            easily to Pandas DataFrame by pd.DataFrame(**returned_dict['fitted_data'])
        'ax': the axes reference, if plot == True
    """
    X = np.array(X)
    Y = np.array(Y)
    popt, pcov = cf(fit_eqn, X, Y, p0, sigma, absolute_sigma, **kw)
    X_fit = np.linspace(sorted(X)[0], sorted(X)[-1], 1000)
    Y_fit = fit_eqn(X_fit, *popt)
    Y_fit_eqlen = fit_eqn(X, *popt)
    r_squared = get_r_squared(Y, Y_fit_eqlen)
    data = np.column_stack((X_fit, Y_fit))
    col_names = ['X_fit', 'Y_fit']

    return_dict = {'params': popt, 'r_squared': r_squared,
        'fitted_data': {'columns': col_names, 'data': data}}

    if plot:
        initiate_figs(on_figs)
        plt.plot(X, Y, 'o')
        plt.plot(X_fit, Y_fit, '-')
        axes = {'ax': plt.gca()}
        plt.xlabel('X')
        plt.ylabel('Y')
        plt.tight_layout()
        return_dict.update(axes)

    return return_dict
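A short usage sketch of the wrapper with synthetic data, assuming the module's get_r_squared helper is importable alongside it:

import numpy as np

def expdecay(x, a, k, c):
    # simple test model; any callable with the same call shape works
    return a * np.exp(-k * x) + c

x = np.linspace(0, 5, 60)
y = expdecay(x, 2.0, 1.3, 0.4) + np.random.normal(0, 0.03, x.size)
result = curve_fit(expdecay, x, y, p0=[1.0, 1.0, 0.0])
print(result['params'], result['r_squared'])
# result['fitted_data'] unpacks straight into pd.DataFrame(**result['fitted_data'])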
Example #27
    def updateFit(self):
        data = np.array(self.settings["saveData"])
        fitX = data[:, 0]
        fitY = data[:, 1]
        fitY = fitY / fitY.max()

        self.curveData.setData(fitX, fitY)
        try:
            p, pcov = cf(minir, fitX, fitY, p0=[22, 5, 0.1])
        except TypeError:
            return

        angles = np.linspace(0, 90, 500)
        fit = minir(angles, *p)
        self.curveFit.setData(angles, fit)

        self.textFit.setText("A={:.3f}, mu=\n {:.3f}, c={:.3f}".format(*p),
                             color=(0, 0, 0))
Example #28
def gk_plot(name, winkel, gk, funktion, korr, fitgrenzen):
    params, errors = cf(funktion,
                        np.cos(inrad(winkel[fitgrenzen[0]:fitgrenzen[1]]))**2,
                        noms(gk[fitgrenzen[0]:fitgrenzen[1]]))
    g_plot = np.linspace(0, 1)
    plt.errorbar(np.cos(inrad(winkel))**2,
                 noms(gk) * 1e10,
                 xerr=korr(winkel),
                 yerr=stds(gk) * 1e10,
                 fmt='x',
                 label=r'Messwert')
    plt.plot(g_plot, funktion(g_plot, *params) * 1e10, label=r'Fit')
    plt.xlabel(r'$\cos^2\left(\theta\right)$')
    plt.ylabel(r'$a \:/\: \si{\angstrom}$')
    plt.legend(loc='best')
    plt.tight_layout()
    plt.savefig("build/plot_" + name + ".pdf")
    plt.close()
    return (unp.uarray(params[1], np.sqrt(np.diag(errors)[1])))
Example #29
def fit(data, guess, neg, address):
    params, cov = cf(parabolicfunc, data[1, :], data[0, :], sigma=0.01 * np.ones(len(data[1, :])),
                     p0=guess, maxfev=10 ** 5)
    chi2 = chisquared(params, data[1, :], data[0, :])
    dof = len(data[1, :]) - len(params)

    cov = cov * dof / chi2
    paramserr = np.sqrt(np.diag(cov))

    fieldfit = np.linspace(min(data[1, :]), max(data[1, :]), len(data[1, :]) * 10)
    rfit = parabolicfunc(fieldfit, *params)

    print('\nShowing plot 2 for ' + address[90:98])
    plt.scatter(data[1, :], data[0, :], marker='.', label="Measured Data")
    plt.plot(fieldfit, rfit, marker="", linestyle="-", linewidth=1, color="r", label="Initial Guess")
    plt.xlabel('B-Field')
    plt.ylabel('Rxx')
    plt.title(address[90:98] + " Rxx Linearity Fitted Guess")
    plt.legend(loc='best', numpoints=1)
    plt.show()
    plt.clf()

    return residual(data, params, neg, address)
Example #30
def fit_CABS_powerlaw(sample_epochs_index, all_index,
                      sparse_data_averages_nonan, sparse_data_std_nonan,
                      scale):
    def plfunc(x, a, s, b):
        x_ = np.ones(x.size) + s * x
        return a * np.power(x_, -b)

    #end
    import warnings
    warnings.simplefilter("ignore")

    pfit, _ = cf(plfunc,
                 sample_epochs_index,
                 sparse_data_averages_nonan,
                 p0=[1., 1., 1.],
                 maxfev=2000)
    residuals = sparse_data_averages_nonan - plfunc(sample_epochs_index, *pfit)
    squared_res = np.sum(np.power(residuals, 2))
    sum_tot_squares = np.sum(
        (sparse_data_averages_nonan - np.mean(sparse_data_averages_nonan))**2)
    r_squared = 1 - (squared_res / sum_tot_squares)

    all_data = plfunc(all_index, *pfit)
    scatterplot(all_index,
                all_data,
                sample_epochs_index,
                sparse_data_averages_nonan,
                sparse_data_std_nonan,
                pfit,
                scale,
                tag='CABS',
                rsquare=r_squared)

    return pfit


#end
Example #31
def estimateCaDecayKinetics(time, signals, p0 = None, thr = 2, preTime = 10, 
                            postTime = 40):
    """
    Given a time vector and Ca signal matrix of shape = (C,T), where
        C = # of cells, and T = # of time points (must match length of time
        vector), returns output of shape = (nSamples, 2), where the 1st and
        2nd columns contain the fast and slow decay tau estimates after
        fitting Ca2+ signals with  double exponential
    Parameters:
    time - Time vector of length T
    signals - Ca signals array of shape (nSamples,T)
    p0 - Array-like, (tau_fast, tau_slow, wt_fast), where tau_fast is the 
        fast decay time constant (in sec), tau_slow is the slow decay
        constant, and wt_fast is the weight of the fast exponential (<1)
        for fitting the signal as a weighted sum of the fast and slow
        exponential. Default is None, in which case fitting optimization
        begins without initial estimate
    thr - Threshold for peak detection in Ca signals, in units of zscore
    preTime - Pre-peak time length of the Ca signals to include for segmentation
    postTime - Post-peak "           "          "               "
    Avinash Pujala, JRC, 2017
        
    """
    import numpy as np
    from scipy.optimize import curve_fit as cf
    import apCode.SignalProcessingTools as spt
    import apCode.AnalyzeEphysData as aed
    
    def doubleExp(time, tau1, tau2, wt1):    
        wt2 = 1-wt1
        time = time - time[0]
        e = wt1*np.exp(-time/tau1) + wt2*np.exp(-time/tau2)
        return e
    
    def listToArray(x):
        lens = [len(item) for item in x]
        lenOfLens = len(lens)       
        lens = lens[np.min((lenOfLens-1,2))]
        a = np.zeros((len(x),lens))
        delInds = []
        for itemNum,item in enumerate(x):
            if len(item) == lens:
                a[itemNum,:] = item
            else:
                delInds.append(itemNum)
        a = np.delete(a,delInds,axis = 0)
        return a, delInds
    if np.ndim(signals)==1:
        signals = np.reshape(signals,(1,len(signals)))
    dt = time[2]-time[1]
    pts_post = np.round(postTime/dt).astype(int)
    pts_pre = np.round(preTime/dt).astype(int) 
    x_norm = spt.zscore(signals,axis = 1)
    x_seg, params, x_seg_fit = [],[],[]
    nSamples = np.shape(signals)[0]
    excludedSamples = np.zeros((nSamples,1))
    for nSample in np.arange(nSamples):
        inds_pk = spt.findPeaks(x_norm[nSample,:],thr = thr,ampType = 'rel')[0]
        if len(inds_pk)==0:
            print('Peak detection failed for sample #', nSample, '. Try lowering threshold')
            excludedSamples[nSample] = 1
        else:
            blah = aed.SegmentDataByEvents(signals[nSample,:],inds_pk,pts_pre,pts_post,axis = 0)
            blah = listToArray(blah)[0]          
            blah = np.mean(blah,axis=0)
            x_seg.append(blah) 
            ind_max = np.where(blah == np.max(blah))[0][0]
            y = spt.standardize(blah[ind_max:])
            t = np.arange(len(y))*dt           
            popt,pcov = cf(doubleExp,t,y,p0 = [10,20, 0.5], bounds = (0,20))
            if popt[0] > popt[1]:
                # ensure the fast tau comes first: swap the two time constants
                # and complement the fast/slow weight accordingly
                popt[0], popt[1] = popt[1], popt[0]
                popt[-1] = 1 - popt[-1]
            params.append(popt)
            foo = doubleExp(t,popt[0],popt[1],popt[2])
            x_seg_fit.append(foo)
    excludedSamples = np.where(excludedSamples)[0]
    includedSamples = np.setdiff1d(np.arange(nSamples),excludedSamples)
    x_seg,delInds = listToArray(x_seg)
    params = np.delete(np.array(params),delInds,axis = 0)
    delInds = includedSamples[delInds]
    if len(delInds)>0:
        print('Sample #', delInds, 'excluded for short segment length. Consider decreasing pre-peak time length')
    excludedSamples = np.union1d(delInds,excludedSamples)
    
    x_seg = spt.standardize(np.array(x_seg),axis = 1)    
    x_seg_fit = np.array(listToArray(x_seg_fit)[0])
    out = {'raw': x_seg,'fit': x_seg_fit,'params': np.array(params),'excludedSamples': excludedSamples}
    return out
Example #32
interpArduinoTime = interp(dylosTime, arduinoTime, arduinoTime)#arduinoData['1um'])
interpArduinoP1Ratio = interp(dylosTime, arduinoTime, rawRollingArduinoP1Ratio)  
interpArduinoP2Ratio = interp(dylosTime, arduinoTime, rawRollingArduinoP2Ratio)
# pd.rolling_mean was removed from pandas; the Series.rolling API is the modern equivalent
rollingArduinoP1Ratio = pd.Series(interpArduinoP1Ratio).rolling(20).mean().to_numpy()
rollingArduinoP2Ratio = pd.Series(interpArduinoP2Ratio).rolling(20).mean().to_numpy()

### fitting and plotting

xData = interpArduinoP1Ratio
yData = dylosData['1um']

# fitting data

# linear, assumes intercept is 0
fitLineX = np.linspace(min(xData), max(xData), 1000)
linearpopt, linearpcov = cf(linearfunc, xData, yData)
print('linear: ', linearpopt)
fitLineYlinear = linearfunc(fitLineX, linearpopt[0])

# 2nd degree
fitLineX = np.linspace(min(xData), max(xData), 1000)
deg2popt, deg2pcov = cf(deg2func, xData, yData)
print('2nd degree: ', deg2popt)
fitLineY2nd = deg2func(fitLineX, deg2popt[0], deg2popt[1], deg2popt[2])

# plotting
plt.scatter(xData, yData)
plt.plot(fitLineX, fitLineYlinear, linewidth=3, label='linear')
plt.plot(fitLineX, fitLineY2nd, linewidth=3, label='2nd degree')
plt.legend()
plt.xlabel('DSM501A P1 ratio')
Example #33
File: calculations.py  Project: bixel/FP14
with open('build/delta_f.tex', 'w') as f:
    f.write(r'\SI{{{:L}}}{{\kilo\hertz}}'.format(delta_f / 1e3))

with open('build/m.tex', 'w') as f:
    f.write(r'\num{{{:L}}}'.format(m))

# Part e)
# Phase-voltage dependence
T, U = np.genfromtxt('am-demodulation-e.txt', unpack=True)


# fit
def func(x, a, b, c, d):
    """ Simple cosine function
    """
    return a * np.cos(b * x + c) + d


popt, pocov = cf(func, T, U, p0=[150, 0.01, -1, 0])

plt.plot(T, U, 'r+', label='Messpunkte')
# plt.errorbar(T, U, yerr=0.1, label='Messpunkte', fmt='r+')
ts = np.linspace(np.amin(T), np.amax(T), 200)
plt.plot(ts, func(ts, *popt), label='Fit')
plt.xlabel(r'$T/\mathrm{ns}$')
plt.xlim(-5, 95)
plt.ylabel(r'$U/\mathrm{mV}$')
plt.legend(loc='best')
plt.savefig('build/demodulation-cosinus.pdf')
plt.clf()
Example #34
def eos_fit(V, Y, eos='birch_murnaghan', B0_prime=None, plot=False, on_figs=None):
    """
    Fit the volume and total energy, or pressure to the Birch-Murnaghan equation of state.

    Note: bulk modulus B0 will be returned in the unit of GPa.

    Parameters
    ----------
    V: array
        volume (Angstrom^3)
    Y: array
        total energy (eV), or pressure (GPa) if eos has '_p'
    eos: string
        chosen from ['birch_murnaghan', 'vinet']. Default to 'birch_murnaghan'
    B0_prime: float
        Keep B0_prime fixed to a given value or not. Default to None
    plot: bool
        whether to plot the data, default to False.
    on_figs: list/int
        the current figure numbers to plot to, default to new figures

    Returns
    -------
    a dict, containing
        'params': fitting parameters
        'r_squared': value for evaluating error
        'fitted_data': a dict that has 2D array of fitted data
            easily to Pandas DataFrame by pd.DataFrame(**returned_dict['fitted_data'])
    """
    V = np.array(V)
    Y = np.array(Y)

    if not B0_prime:
        if '_p' not in eos:
            initial_parameters = [V.mean(), 2.5, 4, Y.mean()]
            fit_eqn = eval(eos)
        else:
            initial_parameters = [V.mean(), 2.5, 4]
            fit_eqn = eval(eos)
    else:
        if '_p' not in eos:
            initial_parameters = [V.mean(), 2.5, Y.mean()]
            fit_eqn = fix_B0_prime(eval(eos), B0_prime)
        else:
            initial_parameters = [V.mean(), 2.5]
            fit_eqn = fix_B0_prime(eval(eos), B0_prime)

    popt, pcov = cf(fit_eqn, V, Y, initial_parameters)
    V_fit = np.linspace(sorted(V)[0], sorted(V)[-1], 1000)
    Y_fit = fit_eqn(V_fit, *popt)
    Y_fit_eqlen = fit_eqn(V, *popt)
    r_squared = get_r_squared(Y, Y_fit_eqlen)
    data = np.column_stack((V_fit, Y_fit))
    if '_p' not in eos:
        col_names = ['V_fit', 'E_fit']
    else:
        col_names = ['V_fit', 'p_fit']

    params = {'V0': popt[0], 'B0': popt[1] * 160.2}
    if not B0_prime:
        params['B0_prime'] = popt[2]
        if '_p' not in eos:
            params['E0'] = popt[3]
    else:
        if '_p' not in eos:
            params['E0'] = popt[2]

    return_dict = {'params': params, 'r_squared': r_squared,
                'fitted_data': {'columns': col_names, 'data': data}}

    if plot:
        initiate_figs(on_figs)
        plt.plot(V, Y, 'o')
        plt.plot(V_fit, Y_fit, '-')
        axes = {'ax': plt.gca()}
        plt.xlabel(r'V ($\AA^{3}$)')
        if '_p' not in eos:
            plt.ylabel('E (eV)')
        else:
            plt.ylabel('P (GPa)')
        plt.tight_layout()
        return_dict.update(axes)

    return return_dict
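The eos string is resolved with eval, so the module must define a callable such as birch_murnaghan. With the parameter order implied by initial_parameters ([V0, B0, B0_prime, E0]) and B0 in eV/Å³ (hence the 160.2 conversion to GPa), the standard third-order form would look like this sketch:

import numpy as np

def birch_murnaghan(V, V0, B0, B0_prime, E0):
    # third-order Birch-Murnaghan E(V); B0 in eV/Angstrom^3 (160.2 converts to GPa)
    eta = (V0 / np.asarray(V, dtype=float)) ** (2.0 / 3.0)
    return E0 + 9.0 * V0 * B0 / 16.0 * (
        (eta - 1.0) ** 3 * B0_prime + (eta - 1.0) ** 2 * (6.0 - 4.0 * eta))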
Example #35
        arduino1umData.append(interpArduinoData[each]*2.5)
        arduinoP1ratio.append(interpArduinoRatio[each + mvaperiod/2])
rollingP1ratio = pd.Series(arduinoP1ratio).rolling(20).mean().to_numpy()  # pd.rolling_mean was removed from pandas
for each in range(len(rollingP1ratio)):
    if np.isnan(rollingP1ratio[each]):
        rollingP1ratio[each] = arduinoP1ratio[each]
        print(arduinoP1ratio[each])
P1fit = np.polyfit(rollingP1ratio, dylos1umData, deg=4)
P1corr = np.poly1d(P1fit)
minRatio = min(rollingP1ratio)
maxRatio = max(rollingP1ratio)
fitLineX = np.linspace(minRatio, maxRatio, 1000)
fitLineY = P1corr(fitLineX)
print('4th order:', P1fit)

popt, pcov = cf(func, rollingP1ratio, dylos1umData)
print('3rd order 0-intercept:', popt)
fitLineY2 = func(fitLineX, popt[0], popt[1], popt[2])

popt3, pcov3 = cf(expfunc, rollingP1ratio, dylos1umData)
print('exponential:', popt3)
fitLineY3 = expfunc(fitLineX, popt3[0], popt3[1])

linearpopt, linearpcov = cf(linearfunc, rollingP1ratio, dylos1umData)
print('linear:', linearpopt)
fitLineYlinear = linearfunc(fitLineX, linearpopt[0])

allData = {}
allData['time'] = interpTimes
allData['dylos data'] = dylos1umData
allData['arduino data'] = arduino1umData