def setUp(self):
    # Test fixture: build two kafe fits sharing the x data (U) and wrap
    # them in a multifit parameter space for the tests in this class.
    # NOTE(review): the LaTeX parameter_names below do not match the
    # actual Python parameter names of the decorated functions
    # (e.g. 'p_2', 'p_1', 'p_0' vs. p5, p4, p3) -- confirm whether the
    # mismatch is intentional for these tests.
    @LaTeX(name='I',
           parameter_names=(r'R_0', r'\alpha_T', 'p_2', 'p_1', 'p_0'),
           expression=r'U/ \left( R_0 (1 + t \cdot \alpha_T) \right)')
    @FitFunction
    def IUmodel(U, R0=1., alph=0.004, p5=0.5, p4=0.9, p3=19.38):
        # use empirical temperature-dependence T(U) from T-U fit
        T = p5 * U * U + p4 * U + p3
        return U / (R0 * (1. + T * alph))

    @LaTeX(name='T',
           parameter_names=('p_5', 'p_4', 'p_3'),
           expression=r'p_5 U^{2} + p_4 U + p_3')
    @FitFunction
    def quadric(U, p2=0.5, p4=0.9, p0=19.38):
        return p2 * U * U + p4 * U + p0

    # inline measurement data: voltage U, current I,
    # temperature T converted from Kelvin to degrees Celsius
    U = [0.5, 1., 1.5]
    I = [0.5, 0.89, 1.41]
    T = [293.5 - 273.15, 293.8 - 273.15, 295.4 - 273.15]
    # Set first dataset
    kTUdata = kafe.Dataset(data=(U, T))
    # Set second dataset
    kIUdata = kafe.Dataset(data=(U, I))
    Fit1 = kafe.Fit(kTUdata, quadric, quiet=True)
    Fit2 = kafe.Fit(kIUdata, IUmodel, quiet=True)
    self.parameter_space = kafe.multifit._ParameterSpace([Fit1, Fit2])
def build_kafe_fit_and_plot_object(
        kafedataset, fitfunktion=kafe.function_library.linear_2par):
    """Fit *fitfunktion* to *kafedataset* and draw the result.

    Returns a two-element list ``[fit, plot]`` holding the fitted
    ``kafe.Fit`` and the fully rendered ``kafe.Plot``.
    """
    fitted = kafe.Fit(kafedataset, fitfunktion)
    fitted.do_fit()
    rendered = kafe.Plot(fitted)
    rendered.plot_all()
    return [fitted, rendered]
def test_W_boson_mass_averaging_with_y_cov_mat():
    # Regression test: average eight W-boson mass measurements using a
    # full y covariance matrix (off-diagonal entries model the correlated
    # systematic uncertainties shared between experiments) and a
    # constant one-parameter model.
    W_mass_values = np.array(
        [80.429, 80.339, 80.217, 80.449, 80.477, 80.310, 80.324, 80.353])
    W_mass_cov_mat = np.matrix([[
        0.003466, 0.000441, 0.000441, 0.000441, 0.000625, 0.000625, 0.000625,
        0.000625
    ], [
        0.000441, 0.00577, 0.000441, 0.000441, 0.000625, 0.000625, 0.000625,
        0.000625
    ], [
        0.000441, 0.000441, 0.005065, 0.000441, 0.000625, 0.000625, 0.000625,
        0.000625
    ], [
        0.000441, 0.000441, 0.000441, 0.003805, 0.000625, 0.000625, 0.000625,
        0.000625
    ], [
        0.000625, 0.000625, 0.000625, 0.000625, 0.006697, 0.001936, 0.001936,
        0.001936
    ], [
        0.000625, 0.000625, 0.000625, 0.000625, 0.001936, 0.010217, 0.001936,
        0.001936
    ], [
        0.000625, 0.000625, 0.000625, 0.000625, 0.001936, 0.001936, 0.00802,
        0.001936
    ], [
        0.000625, 0.000625, 0.000625, 0.000625, 0.001936, 0.001936, 0.001936,
        0.00656
    ]])
    # reference values from a previous (trusted) kafe run
    ref_pval = (80.3743268547, )
    ref_perr = (0.03513045624, )
    # x values are just the measurement indices; only y carries errors
    _dataset = kafe.Dataset(data=(range(len(W_mass_values)), W_mass_values))
    _dataset.add_error_source('y', 'matrix', W_mass_cov_mat)
    from kafe.function_library import constant_1par
    _fit = kafe.Fit(_dataset, constant_1par)
    _fit.do_fit()
    _pval, _perr = _fit.get_parameter_values(), _fit.get_parameter_errors()
    assert np.allclose(_pval, ref_pval)
    assert np.allclose(_perr, ref_perr)
def test_W_boson_mass_averaging_without_cov_mats():
    """Average eight W-boson masses with uncorrelated errors only."""
    masses = np.array(
        [80.429, 80.339, 80.217, 80.449, 80.477, 80.310, 80.324, 80.353])
    mass_errors = np.array([
        0.05887274, 0.07596051, 0.07116882, 0.06168468, 0.0818352,
        0.10107918, 0.08955445, 0.08099383
    ])
    # reference values from a previous (trusted) kafe run
    expected_value = (80.3727519701, )
    expected_error = (0.02629397757, )
    # x values are just the measurement indices; only y carries errors
    dataset = kafe.Dataset(data=(range(len(masses)), masses))
    dataset.add_error_source('y', 'simple', mass_errors)
    from kafe.function_library import constant_1par
    fit = kafe.Fit(dataset, constant_1par)
    fit.do_fit()
    values = fit.get_parameter_values()
    errors = fit.get_parameter_errors()
    assert np.allclose(values, expected_value)
    assert np.allclose(errors, expected_error)
def plot_calibration_line_kafe(
        x, y, y_err=0.0, x_err=0.0, directory=False, PdfPages=False,
        suffix='Calibration',
        xlabel=r'$\gamma$-peak position from literature [keV]',
        ylabel=r'$\Delta$VCAL'):
    """Fit a straight line to calibration data with kafe and save the plot.

    Args:
        x, y: calibration data points.
        y_err, x_err: absolute uncertainties on y and x (scalar or array).
        directory: output-path prefix; it is concatenated directly with
            the file name, so it should end with a path separator.
        PdfPages: open matplotlib ``PdfPages`` object; the figure is
            appended to it.
        suffix: tag inserted into the saved PNG file name.
        xlabel, ylabel: axis labels for the plot.

    Fix: the xlabel default is now a raw string -- the original non-raw
    literal contained the invalid escape sequence ``\\g`` (deprecated in
    Python 3); the string value itself is unchanged.
    """
    hdataset = kafe.build_dataset(x,
                                  y,
                                  yabserr=y_err,
                                  xabserr=x_err,
                                  title="Data",
                                  axis_labels=[xlabel, ylabel])
    hfit = kafe.Fit(hdataset, linear_2par)
    hfit.do_fit()
    hplot = kafe.Plot(hfit)
    hplot.plot_all()
    hplot.save(directory + "tdc_calibrated_data_kafe_%s.png" % suffix)
    PdfPages.savefig()
#generate_datasets('dataset1.dat', 'dataset2.dat') # Initialize the Datasets my_datasets = [ kafe.Dataset(title="Example Dataset 1"), kafe.Dataset(title="Example Dataset 2") ] # Load the Datasets from files my_datasets[0].read_from_file(input_file='dataset1.dat') my_datasets[1].read_from_file(input_file='dataset2.dat') # Create the Fits my_fits = [ kafe.Fit(dataset, linear_2par, fit_label="Linear regression " + dataset.data_label[-1]) for dataset in my_datasets ] # Do the Fits for fit in my_fits: fit.do_fit() # Create the plots my_plot = kafe.Plot(my_fits[0], my_fits[1]) # Draw the plots my_plot.plot_all() ###############
############ # Define x-axis data my_x_data = linspace(-3, 3, 20) # twenty evenly-spaced points on # the x axis, from -3 to 3 # Generate y-axis data from model my_y_data = list(map(lambda x: gauss_2par(x, 0, 1), my_x_data)) # Construct the Datasets my_dataset = kafe.Dataset(data=(my_x_data, my_y_data), title="Standard-Normalverteilung") # Fit the model to the data my_fit = kafe.Fit(my_dataset, gauss_2par, fit_label='Standard-Normalverteilung') # Don't call do_fit for this Fit. # Plot the Fit my_plot = kafe.Plot(my_fit, show_legend=True) # Instruct LaTeX to use the EulerVM package (optional, uncomment if needed) #plt.rcParams.update({'text.latex.preamble': ['\\usepackage{eulervm}']}) # Draw the Plots my_plot.plot_all( show_info_for='all', # include every fit in the info box show_data_for=None) # don't show the points, just the curve
############ # Workflow # ############ # Generate the Dataset and store it in a file #generate_dataset('oscillation.dat') # load the experimental data from a file my_dataset = parse_column_data('oscillation.dat', field_order="x,y,xabserr,yabserr", title="Damped Oscillator", axis_labels=['Time $t$', 'Amplitude']) # Create the Fit my_fit = kafe.Fit(my_dataset, damped_oscillator) # fit_label="Linear Regression " + dataset.data_label[-1]) # Set the initial values for the fit: # a_0 tau omega phi my_fit.set_parameters((1., 2., 6., 0.8)) # Do the Fits my_fit.do_fit() # Create the plots my_plot = kafe.Plot(my_fit) # Draw the plots my_plot.plot_all()
# read two-column CSV (skipping one header line); data1 = (time, dF)
hlines, data1 = ppk.readCSV("praezession.csv", nlhead=1)
print(hlines)
time, dF = data1
my_dataset = kafe.Dataset(data=data1,
                          title="Praezession",
                          axis_labels=['Drehfrequenz', 'Umlaufdauer'],
                          axis_units=['$Hz$', '$s$'])
# uncorrelated absolute errors on x and y
my_dataset.add_error_source('x', 'simple', 0.01)
my_dataset.add_error_source('y', 'simple', 0.99)
#my_dataset=kafe.Dataset(data=data1)
my_fits = [kafe.Fit(my_dataset, linear_2par)]
for fit in my_fits:
    fit.do_fit()
my_plot = kafe.Plot(my_fits[0])
my_plot.plot_all()
my_plot.save('kafe_praezession.pdf')
my_plot.show()
#plt.plot(time,dF,'r+',label = "Messung der Daempfung")
#plt.xlabel("t(min)")
#plt.ylabel("Drehfrequenz(Hz)")
#plt.grid()
#plt.show()
# propagated uncertainties for the log-transformed model Y = ln(d^2 * wo)
wE = np.sqrt(w)
dE = 1 * np.ones(d.shape)  # 1mm error for d
Y = np.log(d**2 * wo)
YE = np.sqrt((1 / (wo) * woE)**2 +       # error on count
             (2 * d / (d**2) * dE)**2)   # error on d
if len(sys.argv) <= 2:
    import kafe
    from kafe.function_library import linear_2par
    dataset = kafe.Dataset(data=(d, Y))
    dataset.add_error_source('y', 'simple', YE)  # propagated error on y
    dataset.add_error_source('x', 'simple', dE)  # 1 mm distance error on x
    fit = kafe.Fit(dataset, linear_2par)
    fit.do_fit(quiet=True)
    slope = fit.final_parameter_values[0]
    slopeE = fit.final_parameter_errors[0]
    yoffset = fit.final_parameter_values[1]
    # re-exec this script, passing the fit results via argv so the second
    # invocation takes the 'else' branch below instead of refitting
    os.execv(__file__, [
        'test',
        str(slope),
        str(slopeE),
        str(yoffset),
        'save' if len(sys.argv) == 2 else 'show'
    ])
else:
    slope = float(sys.argv[1])
def kFit(func, x, y, sx, sy,
         p0=None, p0e=None,
         xabscor=None, yabscor=None, xrelcor=None, yrelcor=None,
         title='Daten', axis_labels=['X', 'Y'],
         plot=True, quiet=False):
    r"""
    fit function func with errors on x and y; uses package `kafe`

    Args:
      * func: function to fit
      * x: np-array, independent data
      * y: np-array, dependent data

      the following are single floats or arrays of length of x
        * sx: scalar or np-array, uncertainty(ies) on x
        * sy: scalar or np-array, uncertainty(ies) on y
        * p0: array-like, initial guess of parameters
        * p0e: array-like, initial guess of parameter uncertainties
        * xabscor: absolute, correlated error(s) on x
        * yabscor: absolute, correlated error(s) on y
        * xrelcor: relative, correlated error(s) on x
        * yrelcor: relative, correlated error(s) on y
        * title: string, title of graph
        * axis_labels: list of strings, axis labels x and y
        * plot: flag to switch off graphical output
        * quiet: flag to suppress text and log output

    Returns:
      * np-array of float: parameter values
      * np-array of float: parameter errors
      * np-array: cor correlation matrix
      * float: chi2 \chi-square
    """
    # regression with kafe
    import kafe
    # create a data set ...
    dat = kafe.Dataset(data=(x, y),
                       title=title,
                       axis_labels=axis_labels,
                       basename='kRegression')
    # ... and add all error sources.
    dat.add_error_source('x', 'simple', sx)
    dat.add_error_source('y', 'simple', sy)
    # Use 'is not None' rather than '!= None': the correlated errors may
    # be numpy arrays, for which '!=' is element-wise and its truth value
    # raises an "ambiguous" ValueError.
    if xabscor is not None:
        dat.add_error_source('x', 'simple', xabscor, correlated=True)
    if yabscor is not None:
        dat.add_error_source('y', 'simple', yabscor, correlated=True)
    if xrelcor is not None:
        dat.add_error_source('x', 'simple', xrelcor,
                             relative=True, correlated=True)
    if yrelcor is not None:
        dat.add_error_source('y', 'simple', yrelcor,
                             relative=True, correlated=True)
    # set up and run fit
    fit = kafe.Fit(dat, func)
    if p0 is not None:
        fit.set_parameters(p0, p0e)
    fit.do_fit(quiet=quiet)
    # harvest results
    # par, perr, cov, chi2 = fit.get_results() # for kafe vers. > 1.1.0
    par = np.array(fit.final_parameter_values)
    pare = np.array(fit.final_parameter_errors)
    # normalise the covariance matrix to a correlation matrix
    cor = fit.par_cov_mat / np.outer(pare, pare)
    chi2 = fit.minimizer.get_fit_info('fcn')
    if plot:
        kplot = kafe.Plot(fit)
        kplot.plot_all()
        kplot.show()
    return par, pare, cor, chi2
def plot_tdc_gamma_spectrum_kafe(self,
                                 cluster_hist=False,
                                 p0=None,
                                 scan_parameter_range=False,
                                 cols=[0],
                                 rows=[0],
                                 title=False,
                                 cluster_background=False,
                                 background=True,
                                 PdfPages=False,
                                 Directory=None):
    """Histogram TDC (delta_vcal) pixel data and fit Gaussian peak(s) with kafe.

    Args:
        cluster_hist: cluster data passed to ``au.get_pixel_data``.
        p0: initial parameter guesses; with more than 3 entries the second
            peak (mean=p0[3], sigma=p0[5], scale=p0[1]) is fitted,
            otherwise a single Gaussian (mean=p0[1], sigma=p0[2],
            scale=p0[0]).  NOTE(review): the default ``p0=None`` would
            crash at ``len(p0)``; callers apparently always supply it.
        scan_parameter_range: upper histogram edge taken from its last entry.
        cols, rows: pixel selection forwarded to ``au.get_pixel_data``.
        title: source name used in labels and the output file name.
        PdfPages: open matplotlib PdfPages object; the figure is appended.
        Directory: base output directory for the saved PNG.
    """
    import os
    Delta_Vcal_max = scan_parameter_range[-1]
    Delta_Vcal_min = 0  # scan_parameter_range[0]
    bin_size = 1
    tdc_data = au.get_pixel_data(cluster_hist, cols, rows, test="delta_vcal")
    hist_data, edges = np.histogram(tdc_data,
                                    bins=np.arange(Delta_Vcal_min,
                                                   Delta_Vcal_max, bin_size))
    x, y = edges[:-1], hist_data
    y_err = np.sqrt(y)  # Poisson errors on bin contents
    hdataset = kafe.build_dataset(x,
                                  y,
                                  yabserr=y_err,
                                  title="Data for %s source" % title,
                                  axis_labels=['x', 'y'])
    # error for bins with zero contents is set to 1.
    covmat = hdataset.get_cov_mat("y")
    for i in range(0, len(covmat)):
        if covmat[i, i] == 0.:
            covmat[i, i] = 1.
    hdataset.set_cov_mat('y', covmat)  # write it back
    # Create the Fit instance
    if len(p0) > 3:
        #hfit1 = kafe.Fit(hdataset, gauss, fit_label="Fit of a Gaussian to histogram data 1")
        # hfit1.set_parameters(mean=p0[2], sigma=p0[4], scale=p0[0], no_warning=True)
        hfit2 = kafe.Fit(hdataset,
                         gauss,
                         fit_label="Fit of a Gaussian to histogram data 2")
        hfit2.set_parameters(mean=p0[3],
                             sigma=p0[5],
                             scale=p0[1],
                             no_warning=True)
        # hfit1.do_fit()
        hfit2.do_fit()
        hplot = kafe.Plot(hfit2)
    else:
        hfit = kafe.Fit(hdataset,
                        gauss,
                        fit_label="Fit of a Gaussian to histogram data")
        hfit.set_parameters(mean=p0[1],
                            sigma=p0[2],
                            scale=p0[0],
                            no_warning=True)
        # perform an initial fit with temporary errors (minimal output)
        hfit.call_minimizer(final_fit=True, verbose=False)
        hfit.do_fit()
        hplot = kafe.Plot(hfit)
        hfit.get_parameter_values()
        # re-set errors using model at pre-fit parameter values:
        # sigma_i^2=cov[i, i]=n(x_i)
        #fdata = hfit.fit_function.evaluate(hfit.xdata, hfit.current_parameter_values)
        #np.fill_diagonal(covmat, fdata)
        # hfit.current_cov_mat = covmat  # write back new covariance matrix
        # now do final fit with full output
    hplot.plot_all()
    # Fix: the original concatenated Directory + "\h5_files" + the file
    # name with no separator between folder and file (yielding
    # ".../\h5_filestdc_..."); os.path.join builds a well-formed path on
    # every platform and avoids the deprecated '\h' escape sequence.
    hplot.save(os.path.join(Directory, "h5_files",
                            "tdc_calibrated_data_kafe_%s.png" % title))
    PdfPages.savefig()
# Step 1: fit the relation T(U) using a quadratic model # construct a kafe dataset kData_T_U = kafe.Dataset(data=(U, T - T0), basename='u-t-data', title='Temperature vs. Voltage', axis_labels=['U [V]', 'I [A]']) # declare errors on U and T kData_T_U.add_error_source('x', 'simple', sigU) kData_T_U.add_error_source('y', 'simple', sigT) # construct and do the fit using a quadratic model (parabola) kFit_T_U_empirical = kafe.Fit(kData_T_U, empirical_T_U_model, fit_name='u-t-fit-quadratic') kFit_T_U_empirical.do_fit() # store the fit results in variables for later use quadratic_par_values = kFit_T_U_empirical.get_parameter_values() quadratic_par_errors = kFit_T_U_empirical.get_parameter_errors() quadratic_par_covariance = kFit_T_U_empirical.get_error_matrix() # plot the results (optional) kPlot_T_U_empirical = kafe.Plot(kFit_T_U_empirical) kPlot_T_U_empirical.plot_all() kPlot_T_U_empirical.save("kafe_example11_TU.png") # Step 2: fit the relation I(U) using the empirical model
# load the experimental data from a file my_dataset = parse_column_data('counting_rate.dat', field_order="x,y,yabserr", title="Counting Rate per Angle") ### pre-fit # error for bins with zero contents is set to 1. covmat = my_dataset.get_cov_mat('y') for i in range(0, len(covmat)): if covmat[i, i] == 0.: covmat[i, i] = 1. my_dataset.set_cov_mat('y', covmat) # write it back # Create the Fit my_fit = kafe.Fit(my_dataset, poly4) # fit_label="Linear Regression " + dataset.data_label[-1]) # perform an initial fit with temporary errors (minimal output) my_fit.call_minimizer(final_fit=False, verbose=False) # set errors using model at pre-fit parameter values: sigma_i^2=cov[i,i]=n(x_i) fdata = my_fit.fit_function.evaluate(my_fit.xdata, my_fit.current_parameter_values) np.fill_diagonal(covmat, fdata) my_fit.current_cov_mat = covmat # use modified covariance matrix # ### end pre-fit # Do the Fit
# three datasets: frequency f versus magnetic field B
dataset1 = kafe.Dataset(data=(B1, f1))
dataset2 = kafe.Dataset(data=(B2, f2))
dataset3 = kafe.Dataset(data=(B3, f3))
#error sources (same uncorrelated B and f uncertainties for all three)
dataset1.add_error_source('x', 'simple', dB)
dataset1.add_error_source('y', 'simple', df)
dataset2.add_error_source('x', 'simple', dB)
dataset2.add_error_source('y', 'simple', df)
dataset3.add_error_source('x', 'simple', dB)
dataset3.add_error_source('y', 'simple', df)
#fit each dataset with a straight line
fit1 = kafe.Fit(dataset1, linear_2par)
fit2 = kafe.Fit(dataset2, linear_2par)
fit3 = kafe.Fit(dataset3, linear_2par)
fit1.do_fit(quiet=True)
fit2.do_fit(quiet=True)
fit3.do_fit(quiet=True)
# collect the slopes and their errors for later evaluation
slope1 = fit1.final_parameter_values[0]
slope2 = fit2.final_parameter_values[0]
slope3 = fit3.final_parameter_values[0]
slope_err1 = fit1.final_parameter_errors[0]
slope_err2 = fit2.final_parameter_errors[0]
slope_err3 = fit3.final_parameter_errors[0]
# GQ 27-JUL-14 <initial version>
#---------------------------------------------------------------------
# import everything we need from kafe
import kafe
from kafe.function_library import constant_1par
from kafe.file_tools import buildDataset_fromFile
#
# ---------------------------------------------------------
# begin execution
fname = 'WData.dat'
# build a kafe Dataset from input file
curDataset = buildDataset_fromFile(fname)
# perform fit
curFit = kafe.Fit(curDataset, constant_1par)
curFit.do_fit()
# Fix: converted the Python 2 print statements to print() calls -- the
# old form is a SyntaxError under Python 3, which the rest of this file
# already targets (e.g. print(hlines), print(kfit) elsewhere).
print("average:", curFit.get_parameter_values())
print("error :", curFit.get_parameter_errors())
myPlot = kafe.Plot(curFit)
myPlot.plot_all()
myPlot.save("kafe_example4.pdf")
myPlot.show()
############ # Workflow # ############ # Generate the Dataset and store it in a file #generate_dataset('dataset.dat') # Initialize the Dataset my_dataset = kafe.Dataset(title="Example dataset", axis_labels=['t', 'A']) # Load the Dataset from the file my_dataset.read_from_file(input_file='dataset.dat') # Create the Fit my_fit = kafe.Fit(my_dataset, exponential) # Do the Fit my_fit.do_fit() # Create the plots my_plot = kafe.Plot(my_fit) # Draw the plots my_plot.plot_all() ############### # Plot output # ############### # Save the plots
        4.60
    ], [
        # y values (13 points, matching the x list begun before this chunk)
        0.35, 0.26, 0.52, 0.44, 0.48, 0.55, 0.66, 0.48, 0.75, 0.70, 0.75,
        0.80, 0.90
    ]),
    title='some data',
    axis_labels=['$x$', '$y=f(x)$'])

#### specify the error model
# independent (uncorrelated) absolute errors on y, one per data point
my_dataset.add_error_source('y', 'simple', [
    0.06, 0.07, 0.05, 0.05, 0.07, 0.07, 0.09, 0.10, 0.11, 0.10, 0.11, 0.12,
    0.10
])

#### Create the Fit object
my_fit = kafe.Fit(my_dataset, quadratic_3par)
# Set initial values and error estimates
my_fit.set_parameters((0., 1., 0.2), (0.5, 0.5, 0.5))
# Do the Fit
my_fit.do_fit()

#### Create result plots and output them
my_plot = kafe.Plot(my_fit)
my_plot.plot_all()
my_plot.save('kafe_example0.pdf')  # to file

### Create (and save) contour and profile plots
from kafe.fit import CL2Chi2
# contours at delta-chi^2 = 1 and at the 68.27% confidence level
contour1 = my_fit.plot_contour(0, 1, dchi2=[1., CL2Chi2(.6827)])
contour2 = my_fit.plot_contour(0, 2, dchi2=[1., CL2Chi2(.6827)])
def kRegression(x, y, sx, sy,
                xabscor=None, yabscor=None, xrelcor=None, yrelcor=None,
                title='Daten', axis_labels=['X', 'Y'],
                plot=True, quiet=False):
    r"""
    linear regression y(x) = ax + b with errors on x and y;
    uses package `kafe`

    Args:
      * x: np-array, independent data
      * y: np-array, dependent data

      the following are single floats or arrays of length of x
        * sx: scalar or np-array, uncertainty(ies) on x
        * sy: scalar or np-array, uncertainty(ies) on y
        * xabscor: absolute, correlated error(s) on x
        * yabscor: absolute, correlated error(s) on y
        * xrelcor: relative, correlated error(s) on x
        * yrelcor: relative, correlated error(s) on y
        * title: string, title of graph
        * axis_labels: list of strings, axis labels x and y
        * plot: flag to switch off graphical output
        * quiet: flag to suppress text and log output

    Returns:
      * float: a slope
      * float: b constant
      * float: sa sigma on slope
      * float: sb sigma on constant
      * float: cor correlation
      * float: chi2 \chi-square
    """
    # regression with kafe
    import kafe
    from kafe.function_library import linear_2par
    # create a data set ...
    dat = kafe.Dataset(data=(x, y),
                       title=title,
                       axis_labels=axis_labels,
                       basename='kRegression')
    # ... and add all error sources.
    dat.add_error_source('x', 'simple', sx)
    dat.add_error_source('y', 'simple', sy)
    # Use 'is not None' rather than '!= None': the correlated errors may
    # be numpy arrays, for which '!=' is element-wise and its truth value
    # raises an "ambiguous" ValueError.
    if xabscor is not None:
        dat.add_error_source('x', 'simple', xabscor, correlated=True)
    if yabscor is not None:
        dat.add_error_source('y', 'simple', yabscor, correlated=True)
    if xrelcor is not None:
        dat.add_error_source('x', 'simple', xrelcor,
                             relative=True, correlated=True)
    if yrelcor is not None:
        dat.add_error_source('y', 'simple', yrelcor,
                             relative=True, correlated=True)
    # set up and run fit
    fit = kafe.Fit(dat, linear_2par)
    fit.do_fit(quiet=quiet)
    # harvest results
    # par, perr, cov, chi2 = fit.get_results() # for kafe vers. > 1.1.0
    # a = par[0]
    # b = par[1]
    # sa = perr[0]
    # sb = perr[1]
    # cor = cov[1,0]/(sa*sb)
    a = fit.final_parameter_values[0]
    b = fit.final_parameter_values[1]
    sa = fit.final_parameter_errors[0]
    sb = fit.final_parameter_errors[1]
    # off-diagonal covariance normalised to a correlation coefficient
    cor = fit.par_cov_mat[1, 0] / (sa * sb)
    chi2 = fit.minimizer.get_fit_info('fcn')
    if plot:
        kplot = kafe.Plot(fit)
        kplot.plot_all()
        kplot.show()
    return a, b, sa, sb, cor, chi2
    np.mean(U_hall_plat_f) + 10)  # TODO: prettify that

# do linear regression of hall voltages
data_hall_regression_i = np.array(data_hall_regression_i)
data_hall_regression_1_over_R_hall = np.array(
    data_hall_regression_1_over_R_hall)
# quick slope/offset estimate via numpy (the kafe fit below refines it)
slope_1_over_R_hall_over_i, offset_1_over_R_hall_over_i = np.polyfit(
    data_hall_regression_i, data_hall_regression_1_over_R_hall, 1)
dataset_1_over_R_hall_over_i = kafe.Dataset(
    data=[data_hall_regression_i, data_hall_regression_1_over_R_hall])
dataset_1_over_R_hall_over_i.add_error_source(
    'y', 'simple', data_hall_regression_1_over_R_hall_err)
fit_1_over_R_hall_over_i = kafe.Fit(dataset_1_over_R_hall_over_i, linear_2par)
fit_1_over_R_hall_over_i.do_fit(quiet=True)
# my_p = kafe.Plot(fit_1_over_R_hall_over_i)
# my_p.plot_all()
# my_p.show()
# NOTE(review): get_results()[0][1] is assumed to be the uncertainty on
# the slope -- confirm against kafe's get_results() return order.
slope_1_over_R_hall_over_i_err = fit_1_over_R_hall_over_i.get_results()[0][1]
R_k_measured = 1 / slope_1_over_R_hall_over_i
# alpha_measured = 299792458 * 4 * np.pi * 1e-7 / (2 * R_k_measured)
# fine-structure constant from the slope: alpha = c * mu_0 / (2 R_K)
alpha_measured = slope_1_over_R_hall_over_i * 299792458 * 4 * np.pi * 1e-7 / 2
alpha_measured_err = slope_1_over_R_hall_over_i_err * 299792458 * 4 * np.pi * 1e-7 / 2
ax_R_hall.plot(data_hall_regression_i,
               data_hall_regression_1_over_R_hall * 1e3, 'xk')
ax_R_hall.plot(data_hall_regression_i,
############ # Generate the Dataset and store it in a file #generate_dataset('dataset.dat') # Initialize Dataset my_dataset = kafe.Dataset(title="Example Dataset") # Load the Dataset from the file my_dataset.read_from_file('dataset.dat') #print my_dataset.get_cov_mat(0) #print my_dataset.get_cov_mat(1) # Create the Fits my_fits = [kafe.Fit(my_dataset, exp_2par), kafe.Fit(my_dataset, linear_2par)] # Do the Fits for fit in my_fits: fit.do_fit() # Create the plots my_plot = kafe.Plot(my_fits[0], my_fits[1]) # Draw the plots my_plot.plot_all(show_data_for=0) # only show data once (it's the same data) ############### # Plot output # ###############
# ---- fit function definition in kafe style # (decorators for nice output not mandatory) @ASCII(expression='k * ( x - xO ) + c') @LaTeX(name='f', parameter_names=('k', 'c', 'xO'), expression=r'k\,(x+\tilde{x})\,+c') @FitFunction def lin(x, k=1.0, c=0.0): return k * (x + xO) + c # ---- begin of fit --- # set the function fitf = lin # own definition #fitf=quadratic_3par # or from kafe function library # --------- begin of workflow ---------------------- # set data kdata = kafe.Dataset(data=(m, s), basename='kData', title='example data') kdata.add_error_source('y', 'simple', 1.0) kfit = kafe.Fit(kdata, fitf) # create the fit kfit.do_fit() # perform fit print(kfit) kplot = kafe.Plot(kfit) # create plot object kplot.axis_labels = ['x', 'Daten und f(x)'] kplot.plot_all() # make plots kplot.save('Blatt6_b_fit.pdf') kplot.show()
                 data_label="mit Gewicht")
]
# identical uncorrelated x and y errors for both datasets
my_dataset[0].add_error_source('y', 'simple', 0.01)
my_dataset[0].add_error_source('x', 'simple', 0.01)
my_dataset[1].add_error_source('y', 'simple', 0.01)
my_dataset[1].add_error_source('x', 'simple', 0.01)
#my_dataset[0]=kafe.Dataset(data=data1)
#my_dataset[1]=kafe.Dataset(data=data2)
#my_dataset.add_error_source()
my_fits = [
    kafe.Fit(dataset, linear_2par, fit_label="Linear regression")
    for dataset in my_dataset
]
for fit in my_fits:
    fit.do_fit()
my_plot = kafe.Plot(my_fits[0], my_fits[1])
my_plot.axis_labels = ['Drehfrequenz$[Hz]$', 'Nutationsfrequenz$[Hz]$']
my_plot.axis_units = ['$Hz$', '$Hz$']
my_plot.plot_all()
my_plot.save('kafe_nutation.pdf')
my_plot.show()
#plt.plot(time1,dF1,'r+',label = "Messung der Nutations")
syabscor=0.1 # an absolute, correlated error on y ### --- perform a fit with kafe # create the kafe data set ... dat = kafe.Dataset(data=(xdat, ydat), title='ToyData', axis_labels=['X', 'Y'], basename='kRegression') # ... and add all error sources dat.add_error_source('x','simple', sigx_abs) dat.add_error_source('x','simple', sxrelcor, relative=True, correlated=True) ey = np.absolute(sigy_rel* ydat * np.ones(nd)) # array of relative y errors dat.add_error_source('y','simple', ey) dat.add_error_source('y','simple', syabscor, correlated=True) # set-up the fit ... fit = kafe.Fit(dat, model) # ... run it ... fit.do_fit(quiet=False) # ... harvest results in local variables par, par_err, cov, chi2 = fit.get_results() # for kafe vers. > 1.1.0 cor = cov/np.outer(par_err, par_err) # produce plots kplot=kafe.Plot(fit) kplot.plot_all() #kplot.show() # plt.draw(); plt.pause(2.) # show plot for 2s. # save input data as table (in include-direcotory for LaTeX) data = np.array([xdat, sigx_abs*np.ones(nd), ydat, ey])