def setUp(self):
    @LaTeX(name='I', parameter_names=(r'R_0', r'\alpha_T', 'p_5', 'p_4', 'p_3'),
           expression=r'U / \left( R_0 (1 + T(U) \cdot \alpha_T) \right)')
    @FitFunction
    def IUmodel(U, R0=1., alph=0.004, p5=0.5, p4=0.9, p3=19.38):
        # use the empirical temperature dependence T(U) from the T-U fit
        T = p5 * U * U + p4 * U + p3
        return U / (R0 * (1. + T * alph))

    @LaTeX(name='T', parameter_names=('p_5', 'p_4', 'p_3'),
           expression=r'p_5 U^{2} + p_4 U + p_3')
    @FitFunction
    def quadric(U, p5=0.5, p4=0.9, p3=19.38):
        return p5 * U * U + p4 * U + p3

    # Get data from file
    U = [0.5, 1., 1.5]
    I = [0.5, 0.89, 1.41]
    T = [293.5 - 273.15, 293.8 - 273.15, 295.4 - 273.15]

    # Set first dataset
    kTUdata = kafe.Dataset(data=(U, T))
    # Set second dataset
    kIUdata = kafe.Dataset(data=(U, I))

    Fit1 = kafe.Fit(kTUdata, quadric, quiet=True)
    Fit2 = kafe.Fit(kIUdata, IUmodel, quiet=True)
    self.parameter_space = kafe.multifit._ParameterSpace([Fit1, Fit2])
def generate_datasets(output_file_path1, output_file_path2): '''The following block generates the Datasets and writes a file for each of them.''' import numpy as np # need some functions from numpy my_datasets = [] n_p = 10 xmin, xmax = 3, 4 slope, y_intercept = 3.2, 0.08 sigma_x, sigma_y = 0.1, 0.3 xdata = np.linspace(xmin, xmax, n_p) + np.random.normal(0.0, sigma_x, n_p) ydata = slope * xdata + [y_intercept] * n_p ydata += np.random.normal(0.0, sigma_y, n_p) my_datasets.append(kafe.Dataset(data=(xdata, ydata))) my_datasets[-1].add_error_source('x', 'simple', sigma_x) my_datasets[-1].add_error_source('y', 'simple', sigma_y) n_p = 10 xmin, xmax = 2, 3 slope, y_intercept = 2.9, 0.1 sigma_x, sigma_y = 0.05, 0.5 xdata = np.linspace(xmin, xmax, n_p) + np.random.normal(0.0, sigma_x, n_p) ydata = slope * xdata + [y_intercept] * n_p ydata += np.random.normal(0.0, sigma_y, n_p) my_datasets.append(kafe.Dataset(data=(xdata, ydata))) my_datasets[-1].add_error_source('x', 'simple', sigma_x) my_datasets[-1].add_error_source('y', 'simple', sigma_y) my_datasets[0].write_formatted(output_file_path1) my_datasets[1].write_formatted(output_file_path2)
def setUp(self):
    @FitFunction
    def IUmodel(U, R0=1., alph=0.004, p5=0.5, p4=0.9, p3=19.38):
        # use the empirical temperature dependence T(U) from the T-U fit
        T = p5 * U * U + p4 * U + p3
        return U / (R0 * (1. + T * alph))

    @FitFunction
    def quadric(U, p2=0.5, p1=0.9, p0=19.38):
        return p2 * U * U + p1 * U + p0

    U = [0.5, 1., 1.5]
    I = [0.5, 0.89, 1.41]
    T = [293.5 - 273.15, 293.8 - 273.15, 295.4 - 273.15]

    # Set first dataset
    kTUdata = kafe.Dataset(data=(U, T))
    # Set second dataset
    kIUdata = kafe.Dataset(data=(U, I))

    self.Test_Multifit = kafe.Multifit([(kTUdata, quadric), (kIUdata, IUmodel)],
                                       minimizer_to_use=None, quiet=True)
def test_W_boson_mass_averaging_with_y_cov_mat(): W_mass_values = np.array( [80.429, 80.339, 80.217, 80.449, 80.477, 80.310, 80.324, 80.353]) W_mass_cov_mat = np.matrix([[ 0.003466, 0.000441, 0.000441, 0.000441, 0.000625, 0.000625, 0.000625, 0.000625 ], [ 0.000441, 0.00577, 0.000441, 0.000441, 0.000625, 0.000625, 0.000625, 0.000625 ], [ 0.000441, 0.000441, 0.005065, 0.000441, 0.000625, 0.000625, 0.000625, 0.000625 ], [ 0.000441, 0.000441, 0.000441, 0.003805, 0.000625, 0.000625, 0.000625, 0.000625 ], [ 0.000625, 0.000625, 0.000625, 0.000625, 0.006697, 0.001936, 0.001936, 0.001936 ], [ 0.000625, 0.000625, 0.000625, 0.000625, 0.001936, 0.010217, 0.001936, 0.001936 ], [ 0.000625, 0.000625, 0.000625, 0.000625, 0.001936, 0.001936, 0.00802, 0.001936 ], [ 0.000625, 0.000625, 0.000625, 0.000625, 0.001936, 0.001936, 0.001936, 0.00656 ]]) ref_pval = (80.3743268547, ) ref_perr = (0.03513045624, ) _dataset = kafe.Dataset(data=(range(len(W_mass_values)), W_mass_values)) _dataset.add_error_source('y', 'matrix', W_mass_cov_mat) from kafe.function_library import constant_1par _fit = kafe.Fit(_dataset, constant_1par) _fit.do_fit() _pval, _perr = _fit.get_parameter_values(), _fit.get_parameter_errors() assert np.allclose(_pval, ref_pval) assert np.allclose(_perr, ref_perr)
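# Cross-check (illustrative sketch, not part of the test): for a one-parameter
# constant model the fit result equals the generalized least-squares mean,
# m = (1^T V^-1 y) / (1^T V^-1 1), with uncertainty 1 / sqrt(1^T V^-1 1).
# The helper name gls_mean is made up for this sketch.
def gls_mean(y, V):
    Vinv = np.linalg.inv(np.asarray(V))
    w = Vinv.sum()                          # 1^T V^-1 1
    m = Vinv.dot(np.asarray(y)).sum() / w   # (1^T V^-1 y) / (1^T V^-1 1)
    return m, 1.0 / np.sqrt(w)

# gls_mean(W_mass_values, W_mass_cov_mat) should reproduce ref_pval and
# ref_perr up to numerical precision.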
def generate_dataset(output_file_path):
    '''The following block generates the Dataset and writes it to a file.'''
    import numpy as np  # need some functions from numpy

    n_p = 10
    xmin, xmax = 1, 10
    growth, constant = 0.15, 1.3
    sigma_x, sigma_y = 0.3, 0.2

    xdata = np.linspace(xmin, xmax, n_p) + np.random.normal(0.0, sigma_x, n_p)
    # wrap map() in list() and convert to an array so this also works under Python 3
    ydata = np.array(list(map(lambda x: exp_2par(x, growth, constant), xdata)))
    ydata += np.random.normal(0.0, sigma_y, n_p)

    my_dataset = kafe.Dataset(data=(xdata, ydata))
    my_dataset.add_error_source('x', 'simple', sigma_x)
    my_dataset.add_error_source('y', 'simple', sigma_y)

    my_dataset.write_formatted(output_file_path)
def generate_dataset(output_file_path):
    '''The following block generates the Dataset and writes it to a file.'''
    import numpy as np  # need some functions from numpy

    n_p = 10
    xmin, xmax = 1, 5
    sigma_x, sigma_y = 0.3, 0.4

    xdata = np.linspace(xmin, xmax, n_p) + np.random.normal(0.0, sigma_x, n_p)
    A0, tau = 1., 1.
    # wrap map() in list() and convert to an array so this also works under Python 3
    ydata = np.array(list(map(lambda x: exponential(x, A0, tau), xdata)))
    ydata *= np.random.normal(1.0, sigma_y, n_p)  # relative smearing of y

    my_dataset = kafe.Dataset(data=(xdata, ydata))
    my_dataset.add_error_source('x', 'simple', sigma_x)
    my_dataset.add_error_source('y', 'simple', sigma_y, relative=True)

    my_dataset.write_formatted(output_file_path)
def test_W_boson_mass_averaging_without_cov_mats(): W_mass_values = np.array( [80.429, 80.339, 80.217, 80.449, 80.477, 80.310, 80.324, 80.353]) W_mass_errors = np.array([ 0.05887274, 0.07596051, 0.07116882, 0.06168468, 0.0818352, 0.10107918, 0.08955445, 0.08099383 ]) ref_pval = (80.3727519701, ) ref_perr = (0.02629397757, ) _dataset = kafe.Dataset(data=(range(len(W_mass_values)), W_mass_values)) _dataset.add_error_source('y', 'simple', W_mass_errors) from kafe.function_library import constant_1par _fit = kafe.Fit(_dataset, constant_1par) _fit.do_fit() _pval, _perr = _fit.get_parameter_values(), _fit.get_parameter_errors() assert np.allclose(_pval, ref_pval) assert np.allclose(_perr, ref_perr)
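# Cross-check (illustrative sketch, not part of the test): with uncorrelated
# errors the constant fit reduces to the inverse-variance weighted mean of
# the arrays used in the test above.
w = 1.0 / W_mass_errors**2
mean = np.sum(w * W_mass_values) / np.sum(w)   # weighted mean
err = 1.0 / np.sqrt(np.sum(w))                 # its uncertainty
# mean and err should agree with ref_pval and ref_perr above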
def kRegression(x, y, sx, sy,
                xabscor=None, yabscor=None, xrelcor=None, yrelcor=None,
                title='Daten', axis_labels=['X', 'Y'],
                plot=True, quiet=False):
    """
    Linear regression y(x) = ax + b with errors on x and y;
    uses the package `kafe`.

    Args:
      * x: np-array, independent data
      * y: np-array, dependent data

      the following are single floats or arrays of the length of x:

      * sx: scalar or np-array, uncertainty(ies) on x
      * sy: scalar or np-array, uncertainty(ies) on y
      * xabscor: absolute, correlated error(s) on x
      * yabscor: absolute, correlated error(s) on y
      * xrelcor: relative, correlated error(s) on x
      * yrelcor: relative, correlated error(s) on y
      * title: string, title of the graph
      * axis_labels: list of strings, axis labels for x and y
      * plot: flag to switch off graphical output
      * quiet: flag to suppress text and log output

    Returns:
      * float: a     slope
      * float: b     constant
      * float: sa    sigma on slope
      * float: sb    sigma on constant
      * float: cor   correlation
      * float: chi2  chi-square
    """
    # regression with kafe
    import kafe
    from kafe.function_library import linear_2par

    # create a data set ...
    dat = kafe.Dataset(data=(x, y), title=title, axis_labels=axis_labels,
                       basename='kRegression')
    # ... and add all error sources
    dat.add_error_source('x', 'simple', sx)
    dat.add_error_source('y', 'simple', sy)
    if xabscor is not None:
        dat.add_error_source('x', 'simple', xabscor, correlated=True)
    if yabscor is not None:
        dat.add_error_source('y', 'simple', yabscor, correlated=True)
    if xrelcor is not None:
        dat.add_error_source('x', 'simple', xrelcor, relative=True, correlated=True)
    if yrelcor is not None:
        dat.add_error_source('y', 'simple', yrelcor, relative=True, correlated=True)

    # set up and run the fit
    fit = kafe.Fit(dat, linear_2par)
    fit.do_fit(quiet=quiet)

    # harvest results
    # par, perr, cov, chi2 = fit.get_results()  # for kafe vers. > 1.1.0
    a = fit.final_parameter_values[0]
    b = fit.final_parameter_values[1]
    sa = fit.final_parameter_errors[0]
    sb = fit.final_parameter_errors[1]
    cor = fit.par_cov_mat[1, 0] / (sa * sb)
    chi2 = fit.minimizer.get_fit_info('fcn')

    if plot:
        kplot = kafe.Plot(fit)
        kplot.plot_all()
        kplot.show()

    return a, b, sa, sb, cor, chi2
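# Minimal usage sketch for kRegression (the data values below are invented
# for illustration only):
import numpy as np
x = np.array([1., 2., 3., 4., 5.])
y = np.array([2.1, 3.9, 6.2, 7.8, 10.1])
a, b, sa, sb, cor, chi2 = kRegression(x, y, sx=0.05, sy=0.2,
                                      plot=False, quiet=True)
print('slope = %.3f +/- %.3f, intercept = %.3f +/- %.3f' % (a, sa, b, sb))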
    my_dataset = kafe.Dataset(data=(xdata, ydata))
    my_dataset.add_error_source('x', 'simple', sigma_x)
    my_dataset.add_error_source('y', 'simple', sigma_y, relative=True)

    my_dataset.write_formatted(output_file_path)

############
# Workflow #
############

# Generate the Dataset and store it in a file
#generate_dataset('dataset.dat')

# Initialize the Dataset
my_dataset = kafe.Dataset(title="Example dataset",
                          axis_labels=['t', 'A'])

# Load the Dataset from the file
my_dataset.read_from_file(input_file='dataset.dat')

# Create the Fit
my_fit = kafe.Fit(my_dataset, exponential)

# Do the Fit
my_fit.do_fit()

# Create the plots
my_plot = kafe.Plot(my_fit)

# Draw the plots
my_plot.plot_all()
@LaTeX(name='A', x_name="t", parameter_names=('A_0', '\\tau{}'), expression="A_0\\,\\exp(\\frac{-t}{\\tau})") @FitFunction def exponential(t, A0=1, tau=1): return A0 * exp(-t / tau) hlines, data1 = ppk.readCSV("praezession.csv", nlhead=1) print(hlines) time, dF = data1 my_dataset = kafe.Dataset(data=data1, title="Praezession", axis_labels=['Drehfrequenz', 'Umlaufdauer'], axis_units=['$Hz$', '$s$']) my_dataset.add_error_source('x', 'simple', 0.01) my_dataset.add_error_source('y', 'simple', 0.99) #my_dataset=kafe.Dataset(data=data1) my_fits = [kafe.Fit(my_dataset, linear_2par)] for fit in my_fits: fit.do_fit() my_plot = kafe.Plot(my_fits[0]) my_plot.plot_all() my_plot.save('kafe_praezession.pdf')
@FitFunction
def exponential(t, A0=1, tau=1):
    return A0 * exp(-t / tau)

hlines, data1 = ppk.readCSV("Nutationsfrequenz_ohneGewicht.csv", nlhead=1)
print(hlines)
hlines2, data2 = ppk.readCSV("Nutationsfrequenz_mitGewicht.csv", nlhead=1)
time1, dF1 = data1
time2, dF2 = data2

my_dataset = [
    kafe.Dataset(data=data1,
                 title="Nutation ohne Gewicht",
                 axis_labels=['Drehfrequenz', 'Nutationsfrequenz'],
                 data_label="ohne Gewicht"),
    kafe.Dataset(data=data2,
                 title="Nutation mit Gewicht",
                 data_label="mit Gewicht")
]
my_dataset[0].add_error_source('y', 'simple', 0.01)
my_dataset[0].add_error_source('x', 'simple', 0.01)
my_dataset[1].add_error_source('y', 'simple', 0.01)
my_dataset[1].add_error_source('x', 'simple', 0.01)

#my_dataset[0]=kafe.Dataset(data=data1)
#my_dataset[1]=kafe.Dataset(data=data2)
           np.mean(U_hall_plat) - 10, np.mean(U_hall_plat_f) + 10)
axL.vlines(np.max(B_hall_plat_f), np.mean(U_hall_plat) - 10,
           np.mean(U_hall_plat_f) + 10)
# TODO: prettify that

# do linear regression of the hall voltages
data_hall_regression_i = np.array(data_hall_regression_i)
data_hall_regression_1_over_R_hall = np.array(data_hall_regression_1_over_R_hall)
slope_1_over_R_hall_over_i, offset_1_over_R_hall_over_i = np.polyfit(
    data_hall_regression_i, data_hall_regression_1_over_R_hall, 1)

dataset_1_over_R_hall_over_i = kafe.Dataset(
    data=[data_hall_regression_i, data_hall_regression_1_over_R_hall])
dataset_1_over_R_hall_over_i.add_error_source(
    'y', 'simple', data_hall_regression_1_over_R_hall_err)
fit_1_over_R_hall_over_i = kafe.Fit(dataset_1_over_R_hall_over_i, linear_2par)
fit_1_over_R_hall_over_i.do_fit(quiet=True)
# my_p = kafe.Plot(fit_1_over_R_hall_over_i)
# my_p.plot_all()
# my_p.show()

# uncertainty on the slope (first parameter of linear_2par)
slope_1_over_R_hall_over_i_err = fit_1_over_R_hall_over_i.final_parameter_errors[0]

R_k_measured = 1 / slope_1_over_R_hall_over_i
# fine-structure constant from the von-Klitzing resistance: alpha = mu_0 * c / (2 R_K)
# alpha_measured = 299792458 * 4 * np.pi * 1e-7 / (2 * R_k_measured)
alpha_measured = slope_1_over_R_hall_over_i * 299792458 * 4 * np.pi * 1e-7 / 2
alpha_measured_err = slope_1_over_R_hall_over_i_err * 299792458 * 4 * np.pi * 1e-7 / 2
''' general example for fitting with kafe - construct dataset - specify the error model using the add_error_source() method - perform fit (2nd order polynomial) - show and save output ''' import kafe #from kafe.function_tools import FitFunction, LaTeX, ASCII from kafe.function_library import quadratic_3par #### create a Dataset instance: my_dataset = kafe.Dataset(data=([ 0.05, 0.36, 0.68, 0.80, 1.09, 1.46, 1.71, 1.83, 2.44, 2.09, 3.72, 4.36, 4.60 ], [ 0.35, 0.26, 0.52, 0.44, 0.48, 0.55, 0.66, 0.48, 0.75, 0.70, 0.75, 0.80, 0.90 ]), title='some data', axis_labels=['$x$', '$y=f(x)$']) #### specify the error model my_dataset.add_error_source('y', 'simple', [ 0.06, 0.07, 0.05, 0.05, 0.07, 0.07, 0.09, 0.10, 0.11, 0.10, 0.11, 0.12, 0.10 ]) #### Create the Fit object my_fit = kafe.Fit(my_dataset, quadratic_3par) # Set initial values and error estimates my_fit.set_parameters((0., 1., 0.2), (0.5, 0.5, 0.5))
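# The example stops after setting the start values; following the workflow
# used throughout these examples, it would typically be completed like this
# (the output file name is illustrative):
my_fit.do_fit()                    # perform the fit

my_plot = kafe.Plot(my_fit)        # create the plot object
my_plot.plot_all()                 # draw the plot
my_plot.save('kafe_example.pdf')   # illustrative file name
my_plot.show()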
f1, U1_1, U2_1, U1 = np.loadtxt('./source/ESR-Coil-E.dat', unpack=True) f2, U1_2, U2_2, U2 = np.loadtxt('./source/ESR-Coil-F.dat', unpack=True) f3, U1_3, U2_3, U3 = np.loadtxt('./source/ESR-Coil-G.dat', unpack=True) #conversion to magnetic field strength (in mT) B1 = (U1 * 1e-3 / R) * c B2 = (U2 * 1e-3 / R) * c B3 = (U3 * 1e-3 / R) * c #errors df = 0.1 #frequency error (in MHz) dB = 52.3e-3 #calculated magnetic field error by hand, see paper #datasets dataset1 = kafe.Dataset(data=(B1, f1)) dataset2 = kafe.Dataset(data=(B2, f2)) dataset3 = kafe.Dataset(data=(B3, f3)) #error sources dataset1.add_error_source('x', 'simple', dB) dataset1.add_error_source('y', 'simple', df) dataset2.add_error_source('x', 'simple', dB) dataset2.add_error_source('y', 'simple', df) dataset3.add_error_source('x', 'simple', dB) dataset3.add_error_source('y', 'simple', df) #fit fit1 = kafe.Fit(dataset1, linear_2par)
def kFit(func, x, y, sx, sy, p0=None, p0e=None,
         xabscor=None, yabscor=None, xrelcor=None, yrelcor=None,
         title='Daten', axis_labels=['X', 'Y'],
         plot=True, quiet=False):
    """
    Fit an arbitrary function func to data with errors on x and y;
    uses the package `kafe`.

    Args:
      * func: function to fit
      * x: np-array, independent data
      * y: np-array, dependent data

      the following are single floats or arrays of the length of x:

      * sx: scalar or np-array, uncertainty(ies) on x
      * sy: scalar or np-array, uncertainty(ies) on y
      * p0: array-like, initial guess of parameters
      * p0e: array-like, initial guess of parameter uncertainties
      * xabscor: absolute, correlated error(s) on x
      * yabscor: absolute, correlated error(s) on y
      * xrelcor: relative, correlated error(s) on x
      * yrelcor: relative, correlated error(s) on y
      * title: string, title of the graph
      * axis_labels: list of strings, axis labels for x and y
      * plot: flag to switch off graphical output
      * quiet: flag to suppress text and log output

    Returns:
      * np-array of float: parameter values
      * np-array of float: parameter errors
      * np-array: correlation matrix
      * float: chi2, the chi-square value
    """
    # fit with kafe
    import kafe

    # create a data set ...
    dat = kafe.Dataset(data=(x, y), title=title, axis_labels=axis_labels,
                       basename='kFit')
    # ... and add all error sources
    dat.add_error_source('x', 'simple', sx)
    dat.add_error_source('y', 'simple', sy)
    if xabscor is not None:
        dat.add_error_source('x', 'simple', xabscor, correlated=True)
    if yabscor is not None:
        dat.add_error_source('y', 'simple', yabscor, correlated=True)
    if xrelcor is not None:
        dat.add_error_source('x', 'simple', xrelcor, relative=True, correlated=True)
    if yrelcor is not None:
        dat.add_error_source('y', 'simple', yrelcor, relative=True, correlated=True)

    # set up and run the fit
    fit = kafe.Fit(dat, func)
    if p0 is not None:
        fit.set_parameters(p0, p0e)
    fit.do_fit(quiet=quiet)

    # harvest results
    # par, perr, cov, chi2 = fit.get_results()  # for kafe vers. > 1.1.0
    par = np.array(fit.final_parameter_values)
    pare = np.array(fit.final_parameter_errors)
    cor = fit.par_cov_mat / np.outer(pare, pare)
    chi2 = fit.minimizer.get_fit_info('fcn')

    if plot:
        kplot = kafe.Plot(fit)
        kplot.plot_all()
        kplot.show()

    return par, pare, cor, chi2
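# Minimal usage sketch for kFit with a model from the kafe function library
# (the data values below are invented for illustration only):
import numpy as np
from kafe.function_library import linear_2par

x = np.array([1., 2., 3., 4., 5.])
y = np.array([2.2, 4.1, 5.9, 8.3, 9.8])
par, pare, cor, chi2 = kFit(linear_2par, x, y, sx=0.1, sy=0.3,
                            plot=False, quiet=True)
print('parameters:', par)
print('errors:    ', pare)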
# ---- fit function definition in kafe style
#      (decorators for nice output, not mandatory)
@ASCII(expression='k * ( x + xO ) + c')
@LaTeX(name='f', parameter_names=('k', 'c', 'xO'),
       expression=r'k\,(x+\tilde{x})\,+c')
@FitFunction
def lin(x, k=1.0, c=0.0, xO=0.0):
    return k * (x + xO) + c

# ---- begin of fit ---
# set the function
fitf = lin  # own definition
#fitf = quadratic_3par  # or from the kafe function library

# --------- begin of workflow ----------------------
# set data
kdata = kafe.Dataset(data=(m, s), basename='kData', title='example data')
kdata.add_error_source('y', 'simple', 1.0)
kfit = kafe.Fit(kdata, fitf)  # create the fit
kfit.do_fit()  # perform fit
print(kfit)
kplot = kafe.Plot(kfit)  # create plot object
kplot.axis_labels = ['x', 'Daten und f(x)']
kplot.plot_all()  # make plots
kplot.save('Blatt6_b_fit.pdf')
kplot.show()
    my_dataset = kafe.Dataset(data=(xdata, ydata))
    my_dataset.add_error_source('x', 'simple', sigma_x)
    my_dataset.add_error_source('y', 'simple', sigma_y)

    my_dataset.write_formatted(output_file_path)

############
# Workflow #
############

# Generate the Dataset and store it in a file
#generate_dataset('dataset.dat')

# Initialize Dataset
my_dataset = kafe.Dataset(title="Example Dataset")

# Load the Dataset from the file
my_dataset.read_from_file('dataset.dat')

#print(my_dataset.get_cov_mat(0))
#print(my_dataset.get_cov_mat(1))

# Create the Fits
my_fits = [kafe.Fit(my_dataset, exp_2par),
           kafe.Fit(my_dataset, linear_2par)]

# Do the Fits
for fit in my_fits:
    fit.do_fit()

# Create the plots
return exp(-((x - mu)**2 / (2 * sigma**2))) * norm_factor ############ # Workflow # ############ # Define x-axis data my_x_data = linspace(-3, 3, 20) # twenty evenly-spaced points on # the x axis, from -3 to 3 # Generate y-axis data from model my_y_data = list(map(lambda x: gauss_2par(x, 0, 1), my_x_data)) # Construct the Datasets my_dataset = kafe.Dataset(data=(my_x_data, my_y_data), title="Standard-Normalverteilung") # Fit the model to the data my_fit = kafe.Fit(my_dataset, gauss_2par, fit_label='Standard-Normalverteilung') # Don't call do_fit for this Fit. # Plot the Fit my_plot = kafe.Plot(my_fit, show_legend=True) # Instruct LaTeX to use the EulerVM package (optional, uncomment if needed) #plt.rcParams.update({'text.latex.preamble': ['\\usepackage{eulervm}']}) # Draw the Plots
# distance, count without Cd, count with Cd
d, wo, w = np.loadtxt(sys.path[0] + '/values.dat', unpack=True)
woE = np.sqrt(wo)  # Poisson error for the series without Cd shield
wE = np.sqrt(w)
dE = 1 * np.ones(d.shape)  # 1 mm error on d

# Y = ln(d^2 * wo); propagate the errors on wo and d:
#   sigma_Y = sqrt((woE/wo)^2 + (2*dE/d)^2)
Y = np.log(d**2 * wo)
YE = np.sqrt((1 / (wo) * woE)**2 +       # error on count
             (2 * d / (d**2) * dE)**2)   # error on d

if len(sys.argv) <= 2:
    import kafe
    from kafe.function_library import linear_2par

    dataset = kafe.Dataset(data=(d, Y))
    dataset.add_error_source('y', 'simple', YE)  # propagated error on y
    dataset.add_error_source('x', 'simple', dE)  # 1 mm error on x
    fit = kafe.Fit(dataset, linear_2par)
    fit.do_fit(quiet=True)

    slope = fit.final_parameter_values[0]
    slopeE = fit.final_parameter_errors[0]
    yoffset = fit.final_parameter_values[1]

    os.execv(__file__, [
        'test',
        str(slope),
        str(slopeE),
        str(yoffset),
        'save' if len(sys.argv) == 2 else 'show'
    my_datasets[-1].add_error_source('y', 'simple', sigma_y)

    my_datasets[0].write_formatted(output_file_path1)
    my_datasets[1].write_formatted(output_file_path2)

############
# Workflow #
############

# Generate the Datasets and store them in files
#generate_datasets('dataset1.dat', 'dataset2.dat')

# Initialize the Datasets
my_datasets = [
    kafe.Dataset(title="Example Dataset 1"),
    kafe.Dataset(title="Example Dataset 2")
]

# Load the Datasets from files
my_datasets[0].read_from_file(input_file='dataset1.dat')
my_datasets[1].read_from_file(input_file='dataset2.dat')

# Create the Fits; data_label[-1] takes the last character of the data label
my_fits = [
    kafe.Fit(dataset, linear_2par,
             fit_label="Linear regression " + dataset.data_label[-1])
    for dataset in my_datasets
]
return U / (R0 * (1.0 + _temperature * alph)) # -- Next, read the data from an external file # load all data into numpy arrays U, I, T = np.loadtxt('OhmsLawExperiment.dat', unpack=True) # data sigU, sigI, sigT = 0.1, 0.1, 0.1 # uncertainties T0 = 273.15 # 0 degrees C as absolute Temperature (in Kelvin) # -- Finally, go through the fitting procedure # Step 1: construct a kafe dataset for the T-U data kData_T_U = kafe.Dataset(data=(U, T - T0), basename='u-t-data', title='Temperature vs. Voltage', axis_labels=['U [V]', r"T [$^\circ$C]"]) # declare errors on U and T kData_T_U.add_error_source('x', 'simple', sigU) kData_T_U.add_error_source('y', 'simple', sigT) # Step 2: construct a kafe dataset for the U-I data kData_I_U = kafe.Dataset(data=(U, I), basename='u-i-data', title='Current vs. Voltage', axis_labels=['U [V]', 'I [A]']) # declare errors on U and I kData_I_U.add_error_source('x', 'simple', sigU) kData_I_U.add_error_source('y', 'simple', sigI)
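# A plausible continuation, mirroring the multifit test setUp earlier in this
# collection (it assumes the quadric and IUmodel fit functions defined there;
# the variable names fitT and fitI are made up):

# Step 3: fit the empirical T(U) model to the T-U data
fitT = kafe.Fit(kData_T_U, quadric)
fitT.do_fit()

# Step 4: fit the I(U) model, which uses T(U), to the I-U data
fitI = kafe.Fit(kData_I_U, IUmodel)
fitI.do_fit()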
### --- read data and set errors

hlines, data = readCSV('ToyData.dat')
xdat = data[0]
ydat = data[1]
nd = len(xdat)
sigx_abs = 0.2   # absolute error on x
sigy_rel = 0.1   # relative error on y
# errors of this kind are only supported by kafe:
sxrelcor = 0.05  # a relative, correlated error on x
syabscor = 0.1   # an absolute, correlated error on y

### --- perform a fit with kafe

# create the kafe data set ...
dat = kafe.Dataset(data=(xdat, ydat), title='ToyData',
                   axis_labels=['X', 'Y'], basename='kRegression')
# ... and add all error sources
dat.add_error_source('x', 'simple', sigx_abs)
dat.add_error_source('x', 'simple', sxrelcor, relative=True, correlated=True)
ey = np.absolute(sigy_rel * ydat * np.ones(nd))  # absolute y errors from the relative error
dat.add_error_source('y', 'simple', ey)
dat.add_error_source('y', 'simple', syabscor, correlated=True)

# set up the fit ...
fit = kafe.Fit(dat, model)
# ... run it ...
fit.do_fit(quiet=False)
# ... and harvest the results in local variables
par, par_err, cov, chi2 = fit.get_results()  # for kafe vers. > 1.1.0
cor = cov / np.outer(par_err, par_err)