def fit_gaussian_experiments_variable_mean_and_std(means   : np.array,
                                                   stds    : np.array,
                                                   exps    : np.array,
                                                   bins    : int = 50,
                                                   n_sigma : int = 3) -> Iterable[List[float]]:
    """
    Fit a gaussian to every experiment in exps.

    Experiment number i * len(stds) + j is assumed to have been generated
    with mean means[i] and width stds[j].

    Returns
    -------
    SEED, MU, STD : lists of Measurement
        Generation seed and fitted mu / std (with uncertainties).
    AVG, RMS : lists
        Sample mean and rms computed directly from the data.
    CHI2 : list of float
        Chi2 of each gaussian fit.
    """
    n_stds = len(stds)
    SEED, MU,  STD  = [], [], []
    AVG,  RMS, CHI2 = [], [], []
    for i, mean in enumerate(means):
        for j, std in enumerate(stds):
            sample    = exps[i * n_stds + j]
            fit_range = mean - n_sigma * std, mean + n_sigma * std
            width     = (fit_range[1] - fit_range[0]) / bins
            gp = gaussian_parameters(sample, range=fit_range, bin_size=width)
            fc = fit_energy(sample, nbins=bins, range=fit_range, n_sigma=n_sigma)
            SEED.append(Measurement(mean, std))
            MU  .append(Measurement(fc.fr.par[1], fc.fr.err[1]))
            STD .append(Measurement(fc.fr.par[2], fc.fr.err[2]))
            AVG .append(gp.mu)
            RMS .append(gp.std)
            CHI2.append(fc.fr.chi2)
    return SEED, MU, STD, AVG, RMS, CHI2
def ltmap_lsqfit(X, Y, Z, E, XYbins, min_entries=20, nbins=10):
    """
    Compute the lifetime map with the unbinned fit.

    Returns E0 and LT Measurement matrices (x, y), a chi2 matrix and a
    boolean validity matrix.

    NOTE: the map granularity is taken from XYbins; the ``nbins``
    argument is immediately recomputed and therefore effectively ignored
    (kept in the signature for backward compatibility).
    """
    nbins = len(XYbins) - 1          # overrides the argument, as before
    shape = (nbins, nbins)
    e0    = np.zeros(shape)
    lt    = np.zeros(shape)
    e0u   = np.zeros(shape)
    ltu   = np.zeros(shape)
    chi2  = np.zeros(shape)
    valid = np.zeros(shape, dtype=bool)

    for i in range(nbins):
        sel_x = in_range(X, *XYbins[i:i + 2])
        for j in range(nbins):
            sel_y = in_range(Y, *XYbins[j:j + 2])
            sel   = sel_x & sel_y
            if np.count_nonzero(sel) < min_entries:
                continue             # too few entries: leave the bin invalid
            try:
                me0, mlt, ichi2, iok = lt_lsqfit(Z[sel], E[sel],
                                                 chi2=True, nbins=nbins)
                e0  [i, j] = me0.value
                e0u [i, j] = me0.uncertainty
                lt  [i, j] = mlt.value
                ltu [i, j] = mlt.uncertainty
                chi2[i, j] = ichi2
                valid[i, j] = iok
            except Exception:        # was a bare except: still best-effort, but
                pass                 # no longer swallows KeyboardInterrupt/SystemExit
    return Measurement(e0, e0u), Measurement(lt, ltu), chi2, valid
def fit_slices_2d_gauss(xdata, ydata, zdata, xbins, ybins, zbins,
                        min_entries   = 1e2,
                        ignore_errors = _FIT_EXCEPTIONS):
    """
    Slice the data in x and y, histogram each slice and fit a gaussian to it.

    Parameters
    ----------
    xdata, ydata, zdata: array_likes
        Values of each coordinate.
    xbins, ybins: array_likes
        The bins in the x and y coordinates.
    zbins: array_like
        The bins in z used to histogram each slice.
    min_entries: int (optional)
        Minimum number of entries required to attempt a fit.

    Returns
    -------
    mean, sigma: Measurement(np.ndarray, np.ndarray)
        Fitted mean and sigma with their errors.
    chi2: np.ndarray
        Chi2 of each fit.
    valid: boolean np.ndarray
        True where the fit succeeded.
    """
    shape = np.size(xbins) - 1, np.size(ybins) - 1

    mean , meanu  = np.zeros(shape), np.zeros(shape)
    sigma, sigmau = np.zeros(shape), np.zeros(shape)
    chi2          = np.zeros(shape)
    valid         = np.zeros(shape, dtype=bool)

    for i in range(shape[0]):
        in_xslice = in_range(xdata, *xbins[i:i + 2])
        for j in range(shape[1]):
            in_yslice = in_range(ydata, *ybins[j:j + 2])
            slice_sel = in_xslice & in_yslice
            if np.count_nonzero(slice_sel) < min_entries:
                continue
            try:
                fit = quick_gauss_fit(zdata[slice_sel], zbins)
                mean  [i, j] = fit.values[1]
                meanu [i, j] = fit.errors[1]
                sigma [i, j] = fit.values[2]
                sigmau[i, j] = fit.errors[2]
                chi2  [i, j] = fit.chi2
                valid [i, j] = True
            except Exception as exc:
                # only the configured fit exceptions are tolerated
                if not isinstance(exc, ignore_errors):
                    raise
    return Measurement(mean, meanu), Measurement(sigma, sigmau), chi2, valid
def fit_lifetimes_from_profile(kre: KrEvent,
                               kR: KrRanges, kNB: KrNBins, kB: KrBins,
                               kL: KrRanges,
                               min_entries=1e2):
    """
    Slice the data in (x, y) bins, profile E vs Z in each bin and fit an
    exponential.

    Returns const and slope Measurement matrices, a chi2 matrix and a
    boolean validity matrix.  The slope is stored as abs(value).

    Note: kR is unused but kept in the signature for backward compatibility.
    """
    nbins  = kNB.XY, kNB.XY
    const  = np.zeros(nbins)
    slope  = np.zeros(nbins)
    constu = np.zeros(nbins)
    slopeu = np.zeros(nbins)
    chi2   = np.zeros(nbins)
    valid  = np.zeros(nbins, dtype=bool)
    zrange = kL.Z

    for i in range(kNB.XY):
        sel_x = in_range(kre.X, *kB.XY[i:i + 2])
        for j in range(kNB.XY):
            sel_y = in_range(kre.Y, *kB.XY[j:j + 2])
            sel   = sel_x & sel_y
            if np.count_nonzero(sel) < min_entries:
                continue           # too few entries: leave the bin invalid
            try:
                f = fit_profile_1d_expo(kre.Z[sel], kre.E[sel], kNB.Z,
                                        xrange=zrange)
                const [i, j] = f.values[0]
                constu[i, j] = f.errors[0]
                slope [i, j] = abs(f.values[1])
                slopeu[i, j] = f.errors[1]
                chi2  [i, j] = f.chi2
                valid [i, j] = True
            except Exception:      # was a bare except
                print('error')
    return Measurement(const, constu), Measurement(slope, slopeu), chi2, valid
def test_fits_yield_good_pulls_variable_mean_and_std():
    """Pulls of fitted mu/std (and sample avg/rms) against the generation
    seeds should be ~N(0, 1) over many random gaussian experiments."""
    nevt    = int(1e3)
    nbins   = 50
    n_sigma = 3
    sigmas  = np.random.uniform(low=1.0, high=50. , size=30)
    means   = np.random.uniform(low=100, high=1000., size=30)

    SEED, MU, STD, AVG, RMS, CHI2 = [], [], [], [], [], []
    for sigma in sigmas:
        for mean in means:
            SEED.append(Measurement(mean, sigma))
            e = np.random.normal(mean, sigma, nevt)
            r = mean - n_sigma * sigma, mean + n_sigma * sigma
            bin_size = (r[1] - r[0]) / nbins
            gp = gaussian_parameters(e, range=r, bin_size=bin_size)
            fc = fit_energy(e, nbins=nbins, range=r, n_sigma=n_sigma)
            MU  .append(Measurement(fc.fr.par[1], fc.fr.err[1]))
            STD .append(Measurement(fc.fr.par[2], fc.fr.err[2]))
            AVG .append(gp.mu)
            RMS .append(gp.std)
            CHI2.append(fc.fr.chi2)

    values        = lambda ms: np.array([m.value       for m in ms])
    uncertainties = lambda ms: np.array([m.uncertainty for m in ms])

    mean, sigma = values(SEED), uncertainties(SEED)
    avg , avg_u = values(AVG) , uncertainties(AVG)
    rms , rms_u = values(RMS) , uncertainties(RMS)
    mu  , mu_u  = values(MU)  , uncertainties(MU)
    std , std_u = values(STD) , uncertainties(STD)

    p_mu, p_std = mean_and_std((mean - mu) / mu_u, range_=(-10, 10))
    print(f'(mean-mu) / mu_u -> {p_mu}, {p_std}')
    assert p_mu  == approx(0, abs=0.2)
    assert p_std == approx(1, abs=0.3)

    p_mu, p_std = mean_and_std((mean - avg) / avg_u, range_=(-10, 10))
    print(f'(mean-avg) / avg_u -> {p_mu}, {p_std}')
    assert p_mu  == approx(0, abs=0.2)
    assert p_std == approx(1, abs=0.3)

    p_mu, p_std = mean_and_std((sigma - std) / std_u, range_=(-10, 10))
    print(f'(sigma-std) / std_u -> {p_mu}, {p_std}')
    assert p_mu < 1.5
    assert p_std == approx(1, abs=0.3)

    p_mu, p_std = mean_and_std((sigma - rms) / rms_u, range_=(-10, 10))
    print(f'(sigma-rms) / rms_u -> {p_mu}, {p_std}')
    assert p_mu < 1
    assert p_std == approx(1, abs=0.3)
def s1_means_and_vars(dst):
    """Return an S1D of weighted mean/std Measurements (unit weights) for
    the S1 variables of dst; R is the height-over-energy ratio."""
    weights = np.ones(len(dst))
    ratio   = divide_np_arrays(dst.S1h.values, dst.S1e.values)
    stat    = lambda values: Measurement(*weighted_mean_and_std(values, weights))
    return S1D(E = stat(dst.S1e.values),
               W = stat(dst.S1w),
               H = stat(dst.S1h),
               R = stat(ratio),
               T = stat(dst.S1t))
def s1d_from_dst(dst       : DataFrame,
                 range_s1e : Tuple[float, float] = (0, 40),
                 range_s1w : Tuple[float, float] = (0, 500),
                 range_s1h : Tuple[float, float] = (0, 10),
                 range_s1t : Tuple[float, float] = (0, 600)) -> S1D:
    """Build an S1D of mean/std Measurements from the S1 columns of dst,
    restricting each variable to its given range (R is fixed to (0, 1))."""
    ratio = divide_np_arrays(dst.S1h.values, dst.S1e.values)
    s1e   = Measurement(*mean_and_std(dst.S1e.values, range_s1e))
    s1w   = Measurement(*mean_and_std(dst.S1w.values, range_s1w))
    s1h   = Measurement(*mean_and_std(dst.S1h.values, range_s1h))
    s1r   = Measurement(*mean_and_std(ratio,          (0, 1)))
    s1t   = Measurement(*mean_and_std(dst.S1t.values, range_s1t))
    return S1D(E=s1e, W=s1w, H=s1h, R=s1r, T=s1t)
def fit_slices_1d_gauss(xdata, ydata, xbins, ybins, min_entries=1e2):
    """
    Slice the data in x, histogram each slice, fit it to a gaussian
    and return the relevant values.

    Parameters
    ----------
    xdata, ydata: array_likes
        Values of each coordinate.
    xbins: array_like
        The bins in the x coordinate.
    ybins: array_like
        The bins in the y coordinate for histograming the data.
    min_entries: int (optional)
        Minimum amount of entries to perform the fit.

    Returns
    -------
    mean: Measurement(np.ndarray, np.ndarray)
        Values of mean with errors.
    sigma: Measurement(np.ndarray, np.ndarray)
        Values of sigma with errors.
    chi2: np.ndarray
        Chi2 from each fit.
    valid: boolean np.ndarray
        Where the fit has been successful.
    """
    nbins  = np.size(xbins) - 1
    mean   = np.zeros(nbins)
    sigma  = np.zeros(nbins)
    meanu  = np.zeros(nbins)
    sigmau = np.zeros(nbins)
    chi2   = np.zeros(nbins)
    valid  = np.zeros(nbins, dtype=bool)

    for i in range(nbins):
        sel = in_range(xdata, *xbins[i:i + 2])
        if np.count_nonzero(sel) < min_entries:
            continue
        try:
            f = quick_gauss_fit(ydata[sel], ybins)
            mean  [i] = f.values[1]
            meanu [i] = f.errors[1]
            sigma [i] = f.values[2]
            sigmau[i] = f.errors[2]
            chi2  [i] = f.chi2
            valid [i] = True
        except Exception:   # best-effort per slice: a failed fit stays invalid
            pass            # (was a bare except, which also caught KeyboardInterrupt)
    return Measurement(mean, meanu), Measurement(sigma, sigmau), chi2, valid
def xymap_compare(xymap1, xymap0, mask1, mask0, type='difference', default=0.):
    """
    Compare two (x, y) maps.

    xymap1, xymap0 are Measurement-like objects with .value/.uncertainty
    arrays; mask1, mask0 are their boolean validity masks.

    Returns an array with the difference (or pull, or 100*ratio, selected
    with ``type``) as a Measurement, plus the combined boolean mask.

    Fixes: the dead ``err = zeros`` assignment was removed and the maps are
    no longer required to be square (the original double-bound
    ``nbins, nbins = v0.shape``).
    """
    v0, uv0, ok0 = xymap0.value, xymap0.uncertainty, mask0
    v1, uv1, ok1 = xymap1.value, xymap1.uncertainty, mask1

    ok = np.logical_and(ok0, ok1)
    d  = default * np.ones(v0.shape)
    # uncertainties added in quadrature
    err = np.sqrt(uv0 * uv0 + uv1 * uv1)
    ok  = np.logical_and(ok, err > 0.)

    d[ok]    = v1[ok] - v0[ok]
    err[~ok] = default

    if type == 'ratio':
        sel = v0 > 0
        d  [sel] = 100. * d  [sel] / v0[sel]
        err[sel] = 100. * err[sel] / v0[sel]
    if type == 'pull':
        sel = err > 0.
        d[sel] = d[sel] / err[sel]

    dmap = Measurement(d, err)
    return dmap, ok
def pars_from_fcs(fcs: List[FitCollection]) -> Tuple[List[Measurement],
                                                     List[Measurement],
                                                     np.array]:
    """Extract (E0, LT, chi2) from a list of fit collections.
    Failed fits yield NaN entries and emit a UserWarning."""
    E, LT, C2 = [], [], []
    for fc in fcs:
        if not fc.fr.valid:
            warnings.warn(f' fit did not succeed, returning NaN ', UserWarning)
            E .append(Measurement(NN, NN))
            LT.append(Measurement(NN, NN))
            C2.append(NN)
            continue
        par, err = fc.fr.par, fc.fr.err
        E .append(Measurement(par[0], err[0]))
        LT.append(Measurement(par[1], err[1]))
        C2.append(fc.fr.chi2)
    return E, LT, np.array(C2)
def s2d_from_dst(dst: DataFrame) -> S2D:
    """Build an S2D of mean/std Measurements from the S2 columns of dst,
    each restricted to a fixed hard-coded range."""
    columns = dict(E=(dst.S2e.values,   (0, 20000)),
                   W=(dst.S2w.values,   (0, 30)),
                   Q=(dst.S2q.values,   (0, 1000)),
                   N=(dst.Nsipm.values, (0, 40)),
                   X=(dst.X.values,     (-200, 200)),
                   Y=(dst.Y.values,     (-200, 200)))
    stats = {name: Measurement(*mean_and_std(values, rng))
             for name, (values, rng) in columns.items()}
    return S2D(**stats)
def gaussian_parameters(x        : np.array,
                        range    : Tuple[Number],
                        bin_size : float = 1) -> GaussPar:
    """
    Estimate the parameters of the gaussian

        g = N * exp(x - mu)**2 / (2 * std**2),   N = 1 / (sqrt(2 pi) * std)

    from the sample x restricted to range.  Returns a GaussPar holding the
    mean (mu), standard deviation (std) and amplitude (inverse of N), each
    as a Measurement (value, uncertainty).
    """
    mu, std = mean_and_std(x, range)

    norm = np.sqrt(2 * np.pi) * std
    amp  = len(x) * bin_size / norm

    # uncertainties from the number of samples inside the range
    n_in = np.count_nonzero(in_range(x, *range))
    mu_u  = std / np.sqrt(n_in)
    std_u = std / np.sqrt(2 * (n_in - 1))
    amp_u = np.sqrt(2 * np.pi) * std_u

    return GaussPar(mu  = Measurement(mu,  mu_u),
                    std = Measurement(std, std_u),
                    amp = Measurement(amp, amp_u))
def ltmap(X, Y, Z, E, XYbins, Znbins, Zrange):
    """
    Return E0 and LT maps from the 2d lifetime fit.

    LT is returned with its sign flipped (positive lifetime); Eok flags
    bins whose fitted lifetime lies in (-1e5, -100) with a finite chi2.

    The unused relative-error maps and the commented-out XYMap code of
    the original were removed (dead code).
    """
    Escale, ELT, Echi2, Eok = fit_slices_2d_expo(X, Y, Z, E, XYbins, XYbins,
                                                 Znbins, zrange=Zrange,
                                                 min_entries=50)
    Eok = Eok & (ELT.value < -100) & (ELT.value > -1e5) & np.isfinite(Echi2)
    # flip the sign so the lifetime is reported as a positive number
    ELT = Measurement(-1. * ELT.value, ELT.uncertainty)
    return Escale, ELT, Echi2, Eok
def lt_lsqfit(Z, E, chi2=True, nbins=12):
    """
    Unbinned fit to the lifetime: fit a straight line to -log(E) vs Z.

    Returns e0 and lt best estimates (as Measurements), the chi2 and a
    validity flag.  If chi2 is False, 0 is returned for chi2; nbins is the
    number of profile bins used to compute the chi2 (default 12).

    Fix: the chi2 mask assignment was syntactically invalid in the
    original (``c*k = ~np.isnan(uys)``); bare excepts narrowed.
    """
    ok = True
    e0, lt, e0u, ltu, xchi2 = 0, 0, 0, 0, 0
    a = b = 0.0
    DE = -np.log(E)
    try:
        # linear fit with covariance: DE ~ a * Z + b
        cc, cov = np.polyfit(Z, DE, 1, full=False, cov=True)
        a, b = cc[0], cc[1]
        lt  = 1 / a
        ltu = lt * lt * np.sqrt(cov[0, 0])   # error propagation for 1/a
        e0  = np.exp(-b)
        e0u = e0 * np.sqrt(cov[1, 1])        # error propagation for exp(-b)
    except Exception:
        ok = False

    me0, mlt = Measurement(e0, e0u), Measurement(lt, ltu)
    if not chi2 or not ok:
        return me0, mlt, xchi2, ok

    xs, ys, uys = fitf.profileX(Z, DE, nbins)
    cok = ~np.isnan(uys)                     # keep profile points with valid errors
    try:
        res   = (a * xs[cok] + b - ys[cok]) / uys[cok]
        xchi2 = np.sum(res * res) / (1. * len(xs) - 1)
    except Exception:
        ok = False
    return me0, mlt, xchi2, ok
def s2_means_and_vars(dst):
    """Return an S2D of weighted mean/std Measurements (unit weights) for
    the S2 variables of dst."""
    weights = np.ones(len(dst))
    stat    = lambda values: Measurement(*weighted_mean_and_std(values, weights))
    return S2D(E=stat(dst.S2e.values),
               W=stat(dst.S2w),
               Q=stat(dst.S2q),
               N=stat(dst.Nsipm),
               X=stat(dst.X),
               Y=stat(dst.Y))
def h1n(n, nx, ny, names, h1ds, bins, ranges, xlabels, ylabels,
        titles=None, legends=None, figsize=(10, 10)):
    """
    Draw n 1d-histograms in an nx x ny grid.

    For each dataset the mean and std are computed (weighted, unit
    weights) over the slice of the array between the elements nearest to
    the range edges, and returned as a dict name -> Measurement.
    """
    fig   = plt.figure(figsize=figsize)
    stats = {}
    for k in range(n):
        ax   = fig.add_subplot(nx, ny, k + 1)
        data = h1ds[k]
        rng  = ranges[k]
        # locate the array positions nearest to the range edges
        i_lo = loc_elem_1d(data, find_nearest(data, rng[0]))
        i_hi = loc_elem_1d(data, find_nearest(data, rng[1]))
        first, last = min(i_lo, i_hi), max(i_lo, i_hi)
        selected = data[first:last]
        mu, std  = weighted_mean_and_std(selected, np.ones(len(selected)))
        stats[names[k]] = Measurement(mu, std)
        ax.set_xlabel(xlabels[k], fontsize=11)
        ax.set_ylabel(ylabels[k], fontsize=11)
        ax.hist(data, bins=bins[k], range=rng, histtype='step',
                edgecolor='black', linewidth=1.5,
                label=r'$\mu={:7.2f},\ \sigma={:7.2f}$'.format(mu, std))
        ax.legend(fontsize=10, loc=legends[k])
        plt.grid(True)
        if titles:
            plt.title(titles[k])
    plt.tight_layout()
    return stats
def fit_slices_2d_expo(kre: KrEvent,
                       krnb: KrNBins, krb: KrBins, krr: KrRanges,
                       fit_var="E",
                       min_entries=1e2) -> KrLTSlices:
    """
    Slice the data in x and y, make the profile in z of E (or Q when
    fit_var == "Q"), fit it to an exponential and return the relevant
    values as a KrLTSlices.

    Fixes: ``entries`` was printed but never computed (NameError in the
    low-statistics branch); a large relative error now flags the bin
    invalid AFTER filling it (the original set valid False and then
    unconditionally overwrote it with True).
    """
    xbins   = krb.XY
    ybins   = krb.XY
    nbins_x = np.size(xbins) - 1
    nbins_y = np.size(ybins) - 1
    nbins_z = krnb.Z
    nbins   = nbins_x, nbins_y
    const   = np.zeros(nbins)
    slope   = np.zeros(nbins)
    constu  = np.zeros(nbins)
    slopeu  = np.zeros(nbins)
    chi2    = np.zeros(nbins)
    valid   = np.zeros(nbins, dtype=bool)
    zrange  = krr.Z

    for i in range(nbins_x):
        sel_x = in_range(kre.X, *xbins[i:i + 2])
        for j in range(nbins_y):
            sel_y   = in_range(kre.Y, *ybins[j:j + 2])
            sel     = sel_x & sel_y
            entries = np.count_nonzero(sel)
            if entries < min_entries:
                print(f'entries ={entries} not enough to fit bin (i,j) =({i},{j})')
                valid[i, j] = False
                continue
            try:
                z = kre.Z[sel]
                t = kre.Q[sel] if fit_var == "Q" else kre.E[sel]
                f = fit_profile_1d_expo(z, t, nbins_z, xrange=zrange)
                re = np.abs(f.errors[1] / f.values[1])
                const [i, j] = f.values[0]
                constu[i, j] = f.errors[0]
                slope [i, j] = -f.values[1]
                slopeu[i, j] = f.errors[1]
                chi2  [i, j] = f.chi2
                valid [i, j] = True
                if re > 0.5:   # lifetime error too large: invalidate the bin
                    print(f'Relative error to large, re ={re} for bin (i,j) =({i},{j})')
                    valid[i, j] = False
            except Exception:
                pass
    return KrLTSlices(Ez0=Measurement(const, constu),
                      LT=Measurement(slope, slopeu),
                      chi2=chi2, valid=valid)
def histo_fit_fb_pars(fp  : FitParTS,
                      fpf : Optional[FitParTS] = None,
                      fpb : Optional[FitParTS] = None,
                      range_chi2 : Tuple[float, float] = (0, 3),
                      range_e0   : Tuple[float, float] = (10000, 12500),
                      range_lt   : Tuple[float, float] = (2000, 3000)) -> FitParFB:
    """
    Histogram chi2, E0 and LT for the full sample (fp) and, optionally,
    the forward (fpf) and backward (fpb) samples, and return their
    mean/std statistics as a FitParFB.

    Fix: when fpf/fpb are not given their statistics default to NaN —
    the original raised NameError at the return statement.
    """
    nan = float('nan')
    c2f_mu = c2f_std = c2b_mu = c2b_std = nan
    e0f_mu = e0f_std = e0b_mu = e0b_std = nan
    ltf_mu = ltf_std = ltb_mu = ltb_std = nan

    fig = plt.figure(figsize=(14, 6))

    ax = fig.add_subplot(1, 3, 1)
    _, _, c2_mu, c2_std = h1(fp.c2, bins=20, range=range_chi2,
                             color='black', stats=True)
    plot_histo(PlotLabels('chi2', 'Entries', ''), ax)
    if fpf:
        _, _, c2f_mu, c2f_std = h1(fpf.c2, bins=20, range=range_chi2,
                                   color='red', stats=True)
        plot_histo(PlotLabels('chi2', 'Entries', ''), ax)
    if fpb:
        _, _, c2b_mu, c2b_std = h1(fpb.c2, bins=20, range=range_chi2,
                                   color='blue', stats=True)
        plot_histo(PlotLabels('chi2', 'Entries', ''), ax)

    ax = fig.add_subplot(1, 3, 2)
    _, _, e0_mu, e0_std = h1(fp.e0, bins=20, range=range_e0,
                             color='black', stats=True)
    plot_histo(PlotLabels('E0', 'Entries', ''), ax)
    if fpf:
        _, _, e0f_mu, e0f_std = h1(fpf.e0, bins=20, range=range_e0,
                                   color='red', stats=True)
        plot_histo(PlotLabels('E0', 'Entries', ''), ax)
    if fpb:
        _, _, e0b_mu, e0b_std = h1(fpb.e0, bins=20, range=range_e0,
                                   color='blue', stats=True)
        plot_histo(PlotLabels('E0', 'Entries', ''), ax)

    ax = fig.add_subplot(1, 3, 3)
    _, _, lt_mu, lt_std = h1(fp.lt, bins=20, range=range_lt,
                             color='black', stats=True)
    plot_histo(PlotLabels('LT', 'Entries', ''), ax)
    if fpf:
        _, _, ltf_mu, ltf_std = h1(fpf.lt, bins=20, range=range_lt,
                                   color='red', stats=True)
        plot_histo(PlotLabels('LT', 'Entries', ''), ax)
    if fpb:
        _, _, ltb_mu, ltb_std = h1(fpb.lt, bins=20, range=range_lt,
                                   color='blue', stats=True)
        plot_histo(PlotLabels('LT', 'Entries', ''), ax)

    plt.tight_layout()
    return FitParFB(Measurement(c2_mu, c2_std),
                    Measurement(c2f_mu, c2f_std),
                    Measurement(c2b_mu, c2b_std),
                    Measurement(e0_mu, e0_std),
                    Measurement(e0f_mu, e0f_std),
                    Measurement(e0b_mu, e0b_std),
                    Measurement(lt_mu, lt_std),
                    Measurement(ltf_mu, ltf_std),
                    Measurement(ltb_mu, ltb_std))
def to_relative(data, *args, **kwargs):
    """
    Return a new Measurement whose uncertainties are relative
    instead of absolute.
    """
    value, uncertainty = data
    return Measurement(value, relative_errors(value, uncertainty,
                                              *args, **kwargs))
def fit_lifetime_slices(kre: KrEvent,
                        krnb: KrNBins, krb: KrBins, krr: KrRanges,
                        fit_var="E",
                        min_entries=1e2) -> KrLTSlices:
    """
    Slice the data in x and y, make the profile in z of E (or Q when
    fit_var == "Q"), fit it to an exponential and return the relevant
    values as a KrLTSlices.  Bins with a relative lifetime error above
    50% are flagged invalid.
    """
    xybins   = krb.XY
    nbins_xy = np.size(xybins) - 1
    nbins_z  = krnb.Z
    nbins    = nbins_xy, nbins_xy
    const    = np.zeros(nbins)
    slope    = np.zeros(nbins)
    constu   = np.zeros(nbins)
    slopeu   = np.zeros(nbins)
    chi2     = np.zeros(nbins)
    valid    = np.zeros(nbins, dtype=bool)
    zrange   = krr.Z

    for i in range(nbins_xy):
        sel_x = in_range(kre.X, *xybins[i:i + 2])
        for j in range(nbins_xy):
            sel_y   = in_range(kre.Y, *xybins[j:j + 2])
            sel     = sel_x & sel_y
            entries = np.count_nonzero(sel)
            if entries < min_entries:
                valid[i, j] = False
                continue
            try:
                z = kre.Z[sel]
                t = kre.Q[sel] if fit_var == "Q" else kre.E[sel]
                x, y, yu = fitf.profileX(z, t, nbins_z, zrange)
                seed = expo_seed(x, y)
                f    = fitf.fit(fitf.expo, x, y, seed, sigma=yu)
                re   = np.abs(f.errors[1] / f.values[1])
                const [i, j] = f.values[0]
                constu[i, j] = f.errors[0]
                slope [i, j] = -f.values[1]
                slopeu[i, j] = f.errors[1]
                chi2  [i, j] = f.chi2
                valid [i, j] = True
                if re > 0.5:   # lifetime error too large: invalidate the bin
                    valid[i, j] = False
            except Exception:  # was a bare except
                print(f'fit failed for bin (i,j) =({i},{j})')
    return KrLTSlices(Es=Measurement(const, constu),
                      LT=Measurement(slope, slopeu),
                      chi2=chi2, valid=valid)
def fit_slices_2d_expo(xdata, ydata, zdata, tdata,
                       xbins, ybins, nbins_z, zrange=None,
                       min_entries   = 1e2,
                       ignore_errors = _FIT_EXCEPTIONS):
    """
    Slice the data in x and y, make the profile in z of t and fit it to
    an exponential.

    Parameters
    ----------
    xdata, ydata, zdata, tdata: array_likes
        Values of each coordinate.
    xbins, ybins: array_like
        The bins in the x coordinate.
    nbins_z: int
        The number of bins in the z coordinate for the profile.
    zrange: length-2 tuple (optional)
        Fix the range in z.  Default is computed from min and max
        of the input data.
    min_entries: int (optional)
        Minimum amount of entries to perform the fit.

    Returns
    -------
    const, slope: Measurement(np.ndarray, np.ndarray)
        Fitted const and slope with their errors.
    chi2: np.ndarray
        Chi2 of each fit.
    valid: boolean np.ndarray
        True where the fit succeeded.
    """
    shape = np.size(xbins) - 1, np.size(ybins) - 1

    const , constu = np.zeros(shape), np.zeros(shape)
    slope , slopeu = np.zeros(shape), np.zeros(shape)
    chi2           = np.zeros(shape)
    valid          = np.zeros(shape, dtype=bool)

    if zrange is None:
        zrange = np.min(zdata), np.max(zdata)

    for i in range(shape[0]):
        in_xslice = in_range(xdata, *xbins[i:i + 2])
        for j in range(shape[1]):
            in_yslice = in_range(ydata, *ybins[j:j + 2])
            slice_sel = in_xslice & in_yslice
            if np.count_nonzero(slice_sel) < min_entries:
                continue
            try:
                fit = fit_profile_1d_expo(zdata[slice_sel], tdata[slice_sel],
                                          nbins_z, xrange=zrange)
                const [i, j] = fit.values[0]
                constu[i, j] = fit.errors[0]
                slope [i, j] = fit.values[1]
                slopeu[i, j] = fit.errors[1]
                chi2  [i, j] = fit.chi2
                valid [i, j] = True
            except Exception as exc:
                # only the configured fit exceptions are tolerated
                if not isinstance(exc, ignore_errors):
                    raise
    return Measurement(const, constu), Measurement(slope, slopeu), chi2, valid
def fwhm(fpar, ferr):
    """
    Return the FWHM resolution (in %) from gaussian fit parameters:
    2.355 * 100 * sigma / mu, with fpar = (amp, mu, sigma) and
    ferr the corresponding errors.

    Fix: the relative errors are now squared before being summed in
    quadrature; the original used ferr/fpar**2 terms, which is
    dimensionally inconsistent.
    """
    r  = 2.355 * 100. * fpar[2] / fpar[1]
    ru = r * np.sqrt((ferr[1] / fpar[1])**2 + (ferr[2] / fpar[2])**2)
    return Measurement(r, ru)
def fit_and_plot_slices_2d_expo(kre: KrEvent,
                                krnb: KrNBins, krb: KrBins, krr: KrRanges,
                                fit_var="E",
                                min_entries=1e2,
                                figsize=(12, 12)) -> KrLTSlices:
    """
    Slice the data in x and y, profile E (or Q when fit_var == "Q") in z,
    fit an exponential in every bin and plot each profile with its fit
    (25 pads per figure).

    Fixes: the 'return' was split from its KrLTSlices(...) expression, so
    the function returned None; a large relative error now invalidates the
    bin AFTER it is filled (the original overwrote the flag with True);
    bare except narrowed.
    """
    xybins   = krb.XY
    nbins_xy = np.size(xybins) - 1
    nbins_z  = krnb.Z
    nbins    = nbins_xy, nbins_xy
    const    = np.zeros(nbins)
    slope    = np.zeros(nbins)
    constu   = np.zeros(nbins)
    slopeu   = np.zeros(nbins)
    chi2     = np.zeros(nbins)
    valid    = np.zeros(nbins, dtype=bool)
    zrange   = krr.Z

    fig = plt.figure(figsize=figsize)
    k = 0
    for i in range(nbins_xy):
        sel_x = in_range(kre.X, *xybins[i:i + 2])
        for j in range(nbins_xy):
            if k % 25 == 0:          # start a new 5x5 figure every 25 bins
                k = 0
                fig = plt.figure(figsize=figsize)
            ax = fig.add_subplot(5, 5, k + 1)
            k += 1

            sel_y   = in_range(kre.Y, *xybins[j:j + 2])
            sel     = sel_x & sel_y
            entries = np.count_nonzero(sel)
            if entries < min_entries:
                print(f'entries ={entries} not enough to fit bin (i,j) =({i},{j})')
                valid[i, j] = False
                continue
            try:
                z = kre.Z[sel]
                t = kre.Q[sel] if fit_var == "Q" else kre.E[sel]
                x, y, yu = fitf.profileX(z, t, nbins_z, zrange)
                ax.errorbar(x, y, yu, np.diff(x)[0] / 2, fmt="kp", ms=7, lw=3)
                seed = expo_seed(x, y)
                f    = fitf.fit(fitf.expo, x, y, seed, sigma=yu)
                plt.plot(x, f.fn(x), "r-", lw=4)
                plt.grid(True)
                re = np.abs(f.errors[1] / f.values[1])
                const [i, j] = f.values[0]
                constu[i, j] = f.errors[0]
                slope [i, j] = -f.values[1]
                slopeu[i, j] = f.errors[1]
                chi2  [i, j] = f.chi2
                valid [i, j] = True
                if re > 0.5:         # lifetime error too large: invalidate bin
                    print(f'Relative error to large, re ={re} for bin (i,j) =({i},{j})')
                    print(f' LT +- LTu = {-f.values[1]} +- {f.errors[1]}')
                    print(f' LTu/LT = {re} chi2 = {f.chi2}')
                    valid[i, j] = False
            except Exception:
                print(f'fit failed for bin (i,j) =({i},{j})')
    plt.tight_layout()
    return KrLTSlices(Ez0=Measurement(const, constu),
                      LT=Measurement(slope, slopeu),
                      chi2=chi2, valid=valid)