def test_fitter():
    """Check that Fitter honours explicit xmin/xmax and falls back to the
    data range (here [1, 3]) when the bounds are reset to None."""
    f = Fitter([1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3],
               distributions=['gamma'], xmin=0, xmax=4)
    try:
        f.plot_pdf()
    except Exception:
        # plotting may fail (e.g. headless backend); this test only cares
        # about the fitted range, so tolerate plot errors — but never use a
        # bare except, which would also swallow KeyboardInterrupt/SystemExit
        pass
    f.fit()
    f.summary()
    assert f.xmin == 0
    assert f.xmax == 4
    # reset the range: None restores the bounds derived from the data
    f.xmin = None
    f.xmax = None
    assert f.xmin == 1
    assert f.xmax == 3
    # without explicit bounds the data range is used from the start
    f = Fitter([1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3], distributions=['gamma'])
    f.fit()
    f.summary()
    assert f.xmin == 1
    assert f.xmax == 3
def makeCSGraphUnderground(ctype, startGammaEE=None):
    """Plot cross-section data for channel `ctype`, fit a Breit-Wigner
    resonance plus linear underground, and save plot and fit results.

    If `startGammaEE` is None, a single partial width enters the numerator
    squared (parameter [3], 5-parameter fit).  Otherwise parameter [3] is
    the electron width, fixed to `startGammaEE`, and [5] is the free
    channel width (6-parameter fit).

    Returns the fitted value of parameter 3 (a partial width).
    """
    data = loadCrossSection(ctype)
    c = TCanvas('c_%s' % ctype, '', 1280, 720)
    g = data.makeGraph('g_%s' % ctype, '#sqrt{s} / GeV', '#sigma / nb')
    g.Draw('AP')
    if not startGammaEE:
        # [0]+[1]*x: linear underground; the rest is the Breit-Wigner term;
        # 0.3894e6 converts GeV^-2 to nb
        fit = Fitter(
            'fit_%s' % ctype,
            '[0] + [1] * x + 12*pi / [2]^2 * x^2 * [3]^2 / ((x^2-[2]^2)^2 + x^4 * [4]^2 / [2]^2) * 0.3894e6'
        )
        fitfuncstring = "a + b#sqrt{s} + #frac{12#pi}{M_{Z}^{2}} #frac{s #Gamma_{%s}^{2}}{(s-M_{Z}^{2})^{2} + s^{2} #Gamma_{Z}^{2} / M_{Z}^{2}}" % ctype[0]
    else:
        # numerator [3]*[5]: fixed electron width times free channel width
        fit = Fitter(
            'fit_%s' % ctype,
            '[0] + [1] * x + 12*pi / [2]^2 * x^2 * [3] * [5] / ((x^2-[2]^2)^2 + x^4 * [4]^2 / [2]^2) * 0.3894e6'
        )
        fitfuncstring = "a + b#sqrt{s} + #frac{12#pi}{M_{Z}^{2}} #frac{s #Gamma_{e} #Gamma_{%s}}{(s-M_{Z}^{2})^{2} + s^{2} #Gamma_{Z}^{2} / M_{Z}^{2}}" % ctype[0]
    fit.setParam(0, 'a', 0)
    fit.setParam(1, 'b', 0)
    fit.setParam(2, 'M_{Z}', 91.2)      # Z mass start value / GeV
    fit.setParam(4, '#Gamma_{Z}', 2.5)  # total Z width start value / GeV
    if not startGammaEE:
        fit.setParam(3, '#Gamma_{%s}' % ctype[0], 0.08)
    else:
        # last argument True fixes the parameter at the supplied value
        fit.setParam(3, '#Gamma_{e}', startGammaEE, True)
        fit.setParam(5, '#Gamma_{%s}' % ctype[0], 2)
    fit.fit(g, 88, 94)  # fit window around the Z resonance / GeV
    fit.saveData('../fit/crosssections_%s.txt' % ctype)
    l = TLegend(0.625, 0.575, 0.98, 0.98)
    l.SetTextSize(0.03)
    l.AddEntry(g, "%s Wirkungsquerschnitte" % ctype, 'p')
    l.AddEntry(fit.function, "Fit mit #sigma(s) = ", 'l')
    l.AddEntry(None, "", '')
    l.AddEntry(None, fitfuncstring, '')
    l.AddEntry(None, "", '')
    if not startGammaEE:
        fit.addParamsToLegend(
            l, [('%.3f', '%.3f'), ('%.3f', '%.3f'), ('%.3f', '%.3f'),
                ('%.3f', '%.3f'), ('%.3f', '%.3f')],
            chisquareformat='%f',
            units=['nb', 'nb/GeV', 'GeV / c^{2}', 'GeV', 'GeV'])
    else:
        # the bare '%.4f' entry formats the fixed parameter (no error column)
        fit.addParamsToLegend(
            l, [('%.3f', '%.3f'), ('%.3f', '%.3f'), ('%.3f', '%.3f'), '%.4f',
                ('%.3f', '%.3f'), ('%.3f', '%.3f')],
            chisquareformat='%f',
            units=['nb', 'nb/GeV', 'GeV/c^{2}', 'GeV', 'GeV', 'GeV'])
    l.Draw()
    c.Update()
    c.Print('../img/crosssections_%s.pdf' % ctype, 'pdf')
    return fit.params[3]['value']
def test_others():
    """Fit gamma-distributed samples with the 'common' distribution set and
    with a single named distribution; both must yield an AIC above 100."""
    from scipy import stats
    sample = stats.gamma.rvs(2, loc=1.5, scale=2, size=1000)

    fitter = Fitter(sample, bins=100, distributions="common")
    fitter.fit()
    assert fitter.df_errors.loc["gamma"].loc['aic'] > 100

    fitter = Fitter(sample, bins=100, distributions="gamma")
    fitter.fit()
    assert fitter.df_errors.loc["gamma"].loc['aic'] > 100
def fit_powerlaw_distribution(data):
    """Fit a power-law distribution to *data* and return the first fitted
    parameter (the exponent alpha) of the best fit."""
    fitter = Fitter(data)
    fitter.distributions = ['powerlaw']
    fitter.fit()
    best = fitter.get_best()
    first_params = next(iter(best.values()))
    return first_params[0]
def best_fit_all_continuous(self, data):
    """Fit *data* against (nearly) every continuous scipy.stats
    distribution and print a summary of the best fits.

    Distribution names are resolved by name with hasattr/getattr-style
    filtering so that entries that no longer exist in the installed scipy
    (e.g. frechet_r / frechet_l, removed in scipy >= 1.6) are skipped
    instead of raising AttributeError at the `st.frechet_r` lookup.
    """
    # Candidate continuous distributions, by name.
    candidate_names = [
        'alpha', 'anglit', 'arcsine', 'beta', 'betaprime', 'bradford',
        'burr', 'cauchy', 'chi', 'chi2', 'cosine', 'dgamma', 'dweibull',
        'erlang', 'expon', 'exponnorm', 'exponweib', 'exponpow', 'f',
        'fatiguelife', 'fisk', 'foldcauchy', 'foldnorm', 'frechet_r',
        'frechet_l', 'genlogistic', 'genpareto', 'gennorm', 'genexpon',
        'genextreme', 'gausshyper', 'gamma', 'gengamma', 'genhalflogistic',
        'gilbrat', 'gompertz', 'gumbel_r', 'gumbel_l', 'halfcauchy',
        'halflogistic', 'halfnorm', 'halfgennorm', 'hypsecant', 'invgamma',
        'invgauss', 'invweibull', 'johnsonsb', 'johnsonsu', 'ksone',
        'kstwobign', 'laplace', 'levy', 'levy_l', 'levy_stable', 'logistic',
        'loggamma', 'loglaplace', 'lognorm', 'lomax', 'maxwell', 'mielke',
        'nakagami', 'ncx2', 'ncf', 'nct', 'norm', 'pareto', 'pearson3',
        'powerlaw', 'powerlognorm', 'powernorm', 'rdist', 'reciprocal',
        'rayleigh', 'rice', 'recipinvgauss', 'semicircular', 't', 'triang',
        'truncexpon', 'truncnorm', 'tukeylambda', 'uniform', 'vonmises',
        'vonmises_line', 'wald', 'weibull_min', 'weibull_max', 'wrapcauchy'
    ]
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore')
        # keep only names this scipy version actually provides
        dists = [name for name in candidate_names if hasattr(st, name)]
        f = Fitter(data, distributions=dists)
        f.fit()
        f.summary()
def evalPedestal():
    """Evaluate the pedestal run: plot the countrate spectrum, fit a
    Gaussian to the pedestal peak and save plot and fit data.

    Returns a tuple (value, error) of the fitted peak position (channel).
    """
    name = 'pedestal'
    data = MyonData.fromPath('../data/%s.TKA' % name)
    data.convertToCountrate()
    c = TCanvas('c_ped', '', 1280, 720)
    g = data.makeGraph('g_ped', 'channel c', 'countrate n / (1/s)')
    g.SetLineColor(1)
    g.SetLineWidth(1)
    g.GetXaxis().SetRangeUser(0, 20)  # pedestal sits in the low channels
    g.Draw('APX')
    fit = Fitter('fit_%s' % name, 'gaus(0)')
    fit.setParam(0, 'A', 30)       # amplitude start value
    fit.setParam(1, 'x', 6)        # peak position start value
    fit.setParam(2, '#sigma', 3)   # width start value
    fit.setParamLimits(2, 0, 100)  # keep the width positive
    fit.fit(g, 3.5, 10.5)
    fit.saveData('../fit/%s.txt' % name)
    l = TLegend(0.55, 0.6, 0.85, 0.85)
    l.SetTextSize(0.03)
    l.AddEntry(g, 'pedestal', 'p')
    l.AddEntry(fit.function, 'fit with n(c) = A gaus(c; x, #sigma)', 'l')
    fit.addParamsToLegend(l,
                          (('%.2f', '%.2f'), ('%.3f', '%.3f'), ('%.3f', '%.3f')),
                          chisquareformat='%.2f',
                          units=('1/s', '', ''),
                          lang='en')
    l.Draw()
    g.Draw('P')
    c.Update()
    c.Print('../img/%s.pdf' % name, 'pdf')
    return (fit.params[1]['value'], fit.params[1]['error'])
def fitting_marginals(inp_data):
    """Fit a marginal distribution to every column of a dataframe.

    Parameters:
        inp_data -- pandas DataFrame; each column is fitted independently.

    Returns:
        list of frozen scipy.stats distribution objects, one per column,
        built from the best (lowest sumsquare_error) distribution that
        actually produced fitted parameters.
    """
    fits = []
    # `range` replaces the Python-2-only `xrange`
    for i in range(inp_data.shape[1]):
        f = Fitter(inp_data.iloc[:, i])
        f.fit()
        # choose best distribution and best parameter fit: walk the error
        # table in ascending order until one with fitted parameters is found
        best_dist = None
        best_params = None
        for cand_dist in f.df_errors.sort_values('sumsquare_error').index:
            if cand_dist in f.fitted_param:
                best_dist = cand_dist
                best_params = f.fitted_param[cand_dist]
                break
        fits.append((best_dist, best_params))
        f.summary()
    # generate frozen scipy rv objects for the marginals; getattr replaces
    # the original eval() — safer and equivalent for attribute lookup
    marginal_fits = [
        getattr(scipy.stats, dist_name)(*params)
        for dist_name, params in fits
    ]
    return marginal_fits
def main():
    """Convert measured amplitudes to attenuation in dB, plot them against
    the nominal values and fit a straight line n = a + b m."""
    z, sz = 840, 40
    d = [210, 106, 75]
    sd = [10, 4, 4]
    # attenuation and its propagated uncertainty for each measurement
    calc = [-20 * log10(val / z) for val in d]
    scalc = [
        20 * sqrt((err / val)**2 + (sz / z)**2) / log(10)
        for val, err in zip(d, sd)
    ]
    data = DataErrors.fromLists(calc, [12, 18, 21], scalc, [0] * 3)
    c = TCanvas('c', '', 1280, 720)
    g = data.makeGraph('g', 'measured attenuation m / dB',
                       'nominal value n / dB')
    g.Draw('AP')
    fit = Fitter('fit', 'pol1(0)')
    fit.setParam(0, 'a', 0)
    fit.setParam(1, 'b', 1)
    fit.fit(g, 11, 22)
    fit.saveData('../fit/attenuator.txt')
    l = TLegend(0.15, 0.6, 0.5, 0.85)
    l.SetTextSize(0.03)
    l.AddEntry(g, 'measured att. vs. nominal value', 'p')
    l.AddEntry(fit.function, 'fit with n = a + b m', 'l')
    fit.addParamsToLegend(l, (('%.2f', '%.2f'), ('%.2f', '%.2f')),
                          chisquareformat='%.4f',
                          units=['dB', ''],
                          lang='en')
    l.Draw()
    c.Update()
    c.Print('../img/attenuator.pdf', 'pdf')
def fitTransmissionSignal(name):
    """Fit an exponential saturation curve to a transmission signal.

    Loads channel 2 of the data file, fits U(t) = a - b*exp(-t/tau) in a
    fixed time window, saves plot and fit data, and returns the fitted
    time constant as (value, error).
    """
    data = OPData.fromPath(DIR + name + '.tab', 2)
    c = TCanvas('c', '', 1280, 720)
    g = data.makeGraph('g_%s' % name, 'Zeit t / s',
                       'Spannung der Photodiode U_{ph} / V')
    prepareGraph(g, 2)
    g.GetXaxis().SetRangeUser(0.004, 0.019)
    g.Draw('APX')
    xmin, xmax = 0.0054, 0.015  # fit window in seconds
    fit = Fitter('fit_%s' % name[-2:], '[0] - [1] * exp(-x/[2])')
    fit.setParam(0, 'a', 0.01)      # asymptotic voltage start value
    fit.setParam(1, 'b', 100)       # amplitude start value
    fit.setParam(2, '#tau', 0.001)  # time constant start value
    # 'M' fit option: presumably ROOT's improved minimum search — confirm
    fit.fit(g, xmin, xmax, 'M')
    fit.saveData('../fit/part5/%s.txt' % name)
    g.Draw('P')
    l = TLegend(0.35, 0.2, 0.65, 0.525)
    l.SetTextSize(0.03)
    l.AddEntry(g, 'Spannung der Photodiode', 'p')
    l.AddEntry(fit.function, 'Fit mit U_{ph}(t) = a - b e^{-t/#tau}', 'l')
    fit.addParamsToLegend(l,
                          [('%.6f', '%.6f'), ('%.2f', '%.2f'), ('%.6f', '%.6f')],
                          chisquareformat='%.2f',
                          units=['V', 'V', 's'])
    l.Draw()
    c.Update()
    c.Print('../img/part5/%s.pdf' % name.replace('.', '-'), 'pdf')
    return fit.params[2]['value'], fit.params[2]['error']
def makeBFit(darkTimes, Bs):
    """Fit a saturating exponential B(t_D) = a + b*(1 - exp(-t_D/T)) to the
    fit parameter B as a function of dark time, and save plot and fit data.

    darkTimes, Bs -- lists of (value, error) tuples.
    """
    dt, sdt = list(zip(*darkTimes))
    b, sb = list(zip(*Bs))
    data = DataErrors.fromLists(dt, b, sdt, sb)
    c = TCanvas('c_B', '', 1280, 720)
    g = data.makeGraph('g_B', 'Dunkelzeit t_{D} / ms', 'Fitparameter B / V')
    g.Draw('APX')
    fit = Fitter('fit_B', '[0] + [1] * (1 - exp(-x/[2]))')
    fit.function.SetNpx(1000)  # draw the fit curve with many sample points
    fit.setParam(0, 'a', 0.1)          # offset start value
    fit.setParam(1, 'b', 0.1)          # amplitude start value
    fit.setParam(2, 'T_{R_{F}}', 6)    # rise time start value / ms
    fit.fit(g, 0, 25)
    fit.saveData('../fit/B.txt')
    l = TLegend(0.55, 0.15, 0.85, 0.6)
    l.SetTextSize(0.03)
    l.AddEntry(g, 'Fitparameter B', 'p')
    l.AddEntry(fit.function,
               'Fit mit B(t_{D}) = a + b (1 - e^{-x/T_{R_{F}}})', 'l')
    fit.addParamsToLegend(l,
                          (('%.3f', '%.3f'), ('%.3f', '%.3f'), ('%.1f', '%.1f')),
                          chisquareformat='%.2f',
                          units=['V', 'V', 'ms'])
    l.Draw()
    g.Draw('P')
    c.Print('../img/part6/BFit.pdf', 'pdf')
def compareSpectrum(prefix, spectrum, litvals):
    """Compare measured HFS splittings with theoretical values by fitting a
    straight line through (experimental, theoretical) pairs.

    prefix   -- tag ('up' or other) used in object and file names
    spectrum -- list of (value, error) tuples of experimental splittings
    litvals  -- list of theoretical splittings (taken as error-free)
    """
    xlist = list(zip(*spectrum))[0]
    sxlist = list(zip(*spectrum))[1]
    compData = DataErrors.fromLists(xlist, litvals, sxlist,
                                    [0] * len(litvals))
    c = TCanvas('c_%s_compspectrum' % prefix, '', 1280, 720)
    g = compData.makeGraph(
        'g_%s_compspectrum' % prefix,
        'experimentell bestimmte HFS-Aufspaltung #Delta#nu^{exp}_{%s} / GHz' % prefix,
        'theoretische HFS-Aufspaltung #Delta#nu^{theo} / GHz')
    g.Draw('AP')
    fit = Fitter('fit_%s_compspectum' % prefix, 'pol1(0)')
    fit.setParam(0, 'a_{%s}' % prefix, 0)  # offset start value
    fit.setParam(1, 'b_{%s}' % prefix, 1)  # slope start value
    # fit slightly beyond the data range on both sides
    fit.fit(g, compData.getMinX() - 0.5, compData.getMaxX() + 0.5)
    # legend width depends on the prefix length
    if prefix == "up":
        l = TLegend(0.15, 0.6, 0.45, 0.85)
    else:
        l = TLegend(0.15, 0.6, 0.5, 0.85)
    l.SetTextSize(0.03)
    l.AddEntry(g, 'Spektrum', 'p')
    l.AddEntry(fit.function,
               'Fit mit #Delta#nu^{theo} = a_{%s} + b_{%s} #Delta#nu^{exp}_{%s}' % (prefix, prefix, prefix),
               'l')
    fit.addParamsToLegend(l, [('%.2f', '%.2f'), ('%.3f', '%.3f')],
                          chisquareformat='%.2f',
                          units=['GHz', ''])
    l.Draw()
    c.Update()
    if not DEBUG:
        c.Print('../img/part2/%s-spectrum.pdf' % prefix, 'pdf')
def fitdist(**kwargs):
    """Fit candidate distributions to one column of a CSV file and report
    the best fit.

    Expected kwargs: filename, column_number (1-based), delimiter,
    distributions (comma-separated names), verbose, progress, tag.
    Side effects: saves the summary plot to <tag>.png and writes the best
    fit description to <tag>.log.
    """
    import csv
    col = kwargs['column_number']
    with open(kwargs["filename"], "r") as csvfile:
        reader = csv.reader(csvfile, delimiter=kwargs['delimiter'])
        data = [float(row[col - 1]) for row in reader]
    from fitter import Fitter
    distributions = [x.strip() for x in kwargs['distributions'].split(",")]
    fit = Fitter(data, distributions=distributions)
    # suppress the progress bar in quiet mode (truthiness test instead of
    # the `is False` identity comparison)
    if not kwargs['verbose']:
        kwargs["progress"] = False
    fit.fit(progress=kwargs["progress"])
    fit.summary()
    from pylab import savefig
    if kwargs['verbose']:
        print()
        print("Saved image in fitter.png; use --output-image to change the name")
    tag = kwargs['tag']
    savefig("{}.png".format(tag))
    best = fit.get_best()
    bestname = list(best.keys())[0]
    values = list(best.values())[0]
    msg = f"Fitter version {version}\nBest fit is {bestname} distribution\nparameters: "
    msg += f"{values}\n The parameters have to be used in that order in scipy"
    if kwargs["verbose"]:
        print(msg)
    with open("{}.log".format(tag), "w") as fout:
        fout.write(msg)
def makeSigmaFit(darkTimes, sigmas):
    """Fit a straight line to the Fermi-distribution smearing sigma as a
    function of the dark time, and save plot and fit data.

    darkTimes, sigmas -- lists of (value, error) tuples.
    """
    dt, sdt = list(zip(*darkTimes))
    s, ss = list(zip(*sigmas))
    data = DataErrors.fromLists(dt, s, sdt, ss)
    c = TCanvas('c_sigma', '', 1280, 720)
    g = data.makeGraph('g_sigma', 'Dunkelzeit t_{D} / ms',
                       'Verschmierung #sigma / #mus')
    g.Draw('APX')
    fit = Fitter('fit_sigma', 'pol1(0)')
    fit.setParam(0, 'a', 0)  # offset start value
    fit.setParam(1, 'b', 1)  # slope start value
    fit.fit(g, 0, 25)
    fit.saveData('../fit/sigma.txt')
    l = TLegend(0.6, 0.15, 0.85, 0.5)
    l.SetTextSize(0.03)
    l.AddEntry(g, 'Verschmierung #sigma', 'p')
    l.AddEntry(None, 'der Fermi-Verteilung', '')
    l.AddEntry(fit.function, 'Fit mit #sigma(t_{D}) = a + b t_{D}', 'l')
    fit.addParamsToLegend(l, (('%.2f', '%.2f'), ('%.2f', '%.2f')),
                          chisquareformat='%.2f',
                          units=['#mus', '10^{-3}'])
    l.Draw()
    g.Draw('P')
    c.Print('../img/part6/sigmaFit.pdf', 'pdf')
def get_fitter():
    """Build a Fitter preconfigured for SPE-spectrum fitting: histogram
    range and binning, initial parameter values, parameter limits and the
    set of fixed parameters."""
    initial = {
        'norm': None,  # automatically calculated by fitter from histogram
        'eped': -0,
        'eped_sigma': 10,
        'spe': 38,
        'spe_sigma': 2,
        'lambda_': 0.7,
        'opct': 0.4,
        'pap': 0.09,
        'dap1': 0.5,
        'dap2': 0.5,
    }
    limit = {
        'limit_norm': (0, 100000),
        'limit_eped': (-10, 10),
        'limit_eped_sigma': (2, 20),
        'limit_spe': (30, 50),
        'limit_spe_sigma': (2, 20),
        'limit_lambda_': (0.1, 3),
        'limit_opct': (0, 0.8),
        'limit_pap': (0, 0.8),
        'limit_dap1': (0, 0.8),
        'limit_dap2': (0, 0.8),
    }
    fitter = Fitter()
    fitter.range = [-50, 250]
    fitter.nbins = 100
    fitter.initial = initial
    fitter.limits = limit
    fitter.fix = {'fix_norm': True}
    return fitter
def distribucion_fitter(data):
    """Determine the best-fitting distribution for each numeric column of
    *data* using the `fitter` library.

    For every int64/float64 column, the candidate distributions are fitted
    (using all CPU cores) and the best result by sum of squared errors is
    recorded.  This is essentially an alternative front-end to scipy's
    fitting; it differs from a direct scipy approach mainly in efficiency.

    Returns a DataFrame with one row per numeric column: the column name,
    the best distribution's parameters, and the fitter summary statistics.
    """
    distr = [
        "norm", "exponweib", "weibull_max", "weibull_min", "pareto",
        "uniform", "t", "expon", "lognorm", "beta", "alpha", "cauchy", "f",
        "loguniform", "chi2", "laplace", "gamma"
    ]
    # names of all columns whose values are numeric (int64/float64)
    numeric_mask = list((data.dtypes == "int64") | (data.dtypes == "float64"))
    nombres = [col for col, is_num in zip(data.columns, numeric_mask) if is_num]
    dfs, parametros = [], []
    for ele in nombres:
        fitter = Fitter(data[ele], distributions=distr)  # main fit driver
        # use every available CPU core for the fits
        fitter.fit(n_jobs=multiprocessing.cpu_count())
        # summary of the single best fit for this column (no plotting)
        p = fitter.summary(Nbest=1, plot=False)
        parametros.append(fitter.get_best(method='sumsquare_error'))
        dfs.append(p)  # collect one summary DataFrame per column
    full = pd.concat(dfs, ignore_index=False)  # stack all summaries
    full.insert(0, "Columna", nombres)
    # parameters of the best distribution, aligned with each column
    full.insert(2, 'parametros', parametros)
    return full
def fitdistribution(self):
    """Standardize the data, fit candidate distributions to it and return
    the fitter summary."""
    self.standardize()
    # By default every known distribution is tried, which may take a while;
    # a smaller set of distributions can be provided manually instead.
    fitted = Fitter(self.data.func)
    fitted.fit()
    return fitted.summary()
def fit_all(miss):
    """Fit every available distribution to *miss*, print the error table
    sorted by sum of squared errors, log the best fit and show the summary
    plot."""
    candidates = get_distributions()
    fitter = Fitter(miss, timeout=600, distributions=candidates)
    fitter.fit()
    print(fitter.df_errors.sort_values('sumsquare_error'))
    logmsg('best fit = %s', str(fitter.get_best()))
    fitter.summary()
    plt.show()
def fit_exp(miss):
    """Fit an exponential distribution to *miss* and return its row of the
    fit-error table."""
    fitter = Fitter(miss, distributions=['expon'], timeout=600)
    fitter.fit()
    return fitter.df_errors['expon']
def calculate_text_similarity_distribution(reviewers_df, cdfs):
    """Fit the candidate distributions *cdfs* to the reviewers'
    similarity_index column and return the best fit's pdf evaluated at
    every sample point."""
    sample = reviewers_df['similarity_index'].tolist()
    f = Fitter(sample, distributions=cdfs)
    f.fit()
    best = f.get_best()
    key = list(best.keys())[0]
    # look the distribution up by name — getattr replaces the original
    # eval("sc." + key), which needlessly executed a constructed string
    dist = getattr(sc, key)
    distribution_index = dist.pdf(sample, *(f.fitted_param[key]))
    return distribution_index
def calculate_average_helpfulness_distribution(reviewers_df, cdfs):
    """Fit the candidate distributions *cdfs* to the reviewers'
    avg_helpfulness column and return the best fit's pdf evaluated at
    every sample point."""
    sample = reviewers_df['avg_helpfulness'].tolist()
    f = Fitter(sample, distributions=cdfs)
    f.fit()
    best = f.get_best()
    key = list(best.keys())[0]
    # look the distribution up by name — getattr replaces the original
    # eval("sc." + key), which needlessly executed a constructed string
    dist = getattr(sc, key)
    distribution_avg = dist.pdf(sample, *(f.fitted_param[key]))
    return distribution_avg
def fit_exp(miss):
    """Fit an exponential distribution to *miss*, log and summarize the
    result, show the plot and return the fitted parameters."""
    fitter = Fitter(miss, distributions=['expon'], timeout=600)
    fitter.fit()
    fitted = fitter.fitted_param['expon']
    logmsg('fitted params exp = %s', str(fitted))
    fitter.summary()
    plt.show()
    return fitted
def fitLaserVoltage(g, xmin, xmax, file):
    """Fit a straight line to the laser voltage graph *g* between *xmin*
    and *xmax*, save the fit data and return a tuple of
    (slope value, slope error, fit function)."""
    fit = Fitter('%s-laser' % file[:-4], 'pol1(0)')
    fit.function.SetLineColor(92)
    fit.function.SetLineWidth(2)
    fit.setParam(0, 'a', 0)
    fit.setParam(1, 'b', 100)
    fit.fit(g, xmin, xmax, '+')
    fit.saveData('../fit/part2/%s-laser.txt' % file)
    slope = fit.params[1]
    return (slope['value'], slope['error'], fit.function)
def calculate_rating_deviation_distribution(reviewers_df, cdfs):
    """Fit the candidate distributions *cdfs* to the reviewers'
    rating_deviation column and return the best fit's pdf evaluated at
    every sample point."""
    sample = reviewers_df['rating_deviation'].tolist()
    f = Fitter(sample, distributions=cdfs)
    f.fit()
    best = f.get_best()
    key = list(best.keys())[0]
    # look the distribution up by name — getattr replaces the original
    # eval("sc." + key), which needlessly executed a constructed string
    dist = getattr(sc, key)
    distribution_rating = dist.pdf(sample, *(f.fitted_param[key]))
    return distribution_rating
def calculate_product_count_distribution(reviewers_df, cdfs):
    """Fit the candidate distributions *cdfs* to the reviewers'
    common_products column, show the fit summary and return the best
    fit's pdf evaluated at every sample point."""
    sample = reviewers_df['common_products'].tolist()
    f = Fitter(sample, distributions=cdfs)
    f.fit()
    f.summary()
    best = f.get_best()
    key = list(best.keys())[0]
    # look the distribution up by name — getattr replaces the original
    # eval("sc." + key), which needlessly executed a constructed string
    dist = getattr(sc, key)
    distribution_common = dist.pdf(sample, *(f.fitted_param[key]))
    return distribution_common
def init_perceptron_from(arguments) -> Fitter:
    """Build a Fitter wrapping a Perceptron configured from the parsed
    command-line *arguments*."""
    model = Perceptron(
        random_state=42,
        n_jobs=-1,
        early_stopping=arguments.early_stopping_perceptron,
        penalty=arguments.penalty_perceptron,
        alpha=arguments.alpha,
        eta0=arguments.eta0,
        tol=arguments.tol,
        validation_fraction=arguments.validation_fraction,
    )
    return Fitter(model)
def get_accuracy(params):
    """Build a GNN from *params*, train it with the dataset-specific
    optimizer settings and return the best validation accuracy as the
    reward."""
    gnn_graph = utils.build_gnn_graph(dataset, params)
    model = GNN(gnn_graph).to(device)
    cfg = utils.from_json("json/setting.json")[args.dataset]
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=cfg["learning_rate"],
                                 weight_decay=cfg["weight_decay"])
    history = Fitter(model, data, optimizer).run(verbose=args.verbose)
    return max(history.val.acc)
def evalDiode():
    """Evaluate the laser diode characteristic: plot photodiode voltage
    against laser current, fit the linear lasing region above threshold
    with U_ph = a * (I_L - I_th) and save the plot.
    """
    datalist = loadCSVToList('../data/part1/Kennlinie.txt')
    data = DataErrors()
    U0 = datalist[0][1]      # offset voltage taken from the first point
    sU0 = 0.05 + 0.01 * U0   # its uncertainty (absolute + relative part)
    for I, u in datalist:
        U = u - U0           # subtract the offset voltage
        su = 5 + 0.01 * u
        sU = sqrt(su**2 + sU0**2)  # propagate offset and reading errors
        data.addPoint(I, U, 0.1, sU)
    xmin, xmax = 53, 71.5    # current window for the fit subset / mA
    c = TCanvas('c_diode', '', 1280, 720)
    g = data.makeGraph('g_diode', "Laserstrom I_{L} / mA",
                       "Photodiodenspannung U_{ph} / mV")
    g.GetXaxis().SetRangeUser(-5, 90)
    g.SetMinimum(-50)
    g.SetMaximum(1400)
    g.Draw('APX')
    # y=0 line
    line = TLine(-5, 0, 90, 0)
    line.SetLineColor(OPData.CH2ECOLOR)
    line.Draw()
    data.filterX(xmin, xmax)  # keep only points inside the fit window
    g2 = data.makeGraph('g_diode_2', "Laserstrom I_{L} / mA",
                        "Photodiodenspannung U_{ph} / mV")
    g2.SetMarkerColor(OPData.CH1COLOR)
    g2.SetLineColor(OPData.CH1COLOR)
    fit = Fitter('fit_diode', '[0] * (x-[1])')
    fit.function.SetNpx(1000)       # smooth fit curve when drawing
    fit.setParam(0, 'a', 1)         # slope start value
    fit.setParam(1, 'I_{th}', 50)   # threshold current start value / mA
    fit.fit(g2, 40, 77)
    fit.saveData('../fit/part1/kennlinie.txt')
    l = TLegend(0.15, 0.55, 0.4, 0.85)
    l.SetTextSize(0.03)
    l.AddEntry(g, 'Laserdiodenkennlinie', 'p')
    l.AddEntry(g2, 'Ausschnitt zum Fitten', 'p')
    l.AddEntry(fit.function, 'Fit mit U_{ph} = a (I_{ L} - I_{ th} )', 'l')
    fit.addParamsToLegend(l, (('%.1f', '%.1f'), ('%.2f', '%.2f')),
                          chisquareformat='%.2f',
                          units=['mV/mA', 'mA'])
    l.Draw()
    g.Draw('P')
    g2.Draw('P')
    c.Update()
    c.Print('../img/part1/diodenkennlinie.pdf', 'pdf')
def evaluate(params, dataset, device='cuda:0', val_test='test'):
    """Train a GNN built from *params* on *dataset* and return the best
    validation accuracy (evaluated on the split named by *val_test*)."""
    ds = Dataset(dataset)
    model = GNN(utils.build_gnn_graph(ds, params)).to(device)
    cfg = utils.from_json("json/setting.json")[dataset]
    opt = torch.optim.Adam(model.parameters(),
                           lr=cfg["learning_rate"],
                           weight_decay=cfg["weight_decay"])
    fitter = Fitter(model, ds[0].to(device), opt)
    history = fitter.run(val_test=val_test, verbose=False)
    return max(history.val.acc)
def distribution(data1, year):
    """Fit a normal distribution to *data1*, print its AIC and show a
    histogram titled with the year and the fit's AIC/BIC values."""
    f = Fitter(data1, distributions=['norm'])  # fit only the given distribution
    f.fit()
    # summary() reports goodness of fit and draws the histogram with the
    # fitted probability density curve.  Call it once and keep the returned
    # statistics table — the original called it twice, redundantly
    # recomputing and redrawing the same summary.
    A = f.summary()
    print(A.loc['norm', 'aic'])
    plt.title('第%s年的生物量,AIC=%.2f,BIC=%.2f' %
              (str(year), A.loc['norm', 'aic'], A.loc['norm', 'bic']))
    plt.show()
def fit_all(miss):
    """Fit an exponential distribution to *miss*, print the error table,
    log the best fit and show the summary plot.

    Earlier iterations also tried genpareto, betaprima, lomax, f and ncf;
    only 'expon' is kept as the candidate set.
    """
    fitter = Fitter(miss, timeout=600, distributions=['expon'])
    fitter.fit()
    print(fitter.df_errors.sort_values('sumsquare_error'))
    logmsg('best fit = %s', str(fitter.get_best()))
    fitter.summary()
    plt.show()