def fit(self, method='simplex'):
    """Initiate a fit of the model using Sherpa.

    :param method: Method to be used to fit the model (e.g. simplex, levmar, or moncar)
    """
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(method)
    ui.get_method().config.update(sherpa_configs.get(method, {}))

    ui.load_user_model(CalcModel(self.model, self.fit_logger), 'xijamod')  # sets global xijamod
    ui.add_user_pars('xijamod', self.model.parnames)
    ui.set_model(1, 'xijamod')

    calc_stat = CalcStat(self.model, self.fit_logger)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    # Set frozen, min, and max attributes for each xijamod parameter
    for par in self.model.pars:
        xijamod_par = getattr(xijamod, par.full_name)
        xijamod_par.val = par.val
        xijamod_par.frozen = par.frozen
        xijamod_par.min = par.min
        xijamod_par.max = par.max

    ui.fit(1)

    self.save_snapshot(fit_stat=calc_stat.min_fit_stat, method=method)
def fit(self):
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(self.method)
    ui.get_method().config.update(sherpa_configs.get(self.method, {}))

    ui.load_user_model(CalcModel(self.model), 'xijamod')  # sets global xijamod
    ui.add_user_pars('xijamod', self.model.parnames)
    ui.set_model(1, 'xijamod')

    calc_stat = CalcStat(self.model, self.child_pipe, self.maxiter)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    # Set frozen, min, and max attributes for each xijamod parameter
    for par in self.model.pars:
        xijamod_par = getattr(xijamod, par.full_name)
        xijamod_par.val = par.val
        xijamod_par.frozen = par.frozen
        xijamod_par.min = par.min
        xijamod_par.max = par.max

    if any(not par.frozen for par in self.model.pars):
        try:
            ui.fit(1)
            calc_stat.message['status'] = 'finished'
            fit_logger.info('Fit finished normally')
        except FitTerminated as err:
            calc_stat.message['status'] = 'terminated'
            fit_logger.warning('Got FitTerminated exception {}'.format(err))

    self.child_pipe.send(calc_stat.message)
def fit(self):
    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(self.method)
    ui.get_method().config.update(sherpa_configs.get(self.method, {}))

    ui.load_user_model(CalcModel(self.model), 'xijamod')  # sets global xijamod
    ui.add_user_pars('xijamod', self.model.parnames)
    ui.set_model(1, 'xijamod')

    calc_stat = CalcStat(self.model, self.child_pipe)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    # Set frozen, min, and max attributes for each xijamod parameter
    for par in self.model.pars:
        xijamod_par = getattr(xijamod, par.full_name)
        xijamod_par.val = par.val
        xijamod_par.frozen = par.frozen
        xijamod_par.min = par.min
        xijamod_par.max = par.max

    if any(not par.frozen for par in self.model.pars):
        try:
            ui.fit(1)
            calc_stat.message['status'] = 'finished'
            logging.debug('Fit finished normally')
        except FitTerminated as err:
            calc_stat.message['status'] = 'terminated'
            logging.debug('Got FitTerminated exception {}'.format(err))

    self.child_pipe.send(calc_stat.message)
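The three fit() variants above hand Sherpa a wrapped Xija model through ui.load_user_model and a custom statistic through ui.load_user_stat, but the CalcModel and CalcStat helpers themselves are defined elsewhere. As a rough, hypothetical sketch only (the class name matches the snippets; the body is an assumption, not the real Xija code), the user-model object just has to be callable with the proposed parameter values and the independent axis:

class CalcModel:
    """Hypothetical stand-in: ui.load_user_model only needs a callable that
    takes (parvals, x) and returns an array matching the (dummy) data."""

    def __init__(self, model, logger=None):
        self.model = model
        self.logger = logger

    def __call__(self, parvals, x):
        # The real implementation would push parvals into the Xija model and
        # run it; this stub only demonstrates the calling convention.
        return np.ones_like(x)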
def test_no_covar(stat, clean_ui, setup_covar):
    "Test an exception is thrown if covar is not run"
    ui.set_stat(stat)
    with pytest.raises(SessionErr) as exc:
        ui.get_draws()

    assert NO_COVAR_MSG == str(exc.value)
def test_covar_wstat_no_background(clean_ui, setup_covar):
    "Test an exception is thrown when wstat is used without background"
    ui.covar()
    ui.set_stat("wstat")
    with pytest.raises(StatErr) as exc:
        ui.get_draws()

    assert WSTAT_ERR_MSG == str(exc.value)
def test_no_covar(self):
    for stat in self.right_stats:
        ui.set_stat(stat)
        try:
            ui.get_draws()
        except SessionErr as ve:
            self.assertEqual(self.no_covar_msg, str(ve))
            return
        self.fail(self.fail_msg)
def test_covar_wrong_stat(stat, clean_ui, setup_covar):
    "Test an exception is thrown if the proper stat is not set"
    ui.covar()
    ui.set_stat(stat)
    with pytest.raises(ValueError) as exc:
        ui.get_draws()

    assert WRONG_STAT_MSG.format(stat) == str(exc.value)
def test_no_covar(self):
    for stat in self.right_stats:
        ui.set_stat(stat)
        try:
            ui.get_draws()
        except SessionErr as ve:
            self.assertEqual(self.no_covar_msg, ve.message)
            return
        self.fail(self.fail_msg)
def test_covar_wstat_no_background(self):
    ui.covar()
    ui.set_stat("wstat")
    try:
        ui.get_draws()
    except StatErr as ve:
        self.assertEqual(self.wstat_err_msg, str(ve))
        return
    self.fail(self.fail_msg)
def test_covar_wstat_no_background(self):
    ui.covar()
    ui.set_stat("wstat")
    try:
        ui.get_draws()
    except StatErr as ve:
        self.assertEqual(self.wstat_err_msg, ve.message)
        return
    self.fail(self.fail_msg)
def test_covar_as_none(self):
    for stat in self.right_stats - {'wstat'}:
        ui.set_stat(stat)
        ui.fit()
        ui.covar()
        niter = 10
        stat, accept, params = ui.get_draws(niter=niter)
        self.assertEqual(niter + 1, stat.size)
        self.assertEqual(niter + 1, accept.size)
        self.assertEqual((2, niter + 1), params.shape)
        self.assertTrue(numpy.any(accept))
def test_covar_as_argument(self):
    for stat in self.right_stats - {'wstat'}:
        ui.set_stat(stat)
        ui.fit()
        matrix = [[0.00064075, 0.01122127], [0.01122127, 0.20153251]]
        niter = 10
        stat, accept, params = ui.get_draws(niter=niter, covar_matrix=matrix)
        self.assertEqual(niter + 1, stat.size)
        self.assertEqual(niter + 1, accept.size)
        self.assertEqual((2, niter + 1), params.shape)
        self.assertTrue(numpy.any(accept))
def test_set_error_array_wrong(field, clean_ui):
    """What happens when we set the stat/syserror to an array of the wrong length?"""

    y = np.asarray([2, 3, 4])
    ui.load_arrays(1, np.arange(3), y)
    ui.set_stat('cstat')

    setfunc = getattr(ui, f"set_{field}")

    # this does not error out
    setfunc(np.asarray([1, 2, 3, 4]))

    getfunc = getattr(ui, f"get_{field}")
    assert getfunc() == pytest.approx([1, 2, 3, 4])
def test_user_model1d_fit():
    """Check can use in a fit."""

    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ["slope", "intercept"],
                     parvals=[1.0, 1.0])

    mdl = ui.get_model_component(mname)

    x = numpy.asarray([-2.4, 2.3, 5.4, 8.7, 12.3])

    # Set up the data to be scattered around y = -0.2 x + 2.8
    # Pick the deltas so that they sum to 0 (except for central
    # point)
    #
    slope = -0.2
    intercept = 2.8

    dy = numpy.asarray([0.1, -0.2, 0.14, -0.1, 0.2])
    ydata = x * slope + intercept + dy

    ui.load_arrays(1, x, ydata)

    ui.set_source(mname)
    ui.ignore(5.0, 6.0)  # drop the central bin

    ui.set_stat('leastsq')
    ui.set_method('simplex')
    ui.fit()

    fres = ui.get_fit_results()
    assert fres.succeeded
    assert fres.parnames == ('test_model.slope', 'test_model.intercept')
    assert fres.numpoints == 4
    assert fres.dof == 2

    # Tolerance has been adjusted to get the tests to pass on my
    # machine. It's really just to check that the values have changed
    # from their default values.
    #
    assert fres.parvals[0] == pytest.approx(slope, abs=0.01)
    assert fres.parvals[1] == pytest.approx(intercept, abs=0.05)

    # These should be the same values, so no need to use pytest.approx
    # (unless there's some internal translation between types done
    # somewhere?).
    #
    assert mdl.slope.val == fres.parvals[0]
    assert mdl.intercept.val == fres.parvals[1]
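This test relies on a user-model function um_line defined elsewhere in the test module. A minimal sketch of the kind of linear model it presumably is (assumed here, not shown in the excerpt), matching the ('slope', 'intercept') parameters registered with add_user_pars:

def um_line(pars, x):
    # assumed: pars[0] is the slope, pars[1] the intercept
    return pars[0] * numpy.asarray(x) + pars[1]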
def test_covar_as_argument(stat, clean_ui, setup_covar):
    ui.set_stat(stat)
    ui.fit()

    matrix = [[0.00064075, 0.01122127], [0.01122127, 0.20153251]]
    niter = 10
    stat, accept, params = ui.get_draws(niter=niter, covar_matrix=matrix)

    n = niter + 1
    assert stat.size == n
    assert accept.size == n
    assert params.shape == (2, n)
    assert np.any(accept)
def test_covar_as_none(stat, clean_ui, setup_covar):
    ui.set_stat(stat)
    ui.fit()
    ui.covar()

    niter = 10
    stat, accept, params = ui.get_draws(niter=niter)

    n = niter + 1
    assert stat.size == n
    assert accept.size == n
    assert params.shape == (2, n)
    assert np.any(accept)
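The pytest-style draw tests above lean on clean_ui and setup_covar fixtures that are not part of this excerpt; clean_ui is the usual Sherpa test fixture that resets the session, while setup_covar must load data and attach a model with two thawed parameters, since the tests assert a (2, niter + 1) parameter chain. A purely illustrative fixture along those lines (names and data values are assumptions):

@pytest.fixture
def setup_covar():
    # Illustrative only: a small 1D dataset with a two-parameter model so
    # that get_draws returns a parameter chain of shape (2, niter + 1).
    x = np.asarray([1, 2, 4, 7, 9, 12])
    y = np.asarray([2, 5, 9, 14, 19, 25])
    ui.load_arrays(1, x, y)
    mdl = ui.create_model_component("polynom1d", "mdl")
    mdl.c1.thaw()
    ui.set_source(1, mdl)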
def test_covar_wrong_stat(self):
    ui.covar()
    fail = False
    wrong_stats = set(ui.list_stats()) - self.right_stats
    for stat in wrong_stats:
        ui.set_stat(stat)
        try:
            ui.get_draws()
        except ValueError as ve:
            self.assertEqual(self.wrong_stat_msg.format(stat), ve.message)
            continue
        fail = True
        break
    if fail:
        self.fail(self.fail_msg)
def test_set_syserror_array(clean_ui):
    """What happens when we set the syserror to an array?"""

    staterror = 0.1 * np.ones(3)
    syserror = 0.5 * np.ones(3)
    combo = np.sqrt(0.01 + 0.25) * np.ones(3)
    y = np.asarray([2, 3, 4])
    ui.load_arrays(1, np.arange(3), y, staterror, None)
    ui.set_stat('cstat')

    ui.set_syserror(syserror)
    assert ui.get_staterror() == pytest.approx(staterror)
    assert ui.get_syserror() == pytest.approx(syserror)
    assert ui.get_error() == pytest.approx(combo)
def test_covar_wrong_stat(self):
    ui.covar()
    fail = False
    wrong_stats = set(ui.list_stats()) - self.right_stats
    for stat in wrong_stats:
        ui.set_stat(stat)
        try:
            ui.get_draws()
        except ValueError as ve:
            self.assertEqual(self.wrong_stat_msg.format(stat), str(ve))
            continue
        fail = True
        break
    if fail:
        self.fail(self.fail_msg)
def test_set_syserror_scalar_no_fractional(clean_ui):
    """What happens when we set the syserror to a scalar fractional=False?"""

    staterror = 0.1 * np.ones(3)
    syserror = 0.5 * np.ones(3)
    combo = np.sqrt(0.01 + 0.25) * np.ones(3)
    ui.load_arrays(1, np.arange(3), np.ones(3), staterror, syserror)
    ui.set_stat('cstat')

    ui.set_syserror(3)
    assert ui.get_staterror() == pytest.approx(staterror)
    assert ui.get_syserror() == pytest.approx(3 * np.ones(3))

    combo = np.sqrt(0.01 + 9) * np.ones(3)
    assert ui.get_error() == pytest.approx(combo)
def setup_err_estimate_multi_ids(strings=False):
    """Create the environment used in test_err_estimate_xxx tests.

    The model being fit is polynom1d with c0=50 c1=-2 and was evaluated
    and passed through sherpa.utils.poisson_noise to create the datasets.

    Since we can have string or integer ids we allow either, but do not
    try to mix them.
    """

    if strings:
        id1 = "1"
        id2 = "2"
        id3 = "3"
    else:
        id1 = 1
        id2 = 2
        id3 = 3

    ui.load_arrays(id1, [1, 3, 7, 12], [50, 40, 27, 20])
    ui.load_arrays(id2, [-3, 4, 5], [55, 34, 37])
    ui.load_arrays(id3, [10, 12, 20], [24, 26, 7])

    # NOTE: dataset "not-used" is not used in the fit and is not
    # drawn from the distribution used to create the other datasets.
    #
    ui.load_arrays("not-used", [2000, 2010, 2020], [10, 12, 14])

    mdl = ui.create_model_component("polynom1d", "mdl")
    mdl.c1.thaw()

    ui.set_source(id1, mdl)
    ui.set_source(id2, mdl)
    ui.set_source(id3, mdl)

    # apply the model to dataset not-used just so we can check we
    # don't end up using it
    mdl_not_used = ui.create_model_component("scale1d", "mdl_not_used")
    ui.set_source("not-used", mdl + mdl_not_used)

    # use cstat so we have an approximate goodness-of-fit just to
    # check we are getting sensible results.
    #
    ui.set_stat("cstat")
    ui.set_method("simplex")
def test_set_syserror_scalar_fractional(clean_ui):
    """What happens when we set the syserror to a scalar fractional=True?"""

    staterror = 0.1 * np.ones(3)
    syserror = 0.5 * np.ones(3)
    combo = np.sqrt(0.01 + 0.25) * np.ones(3)
    y = np.asarray([2, 3, 4])
    ui.load_arrays(1, np.arange(3), y, staterror, syserror)
    ui.set_stat('cstat')

    ui.set_syserror(0.4, fractional=True)
    assert ui.get_staterror() == pytest.approx(staterror)
    assert ui.get_syserror() == pytest.approx(0.4 * y)

    combo = np.sqrt(0.01 + 0.16 * y * y)
    assert ui.get_error() == pytest.approx(combo)
def fit_model(model,
              comm=None,
              method='simplex',
              config=None,
              nofit=None,
              freeze_pars=freeze_pars,
              thaw_pars=[],
              ):

    dummy_data = np.zeros(1)
    dummy_times = np.arange(1)
    ui.load_arrays(1, dummy_times, dummy_data)

    ui.set_method(method)
    ui.get_method().config.update(config or sherpa_configs.get(method, {}))

    ui.load_user_model(CalcModel(model, comm), 'xijamod')
    ui.add_user_pars('xijamod', model.parnames)
    ui.set_model(1, 'xijamod')

    fit_parnames = set()
    for parname, parval in zip(model.parnames, model.parvals):
        getattr(xijamod, parname).val = parval
        fit_parnames.add(parname)
        if any([re.match(x + '$', parname) for x in freeze_pars]):
            fit_logger.info('Freezing ' + parname)
            ui.freeze(getattr(xijamod, parname))
            fit_parnames.remove(parname)
        if any([re.match(x + '$', parname) for x in thaw_pars]):
            fit_logger.info('Thawing ' + parname)
            ui.thaw(getattr(xijamod, parname))
            fit_parnames.add(parname)
            if 'tau' in parname:
                getattr(xijamod, parname).min = 0.1

    calc_stat = CalcStat(model, comm)
    ui.load_user_stat('xijastat', calc_stat, lambda x: np.ones_like(x))
    ui.set_stat(xijastat)

    if fit_parnames and not nofit:
        ui.fit(1)
    else:
        model.calc()
def test_set_syserror_none(clean_ui):
    """What happens when we set the syserror to None?"""

    staterror = 0.1 * np.ones(3)
    syserror = 0.5 * np.ones(3)
    combo = np.sqrt(0.01 + 0.25) * np.ones(3)
    ui.load_arrays(1, np.arange(3), np.ones(3), staterror, syserror)
    ui.set_stat('cstat')

    ui.set_syserror(None)
    assert ui.get_staterror() == pytest.approx(staterror)
    with pytest.raises(DataErr) as err:
        ui.get_syserror()

    assert str(err.value) == "data set '1' does not specify systematic errors"

    combo = staterror
    assert ui.get_error() == pytest.approx(combo)
def mwl_fit_high_level():
    """Use high-level Sherpa API.

    High-level = session and convenience functions

    Example: http://cxc.harvard.edu/sherpa/threads/simultaneous/
    Example: http://python4astronomers.github.io/fitting/spectrum.html
    """
    import sherpa.ui as ui

    fermi_data = FermiData()
    ui.load_arrays(fermi_data.name, fermi_data.x, fermi_data.y, fermi_data.staterror)

    ui.load_user_stat('fermi_stat', FermiStat.calc_stat, FermiStat.calc_staterror)
    # TODO: is there a good way to get the stat??
    # ui.get_stat('fermi_stat')
    # fermi_stat = ui._session._get_stat_by_name('fermi_stat')
    ui.set_stat(fermi_stat)
    # IPython.embed()

    iact_data = IACTData()
    ui.load_arrays(iact_data.name, iact_data.x, iact_data.y, iact_data.staterror)

    spec_model = ui.logparabola.spec_model
    spec_model.c1 = 0.5
    spec_model.c2 = 0.2
    spec_model.ampl = 5e-11

    ui.set_source(fermi_data.name, spec_model)
    ui.set_source(iact_data.name, spec_model)

    ui.notice(lo=1e-3, hi=None)

    # IPython.embed()
    ui.fit()

    return dict(results=ui.get_fit_results(), model=spec_model)
def mwl_fit_high_level():
    """Use high-level Sherpa API.

    High-level = session and convenience functions

    Example: http://cxc.harvard.edu/sherpa/threads/simultaneous/
    Example: http://python4astronomers.github.io/fitting/spectrum.html
    """
    import sherpa.ui as ui

    fermi_data = FermiData()
    ui.load_arrays(fermi_data.name, fermi_data.x, fermi_data.y, fermi_data.staterror)

    ui.load_user_stat('fermi_stat', FermiStat.calc_stat, FermiStat.calc_staterror)
    # TODO: is there a good way to get the stat??
    # ui.get_stat('fermi_stat')
    # fermi_stat = ui._session._get_stat_by_name('fermi_stat')
    ui.set_stat(fermi_stat)
    # IPython.embed()

    iact_data = IACTData()
    ui.load_arrays(iact_data.name, iact_data.x, iact_data.y, iact_data.staterror)

    spec_model = ui.logparabola.spec_model
    spec_model.c1 = 0.5
    spec_model.c2 = 0.2
    spec_model.ampl = 5e-11

    ui.set_source(fermi_data.name, spec_model)
    ui.set_source(iact_data.name, spec_model)

    ui.notice(lo=1e-3, hi=None)

    # IPython.embed()
    ui.fit()

    return Bunch(results=ui.get_fit_results(), model=spec_model)
def test_set_staterror_none(clean_ui):
    """What happens when we set the staterror to None?"""

    staterror = 0.1 * np.ones(3)
    syserror = 0.5 * np.ones(3)
    combo = np.sqrt(0.01 + 0.25) * np.ones(3)
    ui.load_arrays(1, np.arange(3), np.ones(3), staterror, syserror)
    ui.set_stat('cstat')

    assert ui.get_staterror() == pytest.approx(staterror)
    assert ui.get_syserror() == pytest.approx(syserror)
    assert ui.get_error() == pytest.approx(combo)

    # removing the statistical error means that the statistic is used;
    # for the likelihood stats we just get 1's
    #
    ui.set_staterror(None)
    assert ui.get_staterror() == pytest.approx(np.ones(3))
    assert ui.get_syserror() == pytest.approx(syserror)

    combo = np.sqrt(1 + 0.25) * np.ones(3)
    assert ui.get_error() == pytest.approx(combo)
ui.load_user_model(lim_line, '%s_mod' % ftype)
ui.add_user_pars('%s_mod' % ftype, ['m', 'b'])
ui.set_model(data_id, '%s_mod' % ftype)
ui.load_arrays(data_id, times, failures[ftype])

fmod = ui.get_model_component('%s_mod' % ftype)
fmod.b.min = 0
fmod.b.max = 1
fmod.m.min = 0
fmod.m.max = 0.5
fmod.b.val = 1e-7

ui.load_user_stat("loglike", llh, my_err)
ui.set_stat(loglike)
# the tricky part here is that the "model" is the probability polynomial
# we've defined evaluated at the data x values.
# the model and the data are passed to the user stat / llh
# function as it is minimized.
ui.fit(data_id)
myfit = ui.get_fit_results()
#axplot[ftype] = ui.get_model_plot(data_id)
if myfit.succeeded:
    import pickle
    pickle.dump(myfit, open('%s_fitfile.pkl' % ftype, 'w'))

    rep_file = open('%s_fitfile.json' % ftype, 'w')
    rep_file.write(
        json.dumps(dict(
            time0=trend_start,
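The statistic registered here depends on llh and my_err helpers defined earlier in the original script. Purely as an illustration of the calling convention ui.load_user_stat expects (a statistic function returning the total statistic plus the per-bin values, and an error function acting on the data), a hypothetical Bernoulli-style log-likelihood might look like:

def llh(data, model, staterror=None, syserror=None, weight=None):
    # hypothetical negative log likelihood for 0/1 failure data against the
    # model probabilities; the original helper is not shown in this excerpt
    prob = np.clip(model, 1e-10, 1 - 1e-10)
    fvec = -(data * np.log(prob) + (1 - data) * np.log(1 - prob))
    return fvec.sum(), fvec

def my_err(data):
    # placeholder per-point errors; the likelihood above does not use them
    return np.ones_like(data)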
import re
import os
from glob import glob
import pickle

import matplotlib.pyplot as plt
import numpy as np

from sherpa import ui

import dark_models

sbp = None  # for pychecker
g1 = None

method = 'levmar'

ui.set_stat('cash')
ui.set_method('simplex')
ui.load_user_model(dark_models.smooth_broken_pow, 'sbp')
ui.add_user_pars('sbp', ('gamma1', 'gamma2', 'x_b', 'x_r', 'ampl1'))


def fit_gauss_sbp():
    g1 = ui.gauss1d.g1
    ui.set_model(sbp + g1)
    ui.set_method('simplex')
    g1.fwhm = 5.0
    g1.pos = 7.0
    g1.ampl = 30000.
    ui.freeze(sbp.gamma1)
    ui.freeze(sbp.gamma2)
for limit in [50, 75, 100, 125, 150, 200, 1000]:
    warm_frac = data[range_type][mag][ok]["n{}".format(limit)]
    print("range_type {}".format(range_type))
    print("mag {}".format(mag))
    print("limit is {}".format(limit))
    extent = np.max(warm_frac) - np.min(warm_frac)
    wp_min = np.min(warm_frac)
    warm_frac = warm_frac - wp_min

    def scaled_warm_frac(pars, x):
        scaled = pars[1] + warm_frac * pars[0]
        return scaled

    data_id = 1
    ui.set_method("simplex")
    ui.set_stat("chi2datavar")
    # ui.set_stat('leastsq')
    # ui.load_user_stat("chi2custom", my_chi2, my_err)
    # ui.set_stat(chi2custom)
    ui.load_user_model(scaled_warm_frac, "model")
    ui.add_user_pars("model", ["scale", "offset"])
    ui.set_model(data_id, "model")
    ui.load_arrays(data_id, np.array(times), np.array(bad_frac))
    fmod = ui.get_model_component("model")
    fmod.scale.min = 1e-9
    max_err = np.max([data[range_type][mag][ok]["err_high"],
                      data[range_type][mag][ok]["err_low"]], axis=0)
    ui.set_staterror(data_id, max_err)
    ui.fit(data_id)
    f = ui.get_fit_results()
    scale = f.rstat ** 0.5
    ui.set_staterror(data_id, max_err * scale)
ui.load_arrays(data_id, times, failures[ftype])

fmod = ui.get_model_component('%s_mod' % ftype)
fmod.b.min = 0
fmod.b.max = 1
fmod.m.min = 0
fmod.m.max = 0.5
fmod.b.val = 1e-7

ui.load_user_stat("loglike", llh, my_err)
ui.set_stat(loglike)
# the tricky part here is that the "model" is the probability polynomial
# we've defined evaluated at the data x values.
# the model and the data are passed to the user stat/ llh
# function as it is minimized.
ui.fit(data_id)
myfit = ui.get_fit_results()
#axplot[ftype] = ui.get_model_plot(data_id)
if myfit.succeeded:
    import pickle
    pickle.dump(myfit, open('%s_fitfile.pkl' % ftype, 'w'))

    rep_file = open('%s_fitfile.json' % ftype, 'w')
    rep_file.write(json.dumps(dict(time0=trend_start,
                                   datestop=trend_date_stop,
                                   datestart=trend_date_start,
err_low[err_low == 0] = .0001
for limit in warm_limits:
    print("range type {}".format(range_type))
    print("mag {}".format(mag))
    print("limit is {}".format(limit))
    print("ftype {}".format(limit))
    warm_frac = data[range_type][mag][ok]['n{}'.format(limit)]
    extent = np.max(warm_frac) - np.min(warm_frac)
    wp_min = np.min(warm_frac)
    warm_frac = warm_frac - wp_min

    def scaled_warm_frac(pars, x):
        scaled = pars[1] + warm_frac * pars[0]
        return scaled

    data_id = 1
    ui.set_method('simplex')
    ui.set_stat('chi2datavar')
    #ui.set_stat('leastsq')
    #ui.load_user_stat("chi2custom", my_chi2, my_err)
    #ui.set_stat(chi2custom)
    ui.load_user_model(scaled_warm_frac, 'model')
    ui.add_user_pars('model', ['scale', 'offset'])
    ui.set_model(data_id, 'model')
    ui.load_arrays(data_id, np.array(times), np.array(bad_frac))
    fmod = ui.get_model_component('model')
    fmod.scale.min = 1e-9
    fmod.offset.val = 0
    ui.freeze(fmod.offset)
    max_err = np.max([err_high, err_low], axis=0)
    ui.set_staterror(data_id, max_err)