Example #1
def test_show_all_basic(clean_ui):
    """Set up a very basic data/model/fit"""

    ui.load_arrays(1, [1, 2, 4], [3, 5, 5])
    ui.set_source(ui.scale1d.mdl)
    ui.fit()
    ui.conf()
    ui.proj()
    ui.covar()

    def get(value):
        out = StringIO()
        getattr(ui, f"show_{value}")(outfile=out)
        ans = out.getvalue()
        assert len(ans) > 1

        # trim the trailing "\n"
        return ans[:-1]

    # All we are really checking is that the show_all output is the
    # composite of the following. We are not checking that the
    # actual output makes sense for any command.
    #
    expected = get("data") + get("model") + get("fit") + get("conf") + \
        get("proj") + get("covar")

    got = get("all")

    assert expected == got
Example #2
def test_est_errors_works_single_parameter(mdlcls, method, getter, clean_ui):
    """This is issue #1397.

    Rather than require XSPEC, we create a subclass of the Parameter
    class to check it works. We are not too concerned with the actual
    results hence the relatively low tolerance on the numeric checks.

    """

    mdl = mdlcls()

    ui.load_arrays(1, [1, 2, 3, 4], [4, 2, 1, 3.5])
    ui.set_source(mdl)
    with SherpaVerbosity("ERROR"):
        ui.fit()

        # this is where #1397 fails with Const2
        method(mdl.con)

    atol = 1e-4
    assert ui.calc_stat() == pytest.approx(0.7651548418626658, abs=atol)

    results = getter()
    assert results.parnames == (f"{mdl.name}.con", )
    assert results.sigma == pytest.approx(1.0)

    assert results.parvals == pytest.approx((2.324060647544594, ), abs=atol)

    # The covar errors are -/+ 1.3704388763054511
    #     conf             -1.3704388763054511 / +1.3704388763054514
    #     proj             -1.3704388762971822 / +1.3704388763135826
    #
    err = 1.3704388763054511
    assert results.parmins == pytest.approx((-err, ), abs=atol)
    assert results.parmaxes == pytest.approx((err, ), abs=atol)
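The parametrize decorators for this test are not shown in the example. A minimal sketch of how the method/getter pairs might be supplied (the mdlcls parametrization over the model classes under test is omitted, and sherpa.astro.ui is assumed):

import pytest
from sherpa.astro import ui

# Hypothetical pairing of each error-estimate routine with the function
# that returns its results.
@pytest.mark.parametrize("method,getter",
                         [(ui.conf, ui.get_conf_results),
                          (ui.proj, ui.get_proj_results),
                          (ui.covar, ui.get_covar_results)])
def test_est_errors_works_single_parameter(mdlcls, method, getter, clean_ui):
    ...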
Example #3
    def test_source_methods_with_full_model(self):
        from sherpa.utils.err import IdentifierErr

        ui.load_data('full', self.ascii)
        ui.set_full_model('full', 'powlaw1d.p1')

        # Test Case 1
        try:
            ui.get_source('full')
        except IdentifierErr as e:
            self.assertRegex(
                str(e),
                "Convolved model\n.*\n is set for dataset full. You should use get_model instead.",
                str(e))
        try:
            ui.plot_source('full')
        except IdentifierErr as e:
            self.assertRegex(
                str(e),
                "Convolved model\n.*\n is set for dataset full. You should use plot_model instead.",
                str(e))

        # Test Case 2
        ui.set_source('full', 'powlaw1d.p2')
        ui.get_source('full')

        # Test Case 3
        ui.load_data('not_full', self.ascii)
        try:
            ui.get_source('not_full')
        except IdentifierErr as e:
            self.assertEqual(
                'source not_full has not been set, consider using set_source() or set_model()',
                str(e))
Example #4
def test_thaw_model(string, clean_ui):
    """Can we thaw a model?

    We use a model with an alwaysfrozen parameter.
    """

    mdl = ui.create_model_component("logparabola", "mdl")
    ui.set_source(mdl)
    mdl.c1.freeze()
    mdl.ampl.freeze()

    assert mdl.ref.frozen
    assert mdl.c1.frozen
    assert not mdl.c2.frozen
    assert mdl.ampl.frozen
    assert ui.get_num_par_thawed() == 1
    assert ui.get_num_par_frozen() == 3

    if string:
        ui.thaw("mdl")
    else:
        ui.thaw(mdl)

    assert mdl.ref.frozen
    assert not mdl.c1.frozen
    assert not mdl.c2.frozen
    assert not mdl.ampl.frozen
    assert ui.get_num_par_thawed() == 3
    assert ui.get_num_par_frozen() == 1
Example #5
    def test_source_methods_with_full_model(self):
        from sherpa.utils.err import IdentifierErr

        ui.load_data('full', self.ascii)
        ui.set_full_model('full', 'powlaw1d.p1')

        # Test Case 1
        try:
            ui.get_source('full')
        except IdentifierErr as e:
            self.assertRegexpMatches(str(e), "Convolved model\n.*\n is set for dataset full. You should use get_model instead.", str(e))
        try:
            ui.plot_source('full')
        except IdentifierErr as e:
            self.assertEquals("Convolved model\n'p1'\n is set for dataset full. You should use plot_model instead.", str(e))

        # Test Case 2
        ui.set_source('full', 'powlaw1d.p2')
        ui.get_source('full')

        # Test Case 3
        ui.load_data('not_full', self.ascii)
        try:
            ui.get_source('not_full')
        except IdentifierErr as e:
            self.assertEquals('source not_full has not been set, consider using set_source() or set_model()', str(e))
Example #6
def test_ui_source_methods_with_full_model(clean_ui, setup_ui_full):

    ui.load_data('full', setup_ui_full.ascii)
    ui.set_full_model('full', 'powlaw1d.p1')

    # Test Case 1
    with pytest.raises(IdentifierErr) as exc:
        ui.get_source('full')

    emsg = "Convolved model\n'powlaw1d.p1'\n is set for dataset full. You should use get_model instead."
    assert str(exc.value) == emsg

    with pytest.raises(IdentifierErr) as exc:
        ui.plot_source('full')

    emsg = "Convolved model\n'powlaw1d.p1'\n is set for dataset full. You should use plot_model instead."
    assert str(exc.value) == emsg

    with pytest.raises(IdentifierErr) as exc:
        ui.get_source_plot('full')

    emsg = "Convolved model\n'powlaw1d.p1'\n is set for dataset full. You should use get_model_plot instead."
    assert str(exc.value) == emsg

    # Test Case 2
    ui.set_source('full', 'powlaw1d.p2')
    ui.get_source('full')

    # Test Case 3
    ui.load_data('not_full', setup_ui_full.ascii)
    with pytest.raises(IdentifierErr) as exc:
        ui.get_source('not_full')

    emsg = 'source not_full has not been set, consider using set_source() or set_model()'
    assert emsg == str(exc.value)
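An alternative way to check these messages, not used in the example above, is to pass an escaped pattern to pytest.raises via match; since the messages contain newlines and dots, re.escape keeps the comparison literal. A sketch, assuming the same set_full_model setup as above:

import re

import pytest
from sherpa import ui
from sherpa.utils.err import IdentifierErr

emsg = ("Convolved model\n'powlaw1d.p1'\n is set for dataset full. "
        "You should use get_model instead.")

# match= is treated as a regular expression (re.search), so escape it
# to compare the full message literally.
with pytest.raises(IdentifierErr, match=re.escape(emsg)):
    ui.get_source('full')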
Example #7
def test_show_conf_basic(clean_ui):
    """Set up a very basic data/model/fit"""

    ui.load_arrays(1, [1, 2, 4], [3, 5, 5])
    ui.set_source(ui.scale1d.mdl)
    ui.fit()
    ui.conf()

    out = StringIO()
    ui.show_conf(outfile=out)
    got = out.getvalue().split('\n')

    assert len(got) == 12
    assert got[0] == "Confidence:Dataset               = 1"
    assert got[1] == "Confidence Method     = confidence"
    assert got[2] == "Iterative Fit Method  = None"
    assert got[3] == "Fitting Method        = levmar"
    assert got[4] == "Statistic             = chi2gehrels"
    assert got[5] == "confidence 1-sigma (68.2689%) bounds:"
    assert got[6] == "   Param            Best-Fit  Lower Bound  Upper Bound"
    assert got[7] == "   -----            --------  -----------  -----------"
    assert got[8] == "   mdl.c0            4.19798     -1.85955      1.85955"
    assert got[9] == ""
    assert got[10] == ""
    assert got[11] == ""
Example #8
def test_err_estimate_model(strings, idval, otherids, clean_ui):
    """Ensure we can use model with conf/proj/covar.

    This is test_err_estimate_multi_ids but

      - added an extra model to each source (that evaluates to 0)
      - we include the model expression in the call.

    The fit and error analysis should be the same regardless of the
    ordering.
    """

    # This is a bit ugly
    if strings:
        idval = str(idval)
        if type(otherids) == tuple:
            otherids = (str(otherids[0]), str(otherids[1]))
        else:
            otherids = [str(otherids[0]), str(otherids[1])]

    datasets = tuple([idval] + list(otherids))

    setup_err_estimate_multi_ids(strings=strings)

    zero = ui.create_model_component("scale1d", "zero")
    zero.c0 = 0
    zero.c0.freeze()

    for id in datasets:
        # In this case we have
        #   orig == mdl
        # but let's be explicit in case the code changes
        #
        orig = ui.get_source(id)
        ui.set_source(id, orig + zero)

    ui.fit(idval, *otherids)

    res = ui.get_fit_results()
    assert res.datasets == datasets
    assert res.numpoints == 10
    assert res.statval == pytest.approx(3.379367979541458)
    assert ui.calc_stat() == pytest.approx(4255.615602052843)
    assert mdl.c0.val == pytest.approx(46.046607302070015)
    assert mdl.c1.val == pytest.approx(-1.9783953989993386)

    # I wanted to have zero.c0 thawed at this stage, but then we cannot
    # use the ERR_EST_C0/1_xxx values as the fit has changed (and mdl.c0
    # and zero.c0 are degenerate to boot).
    #
    ui.conf(*datasets, mdl)
    res = ui.get_conf_results()

    assert res.datasets == datasets
    assert res.parnames == ("mdl.c0", "mdl.c1")

    assert res.parmins == pytest.approx([ERR_EST_C0_MIN, ERR_EST_C1_MIN])
    assert res.parmaxes == pytest.approx([ERR_EST_C0_MAX, ERR_EST_C1_MAX])
Example #9
def test_err_estimate_errors_on_frozen(method, clean_ui):
    """Check we error out with frozen par with conf/proj/covar.

    """

    ui.load_arrays(1, [1, 2, 3], [1, 2, 3])
    ui.set_source(ui.polynom1d.mdl)
    with pytest.raises(ParameterErr) as exc:
        method(mdl.c0, mdl.c1)

    assert str(exc.value) == "parameter 'mdl.c1' is frozen"
Example #10
def test_freeze_no_arguments(clean_ui):
    """This is a no-op"""

    mdl = ui.create_model_component("logparabola", "mdl")
    ui.set_source(mdl)
    assert ui.get_num_par_thawed() == 3
    assert ui.get_num_par_frozen() == 1

    ui.freeze()
    assert ui.get_num_par_thawed() == 3
    assert ui.get_num_par_frozen() == 1
Example #11
def setUp(clean_ui, hide_logging):

    x = [-13, -5, -3, 2, 7, 12]
    y = [102.3, 16.7, -0.6, -6.7, -9.9, 33.2]
    dy = np.ones(6) * 5
    ui.load_arrays(1, x, y, dy)
    ui.set_source(ui.polynom1d.poly)
    poly.c1.thaw()
    poly.c2.thaw()
    ui.int_proj(poly.c0)
    ui.fit()
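The fixture above refers to the component through the bare name poly, which Sherpa auto-assigns when ui.polynom1d.poly is evaluated. If that name is not visible in the test module, the component can be fetched explicitly; a sketch:

from sherpa import ui

# Look up the model component created by ui.set_source(ui.polynom1d.poly)
# instead of relying on the auto-assigned name.
poly = ui.get_model_component("poly")
poly.c1.thaw()
poly.c2.thaw()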
Example #12
def test_err_estimate_errors_model_all_frozen(method, clean_ui):
    """Check we error out with frozen model with conf/proj/covar.

    """

    ui.load_arrays(1, [1, 2, 3], [1, 2, 3])
    ui.set_source(ui.polynom1d.mdl)
    for par in mdl.pars:
        par.freeze()

    with pytest.raises(ParameterErr) as exc:
        method(mdl)

    assert str(exc.value) == "Model 'polynom1d.mdl' has no thawed parameters"
Example #13
def test_user_model1d_fit():
    """Check can use in a fit."""

    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ["slope", "intercept"],
                     parvals = [1.0, 1.0])

    mdl = ui.get_model_component(mname)

    x = numpy.asarray([-2.4, 2.3, 5.4, 8.7, 12.3])

    # Set up the data to be scattered around y = -0.2 x + 2.8
    # Pick the deltas so that they sum to 0 (except for central
    # point)
    #
    slope = -0.2
    intercept = 2.8

    dy = numpy.asarray([0.1, -0.2, 0.14, -0.1, 0.2])
    ydata = x * slope + intercept + dy

    ui.load_arrays(1, x, ydata)

    ui.set_source(mname)
    ui.ignore(5.0, 6.0)  # drop the central bin

    ui.set_stat('leastsq')
    ui.set_method('simplex')
    ui.fit()

    fres = ui.get_fit_results()
    assert fres.succeeded
    assert fres.parnames == ('test_model.slope', 'test_model.intercept')
    assert fres.numpoints == 4
    assert fres.dof == 2

    # Tolerance has been adjusted to get the tests to pass on my
    # machine. It's really just to check that the values have changed
    # from their default values.
    #
    assert fres.parvals[0] == pytest.approx(slope, abs=0.01)
    assert fres.parvals[1] == pytest.approx(intercept, abs=0.05)

    # These should be the same values, so no need to use pytest.approx
    # (unless there's some internal translation between types done
    # somewhere?).
    #
    assert mdl.slope.val == fres.parvals[0]
    assert mdl.intercept.val == fres.parvals[1]
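The user-model function um_line is defined elsewhere; for a straight line with slope and intercept parameters it would plausibly look like the following sketch (load_user_model expects a function taking the parameter values and the grid):

def um_line(pars, x):
    """Hypothetical linear user model: pars = (slope, intercept)."""
    return pars[0] * x + pars[1]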
Example #15
    def tst_ui(self, thaw_c1):
        ui.load_arrays(1, self._x, self._y, self._e)
        ui.set_source(1, ui.polynom1d.mdl)
        if thaw_c1:
            ui.thaw(mdl.c1)
        ui.thaw(mdl.c2)
        mdl.c2 = 1
        ui.fit()
        if not thaw_c1:
            ui.thaw(mdl.c1)
            ui.fit()
        ui.conf()
        result = ui.get_conf_results()
        self.cmp_results(result)
Example #16
def test_guess_warns_no_guess_no_argument(caplog, clean_ui):
    """Do we warn when the (implied) model has no guess"""

    ui.load_arrays(1, [1, 2, 3], [-3, 4, 5])
    cpt = DummyModel('dummy')
    ui.set_source(cpt + cpt)

    assert len(caplog.records) == 0
    ui.guess()

    assert len(caplog.records) == 1
    lname, lvl, msg = caplog.record_tuples[0]
    assert lname == "sherpa.ui.utils"
    assert lvl == logging.INFO
    assert msg == "WARNING: No guess found for (dummy + dummy)"
Example #17
def test_thaw_invalid_arguments(string, clean_ui):
    """We error out with an invalid argument"""

    mdl = ui.create_model_component("logparabola", "mdl")
    ui.set_source(mdl)

    with pytest.raises(ArgumentTypeErr) as ae:
        if string:
            ui.thaw("1")
        else:
            ui.thaw(1)

    assert str(
        ae.value
    ) == "'par' must be a parameter or model object or expression string"
Example #18
    def setUp(self):
        # defensive programming (one of the tests has been seen to fail
        # when the whole test suite is run without this)
        ui.clean()
        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        x = [-13, -5, -3, 2, 7, 12]
        y = [102.3, 16.7, -0.6, -6.7, -9.9, 33.2]
        dy = np.ones(6) * 5
        ui.load_arrays(1, x, y, dy)
        ui.set_source(ui.polynom1d.poly)
        poly.c1.thaw()
        poly.c2.thaw()
        ui.int_proj(poly.c0)
        ui.fit()
Example #19
def test_thaw_thawed_parameter(string, clean_ui):
    """Can we thaw a thawed parameter? String argument"""

    mdl = ui.create_model_component("polynom1d", "mdl")
    ui.set_source(mdl)
    assert not mdl.c0.frozen
    assert ui.get_num_par_thawed() == 1
    assert ui.get_num_par_frozen() == 9

    if string:
        ui.thaw("mdl.c0")
    else:
        ui.thaw(mdl.c0)

    assert not mdl.c0.frozen
    assert ui.get_num_par_thawed() == 1
    assert ui.get_num_par_frozen() == 9
Example #20
def test_freeze_frozen_parameter(string, clean_ui):
    """Can we freeze a frozen_parameter?"""

    mdl = ui.create_model_component("polynom1d", "mdl")
    ui.set_source(mdl)
    assert mdl.c1.frozen
    assert ui.get_num_par_thawed() == 1
    assert ui.get_num_par_frozen() == 9

    if string:
        ui.freeze("mdl.c1")
    else:
        ui.freeze(mdl.c1)

    assert mdl.c1.frozen
    assert ui.get_num_par_thawed() == 1
    assert ui.get_num_par_frozen() == 9
Example #21
def test_freeze_parameter(string, clean_ui):
    """Can we freeze a parameter?"""

    mdl = ui.create_model_component("logparabola", "mdl")
    ui.set_source(mdl)
    assert not mdl.c1.frozen
    assert ui.get_num_par_thawed() == 3
    assert ui.get_num_par_frozen() == 1

    if string:
        ui.freeze("mdl.c1")
    else:
        ui.freeze(mdl.c1)

    assert mdl.c1.frozen
    assert ui.get_num_par_thawed() == 2
    assert ui.get_num_par_frozen() == 2
Example #22
def test_freeze_alwaysfrozen_parameter(string, clean_ui):
    """Can we freeze an always-frozen parameter?"""

    mdl = ui.create_model_component("logparabola", "mdl")
    ui.set_source(mdl)
    assert mdl.ref.alwaysfrozen
    assert mdl.ref.frozen
    assert ui.get_num_par_thawed() == 3
    assert ui.get_num_par_frozen() == 1

    if string:
        ui.freeze("mdl.ref")
    else:
        ui.freeze(mdl.ref)

    assert mdl.ref.frozen
    assert ui.get_num_par_thawed() == 3
    assert ui.get_num_par_frozen() == 1
Example #23
def setup_example(idval):
    """Set up a simple dataset for use in the tests.

    Parameters
    ----------
    idval : None, int, str
        The dataset identifier.
    """

    d = example_data()
    m = example_model()
    if idval is None:
        ui.set_data(d)
        ui.set_source(m)

    else:
        ui.set_data(idval, d)
        ui.set_source(idval, m)
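A brief usage sketch of the helper (example_data and example_model are defined elsewhere in the test module, ui is the Sherpa session imported there, and the "src" identifier is an arbitrary choice):

# Default identifier.
setup_example(None)
ui.fit()

# Explicit identifier, integer or string.
setup_example("src")
ui.fit("src")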
Example #24
def tst_ui(thaw_c1, setUp, clean_ui):
    data, mdl = setUp

    ui.load_arrays(1, data.x, data.y, data.staterror)
    ui.set_source(1, ui.polynom1d.mdl)
    if thaw_c1:
        ui.thaw(mdl.c1)

    ui.thaw(mdl.c2)
    mdl.c2 = 1
    ui.fit()

    if not thaw_c1:
        ui.thaw(mdl.c1)
        ui.fit()

    ui.conf()
    result = ui.get_conf_results()
    cmp_results(result)
Example #25
def test_freeze_multi_arguments(string, clean_ui):
    """Check we can combine model and parameters"""

    mdl1 = ui.create_model_component("logparabola", "mdl1")
    mdl2 = ui.create_model_component("polynom1d", "mdl2")
    ui.set_source(mdl1 + mdl2)
    assert not mdl1.c2.frozen
    assert not mdl2.c0.frozen
    assert ui.get_num_par_thawed() == 4
    assert ui.get_num_par_frozen() == 10

    if string:
        ui.freeze("mdl2", "mdl1.c2")
    else:
        ui.freeze(mdl2, mdl1.c2)

    assert mdl1.c2.frozen
    assert mdl2.c0.frozen
    assert ui.get_num_par_thawed() == 2
    assert ui.get_num_par_frozen() == 12
Example #26
def mwl_fit_high_level():
    """Use high-level Sherpa API.

    High-level = session and convenience functions

    Example: http://cxc.harvard.edu/sherpa/threads/simultaneous/
    Example: http://python4astronomers.github.io/fitting/spectrum.html
    """
    import sherpa.ui as ui

    fermi_data = FermiData()
    ui.load_arrays(fermi_data.name, fermi_data.x, fermi_data.y,
                   fermi_data.staterror)

    ui.load_user_stat('fermi_stat', FermiStat.calc_stat,
                      FermiStat.calc_staterror)
    # TODO: is there a good way to get the stat??
    # ui.get_stat('fermi_stat')
    # fermi_stat = ui._session._get_stat_by_name('fermi_stat')
    ui.set_stat(fermi_stat)
    # IPython.embed()

    iact_data = IACTData()
    ui.load_arrays(iact_data.name, iact_data.x, iact_data.y,
                   iact_data.staterror)

    spec_model = ui.logparabola.spec_model
    spec_model.c1 = 0.5
    spec_model.c2 = 0.2
    spec_model.ampl = 5e-11

    ui.set_source(fermi_data.name, spec_model)
    ui.set_source(iact_data.name, spec_model)

    ui.notice(lo=1e-3, hi=None)

    # IPython.embed()
    ui.fit()

    return dict(results=ui.get_fit_results(), model=spec_model)
Example #27
def test_thaw_alwaysfrozen_parameter(string, clean_ui):
    """Can we thaw an always-frozen parameter?"""

    mdl = ui.create_model_component("logparabola", "mdl")
    ui.set_source(mdl)
    assert mdl.ref.alwaysfrozen
    assert mdl.ref.frozen
    assert ui.get_num_par_thawed() == 3
    assert ui.get_num_par_frozen() == 1

    with pytest.raises(ParameterErr) as pe:
        if string:
            ui.thaw("mdl.ref")
        else:
            ui.thaw(mdl.ref)

    assert mdl.ref.frozen
    assert ui.get_num_par_thawed() == 3
    assert ui.get_num_par_frozen() == 1

    assert str(
        pe.value) == "parameter mdl.ref is always frozen and cannot be thawed"
Example #28
def mwl_fit_high_level():
    """Use high-level Sherpa API.

    High-level = session and convenience functions

    Example: http://cxc.harvard.edu/sherpa/threads/simultaneous/
    Example: http://python4astronomers.github.io/fitting/spectrum.html
    """
    import sherpa.ui as ui

    fermi_data = FermiData()
    ui.load_arrays(fermi_data.name, fermi_data.x, fermi_data.y, fermi_data.staterror)

    ui.load_user_stat('fermi_stat', FermiStat.calc_stat, FermiStat.calc_staterror)
    # TODO: is there a good way to get the stat??
    # ui.get_stat('fermi_stat')
    # fermi_stat = ui._session._get_stat_by_name('fermi_stat')
    ui.set_stat(fermi_stat)
    # IPython.embed()


    iact_data = IACTData()
    ui.load_arrays(iact_data.name, iact_data.x, iact_data.y, iact_data.staterror)

    spec_model = ui.logparabola.spec_model
    spec_model.c1 = 0.5
    spec_model.c2 = 0.2
    spec_model.ampl = 5e-11

    ui.set_source(fermi_data.name, spec_model)
    ui.set_source(iact_data.name, spec_model)

    ui.notice(lo=1e-3, hi=None)

    # IPython.embed()
    ui.fit()

    return Bunch(results=ui.get_fit_results(), model=spec_model)
Example #29
def setup_err_estimate_multi_ids(strings=False):
    """Create the environment used in test_err_estimate_xxx tests.

    The model being fit is polynom1d with c0=50, c1=-2; it was evaluated
    and passed through sherpa.utils.poisson_noise to create the datasets.

    Since we can have string or integer ids we allow either,
    but do not try to mix them.

    """

    if strings:
        id1 = "1"
        id2 = "2"
        id3 = "3"
    else:
        id1 = 1
        id2 = 2
        id3 = 3

    ui.load_arrays(id1, [1, 3, 7, 12], [50, 40, 27, 20])
    ui.load_arrays(id2, [-3, 4, 5], [55, 34, 37])
    ui.load_arrays(id3, [10, 12, 20], [24, 26, 7])

    # NOTE: dataset "not-used" is not used in the fit and is not
    # drawn from the distribution used to create the other datasets.
    #
    ui.load_arrays("not-used", [2000, 2010, 2020], [10, 12, 14])

    mdl = ui.create_model_component("polynom1d", "mdl")
    mdl.c1.thaw()
    ui.set_source(id1, mdl)
    ui.set_source(id2, mdl)
    ui.set_source(id3, mdl)

    # apply the model to dataset not-used just so we can check we
    # don't end up using it
    mdl_not_used = ui.create_model_component("scale1d", "mdl_not_used")
    ui.set_source("not-used", mdl + mdl_not_used)

    # use cstat so we have an approximate goodness-of-fit just to
    # check we are getting sensible results.
    #
    ui.set_stat("cstat")
    ui.set_method("simplex")
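The docstring says the datasets were produced by evaluating polynom1d with c0=50, c1=-2 and adding Poisson noise via sherpa.utils.poisson_noise. A sketch of that kind of simulation (assumed, not the exact script used to build the arrays above):

import numpy as np
from sherpa.models.basic import Polynom1D
from sherpa.utils import poisson_noise

x = np.asarray([1, 3, 7, 12])

truth = Polynom1D()
truth.c0 = 50
truth.c1 = -2

# Evaluate the "true" model on the grid and draw Poisson deviates around it.
y = poisson_noise(truth(x))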
Example #30
def fitne(ne_data, nemodeltype, tspec_data=None):
    '''
    Fits the gas number density profile according to the selected profile model.
    The fit is performed using Python Sherpa with the Levenberg-Marquardt
    method of minimizing chi-squared.


    Args:
    -----
    ne_data (astropy table): observed gas density profile
      in the form established by set_prof_data()
    nemodeltype (str): type of model gas density profile to fit; one of
      ['single_beta','cusped_beta','double_beta_tied','double_beta']
    tspec_data (astropy table, optional): observed temperature profile
      in the form established by set_prof_data(); if given, the fitted
      ne model is also evaluated at its radius values

    Returns:
    --------
    nemodel (dictionary): stores relevant information about the model gas
      density profile
        nemodel['type']: ne model type; one of the following:
          ['single_beta','cusped_beta','double_beta_tied','double_beta']
        nemodel['parnames']: names of the stored ne model parameters
        nemodel['parvals']: parameter values of fitted gas density model
        nemodel['parmins']: lower error bound on parvals
        nemodel['parmaxes']: upper error bound on parvals
        nemodel['chisq']: chi-squared of fit
        nemodel['dof']: degrees of freedom
        nemodel['rchisq']: reduced chi-squared of fit
        nemodel['nefit']: ne model values at radial values matching
          tspec_data (the observed temperature profile)

    References:
    -----------
    python sherpa:    https://github.com/sherpa/
    '''

    # remove any existing models and data
    ui.clean()

    # load data
    ui.load_arrays(1, np.array(ne_data['radius']), np.array(ne_data['ne']),
                   np.array(ne_data['ne_err']))

    # set guess and boundaries on params given selected model

    if nemodeltype == 'single_beta':

        # param estimate
        betaguess = 0.6
        rcguess = 20.  # units?????
        ne0guess = max(ne_data['ne'])

        # beta model
        ui.load_user_model(betamodel, "beta1d")
        ui.add_user_pars("beta1d", ["ne0", "rc", "beta"])
        ui.set_source(beta1d)  # creates model
        ui.set_full_model(beta1d)

        # set parameter values
        ui.set_par(beta1d.ne0, ne0guess, min=0, max=10. * max(ne_data['ne']))
        ui.set_par(beta1d.rc, rcguess, min=0.1, max=max(ne_data['radius']))
        ui.set_par(beta1d.beta, betaguess, min=0.1, max=1.)

    if nemodeltype == 'cusped_beta':

        # param estimate
        betaguess = 0.7
        rcguess = 5.  # [kpc]
        ne0guess = max(ne_data['ne'])
        alphaguess = 10.  # ????

        # beta model
        ui.load_user_model(cuspedbetamodel, "cuspedbeta1d")
        ui.add_user_pars("cuspedbeta1d", ["ne0", "rc", "beta", "alpha"])
        ui.set_source(cuspedbeta1d)  # creates model
        ui.set_full_model(cuspedbeta1d)

        # set parameter values
        ui.set_par(cuspedbeta1d.ne0,
                   ne0guess,
                   min=0.001 * max(ne_data['ne']),
                   max=10. * max(ne_data['ne']))
        ui.set_par(cuspedbeta1d.rc,
                   rcguess,
                   min=0.1,
                   max=max(ne_data['radius']))
        ui.set_par(cuspedbeta1d.beta, betaguess, min=0.1, max=1.)
        ui.set_par(cuspedbeta1d.alpha, alphaguess, min=0., max=100.)

    if nemodeltype == 'double_beta':

        # param estimate
        ne0guess1 = max(ne_data['ne'])  # [cm^-3]
        rcguess1 = 10.  # [kpc]
        betaguess1 = 0.6

        ne0guess2 = 0.01 * max(ne_data['ne'])  # [cm^-3]
        rcguess2 = 100.  # [kpc]
        betaguess2 = 0.6

        # double beta model
        ui.load_user_model(doublebetamodel, "doublebeta1d")
        ui.add_user_pars("doublebeta1d",
                         ["ne01", "rc1", "beta1", "ne02", "rc2", "beta2"])
        ui.set_source(doublebeta1d)  # creates model
        ui.set_full_model(doublebeta1d)

        # set parameter values
        ui.set_par(doublebeta1d.ne01,
                   ne0guess1,
                   min=0.0001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d.rc1,
                   rcguess1,
                   min=0.1,
                   max=max(ne_data['radius']))
        ui.set_par(doublebeta1d.beta1, betaguess1, min=0.1, max=1.)

        ui.set_par(doublebeta1d.ne02,
                   ne0guess2,
                   min=0.0001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d.rc2,
                   rcguess2,
                   min=10.,
                   max=max(ne_data['radius']))
        ui.set_par(doublebeta1d.beta2, betaguess2, min=0.1, max=1.)

    if nemodeltype == 'double_beta_tied':

        # param estimate
        ne0guess1 = max(ne_data['ne'])
        rcguess1 = 10.
        betaguess1 = 0.6

        ne0guess2 = 0.01 * max(ne_data['ne'])
        rcguess2 = 100.

        # double beta model
        ui.load_user_model(doublebetamodel_tied, "doublebeta1d_tied")
        ui.add_user_pars("doublebeta1d_tied",
                         ["ne01", "rc1", "beta1", "ne02", "rc2"])
        ui.set_source(doublebeta1d_tied)  # creates model
        ui.set_full_model(doublebeta1d_tied)

        # set parameter values
        ui.set_par(doublebeta1d_tied.ne01,
                   ne0guess1,
                   min=0.00001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d_tied.rc1,
                   rcguess1,
                   min=0.1,
                   max=max(ne_data['radius']))
        ui.set_par(doublebeta1d_tied.beta1, betaguess1, min=0.1, max=1.)

        ui.set_par(doublebeta1d_tied.ne02,
                   ne0guess2,
                   min=0.00001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d_tied.rc2,
                   rcguess2,
                   min=10.,
                   max=max(ne_data['radius']))

    # fit model
    ui.fit()

    # fit statistics
    chisq = ui.get_fit_results().statval
    dof = ui.get_fit_results().dof
    rchisq = ui.get_fit_results().rstat

    # error analysis
    ui.set_conf_opt("max_rstat", 1e9)
    ui.conf()

    parvals = np.array(ui.get_conf_results().parvals)
    parmins = np.array(ui.get_conf_results().parmins)
    parmaxes = np.array(ui.get_conf_results().parmaxes)

    parnames = [
        str(x).split('.')[1] for x in list(ui.get_conf_results().parnames)
    ]

    # where errors are stuck on a hard limit, change error to Inf
    if None in list(parmins):
        ind = np.where(parmins == np.array(None))[0]
        parmins[ind] = float('Inf')

    if None in list(parmaxes):
        ind = np.where(parmaxes == np.array(None))[0]
        parmaxes[ind] = float('Inf')

    # set up a dictionary to contain useful results of fit
    nemodel = {}
    nemodel['type'] = nemodeltype
    nemodel['parnames'] = parnames
    nemodel['parvals'] = parvals
    nemodel['parmins'] = parmins
    nemodel['parmaxes'] = parmaxes
    nemodel['chisq'] = chisq
    nemodel['dof'] = dof
    nemodel['rchisq'] = rchisq

    # if tspec_data included, calculate value of ne model at the same radius
    # positions as temperature profile
    if tspec_data is not None:
        if nemodeltype == 'double_beta':
            nefit_arr = doublebetamodel(nemodel['parvals'],
                                        np.array(tspec_data['radius']))
            # [cm-3]

        if nemodeltype == 'single_beta':
            nefit_arr = betamodel(nemodel['parvals'],
                                  np.array(tspec_data['radius']))
            # [cm-3]

        if nemodeltype == 'cusped_beta':
            nefit_arr = cuspedbetamodel(nemodel['parvals'],
                                        np.array(tspec_data['radius']))
            # [cm-3]

        if nemodeltype == 'double_beta_tied':
            nefit_arr = doublebetamodel_tied(nemodel['parvals'],
                                             np.array(tspec_data['radius']))
            # [cm-3]

        nemodel['nefit'] = nefit_arr

    return nemodel
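A minimal usage sketch for fitne, assuming ne_data is an astropy Table carrying the 'radius', 'ne', and 'ne_err' columns that the function reads (set_prof_data and the model functions such as betamodel are defined elsewhere in the package; the profile values here are made up):

import numpy as np
from astropy.table import Table

# Hypothetical density profile: radius [kpc], ne and ne_err [cm^-3].
ne_data = Table({'radius': np.array([10., 30., 60., 100., 200.]),
                 'ne': np.array([0.05, 0.02, 0.01, 0.006, 0.002]),
                 'ne_err': np.array([0.005, 0.002, 0.001, 0.0006, 0.0002])})

nemodel = fitne(ne_data, 'single_beta')
print(nemodel['parnames'], nemodel['parvals'], nemodel['rchisq'])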
Example #31
# coding: utf-8
import sherpa.ui as ui

ui.load_data("load_template_without_interpolation-bb_data.dat")
ui.load_template_model('bb1', "bb_index.dat", template_interpolator_name=None)
ui.set_source('bb1')
assert ui.get_source().is_discrete
ui.set_source('bb1*const1d.c1+gauss1d.g2**const1d.c2')
assert ui.get_source().is_discrete
Example #32
from matplotlib import pyplot as plt
import numpy as np

from sherpa import ui

x1 = np.arange(-5.0, 30, 0.5)

x2 = np.arange(1.0, 29.0, 0.2)

ui.load_arrays(1, x1, x1 * 0, ui.Data1D)
ui.load_arrays(2, x2, x2 * 0, ui.Data1D)

ui.set_source(1, ui.box1d.box)
box = ui.get_model_component('box')
ui.set_source(2, box)

box.xlow = 10.0
box.xhi = 20.0

# Copy all the objects just to make sure
g1 = ui.gauss1d('g1')
g1.fwhm = 3.0

g2 = ui.gauss1d('g2')
g2.fwhm = 3.0

ui.load_psf('psf1', g1)
ui.load_psf('psf2', g2)

ui.set_psf(1, 'psf1')
ui.set_psf(2, 'psf2')
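The matplotlib import in this example is not used in the lines shown; a plausible continuation (a sketch only, not part of the original script) is to overplot the PSF-convolved box model on the two grids:

# Evaluate the convolved model for each dataset and overplot the results.
for idval in (1, 2):
    mplot = ui.get_model_plot(idval)
    plt.plot(mplot.x, mplot.y, label=f"dataset {idval}")

plt.legend()
plt.show()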
Example #33
# coding: utf-8
import sherpa.ui as ui

ui.load_data("load_template_without_interpolation-bb_data.dat")
ui.load_template_model('bb1', "bb_index.dat", template_interpolator_name=None)
ui.set_source('bb1')
ui.fit()