Example #1
def test_show_conf_basic(clean_ui):
    """Set up a very basic data/model/fit"""

    ui.load_arrays(1, [1, 2, 4], [3, 5, 5])
    ui.set_source(ui.scale1d.mdl)
    ui.fit()
    ui.conf()

    out = StringIO()
    ui.show_conf(outfile=out)
    got = out.getvalue().split('\n')

    assert len(got) == 12
    assert got[0] == "Confidence:Dataset               = 1"
    assert got[1] == "Confidence Method     = confidence"
    assert got[2] == "Iterative Fit Method  = None"
    assert got[3] == "Fitting Method        = levmar"
    assert got[4] == "Statistic             = chi2gehrels"
    assert got[5] == "confidence 1-sigma (68.2689%) bounds:"
    assert got[6] == "   Param            Best-Fit  Lower Bound  Upper Bound"
    assert got[7] == "   -----            --------  -----------  -----------"
    assert got[8] == "   mdl.c0            4.19798     -1.85955      1.85955"
    assert got[9] == ""
    assert got[10] == ""
    assert got[11] == ""
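The 1-sigma interval quoted in the expected output above is controlled by the confidence method's sigma option. A minimal sketch, assuming the same ui session as the test, of asking for roughly 90% bounds instead (set_conf_opt is the same call used in later examples; the numeric value is the two-sided 90% Gaussian quantile):

# Hedged sketch: widen the confidence level before re-running conf().
# sigma = 1.6448536 corresponds to two-sided ~90% coverage for a Gaussian.
ui.set_conf_opt("sigma", 1.6448536)
ui.conf()
print(ui.get_conf_results())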
Example #2
def test_err_estimate_single_parameter(strings, idval, otherids, clean_ui):
    """Ensure we can fti a single parameter with conf/proj/covar.

    Since this uses the same logic we only test the conf routine;
    ideally we'd use all but that's harder to test.

    We use the same model as test_err_estimate_multi_ids but
    here we only want to evaluate the error for the mdl.c1 component.

    The fit and error analysis should be the same however the ordering
    is done.
    """

    # This is a bit ugly
    if strings:
        idval = str(idval)
        if isinstance(otherids, tuple):
            otherids = (str(otherids[0]), str(otherids[1]))
        else:
            otherids = [str(otherids[0]), str(otherids[1])]

    datasets = tuple([idval] + list(otherids))
    setup_err_estimate_multi_ids(strings=strings)
    ui.fit(idval, *otherids)

    # pick an odd ordering just to check we pick it up
    ui.conf(datasets[0], mdl.c1, datasets[1], datasets[2])
    res = ui.get_conf_results()

    assert res.datasets == datasets
    assert res.parnames == ("mdl.c1", )

    assert res.parmins == pytest.approx([ERR_EST_C1_MIN])
    assert res.parmaxes == pytest.approx([ERR_EST_C1_MAX])
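For readers without the test fixtures (setup_err_estimate_multi_ids and the module-level mdl), here is a self-contained sketch of the same idea, restricting conf() to a single parameter of a simple model; the data values and import path are illustrative:

# Hypothetical standalone sketch: error bounds for a single parameter only.
from sherpa import ui  # import path assumed

ui.load_arrays(1, [1, 2, 3, 4], [3, 5, 4, 6])
mdl = ui.create_model_component("polynom1d", "mdl")
ui.thaw(mdl.c1)
ui.set_source(1, mdl)
ui.fit()

ui.conf(mdl.c1)              # only mdl.c1 is analysed; mdl.c0 keeps its best-fit value
res = ui.get_conf_results()
print(res.parnames)          # ("mdl.c1",)
print(res.parmins, res.parmaxes)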
Example #3
def test_show_all_basic(clean_ui):
    """Set up a very basic data/model/fit"""

    ui.load_arrays(1, [1, 2, 4], [3, 5, 5])
    ui.set_source(ui.scale1d.mdl)
    ui.fit()
    ui.conf()
    ui.proj()
    ui.covar()

    def get(value):
        out = StringIO()
        getattr(ui, f"show_{value}")(outfile=out)
        ans = out.getvalue()
        assert len(ans) > 1

        # trim the trailing "\n"
        return ans[:-1]

    # All we are really checking is that the show_all output is the
    # composite of the following. We are not checking that the
    # actual output makes sense for any command.
    #
    expected = get("data") + get("model") + get("fit") + get("conf") + \
        get("proj") + get("covar")

    got = get("all")

    assert expected == got
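Outside of tests, the same show_* calls can write straight to a file rather than a StringIO; a minimal sketch (the filename is illustrative):

# Hedged sketch: dump the full session summary to disk.
# clobber=True overwrites an existing file of the same name.
ui.show_all(outfile="session_summary.txt", clobber=True)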
Example #4
def test_err_estimate_model(strings, idval, otherids, clean_ui):
    """Ensure we can use model with conf/proj/covar.

    This is test_err_estimate_multi_ids but

      - added an extra model to each source (that evaluates to 0)
      - we include the model expression in the call.

    The fit and error analysis should be the same however the ordering
    is done.
    """

    # This is a bit ugly
    if strings:
        idval = str(idval)
        if isinstance(otherids, tuple):
            otherids = (str(otherids[0]), str(otherids[1]))
        else:
            otherids = [str(otherids[0]), str(otherids[1])]

    datasets = tuple([idval] + list(otherids))

    setup_err_estimate_multi_ids(strings=strings)

    zero = ui.create_model_component("scale1d", "zero")
    zero.c0 = 0
    zero.c0.freeze()

    for did in datasets:
        # In this case we have
        #   orig == mdl
        # but let's be explicit in case the code changes
        #
        orig = ui.get_source(did)
        ui.set_source(did, orig + zero)

    ui.fit(idval, *otherids)

    res = ui.get_fit_results()
    assert res.datasets == datasets
    assert res.numpoints == 10
    assert res.statval == pytest.approx(3.379367979541458)
    assert ui.calc_stat() == pytest.approx(4255.615602052843)
    assert mdl.c0.val == pytest.approx(46.046607302070015)
    assert mdl.c1.val == pytest.approx(-1.9783953989993386)

    # I wanted to have zero.c0 thawed at this stage, but then we cannot
    # use the ERR_EST_C0/1_xxx values as the fit has changed (and mdl.c0
    # and zero.c0 are degenerate to boot).
    #
    ui.conf(*datasets, mdl)
    res = ui.get_conf_results()

    assert res.datasets == datasets
    assert res.parnames == ("mdl.c0", "mdl.c1")

    assert res.parmins == pytest.approx([ERR_EST_C0_MIN, ERR_EST_C1_MIN])
    assert res.parmaxes == pytest.approx([ERR_EST_C0_MAX, ERR_EST_C1_MAX])
Example #5
def tst_ui(self, thaw_c1):
    ui.load_arrays(1, self._x, self._y, self._e)
    ui.set_source(1, ui.polynom1d.mdl)
    if thaw_c1:
        ui.thaw(mdl.c1)
    ui.thaw(mdl.c2)
    mdl.c2 = 1
    ui.fit()
    if not thaw_c1:
        ui.thaw(mdl.c1)
        ui.fit()
    ui.conf()
    result = ui.get_conf_results()
    self.cmp_results(result)
Example #6
def test_err_estimate_multi_ids(strings, idval, otherids, clean_ui):
    """Ensure we can use multiple ids with conf/proj/covar.

    Since this uses the same logic we only test the conf routine;
    ideally we'd use all but that's harder to test.

    The fit and error analysis should be the same however the ordering
    is done.
    """

    # This is a bit ugly
    if strings:
        idval = str(idval)
        if isinstance(otherids, tuple):
            otherids = (str(otherids[0]), str(otherids[1]))
        else:
            otherids = [str(otherids[0]), str(otherids[1])]

    datasets = tuple([idval] + list(otherids))

    setup_err_estimate_multi_ids(strings=strings)
    ui.fit(idval, *otherids)

    # The "reduced statistic" is ~0.42 for the fit.
    #
    res = ui.get_fit_results()
    assert res.datasets == datasets
    assert res.numpoints == 10  # sum of datasets 1, 2, 3
    assert res.statval == pytest.approx(3.379367979541458)

    # Since there's a model assigned to a dataset that is not used in the
    # fit, the overall statistic is not the same as res.statval.
    #
    assert ui.calc_stat() == pytest.approx(4255.615602052843)

    assert mdl.c0.val == pytest.approx(46.046607302070015)
    assert mdl.c1.val == pytest.approx(-1.9783953989993386)

    ui.conf(*datasets)
    res = ui.get_conf_results()

    assert res.datasets == datasets
    assert res.parnames == ("mdl.c0", "mdl.c1")

    assert res.parmins == pytest.approx([ERR_EST_C0_MIN, ERR_EST_C1_MIN])
    assert res.parmaxes == pytest.approx([ERR_EST_C0_MAX, ERR_EST_C1_MAX])
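The docstring notes that conf, proj, and covar share the same dispatch logic; a brief sketch of the covariance variant under the same fixtures (names reused from the test above), which is cheaper and assumes a parabolic statistic surface:

# Hedged sketch: same dataset ordering, but the covariance method instead
# of the confidence method; the error estimates it returns are symmetric.
ui.covar(*datasets)
cres = ui.get_covar_results()
assert cres.datasets == datasets
assert cres.parnames == ("mdl.c0", "mdl.c1")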
Example #7
def tst_ui(thaw_c1, setUp, clean_ui):
    data, mdl = setUp

    ui.load_arrays(1, data.x, data.y, data.staterror)
    ui.set_source(1, ui.polynom1d.mdl)
    if thaw_c1:
        ui.thaw(mdl.c1)

    ui.thaw(mdl.c2)
    mdl.c2 = 1
    ui.fit()

    if not thaw_c1:
        ui.thaw(mdl.c1)
        ui.fit()

    ui.conf()
    result = ui.get_conf_results()
    cmp_results(result)
Example #8
def fitne(ne_data, nemodeltype, tspec_data=None):
    '''
    Fits the gas number density profile according to the selected profile
    model. The fit is performed using Python Sherpa with the
    Levenberg-Marquardt method of minimizing chi-squared.

    Args:
    -----
    ne_data (astropy table): observed gas density profile
      in the form established by set_prof_data()
    nemodeltype (str): ne model type; one of
      ['single_beta', 'cusped_beta', 'double_beta_tied', 'double_beta']
    tspec_data (astropy table): observed temperature profile
      in the form established by set_prof_data(); optional, used only to
      evaluate the fitted ne model at the temperature-profile radii

    Returns:
    --------
    nemodel (dictionary): stores relevant information about the model gas
      density profile
        nemodel['type']: ne model type; one of the following:
          ['single_beta', 'cusped_beta', 'double_beta_tied', 'double_beta']
        nemodel['parnames']: names of the stored ne model parameters
        nemodel['parvals']: parameter values of the fitted gas density model
        nemodel['parmins']: lower error bounds on parvals
        nemodel['parmaxes']: upper error bounds on parvals
        nemodel['chisq']: chi-squared of the fit
        nemodel['dof']: degrees of freedom
        nemodel['rchisq']: reduced chi-squared of the fit
        nemodel['nefit']: ne model values at the radial positions of
          tspec_data (the observed temperature profile); only set if
          tspec_data is given

    References:
    -----------
    Python Sherpa: https://github.com/sherpa/
    '''

    # remove any existing models and data
    ui.clean()

    # load data
    ui.load_arrays(1, np.array(ne_data['radius']), np.array(ne_data['ne']),
                   np.array(ne_data['ne_err']))

    # set guess and boundaries on params given selected model

    if nemodeltype == 'single_beta':

        # param estimate
        betaguess = 0.6
        rcguess = 20.  # units?????
        ne0guess = max(ne_data['ne'])

        # beta model
        ui.load_user_model(betamodel, "beta1d")
        ui.add_user_pars("beta1d", ["ne0", "rc", "beta"])
        ui.set_source(beta1d)  # creates model
        ui.set_full_model(beta1d)

        # set parameter values
        ui.set_par(beta1d.ne0, ne0guess, min=0, max=10. * max(ne_data['ne']))
        ui.set_par(beta1d.rc, rcguess, min=0.1, max=max(ne_data['radius']))
        ui.set_par(beta1d.beta, betaguess, min=0.1, max=1.)

    if nemodeltype == 'cusped_beta':

        # param estimate
        betaguess = 0.7
        rcguess = 5.  # [kpc]
        ne0guess = max(ne_data['ne'])
        alphaguess = 10.  # ????

        # beta model
        ui.load_user_model(cuspedbetamodel, "cuspedbeta1d")
        ui.add_user_pars("cuspedbeta1d", ["ne0", "rc", "beta", "alpha"])
        ui.set_source(cuspedbeta1d)  # creates model
        ui.set_full_model(cuspedbeta1d)

        # set parameter values
        ui.set_par(cuspedbeta1d.ne0,
                   ne0guess,
                   min=0.001 * max(ne_data['ne']),
                   max=10. * max(ne_data['ne']))
        ui.set_par(cuspedbeta1d.rc,
                   rcguess,
                   min=0.1,
                   max=max(ne_data['radius']))
        ui.set_par(cuspedbeta1d.beta, betaguess, min=0.1, max=1.)
        ui.set_par(cuspedbeta1d.alpha, alphaguess, min=0., max=100.)

    if nemodeltype == 'double_beta':

        # param estimate
        ne0guess1 = max(ne_data['ne'])  # [cm^-3]
        rcguess1 = 10.  # [kpc]
        betaguess1 = 0.6

        ne0guess2 = 0.01 * max(ne_data['ne'])  # [cm^-3]
        rcguess2 = 100.  # [kpc]
        betaguess2 = 0.6

        # double beta model
        ui.load_user_model(doublebetamodel, "doublebeta1d")
        ui.add_user_pars("doublebeta1d",
                         ["ne01", "rc1", "beta1", "ne02", "rc2", "beta2"])
        ui.set_source(doublebeta1d)  # creates model
        ui.set_full_model(doublebeta1d)

        # set parameter values
        ui.set_par(doublebeta1d.ne01,
                   ne0guess1,
                   min=0.0001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d.rc1,
                   rcguess1,
                   min=0.1,
                   max=max(ne_data['radius']))
        ui.set_par(doublebeta1d.beta1, betaguess1, min=0.1, max=1.)

        ui.set_par(doublebeta1d.ne02,
                   ne0guess2,
                   min=0.0001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d.rc2,
                   rcguess2,
                   min=10.,
                   max=max(ne_data['radius']))
        ui.set_par(doublebeta1d.beta2, betaguess2, min=0.1, max=1.)

    if nemodeltype == 'double_beta_tied':

        # param estimate
        ne0guess1 = max(ne_data['ne'])
        rcguess1 = 10.
        betaguess1 = 0.6

        ne0guess2 = 0.01 * max(ne_data['ne'])
        rcguess2 = 100.

        # double beta model
        ui.load_user_model(doublebetamodel_tied, "doublebeta1d_tied")
        ui.add_user_pars("doublebeta1d_tied",
                         ["ne01", "rc1", "beta1", "ne02", "rc2"])
        ui.set_source(doublebeta1d_tied)  # creates model
        ui.set_full_model(doublebeta1d_tied)

        # set parameter values
        ui.set_par(doublebeta1d_tied.ne01,
                   ne0guess1,
                   min=0.00001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d_tied.rc1,
                   rcguess1,
                   min=0.1,
                   max=max(ne_data['radius']))
        ui.set_par(doublebeta1d_tied.beta1, betaguess1, min=0.1, max=1.)

        ui.set_par(doublebeta1d_tied.ne02,
                   ne0guess2,
                   min=0.00001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d_tied.rc2,
                   rcguess2,
                   min=10.,
                   max=max(ne_data['radius']))

    # fit model
    ui.fit()

    # fit statistics
    chisq = ui.get_fit_results().statval
    dof = ui.get_fit_results().dof
    rchisq = ui.get_fit_results().rstat

    # error analysis
    ui.set_conf_opt("max_rstat", 1e9)
    ui.conf()

    parvals = np.array(ui.get_conf_results().parvals)
    parmins = np.array(ui.get_conf_results().parmins)
    parmaxes = np.array(ui.get_conf_results().parmaxes)

    parnames = [
        str(x).split('.')[1] for x in list(ui.get_conf_results().parnames)
    ]

    # where errors are stuck on a hard limit, change error to Inf
    if None in list(parmins):
        ind = np.where(parmins == np.array(None))[0]
        parmins[ind] = float('Inf')

    if None in list(parmaxes):
        ind = np.where(parmaxes == np.array(None))[0]
        parmaxes[ind] = float('Inf')

    # set up a dictionary to contain useful results of fit
    nemodel = {}
    nemodel['type'] = nemodeltype
    nemodel['parnames'] = parnames
    nemodel['parvals'] = parvals
    nemodel['parmins'] = parmins
    nemodel['parmaxes'] = parmaxes
    nemodel['chisq'] = chisq
    nemodel['dof'] = dof
    nemodel['rchisq'] = rchisq

    # if tspec_data included, calculate value of ne model at the same radius
    # positions as temperature profile
    if tspec_data is not None:
        if nemodeltype == 'double_beta':
            nefit_arr = doublebetamodel(nemodel['parvals'],
                                        np.array(tspec_data['radius']))
            # [cm-3]

        if nemodeltype == 'single_beta':
            nefit_arr = betamodel(nemodel['parvals'],
                                  np.array(tspec_data['radius']))
            # [cm-3]

        if nemodeltype == 'cusped_beta':
            nefit_arr = cuspedbetamodel(nemodel['parvals'],
                                        np.array(tspec_data['radius']))
            # [cm-3]

        if nemodeltype == 'double_beta_tied':
            nefit_arr = doublebetamodel_tied(nemodel['parvals'],
                                             np.array(tspec_data['radius']))
            # [cm-3]

        nemodel['nefit'] = nefit_arr

    return nemodel
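A hypothetical call sketch for fitne(), assuming the module's user-model functions (e.g. betamodel) are defined; the table stands in for the output of set_prof_data() and uses the 'radius', 'ne', and 'ne_err' columns the function reads, with placeholder values:

# Hypothetical usage sketch; numbers are placeholders, not real measurements.
from astropy.table import Table

ne_data = Table({'radius': [10., 30., 60., 100., 150.],        # [kpc]
                 'ne': [0.05, 0.02, 0.01, 0.006, 0.004],       # [cm^-3]
                 'ne_err': [0.005, 0.002, 0.001, 0.0006, 0.0004]})

nemodel = fitne(ne_data, 'single_beta')
print(nemodel['parnames'])
print(nemodel['parvals'], nemodel['parmins'], nemodel['parmaxes'])
print('reduced chi-squared:', nemodel['rchisq'])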
Example #9
def fit_evol(dateglob='20?????', rootdir='darkhist_peaknorm', outroot='', xmin=25.0, xmax=4000,
             conf=True, gauss=False):
    results = {}
    fileglob = os.path.join(rootdir, '{}.dat'.format(dateglob))

    for i, filename in enumerate(glob(fileglob)):
        filedate = re.search(r'(\d{7})', filename).group(1)
        print "\n\n*************** {} *****************".format(filename)
        plt.figure(1)
        ui.load_data(1, filename, 2)
        data = ui.get_data()
        ui.ignore(None, xmin)
        ui.ignore(xmax, None)

        dark_models.xall = data.x
        # dark_models.imin = np.where(xall > xmin)[0][0]
        # dark_models.imax = np.where(xall > xmax)[0][0]

        sbp.gamma1 = 0.05
        sbp.gamma2 = 3.15
        sbp.gamma2.min = 2.
        sbp.gamma2.max = 4.
        sbp.x_b = 130.
        sbp.x_b.min = 100.
        sbp.x_b.max = 160.
        sbp.x_r = 50.
        ok = (data.x > 40) & (data.x < 60)
        sbp.ampl1 = np.mean(data.y[ok])

        if gauss:
            fit_gauss_sbp()
        else:
            fit_sbp()

        pars = (sbp.gamma1.val, sbp.gamma2.val, sbp.x_b.val, sbp.x_r.val, sbp.ampl1.val)
        fit_y = dark_models.smooth_broken_pow(pars, data.x)

        if conf:
            ui.set_conf_opt('numcores', 1)
            ui.conf()
            res = ui.get_conf_results()
            result = dict((x, getattr(res, x))
                          for x in ('parnames', 'parmins', 'parvals', 'parmaxes'))
            result['x'] = data.x
            result['y'] = data.y
            result['y_fit'] = fit_y
            results[filedate] = result

        if outroot is not None:
            ui.notice(0, xmax)
            ui.set_xlog()
            ui.set_ylog()
            ui.plot_fit()
            plt.xlim(1, 1e4)
            plt.ylim(0.5, 1e5)
            plt.grid(True)
            plt.xlabel('Dark current (e-/sec)')
            outfile = os.path.join(rootdir, '{}{}.png'.format(outroot, filedate))
            print('Writing', outfile)
            plt.savefig(outfile)

            if conf:
                outfile = os.path.join(rootdir, '{}{}.pkl'.format(outroot, filedate))
                print('Writing', outfile)
                pickle.dump(result, open(outfile, 'wb'), protocol=-1)

    if outroot is not None:
        outfile = os.path.join(rootdir, '{}fits.pkl'.format(outroot))
        print('Writing', outfile)
        pickle.dump(results, open(outfile, 'wb'), protocol=-1)

    return results
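A hypothetical invocation sketch, assuming rootdir contains the {date}.dat histogram files implied by the fileglob and that the module-level models (sbp, dark_models, fit_sbp) are set up; the outroot prefix is illustrative:

# Hypothetical usage; the date glob and directory follow the defaults above.
results = fit_evol(dateglob='2017???', rootdir='darkhist_peaknorm',
                   outroot='evol_', conf=True, gauss=False)
print(sorted(results.keys()))   # one entry per fitted {date}.dat file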