Example 1
    def test_load_template_with_interpolation(self):
        self.run_thread('load_template_with_interpolation')
        try:
            self.assertEqualWithinTol(2023.46, ui.get_fit_results().parvals[0], 0.001)
            self.assertEqualWithinTol(2743.47, ui.get_fit_results().parvals[1], 0.001)
        except AssertionError:
            self.assertEqualWithinTol(2743.47, ui.get_fit_results().parvals[0], 0.001)
            self.assertEqualWithinTol(2023.46, ui.get_fit_results().parvals[1], 0.001)
Example 2
def fit_pix_values(t_ccd, esec, id=1):
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.WARN)
    data_id = id
    ui.clean()
    ui.set_method('simplex')
    ui.load_user_model(dark_scale_model, 'model')
    ui.add_user_pars('model', ['scale', 'dark_t_ref'])
    ui.set_model(data_id, 'model')
    ui.load_arrays(
        data_id,
        np.array(t_ccd),
        np.array(esec),
    )
    ui.set_staterror(data_id, 30 * np.ones(len(t_ccd)))
    model.scale.val = 0.588
    model.scale.min = 0.3
    model.scale.max = 1.0
    model.dark_t_ref.val = 500
    ui.freeze(model.scale)
    # If the temperature range spans more than 2 degrees, fit first
    # for dark_t_ref, then thaw and fit for model.scale as well.
    # Otherwise just use/return the fit of dark_t_ref.
    if np.max(t_ccd) - np.min(t_ccd) > 2:
        # Fit first for dark_t_ref
        ui.fit(data_id)
        ui.thaw(model.scale)
    ui.fit(data_id)
    return ui.get_fit_results(), ui.get_model(data_id)
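A minimal usage sketch for the function above; the input arrays here are hypothetical, not from the source:

t_ccd = [-14.0, -13.5, -12.8, -11.9]   # hypothetical CCD temperatures
esec = [120.0, 131.0, 148.0, 170.0]    # hypothetical dark-current values
fitres, fitmodel = fit_pix_values(t_ccd, esec, id=1)
print(fitres.parvals)                  # best-fit (scale, dark_t_ref)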
Example 3
def test_err_estimate_model(strings, idval, otherids, clean_ui):
    """Ensure we can use model with conf/proj/covar.

    This is test_err_estimate_multi_ids but

      - added an extra model to each source (that evaluates to 0)
      - we include the model expression in the call.

    The fit and error analysis should be the same regardless of how
    the ordering is done.
    """

    # This is a bit ugly
    if strings:
        idval = str(idval)
        if isinstance(otherids, tuple):
            otherids = (str(otherids[0]), str(otherids[1]))
        else:
            otherids = [str(otherids[0]), str(otherids[1])]

    datasets = tuple([idval] + list(otherids))

    setup_err_estimate_multi_ids(strings=strings)

    zero = ui.create_model_component("scale1d", "zero")
    zero.c0 = 0
    zero.c0.freeze()

    for id in datasets:
        # In this case we have
        #   orig == mdl
        # but let's be explicit in case the code changes
        #
        orig = ui.get_source(id)
        ui.set_source(id, orig + zero)

    ui.fit(idval, *otherids)

    res = ui.get_fit_results()
    assert res.datasets == datasets
    assert res.numpoints == 10
    assert res.statval == pytest.approx(3.379367979541458)
    assert ui.calc_stat() == pytest.approx(4255.615602052843)
    assert mdl.c0.val == pytest.approx(46.046607302070015)
    assert mdl.c1.val == pytest.approx(-1.9783953989993386)

    # I wanted to have zero.c0 thawed at this stage, but then we cannot
    # use the ERR_EST_C0/1_xxx values as the fit has changed (and mdl.c0
    # and zero.c0 are degenerate to boot).
    #
    ui.conf(*datasets, mdl)
    res = ui.get_conf_results()

    assert res.datasets == datasets
    assert res.parnames == ("mdl.c0", "mdl.c1")

    assert res.parmins == pytest.approx([ERR_EST_C0_MIN, ERR_EST_C1_MIN])
    assert res.parmaxes == pytest.approx([ERR_EST_C0_MAX, ERR_EST_C1_MAX])
Example 4
def test_load_template_with_interpolation(run_thread, clean_ui):
    run_thread('load_template_with_interpolation')
    pvals = ui.get_fit_results().parvals
    pmin = pvals[0]
    pmax = pvals[1]
    if pmax < pmin:
        (pmin, pmax) = (pmax, pmin)

    tol = 0.001
    assert pmin == pytest.approx(2023.46, rel=tol)
    assert pmax == pytest.approx(2743.47, rel=tol)
Example 5
    def test_load_template_with_interpolation(self):
        self.run_thread('load_template_with_interpolation')
        pvals = ui.get_fit_results().parvals
        pmin = pvals[0]
        pmax = pvals[1]
        if pmax < pmin:
            (pmin, pmax) = (pmax, pmin)

        tol = 0.001
        self.assertEqualWithinTol(2023.46, pmin, tol)
        self.assertEqualWithinTol(2743.47, pmax, tol)
Example 6
def test_user_model1d_fit():
    """Check can use in a fit."""

    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ["slope", "intercept"],
                     parvals=[1.0, 1.0])

    mdl = ui.get_model_component(mname)

    x = numpy.asarray([-2.4, 2.3, 5.4, 8.7, 12.3])

    # Set up the data to be scattered around y = -0.2 x + 2.8
    # Pick the deltas so that they sum to 0 (except for central
    # point)
    #
    slope = -0.2
    intercept = 2.8

    dy = numpy.asarray([0.1, -0.2, 0.14, -0.1, 0.2])
    ydata = x * slope + intercept + dy

    ui.load_arrays(1, x, ydata)

    ui.set_source(mname)
    ui.ignore(5.0, 6.0)  # drop the central bin

    ui.set_stat('leastsq')
    ui.set_method('simplex')
    ui.fit()

    fres = ui.get_fit_results()
    assert fres.succeeded
    assert fres.parnames == ('test_model.slope', 'test_model.intercept')
    assert fres.numpoints == 4
    assert fres.dof == 2

    # Tolerance has been adjusted to get the tests to pass on my
    # machine. It's really just to check that the values have changed
    # from their default values.
    #
    assert fres.parvals[0] == pytest.approx(slope, abs=0.01)
    assert fres.parvals[1] == pytest.approx(intercept, abs=0.05)

    # These should be the same values, so no need to use pytest.approx
    # (unless there's some internal translation between types done
    # somewhere?).
    #
    assert mdl.slope.val == fres.parvals[0]
    assert mdl.intercept.val == fres.parvals[1]
Example 7
def _fit_poly(fit_data, evt_times, degree, data_id=0):
    """
    Given event data transformed into Y or Z angle positions, and a degree of the desired
    fit polynomial, fit a polynomial to the data.

    :param fit_data: event y or z angle position data
    :param evt_times: times of event/fit_data
    :param degree: degree of polynomial to use for the fit model
    :param data_id: sherpa dataset id to use for the fit

    :returns: (sherpa model plot, sherpa model)
    """
    # Set initial value for fit data position error
    init_error = 1

    ui.clean()
    ui.load_arrays(data_id, evt_times - evt_times[0], fit_data,
                   np.zeros_like(fit_data) + init_error)
    v2("Fitting a line to the data to get reduced stat errors")
    # First just fit a line to get reduced errors on this set
    ui.polynom1d.line
    ui.set_model(data_id, 'line')
    ui.thaw('line.c1')
    ui.fit(data_id)
    fit = ui.get_fit_results()
    calc_error = init_error * np.sqrt(fit.rstat)
    ui.set_staterror(data_id, calc_error)
    # Then fit the specified model
    v2("Fitting a polynomial of degree {} to the data".format(degree))
    ui.polynom1d.fitpoly
    ui.freeze('fitpoly')
    # Thaw the coefficients requested by the degree of the desired polynomial
    ui.thaw('fitpoly.c0')
    fitpoly.c0.val = 0
    for deg in range(1, 1 + degree):
        ui.thaw("fitpoly.c{}".format(deg))
    ui.set_model(data_id, 'fitpoly')
    ui.fit(data_id)
    # Let's screw up Y on purpose
    if data_id == 0:
        fitpoly.c0.val = 0
        fitpoly.c1.val = 7.5e-05
        fitpoly.c2.val = -1.0e-09
        fitpoly.c3.val = 0
        fitpoly.c4.val = 0
    mp = ui.get_model_plot(data_id)
    model = ui.get_model(data_id)
    return mp, model
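Based on the docstring, a hypothetical call would look like this; evt_times and fit_data are assumed to be matching 1-D numpy arrays:

import numpy as np

evt_times = np.linspace(0.0, 1000.0, 50)             # hypothetical event times
fit_data = 1e-4 * evt_times + 0.05 * np.sin(evt_times)  # hypothetical angles
mplot, model = _fit_poly(fit_data, evt_times, degree=2)
# mplot.x / mplot.y hold the model-plot points; model is the fitted polynomial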
Example 8
def test_err_estimate_multi_ids(strings, idval, otherids, clean_ui):
    """Ensure we can use multiple ids with conf/proj/covar.

    Since this uses the same logic we only test the conf routine;
    ideally we'd use all but that's harder to test.

    The fit and error analysis should be the same regardless of how
    the ordering is done.
    """

    # This is a bit ugly
    if strings:
        idval = str(idval)
        if isinstance(otherids, tuple):
            otherids = (str(otherids[0]), str(otherids[1]))
        else:
            otherids = [str(otherids[0]), str(otherids[1])]

    datasets = tuple([idval] + list(otherids))

    setup_err_estimate_multi_ids(strings=strings)
    ui.fit(idval, *otherids)

    # The "reduced statistic" is ~0.42 for the fit.
    #
    res = ui.get_fit_results()
    assert res.datasets == datasets
    assert res.numpoints == 10  # sum of datasets 1, 2, 3
    assert res.statval == pytest.approx(3.379367979541458)

    # Since there's a model assigned to a dataset that is not used in
    # the fit, the overall statistic is not the same as res.statval.
    #
    assert ui.calc_stat() == pytest.approx(4255.615602052843)

    assert mdl.c0.val == pytest.approx(46.046607302070015)
    assert mdl.c1.val == pytest.approx(-1.9783953989993386)

    ui.conf(*datasets)
    res = ui.get_conf_results()

    assert res.datasets == datasets
    assert res.parnames == ("mdl.c0", "mdl.c1")

    assert res.parmins == pytest.approx([ERR_EST_C0_MIN, ERR_EST_C1_MIN])
    assert res.parmaxes == pytest.approx([ERR_EST_C0_MAX, ERR_EST_C1_MAX])
Example 9
def fit_pix_values(t_ccd, esec, id=1):
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.WARN)
    data_id = id
    ui.clean()
    ui.set_method("simplex")
    ui.load_user_model(dark_scale_model, "model")
    ui.add_user_pars("model", ["scale", "dark_t_ref"])
    ui.set_model(data_id, "model")
    ui.load_arrays(data_id, np.array(t_ccd), np.array(esec), 0.1 * np.ones(len(t_ccd)))
    model.scale.val = 0.70
    model.dark_t_ref.val = 500
    ui.freeze(model.scale)
    # Fit first for dark_t_ref with model.scale frozen, then thaw
    # model.scale and fit again.
    ui.fit(data_id)
    ui.thaw(model.scale)
    ui.fit(data_id)
    return ui.get_fit_results(), ui.get_model(data_id)
Example 10
def _fit_poly(fit_data, evt_times, degree, data_id=0):
    """
    Given event data transformed into Y or Z angle positions, and a degree of the desired
    fit polynomial, fit a polynomial to the data.

    :param fit_data: event y or z angle position data
    :param evt_times: times of event/fit_data
    :param degree: degree of polynomial to use for the fit model
    :param data_id: sherpa dataset id to use for the fit

    :returns: (sherpa model plot, sherpa model)
    """
    # Set initial value for fit data position error
    init_error = 1

    ui.clean()
    ui.load_arrays(data_id, evt_times - evt_times[0], fit_data,
                   np.zeros_like(fit_data) + init_error)
    v2("Fitting a line to the data to get reduced stat errors")
    # First just fit a line to get reduced errors on this set
    ui.polynom1d.line
    ui.set_model(data_id, 'line')
    ui.thaw('line.c1')
    ui.fit(data_id)
    fit = ui.get_fit_results()
    calc_error = init_error * np.sqrt(fit.rstat)
    ui.set_staterror(data_id, calc_error)
    # Then fit the specified model
    v2("Fitting a polynomial of degree {} to the data".format(degree))
    ui.polynom1d.fitpoly
    ui.freeze('fitpoly')
    # Thaw the coefficients requested by the degree of the desired polynomial
    ui.thaw('fitpoly.c0')
    fitpoly.c0.val = 0
    for deg in range(1, 1 + degree):
        ui.thaw("fitpoly.c{}".format(deg))
    ui.set_model(data_id, 'fitpoly')
    ui.fit(data_id)
    mp = ui.get_model_plot(data_id)
    model = ui.get_model(data_id)
    return mp, model
Example 11
def mwl_fit_high_level():
    """Use high-level Sherpa API.

    High-level = session and convenience functions

    Example: http://cxc.harvard.edu/sherpa/threads/simultaneous/
    Example: http://python4astronomers.github.io/fitting/spectrum.html
    """
    import sherpa.ui as ui

    fermi_data = FermiData()
    ui.load_arrays(fermi_data.name, fermi_data.x, fermi_data.y,
                   fermi_data.staterror)

    ui.load_user_stat('fermi_stat', FermiStat.calc_stat,
                      FermiStat.calc_staterror)
    # TODO: is there a good way to get the stat??
    # ui.get_stat('fermi_stat')
    # fermi_stat = ui._session._get_stat_by_name('fermi_stat')
    ui.set_stat(fermi_stat)
    # IPython.embed()

    iact_data = IACTData()
    ui.load_arrays(iact_data.name, iact_data.x, iact_data.y,
                   iact_data.staterror)

    spec_model = ui.logparabola.spec_model
    spec_model.c1 = 0.5
    spec_model.c2 = 0.2
    spec_model.ampl = 5e-11

    ui.set_source(fermi_data.name, spec_model)
    ui.set_source(iact_data.name, spec_model)

    ui.notice(lo=1e-3, hi=None)

    # IPython.embed()
    ui.fit()

    return dict(results=ui.get_fit_results(), model=spec_model)
Example 12
def mwl_fit_high_level():
    """Use high-level Sherpa API.

    High-level = session and convenience functions

    Example: http://cxc.harvard.edu/sherpa/threads/simultaneous/
    Example: http://python4astronomers.github.io/fitting/spectrum.html
    """
    import sherpa.ui as ui

    fermi_data = FermiData()
    ui.load_arrays(fermi_data.name, fermi_data.x, fermi_data.y, fermi_data.staterror)

    ui.load_user_stat('fermi_stat', FermiStat.calc_stat, FermiStat.calc_staterror)
    # TODO: is there a good way to get the stat??
    # ui.get_stat('fermi_stat')
    # fermi_stat = ui._session._get_stat_by_name('fermi_stat')
    ui.set_stat(fermi_stat)
    # IPython.embed()


    iact_data = IACTData()
    ui.load_arrays(iact_data.name, iact_data.x, iact_data.y, iact_data.staterror)

    spec_model = ui.logparabola.spec_model
    spec_model.c1 = 0.5
    spec_model.c2 = 0.2
    spec_model.ampl = 5e-11

    ui.set_source(fermi_data.name, spec_model)
    ui.set_source(iact_data.name, spec_model)

    ui.notice(lo=1e-3, hi=None)

    # IPython.embed()
    ui.fit()

    return Bunch(results=ui.get_fit_results(), model=spec_model)
Example 13
def run_fits(obsids, ax, user_pars=None,
             fixed_pars=None, guess_pars=None, label='model',
             per_obs_dir='per_obs_nfits',
             outdir=None, redo=False):

    if len(obsids) == 0:
        print "No obsids, nothing to fit"
        return None
    if user_pars is None:
        user_pars = USER_PARS

    if not os.path.exists(per_obs_dir):
        os.makedirs(per_obs_dir)

    obsfits = []
    for obsid in obsids:

        outdir = os.path.join(per_obs_dir, 'obs{:05d}'.format(obsid))
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        model_file = os.path.join(outdir, '{}.pkl'.format(label))
        if os.path.exists(model_file) and not redo:
            #logger.warn('Using previous fit found in %s' % model_file)
            print model_file
            mod_pick = open(model_file, 'r')
            modelfit = cPickle.load(mod_pick)
            mod_pick.close()
            obsfits.append(modelfit)
            continue

        modelfit = {'label': obsid}

        ui.clean()
        data_id = 0
        obsdir = "%s/obs%05d" % (DATADIR, obsid)
        tf = open(os.path.join(obsdir,'tilt.pkl'), 'r')
        tilt = cPickle.load(tf)
        tf.close()
        pf = open(os.path.join(obsdir, 'pos.pkl'), 'r')
        pos = cPickle.load(pf)
        pf.close()

        pos_data = pos[ax]
        point_error = 5
        pos_data_mean = np.mean(pos_data)
        ui.set_method('simplex')

        # Fit a line to get more reasonable errors
        init_staterror = np.zeros(len(pos_data)) + point_error
        ui.load_arrays(data_id,
                       pos['time']-pos['time'][0],
                       pos_data-np.mean(pos_data),
                       init_staterror)
        ui.polynom1d.ypoly
        ui.set_model(data_id, 'ypoly')
        ui.thaw(ypoly.c0, ypoly.c1)
        ui.fit(data_id)
        fit = ui.get_fit_results()
        calc_staterror = init_staterror * np.sqrt(fit.rstat)
        ui.set_staterror(data_id, calc_staterror)
        # Confirm those errors
        ui.fit(data_id)
        fit = ui.get_fit_results()
        if abs(fit.rstat - 1) > 0.2:
            raise ValueError('Reduced statistic not close to 1 for error calc')

        # Load up data to do the real model fit
        fit_times = pos['time']
        tm_func = tilt_model(tilt,
                             fit_times,
                             user_pars=user_pars)

        ui.get_data(data_id).name = str(obsid)
        ui.load_user_model(tm_func, 'tiltm%d' % data_id)
        ui.add_user_pars('tiltm%d' % data_id, user_pars)
        ui.set_method('simplex')
        ui.set_model(data_id, 'tiltm%d' % (data_id))
        ui.set_par('tiltm%d.diam' % data_id, 0)

        if fixed_pars is not None and ax in fixed_pars:
            for par in fixed_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), fixed_pars[ax][par])
                ui.freeze('tiltm{}.{}'.format(0, par))

        if guess_pars is not None and ax in guess_pars:
            for par in guess_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), guess_pars[ax][par])

        ui.show_all()
        # Fit the tilt model
        ui.fit(data_id)
        fitres = ui.get_fit_results()
        ui.confidence(data_id)
        myconf = ui.get_confidence_results()

#        save_fits(ax=ax, fit=fitres, conf=myconf, outdir=outdir)
#        plot_fits(ids,outdir=os.path.join(outdir,'fit_plots'))

        axmod = dict(fit=fitres, conf=myconf)
        for idx, modpar in enumerate(myconf.parnames):
            # Strip the leading 'tiltm0.' prefix (lstrip would remove
            # any matching leading characters, not just the prefix)
            par = modpar.split('.', 1)[1]
            axmod[par] = ui.get_par('tiltm0.%s' % par).val
            axmod["{}_parmax".format(par)] = myconf.parmaxes[idx]
            axmod["{}_parmin".format(par)] = myconf.parmins[idx]
        modelfit[ax] = axmod

        mod_pick = open(model_file, 'w')
        cPickle.dump( modelfit, mod_pick)
        mod_pick.close()

        obsfits.append(modelfit)

        plot_fits([dict(obsid=obsid, data_id=data_id, ax=ax)],
                  posdir=obsdir,
                  outdir=outdir)


    return obsfits
Example 14
    def test_load_template_interpolator(self):
        self.run_thread('load_template_interpolator')
        pval = ui.get_fit_results().parvals[0]
        self.assertEqualWithinTol(2743.91, pval, 0.001)
Example 15
def test_load_template_interpolator(run_thread, clean_ui):
    run_thread('load_template_interpolator')
    pval = ui.get_fit_results().parvals[0]
    assert pval == pytest.approx(2743.91, rel=0.001)
Example 16
def run_fits(obsids,
             ax,
             user_pars=None,
             fixed_pars=None,
             guess_pars=None,
             label='model',
             per_obs_dir='per_obs_nfits',
             outdir=None,
             redo=False):

    if len(obsids) == 0:
        print "No obsids, nothing to fit"
        return None
    if user_pars is None:
        user_pars = USER_PARS

    if not os.path.exists(per_obs_dir):
        os.makedirs(per_obs_dir)

    obsfits = []
    for obsid in obsids:

        outdir = os.path.join(per_obs_dir, 'obs{:05d}'.format(obsid))
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        model_file = os.path.join(outdir, '{}.pkl'.format(label))
        if os.path.exists(model_file) and not redo:
            #logger.warn('Using previous fit found in %s' % model_file)
            print model_file
            mod_pick = open(model_file, 'r')
            modelfit = cPickle.load(mod_pick)
            mod_pick.close()
            obsfits.append(modelfit)
            continue

        modelfit = {'label': obsid}

        ui.clean()
        data_id = 0
        obsdir = "%s/obs%05d" % (DATADIR, obsid)
        tf = open(os.path.join(obsdir, 'tilt.pkl'), 'r')
        tilt = cPickle.load(tf)
        tf.close()
        pf = open(os.path.join(obsdir, 'pos.pkl'), 'r')
        pos = cPickle.load(pf)
        pf.close()

        pos_data = pos[ax]
        point_error = 5
        pos_data_mean = np.mean(pos_data)
        ui.set_method('simplex')

        # Fit a line to get more reasonable errors
        init_staterror = np.zeros(len(pos_data)) + point_error
        ui.load_arrays(data_id, pos['time'] - pos['time'][0],
                       pos_data - np.mean(pos_data), init_staterror)
        ui.polynom1d.ypoly
        ui.set_model(data_id, 'ypoly')
        ui.thaw(ypoly.c0, ypoly.c1)
        ui.fit(data_id)
        fit = ui.get_fit_results()
        calc_staterror = init_staterror * np.sqrt(fit.rstat)
        ui.set_staterror(data_id, calc_staterror)
        # Confirm those errors
        ui.fit(data_id)
        fit = ui.get_fit_results()
        if (abs(fit.rstat - 1) > .2):
            raise ValueError('Reduced statistic not close to 1 for error calc')

        # Load up data to do the real model fit
        fit_times = pos['time']
        tm_func = tilt_model(tilt, fit_times, user_pars=user_pars)

        ui.get_data(data_id).name = str(obsid)
        ui.load_user_model(tm_func, 'tiltm%d' % data_id)
        ui.add_user_pars('tiltm%d' % data_id, user_pars)
        ui.set_method('simplex')
        ui.set_model(data_id, 'tiltm%d' % (data_id))
        ui.set_par('tiltm%d.diam' % data_id, 0)

        if fixed_pars is not None and ax in fixed_pars:
            for par in fixed_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), fixed_pars[ax][par])
                ui.freeze('tiltm{}.{}'.format(0, par))

        if guess_pars is not None and ax in guess_pars:
            for par in guess_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), guess_pars[ax][par])

        ui.show_all()
        # Fit the tilt model
        ui.fit(data_id)
        fitres = ui.get_fit_results()
        ui.confidence(data_id)
        myconf = ui.get_confidence_results()

        #        save_fits(ax=ax, fit=fitres, conf=myconf, outdir=outdir)
        #        plot_fits(ids,outdir=os.path.join(outdir,'fit_plots'))

        axmod = dict(fit=fitres, conf=myconf)
        for idx, modpar in enumerate(myconf.parnames):
            # Strip the leading 'tiltm0.' prefix (lstrip would remove
            # any matching leading characters, not just the prefix)
            par = modpar.split('.', 1)[1]
            axmod[par] = ui.get_par('tiltm0.%s' % par).val
            axmod["{}_parmax".format(par)] = myconf.parmaxes[idx]
            axmod["{}_parmin".format(par)] = myconf.parmins[idx]
        modelfit[ax] = axmod

        mod_pick = open(model_file, 'w')
        cPickle.dump(modelfit, mod_pick)
        mod_pick.close()

        obsfits.append(modelfit)

        plot_fits([dict(obsid=obsid, data_id=data_id, ax=ax)],
                  posdir=obsdir,
                  outdir=outdir)

    return obsfits
Example 17
    fmod.b.min = 0
    fmod.b.max = 1
    fmod.m.min = 0
    fmod.m.max = 0.5
    fmod.b.val = 1e-7

    ui.load_user_stat("loglike", llh, my_err)
    ui.set_stat(loglike)
    # The tricky part here is that the "model" is the probability
    # polynomial we've defined, evaluated at the data x values.
    # The model and the data are passed to the user stat / llh
    # function as the fit is minimized.
    ui.fit(data_id)
    myfit = ui.get_fit_results()
    #axplot[ftype] = ui.get_model_plot(data_id)
    if myfit.succeeded:
        import pickle
        pickle.dump(myfit, open('%s_fitfile.pkl' % ftype, 'w'))

        rep_file = open('%s_fitfile.json' % ftype, 'w')
        rep_file.write(json.dumps(dict(time0=trend_start,
                                       datestop=trend_date_stop,
                                       datestart=trend_date_start,
                                       data=ftype,
                                       bin='unbinned_likelihood',
                                       m=fmod.m.val,
                                       b=fmod.b.val,
                                       comment="mx+b with b at time0 and m = (delta rate)/year"),
                                  sort_keys=True,
                                  indent=4))
        rep_file.close()
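For reference, the two hooks handed to load_user_stat above receive the data and the evaluated model as arrays. A minimal sketch of that interface, with a Gaussian placeholder statistic (the real llh and my_err are not shown in the source):

import numpy as np

def llh(data, model, staterror=None, syserror=None, weight=None):
    # Sherpa calls this with the data values and the model evaluated
    # at the data x values; return (total_stat, per_point_values).
    fvec = 0.5 * (data - model) ** 2   # placeholder statistic
    return np.sum(fvec), fvec

def my_err(data):
    # Dummy per-point errors; the placeholder statistic ignores them.
    return np.ones_like(data)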
Example 18
    def test_fit_template(self):
        self.run_thread('fit_template')
        self.assertEqual(2750, ui.get_fit_results().parvals[0])
Example 19
#ui.load_user_stat("chi2custom", my_chi2, my_err)
#ui.set_stat(chi2custom)
ui.load_user_model(scaled_warm_frac, 'model')
ui.add_user_pars('model', ['scale', 'offset'])
ui.set_model(data_id, 'model')
ui.load_arrays(data_id,
               np.array(times),
               np.array(bad_frac))
fmod = ui.get_model_component('model')
fmod.scale.min = 1e-9
fmod.offset.val = 0
ui.freeze(fmod.offset)
max_err = np.max([err_high, err_low], axis=0)
ui.set_staterror(data_id, max_err)
ui.fit(data_id)
f = ui.get_fit_results()
scale = f.rstat ** 0.5
ui.set_staterror(data_id, max_err * scale)
ui.fit()
f = ui.get_fit_results()
if f.rstat > 3:
    raise ValueError('reduced statistic too large after rescaling errors')
ui.confidence()
conf = ui.get_confidence_results()
fit_info[range_type][mag][ftype][limit] = dict(fit=str(f),
                                               conf=str(conf),
                                               fmod=fmod,
                                               fit_orig=f,
                                               conf_orig=conf,
                                               mag_mean=np.mean(data[range_type][mag][ok]['mag_mean']))
fig = plt.figure(figsize=(5, 3))
Example 20
def fitne(ne_data, nemodeltype, tspec_data=None):
    '''
    Fits the gas number density profile according to the selected profile
    model. The fit is performed with Sherpa using the Levenberg-Marquardt
    method of minimizing chi-squared.

    Args:
    -----
    ne_data (astropy table): observed gas density profile
      in the form established by set_prof_data()
    nemodeltype (str): ne model type; one of
      ['single_beta', 'cusped_beta', 'double_beta_tied', 'double_beta']
    tspec_data (astropy table, optional): observed temperature profile
      in the form established by set_prof_data()
    Returns:
    --------
    nemodel (dictionary): stores relevant information about the model gas
      density profile
        nemodel['type']: ne model type; one of the following:
          ['single_beta','cusped_beta','double_beta_tied','double_beta']
        nemodel['parnames']: names of the stored ne model parameters
        nemodel['parvals']: parameter values of fitted gas density model
        nemodel['parmins']: lower error bound on parvals
        nemodel['parmaxes']: upper error bound on parvals
        nemodel['chisq']: chi-squared of fit
        nemodel['dof']: degrees of freedom
        nemodel['rchisq']: reduced chi-squared of fit
        nemodel['nefit']: ne model values at radial values matching
          tspec_data (the observed temperature profile)

    References:
    -----------
    python sherpa:    https://github.com/sherpa/
    '''

    # remove any existing models and data
    ui.clean()

    # load data
    ui.load_arrays(1, np.array(ne_data['radius']), np.array(ne_data['ne']),
                   np.array(ne_data['ne_err']))

    # set guess and boundaries on params given selected model

    if nemodeltype == 'single_beta':

        # param estimate
        betaguess = 0.6
        rcguess = 20.  # units?????
        ne0guess = max(ne_data['ne'])

        # beta model
        ui.load_user_model(betamodel, "beta1d")
        ui.add_user_pars("beta1d", ["ne0", "rc", "beta"])
        ui.set_source(beta1d)  # creates model
        ui.set_full_model(beta1d)

        # set parameter values
        ui.set_par(beta1d.ne0, ne0guess, min=0, max=10. * max(ne_data['ne']))
        ui.set_par(beta1d.rc, rcguess, min=0.1, max=max(ne_data['radius']))
        ui.set_par(beta1d.beta, betaguess, min=0.1, max=1.)

    if nemodeltype == 'cusped_beta':

        # param estimate
        betaguess = 0.7
        rcguess = 5.  # [kpc]
        ne0guess = max(ne_data['ne'])
        alphaguess = 10.  # ????

        # beta model
        ui.load_user_model(cuspedbetamodel, "cuspedbeta1d")
        ui.add_user_pars("cuspedbeta1d", ["ne0", "rc", "beta", "alpha"])
        ui.set_source(cuspedbeta1d)  # creates model
        ui.set_full_model(cuspedbeta1d)

        # set parameter values
        ui.set_par(cuspedbeta1d.ne0,
                   ne0guess,
                   min=0.001 * max(ne_data['ne']),
                   max=10. * max(ne_data['ne']))
        ui.set_par(cuspedbeta1d.rc,
                   rcguess,
                   min=0.1,
                   max=max(ne_data['radius']))
        ui.set_par(cuspedbeta1d.beta, betaguess, min=0.1, max=1.)
        ui.set_par(cuspedbeta1d.alpha, alphaguess, min=0., max=100.)

    if nemodeltype == 'double_beta':

        # param estimate
        ne0guess1 = max(ne_data['ne'])  # [cm^-3]
        rcguess1 = 10.  # [kpc]
        betaguess1 = 0.6

        ne0guess2 = 0.01 * max(ne_data['ne'])  # [cm^-3]
        rcguess2 = 100.  # [kpc]
        betaguess2 = 0.6

        # double beta model
        ui.load_user_model(doublebetamodel, "doublebeta1d")
        ui.add_user_pars("doublebeta1d",
                         ["ne01", "rc1", "beta1", "ne02", "rc2", "beta2"])
        ui.set_source(doublebeta1d)  # creates model
        ui.set_full_model(doublebeta1d)

        # set parameter values
        ui.set_par(doublebeta1d.ne01,
                   ne0guess1,
                   min=0.0001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d.rc1,
                   rcguess1,
                   min=0.1,
                   max=max(ne_data['radius']))
        ui.set_par(doublebeta1d.beta1, betaguess1, min=0.1, max=1.)

        ui.set_par(doublebeta1d.ne02,
                   ne0guess2,
                   min=0.0001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d.rc2,
                   rcguess2,
                   min=10.,
                   max=max(ne_data['radius']))
        ui.set_par(doublebeta1d.beta2, betaguess2, min=0.1, max=1.)

    if nemodeltype == 'double_beta_tied':

        # param estimate
        ne0guess1 = max(ne_data['ne'])
        rcguess1 = 10.
        betaguess1 = 0.6

        ne0guess2 = 0.01 * max(ne_data['ne'])
        rcguess2 = 100.

        # double beta model
        ui.load_user_model(doublebetamodel_tied, "doublebeta1d_tied")
        ui.add_user_pars("doublebeta1d_tied",
                         ["ne01", "rc1", "beta1", "ne02", "rc2"])
        ui.set_source(doublebeta1d_tied)  # creates model
        ui.set_full_model(doublebeta1d_tied)

        # set parameter values
        ui.set_par(doublebeta1d_tied.ne01,
                   ne0guess1,
                   min=0.00001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d_tied.rc1,
                   rcguess1,
                   min=0.1,
                   max=max(ne_data['radius']))
        ui.set_par(doublebeta1d_tied.beta1, betaguess1, min=0.1, max=1.)

        ui.set_par(doublebeta1d_tied.ne02,
                   ne0guess2,
                   min=0.00001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d_tied.rc2,
                   rcguess2,
                   min=10.,
                   max=max(ne_data['radius']))

    # fit model
    ui.fit()

    # fit statistics
    fitres = ui.get_fit_results()
    chisq = fitres.statval
    dof = fitres.dof
    rchisq = fitres.rstat

    # error analysis
    ui.set_conf_opt("max_rstat", 1e9)
    ui.conf()

    confres = ui.get_conf_results()
    parvals = np.array(confres.parvals)
    parmins = np.array(confres.parmins)
    parmaxes = np.array(confres.parmaxes)

    parnames = [str(x).split('.')[1] for x in list(confres.parnames)]

    # where errors are stuck on a hard limit, change error to Inf
    if None in list(parmins):
        ind = np.where(parmins == np.array(None))[0]
        parmins[ind] = float('Inf')

    if None in list(parmaxes):
        ind = np.where(parmaxes == np.array(None))[0]
        parmaxes[ind] = float('Inf')

    # set up a dictionary to contain useful results of fit
    nemodel = {}
    nemodel['type'] = nemodeltype
    nemodel['parnames'] = parnames
    nemodel['parvals'] = parvals
    nemodel['parmins'] = parmins
    nemodel['parmaxes'] = parmaxes
    nemodel['chisq'] = chisq
    nemodel['dof'] = dof
    nemodel['rchisq'] = rchisq

    # if tspec_data included, calculate value of ne model at the same radius
    # positions as temperature profile
    if tspec_data is not None:
        if nemodeltype == 'double_beta':
            nefit_arr = doublebetamodel(nemodel['parvals'],
                                        np.array(tspec_data['radius']))
            # [cm-3]

        if nemodeltype == 'single_beta':
            nefit_arr = betamodel(nemodel['parvals'],
                                  np.array(tspec_data['radius']))
            # [cm-3]

        if nemodeltype == 'cusped_beta':
            nefit_arr = cuspedbetamodel(nemodel['parvals'],
                                        np.array(tspec_data['radius']))
            # [cm-3]

        if nemodeltype == 'double_beta_tied':
            nefit_arr = doublebetamodel_tied(nemodel['parvals'],
                                             np.array(tspec_data['radius']))
            # [cm-3]

        nemodel['nefit'] = nefit_arr

    return nemodel
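A minimal usage sketch based on the docstring; ne_data is assumed to be an astropy table with 'radius', 'ne', and 'ne_err' columns, as produced by set_prof_data():

nemodel = fitne(ne_data, nemodeltype='single_beta')
print(nemodel['parnames'])                 # ['ne0', 'rc', 'beta']
print(nemodel['parvals'], nemodel['rchisq'])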
Example 21
    ui.set_method('simplex')
    ui.load_arrays(data_id,
                   rates['time'],
                   rates['rate'])
    ui.set_staterror(data_id,
                     rates['err'])

    ftype_poly = ui.polynom1d(ftype)
    ui.set_model(data_id, ftype_poly)
    ui.thaw(ftype_poly.c0)
    ui.thaw(ftype_poly.c1)
    ui.notice(DateTime(trend_date_start).frac_year)
    ui.fit(data_id)
    ui.notice()
    myfit = ui.get_fit_results()
    axplot = ui.get_model_plot(data_id)
    if myfit.succeeded:
        b = ftype_poly.c1.val * DateTime(trend_date_start).frac_year + ftype_poly.c0.val
        m = ftype_poly.c1.val
        rep_file = open('%s_fitfile.json' % ftype, 'w')
        rep_file.write(json.dumps(dict(time0=DateTime(trend_date_start).frac_year,
                                       datestart=trend_date_start,
                                       datestop=data_stop,
                                       bin=trend_type,
                                       m=m,
                                       b=b,
                                       comment="mx+b with b at time0 and m = (delta rate)/year"),
                                  sort_keys=True,
                                  indent=4))
        rep_file.close()