Example 1
def fit_pix_values(t_ccd, esec, id=1):
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.WARN)
    data_id = id
    ui.clean()
    ui.set_method('simplex')
    ui.load_user_model(dark_scale_model, 'model')
    ui.add_user_pars('model', ['scale', 'dark_t_ref'])
    ui.set_model(data_id, 'model')
    ui.load_arrays(
        data_id,
        np.array(t_ccd),
        np.array(esec),
    )
    ui.set_staterror(data_id, 30 * np.ones(len(t_ccd)))
    model.scale.val = 0.588
    model.scale.min = 0.3
    model.scale.max = 1.0
    model.dark_t_ref.val = 500
    ui.freeze(model.scale)
    # If the temperature range spans more than 2 degrees, fit dark_t_ref
    # first, then thaw and fit model.scale as well.  Otherwise just
    # use/return the fit of dark_t_ref with scale frozen.
    if np.max(t_ccd) - np.min(t_ccd) > 2:
        # Fit first for dark_t_ref
        ui.fit(data_id)
        ui.thaw(model.scale)
    ui.fit(data_id)
    return ui.get_fit_results(), ui.get_model(data_id)
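A minimal usage sketch with made-up numbers (hypothetical t_ccd/esec values; assumes dark_scale_model and the module imports used above are available):

# Hypothetical data: CCD temperatures (degC) and dark current (e-/sec).
t_ccd = [-15.0, -14.2, -13.1, -12.0, -10.5]
esec = [95.0, 108.0, 121.0, 137.0, 160.0]

# The temperature range exceeds 2 degC, so both dark_t_ref and scale get fitted.
fit_results, fitted_model = fit_pix_values(t_ccd, esec, id=1)
print(fit_results.succeeded, fit_results.parvals)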
Example 2
    def tearDown(self):
        ui.clean()

        try:
            logger.setLevel(self._old_logger_level)
        except AttributeError:
            pass
Example 3
 def setUp(self):
     # defensive programming (one of the tests has been seen to fail
     # when the whole test suite is run without this)
     ui.clean()
     self._old_logger_level = logger.getEffectiveLevel()
     logger.setLevel(logging.ERROR)
     self.data = Data1D('tst', self._x, self._y, self._e)
     self.mdl = Polynom1D('mdl')
Example 4
 def run_thread(self, name, scriptname='fit.py'):
     ui.clean()
     ui.set_model_autoassign_func(self.assign_model)
     self.locals = {}
     cwd = os.getcwd()
     os.chdir(self.make_path('ciao4.3', name))
     try:
         execfile(scriptname, {}, self.locals)
     finally:
         os.chdir(cwd)
Example 5
    def setUp(self):
        ui.clean()
        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        self.ascii = self.make_path('sim.poisson.1.dat')
        self.single = self.make_path('single.dat')
        self.double = self.make_path('double.dat')
        self.filter = self.make_path('filter_single_integer.dat')
        self.func = lambda x: x

        ui.dataspace1d(1,1000,dstype=ui.Data1D)
Example 6
    def setUp(self):
        ui.clean()
        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        self.ascii = self.make_path('sim.poisson.1.dat')
        self.single = self.make_path('single.dat')
        self.double = self.make_path('double.dat')
        self.filter = self.make_path('filter_single_integer.dat')
        self.func = lambda x: x

        ui.dataspace1d(1, 1000, dstype=ui.Data1D)
Example 7
def _fit_poly(fit_data, evt_times, degree, data_id=0):
    """
    Given event data transformed into Y or Z angle positions, and a degree of the desired
    fit polynomial, fit a polynomial to the data.

    :param fit_data: event y or z angle position data
    :param evt_times: times of event/fit_data
    :param degree: degree of polynomial to use for the fit model
    :param data_id: sherpa dataset id to use for the fit

    :returns: (sherpa model plot, sherpa model)
    """
    # Set initial value for fit data position error
    init_error = 1

    ui.clean()
    ui.load_arrays(data_id, evt_times - evt_times[0], fit_data,
                   np.zeros_like(fit_data) + init_error)
    v2("Fitting a line to the data to get reduced stat errors")
    # First just fit a line to get reduced errors on this set
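    # Note: accessing ui.polynom1d.line instantiates a polynom1d model
    # component named 'line' and registers it with the sherpa session
    # (this attribute access is the standard sherpa idiom, not a no-op).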
    ui.polynom1d.line
    ui.set_model(data_id, 'line')
    ui.thaw('line.c1')
    ui.fit(data_id)
    fit = ui.get_fit_results()
    calc_error = init_error * np.sqrt(fit.rstat)
    ui.set_staterror(data_id, calc_error)
    # Then fit the specified model
    v2("Fitting a polynomial of degree {} to the data".format(degree))
    ui.polynom1d.fitpoly
    ui.freeze('fitpoly')
    # Thaw the coefficients requested by the degree of the desired polynomial
    ui.thaw('fitpoly.c0')
    fitpoly.c0.val = 0
    for deg in range(1, 1 + degree):
        ui.thaw("fitpoly.c{}".format(deg))
    ui.set_model(data_id, 'fitpoly')
    ui.fit(data_id)
    # Let's screw up Y on purpose
    if data_id == 0:
        fitpoly.c0.val = 0
        fitpoly.c1.val = 7.5e-05
        fitpoly.c2.val = -1.0e-09
        fitpoly.c3.val = 0
        fitpoly.c4.val = 0
    mp = ui.get_model_plot(data_id)
    model = ui.get_model(data_id)
    return mp, model
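A minimal usage sketch with synthetic inputs (hypothetical times and angles; assumes numpy as np and the module's v2 logging helper are available as above):

# Hypothetical event times (s) and Y-angle positions (arcsec) with a slow drift.
evt_times = np.linspace(0.0, 30000.0, 500)
fit_data = 2.0 + 1.0e-4 * evt_times + np.random.normal(scale=0.5, size=evt_times.size)

# data_id=1 sidesteps the hard-coded coefficient override applied to data_id 0 above.
model_plot, model = _fit_poly(fit_data, evt_times, degree=2, data_id=1)
print(model)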
Example 8
    def setUp(self):
        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)
        ui.clean()

        self.ascii = self.make_path('sim.poisson.1.dat')

        self.wrong_stat_msg = "Fit statistic must be cash, cstat or wstat, not {}"
        self.wstat_err_msg = "No background data has been supplied. Use cstat"
        self.no_covar_msg = "covariance has not been performed"
        self.fail_msg = "Call should not have succeeded"
        self.right_stats = {'cash', 'cstat', 'wstat'}
        self.model = PowLaw1D("p1")

        ui.load_data(self.ascii)
        ui.set_model(self.model)
Example 9
    def setUp(self):
        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)
        ui.clean()

        self.ascii = self.make_path('sim.poisson.1.dat')

        self.wrong_stat_msg = "Fit statistic must be cash, cstat or wstat, not {}"
        self.wstat_err_msg = "No background data has been supplied. Use cstat"
        self.no_covar_msg = "covariance has not been performed"
        self.fail_msg = "Call should not have succeeded"
        self.right_stats = {'cash', 'cstat', 'wstat'}
        self.model = PowLaw1D("p1")

        ui.load_data(self.ascii)
        ui.set_model(self.model)
Example 10
def test_does_user_model_get_cleaned(clean_ui):
    """Do user models get removed from the session by clean?"""

    mname = "test_model"
    with pytest.raises(IdentifierErr):
        ui.get_model_component(mname)

    ui.load_user_model(um_line, mname)
    mdl = ui.get_model_component(mname)
    assert mdl.name == "usermodel.{}".format(mname)
    assert isinstance(mdl, sherpa.models.basic.UserModel)

    ui.clean()

    with pytest.raises(IdentifierErr):
        ui.get_model_component(mname)
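The test assumes a user-model function named um_line; sherpa calls user models as f(pars, x). A minimal sketch of such a function (the actual definition in the test module may differ):

import numpy as np

def um_line(pars, x):
    """Straight-line user model: pars[0] is the slope, pars[1] the intercept."""
    return pars[0] * np.asarray(x) + pars[1]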
Example 11
    def setUp(self):
        # defensive programming (one of the tests has been seen to fail
        # when the whole test suite is run without this)
        ui.clean()
        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        x = [-13, -5, -3, 2, 7, 12]
        y = [102.3, 16.7, -0.6, -6.7, -9.9, 33.2]
        dy = np.ones(6) * 5
        ui.load_arrays(1, x, y, dy)
        ui.set_source(ui.polynom1d.poly)
        poly.c1.thaw()
        poly.c2.thaw()
        ui.int_proj(poly.c0)
        ui.fit()
Example 12
def test_does_user_model_get_cleaned():
    """Do user models get removed from the session by clean?"""

    mname = "test_model"
    with pytest.raises(IdentifierErr):
        ui.get_model_component(mname)

    ui.load_user_model(um_line, mname)
    mdl = ui.get_model_component(mname)
    assert mdl.name == "usermodel.{}".format(mname)
    assert isinstance(mdl, sherpa.models.basic.UserModel)

    ui.clean()

    with pytest.raises(IdentifierErr):
        ui.get_model_component(mname)
Example 13
def test_user_model_change_par():

    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ['xXx', 'Y2'])

    mdl = ui.get_model_component(mname)
    assert len(mdl.pars) == 2
    p0 = mdl.pars[0]
    p1 = mdl.pars[1]

    assert p0.name == 'xXx'
    assert p1.name == 'Y2'
    assert p0.val == pytest.approx(0.0)
    assert p1.val == pytest.approx(0.0)

    # Use the user-supplied names:
    #
    mdl.xXx = 2.0
    assert p0.val == pytest.approx(2.0)

    mdl.Y2 = 3.0
    assert p1.val == pytest.approx(3.0)

    # Now all lower case
    #
    mdl.xxx = 4.0
    assert p0.val == pytest.approx(4.0)

    mdl.y2 = 12.0
    assert p1.val == pytest.approx(12.0)

    # Try with the set_par function
    #
    ui.set_par('test_model.xxx', 12.2)
    assert p0.val == pytest.approx(12.2)

    ui.set_par('test_model.y2', 14.0, frozen=True)
    assert p1.val == pytest.approx(14.0)
    assert p1.frozen

    ui.clean()
Example 14
def test_user_model_change_par(clean_ui):

    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ['xXx', 'Y2'])

    mdl = ui.get_model_component(mname)
    assert len(mdl.pars) == 2
    p0 = mdl.pars[0]
    p1 = mdl.pars[1]

    assert p0.name == 'xXx'
    assert p1.name == 'Y2'
    assert p0.val == pytest.approx(0.0)
    assert p1.val == pytest.approx(0.0)

    # Use the user-supplied names:
    #
    mdl.xXx = 2.0
    assert p0.val == pytest.approx(2.0)

    mdl.Y2 = 3.0
    assert p1.val == pytest.approx(3.0)

    # Now all lower case
    #
    mdl.xxx = 4.0
    assert p0.val == pytest.approx(4.0)

    mdl.y2 = 12.0
    assert p1.val == pytest.approx(12.0)

    # Try with the set_par function
    #
    ui.set_par('test_model.xxx', 12.2)
    assert p0.val == pytest.approx(12.2)

    ui.set_par('test_model.y2', 14.0, frozen=True)
    assert p1.val == pytest.approx(14.0)
    assert p1.frozen

    ui.clean()
Example 15
def fit_pix_values(t_ccd, esec, id=1):
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.WARN)
    data_id = id
    ui.clean()
    ui.set_method("simplex")
    ui.load_user_model(dark_scale_model, "model")
    ui.add_user_pars("model", ["scale", "dark_t_ref"])
    ui.set_model(data_id, "model")
    ui.load_arrays(data_id, np.array(t_ccd), np.array(esec), 0.1 * np.ones(len(t_ccd)))
    model.scale.val = 0.70
    model.dark_t_ref.val = 500
    ui.freeze(model.scale)
    # Fit dark_t_ref first with model.scale frozen, then thaw scale
    # and fit both parameters.
    ui.fit(data_id)
    ui.thaw(model.scale)
    ui.fit(data_id)
    return ui.get_fit_results(), ui.get_model(data_id)
Example 16
def _fit_poly(fit_data, evt_times, degree, data_id=0):
    """
    Given event data transformed into Y or Z angle positions, and a degree of the desired
    fit polynomial, fit a polynomial to the data.

    :param fit_data: event y or z angle position data
    :param evt_times: times of event/fit_data
    :param degree: degree of polynomial to use for the fit model
    :param data_id: sherpa dataset id to use for the fit

    :returns: (sherpa model plot, sherpa model)
    """
    # Set initial value for fit data position error
    init_error = 1

    ui.clean()
    ui.load_arrays(data_id, evt_times - evt_times[0], fit_data,
                   np.zeros_like(fit_data) + init_error)
    v2("Fitting a line to the data to get reduced stat errors")
    # First just fit a line to get reduced errors on this set
    ui.polynom1d.line
    ui.set_model(data_id, 'line')
    ui.thaw('line.c1')
    ui.fit(data_id)
    fit = ui.get_fit_results()
    calc_error = init_error * np.sqrt(fit.rstat)
    ui.set_staterror(data_id, calc_error)
    # Then fit the specified model
    v2("Fitting a polynomial of degree {} to the data".format(degree))
    ui.polynom1d.fitpoly
    ui.freeze('fitpoly')
    # Thaw the coefficients requested by the degree of the desired polynomial
    ui.thaw('fitpoly.c0')
    fitpoly.c0.val = 0
    for deg in range(1, 1 + degree):
        ui.thaw("fitpoly.c{}".format(deg))
    ui.set_model(data_id, 'fitpoly')
    ui.fit(data_id)
    mp = ui.get_model_plot(data_id)
    model = ui.get_model(data_id)
    return mp, model
Example 17
def test_plot_prefs_xxx(get_prefs):
    """Can we change and reset a preference.

    Pick the 'xlog' field, since the assumption is that: a) this
    defaults to 'False'; b) each plot type has this setting; c) we
    do not need to check all settings.

    Some tests fail due to missing plot preferences when there's
    no plotting backend (e.g. missing 'xlog' settings), so skip
    these tests in this case.
    """

    prefs1 = get_prefs()
    assert not prefs1['xlog']
    prefs1['xlog'] = True

    prefs2 = get_prefs()
    assert prefs2['xlog']

    ui.clean()
    prefs3 = get_prefs()
    assert prefs1['xlog']
    assert prefs2['xlog']
    assert not prefs3['xlog']
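The get_prefs argument is a pytest fixture that returns an accessor for one of the session's plot-preference dictionaries. A minimal sketch of such a fixture, covering only the data-plot preferences and assuming ui is sherpa.ui (or sherpa.astro.ui) as in the example; the real fixture is parametrized over several plot types:

import pytest
from sherpa import ui

@pytest.fixture
def get_prefs():
    # Return a callable so the test can re-read the preferences after ui.clean().
    def _get():
        return ui.get_data_plot_prefs()
    return _get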
Example 18
def run_fits(obsids,
             ax,
             user_pars=None,
             fixed_pars=None,
             guess_pars=None,
             label='model',
             per_obs_dir='per_obs_nfits',
             outdir=None,
             redo=False):

    if len(obsids) == 0:
        print "No obsids, nothing to fit"
        return None
    if user_pars is None:
        user_pars = USER_PARS

    if not os.path.exists(per_obs_dir):
        os.makedirs(per_obs_dir)

    obsfits = []
    for obsid in obsids:

        outdir = os.path.join(per_obs_dir, 'obs{:05d}'.format(obsid))
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        model_file = os.path.join(outdir, '{}.pkl'.format(label))
        if os.path.exists(model_file) and not redo:
            #logger.warn('Using previous fit found in %s' % model_file)
            print model_file
            mod_pick = open(model_file, 'r')
            modelfit = cPickle.load(mod_pick)
            mod_pick.close()
            obsfits.append(modelfit)
            continue

        modelfit = {'label': obsid}

        ui.clean()
        data_id = 0
        obsdir = "%s/obs%05d" % (DATADIR, obsid)
        tf = open(os.path.join(obsdir, 'tilt.pkl'), 'r')
        tilt = cPickle.load(tf)
        tf.close()
        pf = open(os.path.join(obsdir, 'pos.pkl'), 'r')
        pos = cPickle.load(pf)
        pf.close()

        pos_data = pos[ax]
        point_error = 5
        pos_data_mean = np.mean(pos_data)
        ui.set_method('simplex')

        # Fit a line to get more reasonable errors
        init_staterror = np.zeros(len(pos_data)) + point_error
        ui.load_arrays(data_id, pos['time'] - pos['time'][0],
                       pos_data - np.mean(pos_data), init_staterror)
        ui.polynom1d.ypoly
        ui.set_model(data_id, 'ypoly')
        ui.thaw(ypoly.c0, ypoly.c1)
        ui.fit(data_id)
        fit = ui.get_fit_results()
        calc_staterror = init_staterror * np.sqrt(fit.rstat)
        ui.set_staterror(data_id, calc_staterror)
        # Confirm those errors
        ui.fit(data_id)
        fit = ui.get_fit_results()
        if (abs(fit.rstat - 1) > .2):
            raise ValueError('Reduced statistic not close to 1 for error calc')

        # Load up data to do the real model fit
        fit_times = pos['time']
        tm_func = tilt_model(tilt, fit_times, user_pars=user_pars)

        ui.get_data(data_id).name = str(obsid)
        ui.load_user_model(tm_func, 'tiltm%d' % data_id)
        ui.add_user_pars('tiltm%d' % data_id, user_pars)
        ui.set_method('simplex')
        ui.set_model(data_id, 'tiltm%d' % (data_id))
        ui.set_par('tiltm%d.diam' % data_id, 0)

        if fixed_pars is not None and ax in fixed_pars:
            for par in fixed_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), fixed_pars[ax][par])
                ui.freeze('tiltm{}.{}'.format(0, par))

        if guess_pars is not None and ax in guess_pars:
            for par in guess_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), guess_pars[ax][par])

        ui.show_all()
        # Fit the tilt model
        ui.fit(data_id)
        fitres = ui.get_fit_results()
        ui.confidence(data_id)
        myconf = ui.get_confidence_results()

        #        save_fits(ax=ax, fit=fitres, conf=myconf, outdir=outdir)
        #        plot_fits(ids,outdir=os.path.join(outdir,'fit_plots'))

        axmod = dict(fit=fitres, conf=myconf)
        for idx, modpar in enumerate(myconf.parnames):
            par = modpar.lstrip('tiltm0.')
            axmod[par] = ui.get_par('tiltm0.%s' % par).val
            axmod["{}_parmax".format(par)] = myconf.parmaxes[idx]
            axmod["{}_parmin".format(par)] = myconf.parmins[idx]
        modelfit[ax] = axmod

        mod_pick = open(model_file, 'w')
        cPickle.dump(modelfit, mod_pick)
        mod_pick.close()

        obsfits.append(modelfit)

        plot_fits([dict(obsid=obsid, data_id=data_id, ax=ax)],
                  posdir=obsdir,
                  outdir=outdir)

    return obsfits
Example 19
 def run_thread(self, name, scriptname='fit.py'):
     ui.clean()
     ui.set_model_autoassign_func(self.assign_model)
     self.locals = {}
     os.chdir(os.path.join(self.datadir, 'ciao4.3', name))
     execfile(scriptname, {}, self.locals)
Example 20
#        dtable.write("%.2f,%.6f,%.6f,%d,%d,%.4f,%.4f\n" % (
#            mid_frac, fail_rate, np.max([err_high, err_low]),
#            n_stars, n_failed, err_high, err_low)) 
#
#        next_range = get_next(timerange(curr_unit))
#        curr_unit = in_range(trend_type, next_range['start'])
#
#    dtable.close()

trend_date_start = '2008:001:00:00:00.000'

fail_types = {'no_trak' : 1,
              'bad_trak' : 2,
              'obc_bad' : 3}

ui.clean()
for ftype in fail_types:

    filename = "by%s_data_%s.txt" % (trend_type, ftype)
    rates = asciitable.read(filename)

    data_id = fail_types[ftype]

    ui.set_method('simplex')
    ui.load_arrays(data_id,
                   rates['time'],
                   rates['rate'])
    ui.set_staterror(data_id,
                     rates['err'])

    ftype_poly = ui.polynom1d(ftype)
Example 21
def fitne(ne_data, nemodeltype, tspec_data=None):
    '''
    Fit the gas number density profile with the selected profile model.
    The fit is performed with Python sherpa using the Levenberg-Marquardt
    method of minimizing chi-squared.

    Args:
    -----
    ne_data (astropy table): observed gas density profile
      in the form established by set_prof_data()
    nemodeltype (str): ne model type; one of
      ['single_beta', 'cusped_beta', 'double_beta_tied', 'double_beta']
    tspec_data (astropy table): observed temperature profile
      in the form established by set_prof_data()

    Returns:
    --------
    nemodel (dictionary): stores relevant information about the model gas
      density profile
        nemodel['type']: ne model type; one of the following:
          ['single_beta','cusped_beta','double_beta_tied','double_beta']
        nemodel['parnames']: names of the stored ne model parameters
        nemodel['parvals']: parameter values of fitted gas density model
        nemodel['parmins']: lower error bound on parvals
        nemodel['parmaxes']: upper error bound on parvals
        nemodel['chisq']: chi-squared of fit
        nemodel['dof']: degrees of freedom
        nemodel['rchisq']: reduced chi-squared of fit
        nemodel['nefit']: ne model values at radial values matching
          tspec_data (the observed temperature profile)

    References:
    -----------
    python sherpa:    https://github.com/sherpa/
    '''

    # remove any existing models and data
    ui.clean()

    # load data
    ui.load_arrays(1, np.array(ne_data['radius']), np.array(ne_data['ne']),
                   np.array(ne_data['ne_err']))

    # set guess and boundaries on params given selected model

    if nemodeltype == 'single_beta':

        # param estimate
        betaguess = 0.6
        rcguess = 20.  # units?????
        ne0guess = max(ne_data['ne'])

        # beta model
        ui.load_user_model(betamodel, "beta1d")
        ui.add_user_pars("beta1d", ["ne0", "rc", "beta"])
        ui.set_source(beta1d)  # creates model
        ui.set_full_model(beta1d)

        # set parameter values
        ui.set_par(beta1d.ne0, ne0guess, min=0, max=10. * max(ne_data['ne']))
        ui.set_par(beta1d.rc, rcguess, min=0.1, max=max(ne_data['radius']))
        ui.set_par(beta1d.beta, betaguess, min=0.1, max=1.)

    if nemodeltype == 'cusped_beta':

        # param estimate
        betaguess = 0.7
        rcguess = 5.  # [kpc]
        ne0guess = max(ne_data['ne'])
        alphaguess = 10.  # ????

        # beta model
        ui.load_user_model(cuspedbetamodel, "cuspedbeta1d")
        ui.add_user_pars("cuspedbeta1d", ["ne0", "rc", "beta", "alpha"])
        ui.set_source(cuspedbeta1d)  # creates model
        ui.set_full_model(cuspedbeta1d)

        # set parameter values
        ui.set_par(cuspedbeta1d.ne0,
                   ne0guess,
                   min=0.001 * max(ne_data['ne']),
                   max=10. * max(ne_data['ne']))
        ui.set_par(cuspedbeta1d.rc,
                   rcguess,
                   min=0.1,
                   max=max(ne_data['radius']))
        ui.set_par(cuspedbeta1d.beta, betaguess, min=0.1, max=1.)
        ui.set_par(cuspedbeta1d.alpha, alphaguess, min=0., max=100.)

    if nemodeltype == 'double_beta':

        # param estimate
        ne0guess1 = max(ne_data['ne'])  # [cm^-3]
        rcguess1 = 10.  # [kpc]
        betaguess1 = 0.6

        ne0guess2 = 0.01 * max(ne_data['ne'])  # [cm^-3]
        rcguess2 = 100.  # [kpc]
        betaguess2 = 0.6

        # double beta model
        ui.load_user_model(doublebetamodel, "doublebeta1d")
        ui.add_user_pars("doublebeta1d",
                         ["ne01", "rc1", "beta1", "ne02", "rc2", "beta2"])
        ui.set_source(doublebeta1d)  # creates model
        ui.set_full_model(doublebeta1d)

        # set parameter values
        ui.set_par(doublebeta1d.ne01,
                   ne0guess1,
                   min=0.0001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d.rc1,
                   rcguess1,
                   min=0.1,
                   max=max(ne_data['radius']))
        ui.set_par(doublebeta1d.beta1, betaguess1, min=0.1, max=1.)

        ui.set_par(doublebeta1d.ne02,
                   ne0guess2,
                   min=0.0001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d.rc2,
                   rcguess2,
                   min=10.,
                   max=max(ne_data['radius']))
        ui.set_par(doublebeta1d.beta2, betaguess2, min=0.1, max=1.)

    if nemodeltype == 'double_beta_tied':

        # param estimate
        ne0guess1 = max(ne_data['ne'])
        rcguess1 = 10.
        betaguess1 = 0.6

        ne0guess2 = 0.01 * max(ne_data['ne'])
        rcguess2 = 100.

        # double beta model
        ui.load_user_model(doublebetamodel_tied, "doublebeta1d_tied")
        ui.add_user_pars("doublebeta1d_tied",
                         ["ne01", "rc1", "beta1", "ne02", "rc2"])
        ui.set_source(doublebeta1d_tied)  # creates model
        ui.set_full_model(doublebeta1d_tied)

        # set parameter values
        ui.set_par(doublebeta1d_tied.ne01,
                   ne0guess1,
                   min=0.00001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d_tied.rc1,
                   rcguess1,
                   min=0.1,
                   max=max(ne_data['radius']))
        ui.set_par(doublebeta1d_tied.beta1, betaguess1, min=0.1, max=1.)

        ui.set_par(doublebeta1d_tied.ne02,
                   ne0guess2,
                   min=0.00001 * max(ne_data['ne']),
                   max=100. * max(ne_data['ne']))
        ui.set_par(doublebeta1d_tied.rc2,
                   rcguess2,
                   min=10.,
                   max=max(ne_data['radius']))

    # fit model
    ui.fit()

    # fit statistics
    chisq = ui.get_fit_results().statval
    dof = ui.get_fit_results().dof
    rchisq = ui.get_fit_results().rstat

    # error analysis
    ui.set_conf_opt("max_rstat", 1e9)
    ui.conf()

    parvals = np.array(ui.get_conf_results().parvals)
    parmins = np.array(ui.get_conf_results().parmins)
    parmaxes = np.array(ui.get_conf_results().parmaxes)

    parnames = [
        str(x).split('.')[1] for x in list(ui.get_conf_results().parnames)
    ]

    # where errors are stuck on a hard limit, change error to Inf
    if None in list(parmins):
        ind = np.where(parmins == np.array(None))[0]
        parmins[ind] = float('Inf')

    if None in list(parmaxes):
        ind = np.where(parmaxes == np.array(None))[0]
        parmaxes[ind] = float('Inf')

    # set up a dictionary to contain useful results of fit
    nemodel = {}
    nemodel['type'] = nemodeltype
    nemodel['parnames'] = parnames
    nemodel['parvals'] = parvals
    nemodel['parmins'] = parmins
    nemodel['parmaxes'] = parmaxes
    nemodel['chisq'] = chisq
    nemodel['dof'] = dof
    nemodel['rchisq'] = rchisq

    # if tspec_data included, calculate value of ne model at the same radius
    # positions as temperature profile
    if tspec_data is not None:
        if nemodeltype == 'double_beta':
            nefit_arr = doublebetamodel(nemodel['parvals'],
                                        np.array(tspec_data['radius']))
            # [cm-3]

        if nemodeltype == 'single_beta':
            nefit_arr = betamodel(nemodel['parvals'],
                                  np.array(tspec_data['radius']))
            # [cm-3]

        if nemodeltype == 'cusped_beta':
            nefit_arr = cuspedbetamodel(nemodel['parvals'],
                                        np.array(tspec_data['radius']))
            # [cm-3]

        if nemodeltype == 'double_beta_tied':
            nefit_arr = doublebetamodel_tied(nemodel['parvals'],
                                             np.array(tspec_data['radius']))
            # [cm-3]

        nemodel['nefit'] = nefit_arr

    return nemodel
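A minimal sketch of a call, with hypothetical profile values (ne_data must be an astropy table carrying the 'radius', 'ne', and 'ne_err' columns in the set_prof_data() form described above):

from astropy.table import Table
import numpy as np

# Hypothetical density profile: radius [kpc], ne [cm^-3], ne_err [cm^-3]
ne_data = Table({'radius': np.array([10., 30., 60., 100., 200.]),
                 'ne': np.array([0.05, 0.03, 0.015, 0.008, 0.003]),
                 'ne_err': np.array([0.005, 0.003, 0.0015, 0.0008, 0.0003])})

nemodel = fitne(ne_data, 'single_beta')
print(nemodel['parnames'], nemodel['parvals'], nemodel['rchisq'])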
Example 22
 def tearDown(self):
     if hasattr(self, '_old_logger_level'):
         logger.setLevel(self._old_logger_level)
     ui.clean()
Example 23
def run_fits(obsids, ax, user_pars=None,
             fixed_pars=None, guess_pars=None, label='model',
             per_obs_dir='per_obs_nfits',
             outdir=None, redo=False):

    if len(obsids) == 0:
        print "No obsids, nothing to fit"
        return None
    if user_pars is None:
        user_pars = USER_PARS

    if not os.path.exists(per_obs_dir):
        os.makedirs(per_obs_dir)

    obsfits = []
    for obsid in obsids:

        outdir = os.path.join(per_obs_dir, 'obs{:05d}'.format(obsid))
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        model_file = os.path.join(outdir, '{}.pkl'.format(label))
        if os.path.exists(model_file) and not redo:
            #logger.warn('Using previous fit found in %s' % model_file)
            print model_file
            mod_pick = open(model_file, 'r')
            modelfit = cPickle.load(mod_pick)
            mod_pick.close()
            obsfits.append(modelfit)
            continue

        modelfit = {'label': obsid}

        ui.clean()
        data_id = 0
        obsdir = "%s/obs%05d" % (DATADIR, obsid)
        tf = open(os.path.join(obsdir,'tilt.pkl'), 'r')
        tilt = cPickle.load(tf)
        tf.close()
        pf = open(os.path.join(obsdir, 'pos.pkl'), 'r')
        pos = cPickle.load(pf)
        pf.close()

        pos_data = pos[ax]
        point_error = 5
        pos_data_mean = np.mean(pos_data)
        ui.set_method('simplex')

        # Fit a line to get more reasonable errors
        init_staterror = np.zeros(len(pos_data))+point_error
        ui.load_arrays(data_id,
                       pos['time']-pos['time'][0],
                       pos_data-np.mean(pos_data),
                       init_staterror)
        ui.polynom1d.ypoly
        ui.set_model(data_id, 'ypoly')
        ui.thaw(ypoly.c0, ypoly.c1)
        ui.fit(data_id)
        fit = ui.get_fit_results()
        calc_staterror = init_staterror * np.sqrt(fit.rstat)
        ui.set_staterror(data_id, calc_staterror)
        # Confirm those errors
        ui.fit(data_id)
        fit = ui.get_fit_results()
        if abs(fit.rstat - 1) > 0.2:
            raise ValueError('Reduced statistic not close to 1 for error calc')

        # Load up data to do the real model fit
        fit_times = pos['time']
        tm_func = tilt_model(tilt,
                             fit_times,
                             user_pars=user_pars)

        ui.get_data(data_id).name = str(obsid)
        ui.load_user_model(tm_func, 'tiltm%d' % data_id)
        ui.add_user_pars('tiltm%d' % data_id, user_pars)
        ui.set_method('simplex')
        ui.set_model(data_id, 'tiltm%d' % (data_id))
        ui.set_par('tiltm%d.diam' % data_id, 0)

        if fixed_pars is not None and ax in fixed_pars:
            for par in fixed_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), fixed_pars[ax][par])
                ui.freeze('tiltm{}.{}'.format(0, par))

        if guess_pars is not None and ax in guess_pars:
            for par in guess_pars[ax]:
                ui.set_par('tiltm{}.{}'.format(0, par), guess_pars[ax][par])

        ui.show_all()
        # Fit the tilt model
        ui.fit(data_id)
        fitres = ui.get_fit_results()
        ui.confidence(data_id)
        myconf = ui.get_confidence_results()

#        save_fits(ax=ax, fit=fitres, conf=myconf, outdir=outdir)
#        plot_fits(ids,outdir=os.path.join(outdir,'fit_plots'))

        axmod = dict(fit=fitres, conf=myconf)
        for idx, modpar in enumerate(myconf.parnames):
            par = modpar.lstrip('tiltm0.')
            axmod[par] = ui.get_par('tiltm0.%s' % par).val
            axmod["{}_parmax".format(par)] = myconf.parmaxes[idx]
            axmod["{}_parmin".format(par)] = myconf.parmins[idx]
        modelfit[ax] = axmod

        mod_pick = open(model_file, 'w')
        cPickle.dump(modelfit, mod_pick)
        mod_pick.close()

        obsfits.append(modelfit)

        plot_fits([dict(obsid=obsid, data_id=data_id, ax=ax)],
                  posdir=obsdir,
                  outdir=outdir)


    return obsfits
Example 24
 def tearDown(self):
     if hasattr(self, '_old_logger_level'):
         logger.setLevel(self._old_logger_level)
     ui.clean()
Example 25
 def run_thread(self, name, scriptname='fit.py'):
     ui.clean()
     ui.set_model_autoassign_func(self.assign_model)
     self.locals = {}
     os.chdir(os.path.join(self.datadir, 'ciao4.3', name))
     execfile(scriptname, {}, self.locals)
Example 26
 def run_thread(self, name, scriptname='fit.py'):
     ui.clean()
     ui.set_model_autoassign_func(self.assign_model)
     super(test_new_templates_ui, self).run_thread(name, scriptname=scriptname)