示例#1
0
def data_simul_fit():
    """Build a DataSimulFit of two Data1D sets, the second scaled by MULTIPLIER.

    Both sets carry statistical and systematic error columns.
    """
    first = Data1D("data_one", X_ARRAY, Y_ARRAY,
                   STATISTICAL_ERROR_ARRAY, SYSTEMATIC_ERROR_ARRAY)
    second = Data1D("data_two",
                    MULTIPLIER * X_ARRAY,
                    MULTIPLIER * Y_ARRAY,
                    MULTIPLIER * STATISTICAL_ERROR_ARRAY,
                    MULTIPLIER * SYSTEMATIC_ERROR_ARRAY)
    return DataSimulFit(NAME, (first, second))
示例#2
0
    def _bundle_inputs(data, model):
        """Convert input into SimulFit instances.

        Ensure both inputs are simultaneous-fit containers:
        `sherpa.data.DataSimulFit` and
        `sherpa.models.model.SimulFitModel`. Inputs that are already
        containers are passed through unchanged.

        Parameters
        ----------
        data : `sherpa.data.Data` or `sherpa.data.DataSimulFit`
            The data set, or sets, to use.
        model : `sherpa.models.model.Model` or `sherpa.models.model.SimulFitModel`
            The model expression, or expressions. If a
            `~sherpa.models.model.SimulFitModel`
            is given then it must match the number of data sets in the
            data parameter.

        Returns
        -------
        data : `sherpa.data.DataSimulFit`
            If the input was a `~sherpa.data.DataSimulFit` object
            then this is just the input value.
        model : `sherpa.models.model.SimulFitModel`
            If the input was a `~sherpa.models.model.SimulFitModel`
            object then this is just the input value.
        """

        if isinstance(data, DataSimulFit):
            wrapped_data = data
        else:
            wrapped_data = DataSimulFit('simulfit data', (data,))

        if isinstance(model, SimulFitModel):
            wrapped_model = model
        else:
            wrapped_model = SimulFitModel('simulfit model', (model,))

        return wrapped_data, wrapped_model
示例#3
0
def mwl_fit_low_level():
    """Fit Fermi + IACT data with the low-level Sherpa API.

    Low-level = no session, direct use of the classes.

    Example: http://python4astronomers.github.io/fitting/low-level.html
    """
    datasets = [FermiData().sherpa_data, IACTData().sherpa_data]

    # spec_model = PowLaw1D('spec_model')
    spec_model = LogParabola('spec_model')
    spec_model.c1 = 0.5
    spec_model.c2 = 0.2
    spec_model.ampl = 5e-11

    sim_data = DataSimulFit(name='global_data', datasets=datasets)
    # TODO: Figure out how to notice using the low-level API
    # data.notice(mins=1e-3, maxes=None, axislist=None)
    sim_model = SimulFitModel(name='global_model',
                              parts=[spec_model, spec_model])
    result = Fit(data=sim_data, model=sim_model, stat=FermiStat(),
                 method=LevMar()).fit()

    # IPython.embed()
    return Bunch(results=result, model=spec_model)
示例#4
0
File: main.py  Project: pllim/saba
    def __init__(self, n_dim, x, y, z=None, xbinsize=None, ybinsize=None, err=None, bkg=None, bkg_scale=1):
        """Wrap the input arrays into one or more fit datasets.

        If ``x`` is 2-D or holds object dtype (i.e. a ragged collection of
        per-dataset arrays), one dataset is built per row and bundled into a
        ``DataSimulFit``; otherwise a single dataset is created.

        Parameters are forwarded to ``self._make_dataset``; missing optional
        inputs are padded with ``None`` per dataset.
        """
        x = np.array(x)
        y = np.array(y)
        # NOTE: the `np.object` alias was removed in NumPy 1.24; the builtin
        # `object` is the equivalent dtype sentinel and works on all versions.
        if x.ndim == 2 or (x.dtype == object or y.dtype == object):
            data = []
            if z is None:
                z = len(x) * [None]

            if xbinsize is None:
                xbinsize = len(x) * [None]

            if ybinsize is None:
                ybinsize = len(y) * [None]

            if err is None:
                err = len(z) * [None]

            if bkg is None:
                bkg = len(x) * [None]
            # bkg_scale may be a scalar or one value per dataset.
            try:
                iter(bkg_scale)
            except TypeError:
                bkg_scale = len(x) * [bkg_scale]

            # Use distinct loop names (bb, bbs) so the `bkg`/`bkg_scale`
            # input lists are not shadowed while still being iterated.
            # NOTE(review): if the inputs are empty, `nn` below is unbound —
            # presumably callers always supply at least one dataset; confirm.
            for nn, (xx, yy, zz, xxe, yye, zze, bb, bbs) in enumerate(
                    zip(x, y, z, xbinsize, ybinsize, err, bkg, bkg_scale)):
                data.append(self._make_dataset(n_dim, x=xx, y=yy, z=zz,
                                               xbinsize=xxe, ybinsize=yye,
                                               err=zze, bkg=bb,
                                               bkg_scale=bbs, n=nn))
            self.data = DataSimulFit("wrapped_data", data)
            self.ndata = nn + 1
        else:
            self.data = self._make_dataset(n_dim, x=x, y=y, z=z, xbinsize=xbinsize, ybinsize=ybinsize, err=err, bkg=bkg, bkg_scale=bkg_scale)
            self.ndata = 1
def test_fitresults_multi(method):
    """Fit multiple datasets and check the HTML report of the results."""

    data_a = Data1D('dx', [1, 2, 3], [4, 2, 2])
    data_b = Data1D('dx', [4, 5, 6, 10], [4, 4, 2, 4])
    combined = DataSimulFit('combined', (data_a, data_b))

    cmodel = Const1D()
    cmodel.c0 = 3
    sim_model = SimulFitModel('silly', (cmodel, cmodel))

    res = fit.Fit(combined, sim_model, method=method(), stat=LeastSq()).fit()
    res.datasets = ['ddx', 'ddy']
    html = res._repr_html_()

    assert html is not None

    expected = [
        '<summary>Summary (9)</summary>',
        '<td>const1d.c0</td>',
        '<div class="dataname">Datasets</div><div class="dataval">ddx,ddy</div>',
        '<div class="dataname">Method</div><div class="dataval">{}</div>'.format(res.methodname),
        '<div class="dataname">Statistic</div><div class="dataval">leastsq</div>',
        '<div class="dataname">&#916; statistic</div><div class="dataval">0.142857</div>',
        '<div class="dataname">Number of data points</div><div class="dataval">7</div>',
        '<div class="dataname">Degrees of freedom</div><div class="dataval">6</div>',
    ]
    for fragment in expected:
        assert fragment in html
示例#6
0
def setup_multiple(usestat, usesys):
    """Return multiple data sets and model (as SimulFit objects).

    To save time, the first dataset is re-used, excluding the third point.

    Parameters
    ----------
    usestat, usesys : bool
        Should statistical and systematic errors be explicitly set
        (True) or taken from the statistic (False)?

    Returns
    -------
    data, model
        DataSimulFit and SimulFitModel objects. The data sets
        are Data1D objects.
    """

    data1, model1 = setup_single(usestat, usesys)
    data2, _ = setup_single(usestat, usesys)
    # Filter out the third point of the second (copied) dataset.
    data2.ignore(1, 3.5)

    # not an essential part of the stats code; more a check that
    # things are working correctly (i.e. that this invariant hasn't
    # been broken by some change to the test).
    assert_equal(data2.mask, np.asarray([True, True, False, True]))

    mdata = DataSimulFit('simul', (data1, data2))
    mmodel = SimulFitModel('simul', (model1, model1))
    return mdata, mmodel
示例#7
0
def test_simul_stat_fit(stat, hide_logging, reset_xspec, setup_two):
    """Simultaneous two-dataset fit checked against a benchmark."""
    sim_data = DataSimulFit(name='data1data2',
                            datasets=[setup_two['data_pi2278'],
                                      setup_two['data_pi2286']])
    sim_model = SimulFitModel(name='model1model2',
                              parts=[setup_two['model_pi2278'],
                                     setup_two['model_pi2286']])
    result = Fit(data=sim_data, model=sim_model, stat=stat(),
                 method=NelderMead()).fit()

    bench = {
        'succeeded': 1,
        'numpoints': 18,
        'dof': 15,
        'istatval': 56609.70689926489,
        'statval': 126.1509268988255,
        'parvals': numpy.array(
            [0.8417576197443695, 1.6496933246579941, 0.2383939869443424]),
    }

    compare_results(bench, result)
示例#8
0
def setup_multiple_1dint(stat, sys):
    """Return multiple data sets and models (as SimulFit objects).

    To save time, the first dataset is re-used and the second copy has
    its third point filtered out.

    Parameters
    ----------
    stat, sys : bool
        Should statistical and systematic errors be explicitly set
        (True) or taken from the statistic (False)?

    Returns
    -------
    data, model
        DataSimulFit and SimulFitModel objects holding Data1DInt sets.
    """

    first_data, first_model = setup_single_1dint(stat, sys)
    second_data, _ = setup_single_1dint(stat, sys)

    # The bins cover (-10,-5), (-5,2), (3,4), (4,7); ignore the third one.
    second_data.ignore(2.5, 3.5)

    # Invariant check that the filter behaved as expected.
    assert_equal(second_data.mask, np.asarray([True, True, False, True]))

    return (DataSimulFit('simul', (first_data, second_data)),
            SimulFitModel('simul', (first_model, first_model)))
示例#9
0
 def __init__(self, data, model, stat, method, itermethod_opts=None):
     """Set up an iterative fit over one or more datasets.

     ``data`` and ``model`` are always stored wrapped in the
     simultaneous-fit containers so later code can treat single and
     multiple datasets uniformly.

     Parameters
     ----------
     data, model, stat, method
         The fit inputs; bare Data/Model instances are wrapped in
         DataSimulFit/SimulFitModel.
     itermethod_opts : dict or None
         Options for the iterative fitting method; the 'name' key selects
         'primini', 'sigmarej', or 'none' (the default).
     """
     # Avoid the shared-mutable-default-argument pitfall: build the
     # default dict fresh on each call.
     if itermethod_opts is None:
         itermethod_opts = {'name': 'none'}
     # Even if there is only a single data set, treat the data and models
     # as collections, wrapping them if needed. The exact `type` check is
     # kept (not isinstance) to preserve the original wrapping behavior
     # for subclasses.
     self.data = data
     if (type(data) is not DataSimulFit):
         self.data = DataSimulFit('simulfit data', (data,))
     self.model = model
     if (type(model) is not SimulFitModel):
         self.model = SimulFitModel('simulfit model', (model,))
     self.stat = stat
     self.method = method
     # Data set attributes needed to store fitting values between
     # calls to fit
     self._dep = None
     self._staterror = None
     self._syserror = None
     self._nfev = 0
     self._file = None
     # Options to send to iterative fitting method
     self.itermethod_opts = itermethod_opts
     self.iterate = False
     self.funcs = {'primini': self.primini, 'sigmarej': self.sigmarej}
     self.current_func = None
     if (itermethod_opts['name'] != 'none'):
         self.current_func = self.funcs[itermethod_opts['name']]
         self.iterate = True
示例#10
0
    def _bundle_inputs(data, model):
        """Convert input into SimulFit instances.

        Wrap a bare Data object in a DataSimulFit and a bare Model in a
        SimulFitModel; inputs that are already simultaneous-fit
        containers pass through untouched.

        Parameters
        ----------
        data : a Data or DataSimulFit instance
            The data set, or sets, to use.
        model : a Model or SimulFitModel instance
            The model expression, or expressions. If a SimulFitModel
            is given then it must match the number of data sets in the
            data parameter.

        Returns
        -------
        data, model : DataSimulFit instance, SimulFitModel instance
            If the input was a SimulFit object then this is just
            the input value.
        """

        bundled_data = (data if isinstance(data, DataSimulFit)
                        else DataSimulFit('simulfit data', (data, )))
        bundled_model = (model if isinstance(model, SimulFitModel)
                         else SimulFitModel('simulfit model', (model, )))

        return bundled_data, bundled_model
示例#11
0
def setup_multiple_pha(stat, sys, background=True):
    """Return multiple DataPHA sets and model (as SimulFit objects).

    Aimed at wstat calculation: the DataPHA objects carry no attached
    response and are grouped. The second dataset is a filtered copy of
    the first.

    Parameters
    ----------
    stat, sys : bool
        Should statistical and systematic errors be explicitly set
        (True) or taken from the statistic (False)?
    background : bool
        Should a background data set be included (True) or not (False)?
        The background is *not* subtracted when True.

    Returns
    -------
    data, model
        DataSimulFit and SimulFitModel objects holding DataPHA sets.
    """

    first_data, first_model = setup_single_pha(stat, sys,
                                               background=background)
    second_data, _ = setup_single_pha(stat, sys, background=background)
    second_data.ignore(3, 3.8)

    # sanity check on the filtering
    assert_equal(second_data.mask, np.asarray([True, False, True]))

    return (DataSimulFit('simul', (first_data, second_data)),
            SimulFitModel('simul', (first_model, first_model)))
示例#12
0
 def test_same_cache(self):
     """Fit three datasets that all share a single Polynom1D instance."""
     shared = Polynom1D()
     shared.pars[1].thaw()
     sim_data = DataSimulFit('d1d2d3', (self.d1, self.d2, self.d3))
     sim_model = SimulFitModel('same', (shared, shared, shared))
     outcome = Fit(sim_data, sim_model,
                   method=NelderMead(), stat=Cash()).fit()
     self.compare_results(self._fit_same_poly_bench, outcome)
示例#13
0
    def fit(self):
        """Fit the spectral model to all observations simultaneously.

        Translates ``self.model`` to a Sherpa model if needed, forward-folds
        it through each observation's response, runs a simultaneous fit over
        all observations and stores a per-observation fit result in
        ``self.result``.
        """
        from sherpa.fit import Fit
        from sherpa.models import ArithmeticModel, SimulFitModel
        from sherpa.astro.instrument import Response1D
        from sherpa.data import DataSimulFit

        # Translate model to sherpa model if necessary
        if isinstance(self.model, models.SpectralModel):
            model = self.model.to_sherpa()
        else:
            model = self.model

        if not isinstance(model, ArithmeticModel):
            raise ValueError('Model not understood: {}'.format(model))

        # Make model amplitude O(1e0); the factor is re-applied to the
        # folded model below so the fitted flux is unchanged.
        val = model.ampl.val * self.FLUX_FACTOR ** (-1)
        model.ampl = val

        if self.fit_range is not None:
            log.info('Restricting fit range to {}'.format(self.fit_range))
            fitmin = self.fit_range[0].to('keV').value
            fitmax = self.fit_range[1].to('keV').value

        # Loop over observations: build the noticed/filtered PHA sets and
        # the corresponding forward-folded models.
        pha = list()
        folded_model = list()
        nobs = len(self.obs_list)
        for ii in range(nobs):
            temp = self.obs_list[ii].to_sherpa()
            # fitmin/fitmax are only defined when fit_range is set; the
            # guard condition here matches the one above.
            if self.fit_range is not None:
                temp.notice(fitmin, fitmax)
                if temp.get_background() is not None:
                    temp.get_background().notice(fitmin, fitmax)
            temp.ignore_bad()
            if temp.get_background() is not None:
                temp.get_background().ignore_bad()
            pha.append(temp)
            # Forward folding: apply the instrument response, undoing the
            # amplitude rescaling applied above.
            resp = Response1D(pha[ii])
            folded_model.append(resp(model) * self.FLUX_FACTOR)

        data = DataSimulFit('simul fit data', pha)
        fitmodel = SimulFitModel('simul fit model', folded_model)

        log.debug(fitmodel)
        fit = Fit(data, fitmodel, self.statistic)
        fitresult = fit.fit()
        log.debug(fitresult)
        # The model instance passed to the Fit now holds the best fit values
        covar = fit.est_errors()
        log.debug(covar)

        # Store one fit result per observation, using that observation's
        # energy filter and folded model component.
        for ii in range(nobs):
            efilter = pha[ii].get_filter()
            shmodel = fitmodel.parts[ii]
            self.result[ii].fit = _sherpa_to_fitresult(shmodel, covar, efilter, fitresult)
示例#14
0
 def test_gauss_gauss(self):
     """Simultaneously fit two Gauss1D models to two datasets."""
     first, second = Gauss1D(), Gauss1D()
     first.fwhm = 1.3
     first.pos = 1.5
     second.fwhm = 4.
     second.pos = -2.0
     sim_data = DataSimulFit('d4d5', (self.d4, self.d5))
     sim_model = SimulFitModel('g1g2', (first, second))
     outcome = Fit(sim_data, sim_model,
                   method=LevMar(), stat=LeastSq()).fit()
     self.compare_results(self._fit_g2g2_bench, outcome)
示例#15
0
 def test_diff_cache(self):
     """Fit three datasets, each with its own Polynom1D instance."""
     polys = [Polynom1D() for _ in range(3)]
     for poly in polys:
         poly.pars[1].thaw()
     sim_data = DataSimulFit('d123', (self.d1, self.d2, self.d3))
     sim_model = SimulFitModel('diff', tuple(polys))
     outcome = Fit(sim_data, sim_model,
                   method=NelderMead(), stat=Cash()).fit()
     self.compare_results(self._fit_diff_poly_bench, outcome)
示例#16
0
File: main.py  Project: pllim/saba
    def make_simfit(self, numdata):
        """Turn the single stored dataset into a simultaneous-fit container.

        Replaces ``self.data`` with a ``DataSimulFit`` holding ``numdata``
        references to the current dataset, allowing multiple models to be
        fit against copies of the same data.

        Parameters
        ----------
        numdata: int
            the number of times you want to copy the dataset i.e if you want 2 datasets total you put 1!
        """

        self.data = DataSimulFit("wrapped_data", [self.data for _ in range(numdata)])
        # NOTE(review): the container above holds exactly `numdata` datasets,
        # yet `ndata` is set to numdata + 1 (matching the docstring's
        # "you put 1 for 2 total" wording) — confirm which count callers
        # actually rely on.
        self.ndata = numdata + 1
示例#17
0
 def test_simul_stat_fit(self):
     """Simultaneous fit of two datasets with a custom statistic."""
     sim_data = DataSimulFit(name='data1data2',
                             datasets=[self.data_pi2278, self.data_pi2286])
     sim_model = SimulFitModel(name='model1model2',
                               parts=[self.model_mult, self.model_mult])
     outcome = Fit(data=sim_data, model=sim_model, stat=MyChiNoBkg(),
                   method=NelderMead()).fit()
     self.compare_results(self._fit_simul_datavarstat_results_bench,
                          outcome)
示例#18
0
    def _setup_sherpa_fit(self, data, model):
        """Fit flux point using sherpa"""
        from sherpa.fit import Fit
        from sherpa.data import DataSimulFit
        from ..utils.sherpa import (SherpaDataWrapper, SherpaStatWrapper,
                                    SherpaModelWrapper, SHERPA_OPTMETHODS)

        optimizer = self.parameters['optimizer']

        # Guard clause: only differential flux points are supported.
        if data.sed_type != 'dnde':
            raise NotImplementedError('Only fitting of differential flux points data '
                                      'is supported.')
        data = SherpaDataWrapper(data)

        stat = SherpaStatWrapper(self.stat)
        data = DataSimulFit(name='GPFluxPoints', datasets=[data])
        method = SHERPA_OPTMETHODS[optimizer]
        models = SherpaModelWrapper(model)
        return Fit(data=data, model=models, stat=stat, method=method)
def test_errresults_multi():
    """Check the HTML rendering of error estimates for a two-dataset fit."""
    data_a = Data1D('dx', [1, 2, 3], [4, 2, 2], [1.2, 0.9, 0.9])
    data_b = Data1D('dx', [10, 11, 12, 13], [4, 4, 2, 4],
                    [0.8, 1.1, 1.1, 0.9])
    combined = DataSimulFit('combined', (data_a, data_b))

    shared = Const1D()
    shared.c0 = 3
    sim_model = SimulFitModel('silly', (shared, shared))

    errors = fit.Fit(combined, sim_model, stat=Chi2()).est_errors()
    html = errors._repr_html_()

    assert html is not None

    expected = [
        '<summary>covariance 1&#963; (68.2689%) bounds</summary>',
        '<summary>Summary (2)',
        '<td>const1d.c0</td>',
        '<div class="dataname">Fitting Method</div><div class="dataval">levmar</div>',
        '<div class="dataname">Statistic</div><div class="dataval">chi2</div>',
        '<tr><td>const1d.c0</td><td>           3</td><td>   -0.362415</td><td>    0.362415</td></tr>',
    ]
    for fragment in expected:
        assert fragment in html
示例#20
0
File: fit.py  Project: dltiziani/gammapy
    def fit(self):
        """Fit the spectral model to all observations simultaneously.

        Builds per-observation forward-folded models, runs a simultaneous
        fit, stores one result per observation in ``self._result`` (``None``
        for observations whose filter excludes everything) and a merged
        summary in ``self._global_result``.
        """
        from sherpa.fit import Fit
        from sherpa.models import ArithmeticModel, SimulFitModel
        from sherpa.astro.instrument import Response1D
        from sherpa.data import DataSimulFit

        # Reset results
        self._result = list()

        # Translate model to sherpa model if necessary
        if isinstance(self.model, models.SpectralModel):
            model = self.model.to_sherpa()
        else:
            model = self.model

        if not isinstance(model, ArithmeticModel):
            raise ValueError('Model not understood: {}'.format(model))

        # Make model amplitude O(1e0); the factor is re-applied to the
        # folded model below so the fitted flux is unchanged.
        val = model.ampl.val * self.FLUX_FACTOR**(-1)
        model.ampl = val

        if self.fit_range is not None:
            log.info('Restricting fit range to {}'.format(self.fit_range))
            fitmin = self.fit_range[0].to('keV').value
            fitmax = self.fit_range[1].to('keV').value

        # Loop over observations: build filtered PHA sets and the
        # corresponding forward-folded models.
        pha = list()
        folded_model = list()
        nobs = len(self.obs_list)
        for ii in range(nobs):
            temp = self.obs_list[ii].to_sherpa()
            # fitmin/fitmax only exist when fit_range is set; the guard
            # condition matches the one above.
            if self.fit_range is not None:
                temp.notice(fitmin, fitmax)
                if temp.get_background() is not None:
                    temp.get_background().notice(fitmin, fitmax)
            temp.ignore_bad()
            if temp.get_background() is not None:
                temp.get_background().ignore_bad()
            pha.append(temp)
            log.debug('Noticed channels obs {}: {}'.format(
                ii, temp.get_noticed_channels()))
            # Forward folding: apply the response, undoing the amplitude
            # rescaling applied above.
            resp = Response1D(pha[ii])
            folded_model.append(resp(model) * self.FLUX_FACTOR)

        # A single observation with a single noticed bin leaves no degrees
        # of freedom for the error estimation.
        if (len(pha) == 1 and len(pha[0].get_noticed_channels()) == 1):
            raise ValueError('You are trying to fit one observation in only '
                             'one bin, error estimation will fail')

        data = DataSimulFit('simul fit data', pha)
        log.debug(data)
        fitmodel = SimulFitModel('simul fit model', folded_model)
        log.debug(fitmodel)

        fit = Fit(data, fitmodel, self.statistic)

        fitresult = fit.fit()
        log.debug(fitresult)
        # The model instance passed to the Fit now holds the best fit values
        covar = fit.est_errors()
        log.debug(covar)

        # One result per observation, using that observation's filter and
        # folded model component.
        for ii in range(nobs):
            efilter = pha[ii].get_filter()
            # Skip observations not participating in the fit
            if efilter != '':
                shmodel = fitmodel.parts[ii]
                result = _sherpa_to_fitresult(shmodel, covar, efilter,
                                              fitresult)
                result.obs = self.obs_list[ii]
            else:
                result = None
            self._result.append(result)

        # Build a global result from the first valid per-observation result,
        # with the fit range widened to span all participating observations.
        valid_result = np.nonzero(self.result)[0][0]
        global_result = copy.deepcopy(self.result[valid_result])
        global_result.npred = None
        global_result.obs = None
        all_fitranges = [_.fit_range for _ in self._result if _ is not None]
        fit_range_min = min([_[0] for _ in all_fitranges])
        fit_range_max = max([_[1] for _ in all_fitranges])
        global_result.fit_range = u.Quantity((fit_range_min, fit_range_max))
        self._global_result = global_result
示例#21
0
def multifit(star_name, data_list, model_list, silent=False):
    """A function that will fit 2 models to 2 spectra simultaneously.
        This was created to fit the NaI doublets at ~3300 and ~5890 Angstroms.

    :param star_name: Name of the target star
    :type star_name: str
    :param data_list: List of spectrum data in the form [(wave, flux), (wave, flux),...]
    :type data_list: tuple
    :param model_list:  A list of unfit spectrum models
    :type model_list: list
    :param silent:  If true, no plots will generate, defaults to False
    :type silent: bool

    :return: models that are fit to the data
    :rtype: list

    """

    # Unpack the two spectra and their models.
    wave1, flux1 = data_list[0]
    wave2, flux2 = data_list[1]

    model1 = model_list[0]
    model2 = model_list[1]

    name_1 = star_name + " 1"
    name_2 = star_name + " 2"

    d1 = Data1D(name_1, wave1, flux1)
    d2 = Data1D(name_2, wave2, flux2)

    # Bundle both datasets/models for a simultaneous fit.
    dall = DataSimulFit("combined", (d1, d2))
    mall = SimulFitModel("combined", (model1, model2))

    # # ==========================================
    # # Initial guesses

    # Dataset 1
    dplot1 = DataPlot()
    dplot1.prepare(d1)
    if silent is False:
        dplot1.plot()

    mplot1 = ModelPlot()
    mplot1.prepare(d1, model1)
    if silent is False:
        # NOTE(review): dplot1.plot() is called a second time here,
        # presumably to redraw the data before overlaying the model —
        # confirm the double plot is intended.
        dplot1.plot()
        mplot1.overplot()
        plt.show()

    # Dataset 2
    dplot2 = DataPlot()
    dplot2.prepare(d2)
    if silent is False:
        dplot2.plot()

    mplot2 = ModelPlot()
    mplot2.prepare(d2, model2)
    if silent is False:
        dplot2.plot()
        mplot2.overplot()
        plt.show()

    # # =========================================
    # # Fitting happens here - don't break please
    stat = LeastSq()

    # Tight tolerances for the Levenberg-Marquardt optimizer.
    opt = LevMar()
    opt.verbose = 0
    opt.ftol = 1e-15
    opt.xtol = 1e-15
    opt.gtol = 1e-15
    opt.epsfcn = 1e-15
    print(opt)

    vfit = Fit(dall, mall, stat=stat, method=opt)
    print(vfit)
    vres = vfit.fit()

    print()
    print()
    print("Did the fit succeed? [bool]")
    print(vres.succeeded)
    print()
    print()
    print(vres.format())

    # # =========================================
    # # Plotting after fit
    if silent is False:
        # Dataset 1
        fplot1 = FitPlot()
        mplot1.prepare(d1, model1)
        fplot1.prepare(dplot1, mplot1)
        fplot1.plot()

        # residual
        title = "Data 1"
        plt.title(title)
        plt.plot(wave1, flux1 - model1(wave1))
        plt.show()

        # Dataset 2
        fplot2 = FitPlot()
        mplot2.prepare(d2, model2)
        fplot2.prepare(dplot2, mplot2)
        fplot2.plot()

        # residual
        title = "Data 2"
        plt.title(title)
        plt.plot(wave2, flux2 - model2(wave2))
        plt.show()

        # both datasets - no residuals
        splot = SplitPlot()
        splot.addplot(fplot1)
        splot.addplot(fplot2)

        plt.tight_layout()
        plt.show()

    # The fit mutates the models in place, so the input list now holds
    # the fitted models.
    return model_list
示例#22
0
def data_simul_fit_no_errors():
    """Build a two-dataset DataSimulFit with no error columns."""
    first = Data1D("data_one", X_ARRAY, Y_ARRAY)
    second = Data1D("data_two", MULTIPLIER * X_ARRAY, MULTIPLIER * Y_ARRAY)
    return DataSimulFit(NAME, (first, second))
示例#23
0
class IterFit(NoNewAttributesAfterInit):
    def __init__(self, data, model, stat, method, itermethod_opts=None):
        """Set up an iterative fit over one or more datasets.

        ``data`` and ``model`` are stored wrapped in the simultaneous-fit
        containers so later code can treat single and multiple datasets
        uniformly.

        Parameters
        ----------
        data, model, stat, method
            The fit inputs; bare Data/Model instances are wrapped in
            DataSimulFit/SimulFitModel.
        itermethod_opts : dict or None
            Options for the iterative fitting method; the 'name' key
            selects 'primini', 'sigmarej', or 'none' (the default).
        """
        # Avoid the shared-mutable-default-argument pitfall: build the
        # default dict fresh on each call.
        if itermethod_opts is None:
            itermethod_opts = {'name': 'none'}
        # Even if there is only a single data set, treat the data and
        # models as collections, wrapping them if needed. The exact `type`
        # check is kept (not isinstance) to preserve the original wrapping
        # behavior for subclasses.
        self.data = data
        if (type(data) is not DataSimulFit):
            self.data = DataSimulFit('simulfit data', (data,))
        self.model = model
        if (type(model) is not SimulFitModel):
            self.model = SimulFitModel('simulfit model', (model,))
        self.stat = stat
        self.method = method
        # Data set attributes needed to store fitting values between
        # calls to fit
        self._dep = None
        self._staterror = None
        self._syserror = None
        self._nfev = 0
        self._file = None
        # Options to send to iterative fitting method
        self.itermethod_opts = itermethod_opts
        self.iterate = False
        self.funcs = {'primini': self.primini, 'sigmarej': self.sigmarej}
        self.current_func = None
        if (itermethod_opts['name'] != 'none'):
            self.current_func = self.funcs[itermethod_opts['name']]
            self.iterate = True

    # SIGINT (i.e., typing ctrl-C) can dump the user to the Unix prompt,
    # when signal is sent from G95 compiled code.  What we want is to
    # get to the Sherpa prompt instead.  Typically the user only thinks
    # to interrupt during long fits or projection, so look for SIGINT
    # here, and if it happens, raise the KeyboardInterrupt exception
    # instead of aborting.
    def _sig_handler(self, signum, frame):
        # Convert SIGINT into a KeyboardInterrupt so the user lands back
        # at the Sherpa prompt instead of being dumped to the shell.
        raise KeyboardInterrupt

    def _get_callback(self, outfile=None, clobber=False):
        """Return the statistic callback handed to the optimizer.

        The callback sets the thawed parameters, evaluates the model over
        the bundled datasets and returns the statistic. If ``outfile`` is
        given, each evaluation is also appended to that file.

        Raises
        ------
        FitErr
            If the model has no thawed parameters, or ``outfile`` exists
            and ``clobber`` is False.
        """
        if len(self.model.thawedpars) == 0:
            raise FitErr('nothawedpar')

        # support Sherpa use with SAMP; signal.signal raises ValueError
        # when not called from the main thread, which is only worth a
        # warning here.
        try:
            signal.signal(signal.SIGINT, self._sig_handler)
        except ValueError as e:
            # "except ValueError, e" is Python 2-only syntax; the `as`
            # form works on both Python 2.6+ and Python 3.
            warning(e)

        self._dep, self._staterror, self._syserror = self.data.to_fit(
            self.stat.calc_staterror)

        self._nfev = 0
        if outfile is not None:
            if os.path.isfile(outfile) and not clobber:
                raise FitErr('noclobererr', outfile)
            # `file()` and `print >>` are Python 2-only; use open()/write()
            # which behave identically on both.
            self._file = open(outfile, 'w')
            names = ['# nfev statistic']
            names.extend(['%s' % par.fullname for par in self.model.pars
                          if not par.frozen])
            self._file.write(' '.join(names) + '\n')

        def cb(pars):
            # Store the new parameter values so linked parameters are
            # updated too.
            self.model.thawedpars = pars
            model = self.data.eval_model_to_fit(self.model)
            stat = self.stat.calc_stat(self._dep, model, self._staterror,
                                       self._syserror)

            if self._file is not None:
                vals = ['%5e %5e' % (self._nfev, stat[0])]
                vals.extend(['%5e' % val for val in self.model.thawedpars])
                self._file.write(' '.join(vals) + '\n')

            self._nfev += 1
            return stat

        return cb