Example #1
        def calc_stat(proposed_params):

            # automatic rejection outside hard limits
            mins  = sao_fcmp(proposed_params, thawedparmins, _tol)
            maxes = sao_fcmp(thawedparmaxes, proposed_params, _tol)
            if -1 in mins or -1 in maxes:
                raise LimitError('Sherpa parameter hard limit exception')

            level = _log.getEffectiveLevel()

            try:
                # ignore warning from Sherpa about hard limits
                _log.setLevel(logging.CRITICAL)

                # soft limits are ignored, hard limits rejected.
                # proposed values beyond hard limit default to limit.
                fit.model.thawedpars = proposed_params

                # Calculate statistic on proposal, use likelihood
                proposed_stat = -0.5 * fit.calc_stat()

                # _log.setLevel(level)

            except:
                # set the model back to original state on exception
                fit.model.thawedpars = oldthawedpars
                raise
            finally:
                # set the logger back to previous level
                _log.setLevel(level)

            return proposed_stat
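A minimal usage sketch, assuming sherpa and numpy are available (and using the float32 eps as the tolerance, as the other examples in this listing do), of the comparison convention the hard-limit check above relies on: sao_fcmp compares values to within a tolerance and returns -1, 0 or +1 elementwise, so a -1 in either comparison flags a proposal that falls outside the hard limits.

import numpy as np
from sherpa.utils import sao_fcmp

_tol = np.finfo(np.float32).eps

proposed = np.array([0.5, 2.0, 5.0])
hard_min = np.array([1.0, 1.0, 1.0])

# -1 where proposed < hard_min, 0 where equal within _tol, +1 where greater
mins = sao_fcmp(proposed, hard_min, _tol)
print(mins)          # [-1  1  1]
print(-1 in mins)    # True: the first value is below its hard minimum, so reject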
Example #2
    def calc_stat(proposed_params):

        # automatic rejection outside hard limits
        mins = sao_fcmp(proposed_params, thawedparmins, _tol)
        maxes = sao_fcmp(thawedparmaxes, proposed_params, _tol)
        if -1 in mins or -1 in maxes:
            # print('hard limit exception')
            raise LimitError('Sherpa parameter hard limit exception')

        try:
            # ignore warning from Sherpa about hard limits
            _log.setLevel(50)

            # soft limits are ignored, hard limits rejected.
            # proposed values beyond hard limit default to limit.
            fit.model.thawedpars = proposed_params
            _log.setLevel(_level)

            # Calculate statistic on proposal, use likelihood
            proposed_stat = -0.5 * fit.calc_stat()

        except:
            # set the model back to original state on exception
            fit.model.thawedpars = oldthawedpars
            raise

        return proposed_stat
Example #4
    def fit(self, outfile=None, clobber=False):
        dep, staterror, syserror = self.data.to_fit(self.stat.calc_staterror)
        if not iterable(dep) or len(dep) == 0:
            #raise FitError('no noticed bins found in data set')
            raise FitErr( 'nobins' )

        if ((iterable(staterror) and 0.0 in staterror) and
            isinstance(self.stat, Chi2) and
            type(self.stat) != Chi2 and
            type(self.stat) != Chi2ModVar):
            #raise FitError('zeros found in uncertainties, consider using' +
            #               ' calculated uncertainties')
            raise FitErr( 'binhas0' )

        if (getattr(self.data, 'subtracted', False) and
            isinstance(self.stat, Likelihood) ):
            #raise FitError('%s statistics cannot be used with background'
            #               % self.stat.name + ' subtracted data')
            raise FitErr( 'statnotforbackgsub', self.stat.name )


        init_stat = self.calc_stat()
        # output = self.method.fit ...
        output = self._iterfit.fit(self._iterfit._get_callback(outfile, clobber),
                                   self.model.thawedpars,
                                   self.model.thawedparmins,
                                   self.model.thawedparmaxes)
        # LevMar always calculates chi-square, so call calc_stat
        # just in case the statistic is something other than chi-square
        self.model.thawedpars = output[1]
        tmp = list(output)
        tmp[2] = self.calc_stat()
        output = tuple(tmp)
        # end of the gymnastics 'cause one cannot write to a tuple

        # check if any parameter values are at boundaries,
        # and warn user.
        tol = finfo(float32).eps
        param_warnings = ""
        for par in self.model.pars:
            if not par.frozen:
                if sao_fcmp(par.val, par.min, tol) == 0:
                    param_warnings += ("WARNING: parameter value %s is at its minimum boundary %s\n" %
                                      (par.fullname, str(par.min)))
                if sao_fcmp(par.val, par.max, tol) == 0:
                    param_warnings += ("WARNING: parameter value %s is at its maximum boundary %s\n" %
                                      (par.fullname, str(par.max)))

        if self._iterfit._file is not None:
            vals = ['%5e %5e' % (self._iterfit._nfev, tmp[2])]
            vals.extend(['%5e' % val for val in self.model.thawedpars])
            print(' '.join(vals), file=self._iterfit._file)
            self._iterfit._file.close()
            self._iterfit._file = None

        return FitResults(self, output, init_stat, param_warnings.strip("\n"))
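The boundary warning above hinges on sao_fcmp returning 0 for values that agree to within the float32 machine epsilon. A minimal sketch, assuming sherpa and numpy are available, with a hypothetical parameter value that has been clipped to its lower limit:

import numpy as np
from sherpa.utils import sao_fcmp

tol = np.finfo(np.float32).eps
val, vmin = 1.0 + 1e-9, 1.0           # fitted value effectively at the minimum
print(sao_fcmp(val, vmin, tol) == 0)  # True: would trigger the boundary warning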
Example #5
    def calc(self, p, x, xhi=None, **kwargs):
        x = numpy.asarray(x, dtype=SherpaFloat)

        if 0.0 == p[0]:
            raise ValueError('model evaluation failed, ' +
                             '%s fwhm cannot be zero' % self.name)

        if 0.0 == p[1]:
            raise ValueError('model evaluation failed, ' +
                             '%s pos cannot be zero' % self.name)

        if 0.0 == p[3]:
            raise ValueError('model evaluation failed, ' +
                             '%s skew cannot be zero' % self.name)

        y = numpy.zeros_like(x)
        sigma = p[1] * p[0] / 705951.5     # 705951.5 = 2.9979e5 * 2.354820044
        delta = numpy.abs((x - p[1]) / sigma)
        idx = (delta < self.limit)

        arg = - delta * delta / 2.0
        if sao_fcmp(p[3], 1.0, _tol) == 0:
            y[idx] = p[2] * numpy.exp(arg[idx]) / sigma / 2.50662828   # 2.50662828 = sqrt(2 * pi)

        else:
            left = (arg <= p[1])
            arg[left] = numpy.exp(arg[left])
            right = ~left
            arg[right] = numpy.exp(arg[right] / p[3] / p[3])
            y[idx] = 2.0 * p[2] * arg[idx] / sigma / 2.50662828 / (1.0 + p[3])

        return y
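A short check, under the assumption that fwhm here is a velocity width in km/s, of the constants used above: 705951.5 is the speed of light in km/s multiplied by the Gaussian FWHM-to-sigma factor 2*sqrt(2*ln 2), and 2.50662828 is sqrt(2*pi).

import math

fwhm_to_sigma = 2.0 * math.sqrt(2.0 * math.log(2.0))   # 2.354820045...
c_km_s = 2.9979e5                                       # speed of light, km/s
print(c_km_s * fwhm_to_sigma)                           # ~705951.5
print(math.sqrt(2.0 * math.pi))                         # 2.5066282746...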
Example #6
def _check_hist_bins(plot):
    """Ensure lo/hi edges that are "close" are merged.

    Ensure that "close-enough" bin edges use the same value.  We do
    this for all bins, even those that are identical, as it's
    easier. The tolerance is taken to be the float32 "eps" setting, as
    this seems to work for the (limited) data sets I've seen. This is
    to fix issue #977.

    Parameters
    ----------
    plot
        The plot structure, which must have xlo and xhi attributes.

    Notes
    -----
    Note that this holds even when plotting wavelength values, which
    have xlo/xhi in decreasing order, since the lo/hi values still
    hold.

    """

    # Technically idx should be 0 or 1, with no -1 values. We
    # do not enforce this. What we do is to take all bins that
    # appear similar (sao_fcmp==0) and set the xlo[i+1] bin
    # to the xhi[i] value.
    #
    equal = sao_fcmp(plot.xlo[1:], plot.xhi[:-1], _tol)
    idx, = np.where(equal == 0)
    plot.xlo[idx + 1] = plot.xhi[idx]
Example #8
File: plot.py  Project: wmclaugh/sherpa
def _check_hist_bins(xlo, xhi):
    """Ensure lo/hi edges that are "close" are merged.

    Ensure that "close-enough" bin edges use the same value.  We do
    this for all bins, even those that are identical, as it's
    easier. The tolerance is taken to be the float32 "eps" setting, as
    this seems to work for the (limited) data sets I've seen. This is
    to fix issue #977.

    Parameters
    ----------
    xlo, xhi : array
        Lower and upper bin boundaries. Typically, ``xlo`` will contain the
        lower boundary and ``xhi`` the upper boundary, but this function can
        deal with situations where that is reversed. Both arrays have to be
        monotonically increasing or decreasing.

    Returns
    -------
    xlo, xhi : array
        xlo and xhi with values that were very close (within numerical
        tolerance) before changed such that they now match exactly.

    Notes
    -----
    Note that this holds even when plotting wavelength values, which
    have xlo/xhi in decreasing order, since the lo/hi values still
    hold.
    """
    if len(xlo) != len(xhi):
        # Not a Sherpa specific error, because this is more for developers.
        raise ValueError('Input arrays must have same length.')
    # Nothing to compare if input arrays are empty.
    if len(xlo) == 0:
        return xlo, xhi

    # Technically idx should be 0 or 1, with no -1 values. We
    # do not enforce this. What we do is to take all bins that
    # appear similar (sao_fcmp==0) and set the xlo[i+1] bin
    # to the xhi[i] value.
    #
    # Deal with xhi <-> xlo switches, which can occur when converting
    # from energy to wavelength, and with reversed order, which can
    # happen when converting from energy to wavelength or if the input
    # PHA is not ordered in increasing energy. If both happen at the
    # same time we would need to switch twice, which is a no-op, so we
    # get to use the elusive Python XOR operator.
    if (xlo[0] > xhi[0]) ^ (xhi[0] > xhi[-1]):
        xlo, xhi = xhi, xlo
    equal = sao_fcmp(xlo[1:], xhi[:-1], _tol)
    idx, = np.where(equal == 0)
    xlo[idx + 1] = xhi[idx]

    return xlo, xhi
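A minimal usage sketch, assuming numpy and sherpa are available, of the merge step this function performs: a lower edge that differs from the preceding upper edge only at float32 precision is snapped to the same value.

import numpy as np
from sherpa.utils import sao_fcmp

_tol = np.finfo(np.float32).eps
xlo = np.array([1.0, 2.0 + 1e-9, 3.0])
xhi = np.array([2.0, 3.0, 4.0])

# bins whose edges agree to within _tol get xlo[i+1] set to xhi[i]
equal = sao_fcmp(xlo[1:], xhi[:-1], _tol)
idx, = np.where(equal == 0)
xlo[idx + 1] = xhi[idx]
print(xlo)   # [1. 2. 3.]: the nearly-equal lower edge now matches xhi[0] exactly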
Example #9
 def _check_for_user_grid(self, x):
     return (len(self.channel) != len(x) or
             not (sao_fcmp(self.channel, x, _tol) == 0).all())
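A minimal sketch (with made-up channel data, not Sherpa's data class) of the test above: a grid counts as user supplied when its length differs from the stored channels or any element disagrees beyond the tolerance.

import numpy as np
from sherpa.utils import sao_fcmp

_tol = np.finfo(np.float32).eps
channel = np.arange(1.0, 11.0)           # stored channel numbers 1..10

def is_user_grid(x):
    return (len(channel) != len(x) or
            not (sao_fcmp(channel, x, _tol) == 0).all())

print(is_user_grid(np.arange(1.0, 11.0)))   # False: same grid as the stored channels
print(is_user_grid(np.arange(1.0, 6.0)))    # True: a different (shorter) grid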
Example #11
    def primini(self, statfunc, pars, parmins, parmaxes, statargs = (),
                statkwargs = {}):
        # Primini's method can only be used with chi-squared;
        # raise an exception if it is attempted with least-squares
        # or maximum likelihood
        if not (isinstance(self.stat, Chi2) and
                type(self.stat) is not LeastSq):
            raise FitErr('needchi2', 'Primini\'s')
        # Get tolerance, max number of iterations from the
        # dictionary for Primini's method
        tol = self.itermethod_opts['tol']
        if (type(tol) != int and
            type(tol) != float):
            raise SherpaErr("'tol' value for Primini's method must be a number")        
        maxiters = self.itermethod_opts['maxiters']
        if (type(maxiters) != int):
            raise SherpaErr("'maxiters' value for Primini's method must be an integer")

        # Store original statistical errors for all data sets.
        # Then, set all statistical errors equal to one, to
        # prepare for Primini's method.
        staterror_original = []
            
        for d in self.data.datasets:
            st = d.get_staterror(filter=False)
            staterror_original.append(st)
            d.staterror = ones_like(st)

        # Keep a record of the current and previous statistics;
        # when these agree to within some tolerance, Primini's method
        # is done.
        previous_stat = float32(finfo(float32).max)
        current_stat = statfunc(pars)[0]
        nfev = 0
        iters = 0

        # This is Primini's method.  The essence of the method
        # is to fit; then call the statistic's staterror function
        # on model values (calculated with current parameter values);
        # then call fit *again*.  Do this until the final fit
        # statistic for the previous and current calls to fit
        # agree to within tolerance.
        final_fit_results = None
        try:
            while (sao_fcmp(previous_stat, current_stat, tol) != 0 and
                   iters < maxiters):
                final_fit_results = self.method.fit(statfunc,
                                                    self.model.thawedpars,
                                                    parmins, parmaxes,
                                                    statargs, statkwargs)
                previous_stat = current_stat
                current_stat = final_fit_results[2]
                nfev += final_fit_results[4].get('nfev')
                iters += 1

                # Call stat.staterror with *model* values, not data.
                # Model values calculated using best-fit parameter
                # values from the just-completed call to the fit
                # function.
                model_iterator = iter(self.model())
                for d in self.data.datasets:
                    d.staterror = self.stat.calc_staterror(
                        d.eval_model(next(model_iterator)))

            # Final number of function evaluations is the sum
            # of the numbers of function evaluations from all calls
            # to the fit function.
            final_fit_results[4]['nfev'] = nfev
        finally:
            # Clean up *no*matter*what* -- we must always
            # restore original statistical errors.
            staterror_original.reverse()
            for d in self.data.datasets:
                d.staterror = staterror_original.pop()
            
        # Return results from Primini's iterative fitting method
        return final_fit_results
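A minimal sketch (hypothetical helpers fit_once and recompute_errors, not Sherpa's API) of the iteration pattern above: fit, recompute the per-bin errors from the just-fitted model, and repeat until the fit statistic stops changing to within a tolerance.

import numpy as np

def primini_like(fit_once, recompute_errors, tol=1.0e-3, maxiters=10):
    """fit_once() returns the fit statistic; recompute_errors() updates the errors."""
    previous = np.finfo(np.float32).max
    current = fit_once()
    iters = 0
    # stop once the relative change in the statistic drops below tol
    while abs(previous - current) > tol * abs(current) and iters < maxiters:
        previous = current
        current = fit_once()       # refit with the current errors
        recompute_errors()         # errors from the just-fitted model values
        iters += 1
    return current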