Example #1
    def __init__(self, func):
        if isinstance(func, Model):
            raise ModelErr('badinstance', type(self).__name__)
        if not callable(func):
            raise ModelErr('noncall', type(self).__name__, type(func).__name__)
        self.func = func
        Model.__init__(self, func.__name__)
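
For context, a runnable sketch of the same checks; the stub Model, ModelErr and FuncModel names below are stand-ins, not Sherpa's real classes.

class Model:
    def __init__(self, name):
        self.name = name

class ModelErr(Exception):
    pass

class FuncModel(Model):  # hypothetical wrapper class
    def __init__(self, func):
        if isinstance(func, Model):
            raise ModelErr('badinstance')  # refuse to wrap a Model instance
        if not callable(func):
            raise ModelErr('noncall')      # refuse non-callable input
        self.func = func
        Model.__init__(self, func.__name__)

def gauss(x):
    return x

FuncModel(gauss)         # accepted: a plain callable
# FuncModel(42)          # raises ModelErr('noncall')
# FuncModel(Model('m'))  # raises ModelErr('badinstance')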
Example #2
    def _make_and_validate_grid(self, args_array):
        """
        Validate the input grid and check whether it is a point or an integrated grid.

        Parameters
        ----------
        args_array : list
            The array of arguments passed to the `call` method.

        Returns
        -------
        requested_eval_space : EvaluationSpace2D
            The evaluation space defined by the supplied arguments.
        """
        nargs = len(args_array)
        if nargs == 0:
            raise ModelErr('nogrid')

        requested_eval_space = EvaluationSpace2D(*args_array)

        # Ensure the two grids match: integrated or non-integrated.
        if self.evaluation_space.is_integrated and not requested_eval_space.is_integrated:
            raise ModelErr('needsint')
        if requested_eval_space.is_integrated and not self.evaluation_space.is_integrated:
            raise ModelErr('needspoint')

        return requested_eval_space
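
For context, a sketch of the point/integrated distinction; the argument layout is assumed from the usual 2D calling convention rather than taken from the EvaluationSpace2D source.

import numpy as np

x0 = np.array([1.0, 2.0, 3.0])
x1 = np.array([1.0, 1.0, 1.0])

point_args = [x0, x1]                       # point grid: one array per axis
integrated_args = [x0, x1, x0 + 1, x1 + 1]  # integrated grid: lo and hi edges

# Passing point_args when the model's own evaluation space is integrated
# raises ModelErr('needsint'); the reverse raises ModelErr('needspoint').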
Example #3
    def test_NewSherpaErr(self):
        class OldSherpaErr(Exception):
            "Old class for all Sherpa exceptions"

            def __init__(self, dict, key, *args):
                if key in dict:
                    errmsg = dict[key] % args
                else:
                    errmsg = "unknown key '%s'" % key
                Exception.__init__(self, errmsg)

        dict = {'simple': 'simple message', 'arg': 'argument: %s'}

        # Test 1: verify that a correct call of the new constructor gives the same result as the old one
        err = SherpaErr(dict, 'simple')
        old_err = OldSherpaErr(dict, 'simple')
        self.assertEqual(str(err), str(old_err))

        # Test 2: same as before, but with string placeholders
        err = SherpaErr(dict, 'arg', 'foo')
        self.assertEqual('argument: foo', str(err))

        # Test #3: verify that a call without a key results in a generic message being produced
        err = SherpaErr(dict)
        self.assertEqual('Generic Error', str(err))

        # Test #4: verify the user's expected behavior, i.e. a string is provided as error message
        err = SherpaErr(dict, 'My Error')
        self.assertEqual('My Error', str(err))

        # Test #5: verify the user provided example, which exercises a derived class
        err = ModelErr("Unable to frobnicate model %s" % 'modelname')
        self.assertEqual('Unable to frobnicate model modelname', str(err))
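
Taken together, the five assertions pin down the constructor's dispatch. A condensed sketch of that behaviour (not Sherpa's actual implementation):

class SherpaErrSketch(Exception):
    def __init__(self, msgdict=None, key=None, *args):
        if key is None:
            msg = 'Generic Error'        # Test 3: no key given
        elif key in (msgdict or {}):
            msg = msgdict[key] % args    # Tests 1 and 2: dict lookup
        else:
            msg = key                    # Tests 4 and 5: the key is the message
        Exception.__init__(self, msg)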
Example #4
    def query(self, p):
        try:
            return self.index[tuple(p)]
        except KeyError:
            # The requested parameter values are not on the template grid.
            raise ModelErr(
                "Interpolation of template parameters was disabled for this "
                "model, but parameter values not in the template library "
                "have been requested. Please use the gridsearch method and "
                "make sure the sequence option is consistent with the "
                "template library")
Example #5
    def fold(self, data):
        mask = data.mask
        if self.__x is None and numpy.iterable(mask):
            if len(mask) != len(self.__y):
                raise ModelErr("filtermismatch", 'table model',
                               'data, (%s vs %s)' % (len(self.__y), len(mask)))
            self.__filtered_y = self.__y[mask]
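
A toy run of the filtering step, using only NumPy; filtered_y mirrors the private attribute cached above.

import numpy

y = numpy.array([10.0, 20.0, 30.0, 40.0])
mask = numpy.array([True, False, True, True])

assert len(mask) == len(y)   # the check guarding ModelErr('filtermismatch')
filtered_y = y[mask]         # what fold() caches for later calc() calls
print(filtered_y)            # [10. 30. 40.]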
Example #6
    def __init__(self, name, parts):
        self.parts = tuple(parts)
        allpars = []
        model_with_dim = None
        for part in self.parts:

            ndim = part.ndim
            if ndim is not None:
                if self.ndim is None:
                    self.ndim = ndim
                    model_with_dim = part
                elif self.ndim != ndim:
                    raise ModelErr('Models do not match: ' +
                                   '{}D ({}) and '.format(self.ndim, model_with_dim.name) +
                                   '{}D ({})'.format(ndim, part.name))

            for p in part.pars:
                if p in allpars:
                    # If we already have a reference to this parameter, store
                    # a hidden, linked proxy instead
                    pnew = Parameter(p.modelname, p.name, 0.0, hidden=True)
                    pnew.link = p
                    p = pnew
                allpars.append(p)

        Model.__init__(self, name, allpars)

        for part in self.parts:
            try:
                self.is_discrete = self.is_discrete or part.is_discrete
            except AttributeError:
                warning("Could not determine whether the model is discrete.\n" +
                        "This probably means that you have restored a session saved with a previous version of Sherpa.\n" +
                        "Falling back to assuming that the model is continuous.\n")
                self.is_discrete = False
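
The linked-proxy trick can be exercised in isolation with Sherpa's Parameter class (assuming the sherpa.models.parameter import path):

from sherpa.models.parameter import Parameter

p = Parameter('mdl', 'ampl', 1.5)
proxy = Parameter(p.modelname, p.name, 0.0, hidden=True)
proxy.link = p      # the proxy now reports the linked value
print(proxy.val)    # 1.5, read through the link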
Example #7
    def regrid(self, *args, **kwargs):
        for part in self.parts:
            # ArithmeticConstantModel does not support regrid by design
            if not hasattr(part, 'regrid'):
                continue
            # The full model expression must be used
            return part.__class__.regrid(self, *args, **kwargs)
        raise ModelErr('Neither component supports regrid method')
Example #8
    def calc(self, p, x0, x1=None, *args, **kwargs):

        if self.__x is not None and self.__y is not None:
            return p[0] * interpolate(
                x0, self.__x, self.__y, function=self.method)

        elif (self.__filtered_y is not None
              and len(x0) == len(self.__filtered_y)):
            return p[0] * self.__filtered_y

        elif (self.__y is not None and len(x0) == len(self.__y)):
            return p[0] * self.__y

        raise ModelErr("filtermismatch", 'table model',
                       'data, (%s vs %s)' % (len(self.__y), len(x0)))
Example #9
    def _set_thawed_pars(self, vals):
        tpars = [p for p in self.pars if not p.frozen]

        ngot = len(vals)
        nneed = len(tpars)
        if ngot != nneed:
            raise ModelErr('numthawed', nneed, ngot)

        for p, v in zip(tpars, vals):
            v = SherpaFloat(v)
            if v < p.hard_min:
                p.val = p.min
                warning(('value of parameter %s is below minimum; ' +
                         'setting to minimum') % p.fullname)
            elif v > p.hard_max:
                p.val = p.max
                warning(('value of parameter %s is above maximum; ' +
                         'setting to maximum') % p.fullname)
            else:
                p._val = v
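
A short usage sketch of the corresponding thawedpars setter, using Gauss1D (parameters fwhm, pos and ampl):

from sherpa.models.basic import Gauss1D
from sherpa.utils.err import ModelErr

mdl = Gauss1D()
mdl.pos.freeze()               # leaves two thawed parameters: fwhm and ampl
mdl.thawedpars = [5.0, 2.0]    # accepted: the count matches

try:
    mdl.thawedpars = [5.0]     # wrong number of values
except ModelErr as exc:
    print(exc)                 # the 'numthawed' message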
Example #10
def get_response_for_pha(data, model, bkg_srcs={}, pileup_model=None, id=None):
    """Create the response model describing the source and model.

    Include any background components and apply the response
    model for the dataset.

    This is essentially the object-oriented version of
    `sherpa.astro.background.add_response`.

    Parameters
    ----------
    data : sherpa.astro.data.DataPHA instance
        The dataset (may be a background dataset).
    model : sherpa.models.model.ArithmeticModel instance
        The model (without response or background components)
        to match to data.
    bkg_srcs : dict
        Keys in the dictionary need to be the background ids in the dataset
        ``data``, and the values are the corresponding source models.
    pileup_model : None or `sherpa.astro.models.JDPileup` instance
        Pileup model for the dataset if needed, or ``None`` for no pileup
        model.
    id : string
        A string to label the dataset in warning messages. If this is
        set to ``None`` the name of the dataset is used. This parameter
        is mainly needed if this function is called from the UI layer,
        where datasets and models have ids that are not stored in an
        attribute of the dataset itself.

    Returns
    -------
    fullmodel : sherpa.models.model.ArithmeticModel
        The model including the necessary response models and
        background components.

    """
    if id is None:
        id = data.name

    resp = data.get_full_response(pileup_model)
    if data.subtracted or (len(bkg_srcs) == 0):
        return resp(model)
    # At this point we have one or more background components that
    # need to be added to the overall model.
    # If the scale factors are all scalars then we can return
    #
    #   resp(model + sum (scale_i * bgnd_i))        [1]
    #
    # but if any are arrays then we have to apply the scale factor
    # after applying the response, that is
    #
    #   resp(model) + sum(scale_i * resp(bgnd_i))   [2]
    #
    # This is because the scale values are defined in channel space,
    # not in the space the instrument response works in (i.e. the
    # values used inside the resp call).
    #
    # Note that if resp is not a linear response - for instance,
    # it is a pileup model - then we will not get the correct
    # answer if there's an array value for the scale factor
    # (i.e. equation [2] above). A warning message is created in this
    # case, but it is not treated as an error.
    #
    # For multiple background datasets we can have different models -
    # that is, one for each dataset - but it is expected that the
    # same model is used for all backgrounds (i.e. this is what
    # we 'optimise' for).

    # Identify the scalar and vector scale values for each
    # background dataset, and combine using the model as a key.
    #
    scales_scalar = defaultdict(list)
    scales_vector = defaultdict(list)
    for bkg_id in data.background_ids:
        try:
            bmdl = bkg_srcs[bkg_id]
        except KeyError:
            raise ModelErr('nobkg', bkg_id, id)

        scale = data.get_background_scale(bkg_id, units='rate', group=False)

        if np.isscalar(scale):
            store = scales_scalar
        else:
            store = scales_vector

        store[bmdl].append(scale)

    # Combine the scalar terms, grouping by the model.
    #
    for mdl, scales in scales_scalar.items():
        scale = sum(scales)
        model += scale * mdl

    # Apply the instrument response.
    #
    model = resp(model)

    if len(scales_vector) == 0:
        return model

    # Warn if a pileup model is being used. The error message here
    # is terrible.
    #
    # Should this be a Python Warning rather than a logged message?
    #
    if isinstance(resp, PileupResponse1D):
        wmsg = "model results for dataset {} ".format(id) + \
                "likely wrong: use of pileup model and array scaling " + \
                "for the background"

        # warnings.warn(wmsg)
        warning(wmsg)

    # Combine the vector terms, grouping by the model. A trick here
    # is that, to make the string version of the model readable,
    # we add a model to contain the scale values, using the
    # ArithmeticConstantModel.
    #
    # Note that the model is given a name, to make it "easy" to
    # read in the model expression, but this name is not registered
    # anywhere. An alternative would be to use the default naming
    # convention of the model, which will use 'float64[n]' as a label.
    #
    nvectors = len(scales_vector)
    for i, (mdl, scales) in enumerate(scales_vector.items(), 1):

        # Special case: only a single vector-scaled model.
        if nvectors == 1:
            name = 'scale{}'.format(id)
        else:
            name = 'scale{}_{}'.format(id, i)

        # We sum up the scale arrays for this model.
        #
        scale = sum(scales)
        tbl = ArithmeticConstantModel(scale, name=name)
        model += tbl * resp(mdl)

    return model
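
A toy numeric check of equations [1] and [2] from the comments above: with a linear response written as a matrix and a vector scale defined in channel space, the two orderings disagree, which is why the vector case must scale after the response. All numbers are made up.

import numpy as np

R = np.array([[1.0, 0.5],
              [0.0, 1.0]])     # stand-in linear response
bkg = np.array([2.0, 4.0])     # background model values
scale = np.array([0.1, 10.0])  # per-channel scale factors

eq1 = R @ (scale * bkg)        # equation [1]: scale inside the response
eq2 = scale * (R @ bkg)        # equation [2]: scale in channel space
print(eq1)                     # [20.2 40. ]
print(eq2)                     # [ 0.4 40. ]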
Example #11
    def calc(self, p, xlo, xhi=None, **kwargs):
        if xhi is None:
            raise ModelErr('needsint')

        return _modelfcts.integrate1d(self.model.calc, p, xlo, xhi,
                                      **self.otherkwargs)
Example #12
    def calc(self, *args, **kwargs):
        if not self.integrate:
            raise ModelErr('alwaysint', self.name)
        return _modelfcts.schechter(*args, **kwargs)
Example #13
    def _set_val(self, val):
        val = SherpaFloat(val)
        if val.ndim > 1:
            raise ModelErr('The constant must be a scalar or 1D, not 2D')

        self._val = val
Example #14
def _sample_flux_get_samples_with_scales(fit,
                                         src,
                                         correlated,
                                         scales,
                                         num,
                                         clip='hard'):
    """Return the parameter samples given the parameter scales.

    Parameters
    ----------
    fit : sherpa.fit.Fit instance
        The fit instance. The fit.model expression is assumed to include
        any necessary response information. The number of free parameters
        in fit.model is mfree.
    src : sherpa.models.ArithmeticModel instance
        The model for which the flux is being calculated. This must be
        a subset of the fit.model expression, and should not include the
        response information. There must be at least one thawed parameter
        in this model. The number of free parameters in src is sfree.
    correlated : bool
        Are the parameters assumed to be correlated or not? If correlated
        is True then scales must be 2D.
    scales : 1D or 2D array
        The parameter scales. When 1D they are the gaussian sigma
        values for the parameter, and when a 2D array they are
        the covariance matrix. The scales parameter must match the number
        of parameters in fit (mfree) and not in src (sfree) when they
        are different. For 1D the size is mfree and for 2D it is
        mfree by mfree.
    num : int
        The number of samples to return. This must be 1 or greater.
    clip : {'hard', 'soft', 'none'}, optional
        What clipping strategy should be applied to the sampled
        parameters. The default ('hard') is to fix values at their
        hard limits if they exceed them. A value of 'soft' uses the
        soft limits instead, and 'none' applies no clipping. The last
        column in the returned arrays indicates if the row had any
        clipped parameters (even when clip is set to 'none').

    Returns
    -------
    samples, clipped : 2D NumPy array, 1D NumPy array
        The dimensions are num by mfree. The ordering of the parameter
        values in each row matches that of the free parameters in
        fit.model.  The clipped array indicates whether a row had one
        or more clipped parameters.

    Raises
    ------
    ArgumentErr
        If the scales argument contains invalid values (e.g. None or
        IEEE non-finite values), or is the wrong shape.
    ModelErr
        If the scales argument has the wrong size (that is, it
        does not represent mfree parameter values).

    Notes
    -----
    The support for src being a subset of the fit.model argument
    has not been tested for complex models, that is when fit.model
    is rmf(arf(source_model)) and src is a combination of components
    in source_model but not all the components of source_model.

    """

    npar = len(src.thawedpars)
    mpar = len(fit.model.thawedpars)
    assert mpar >= npar

    scales = numpy.asarray(scales)

    # A None value will cause scales to have a dtype of object,
    # which is not supported by isfinite, so check for this
    # first.
    #
    # Numpy circa 1.11 raises a FutureWarning with 'if None in scales:'
    # about this changing to element-wise comparison (which is what
    # we want). To avoid this warning I use the suggestion from
    # https://github.com/numpy/numpy/issues/1608#issuecomment-9618150
    #
    if numpy.equal(None, scales).any():
        raise ArgumentErr('bad', 'scales', 'must not contain None values')

    # We require that scales only has finite values in it.
    # The underlying sample routines are assumed to check other
    # constraints, or deal with negative values (for the 1D
    # uncorrelated case the absolute value is used).
    #
    if not numpy.isfinite(scales).all():
        raise ArgumentErr('bad', 'scales', 'must only contain finite values')

    if scales.ndim == 2 and (scales.shape[0] != scales.shape[1]):
        raise ArgumentErr('bad', 'scales', 'scales must be square when 2D')

    # Ensure the scales array matches the correlated parameter:
    #  - when True it must be the covariance matrix (2D)
    #  - when False it can be either be a 1D array of sigmas or
    #    the covariance matrix, which we convert to an array of
    #    sigmas
    #
    if correlated:
        if scales.ndim != 2:
            raise ArgumentErr('bad', 'scales',
                              'when correlated=True, scales must be 2D')
    elif scales.ndim == 2:
        # convert from covariance matrix
        scales = numpy.sqrt(scales.diagonal())
    elif scales.ndim != 1:
        raise ArgumentErr('bad', 'scales',
                          'when correlated=False, scales must be 1D or 2D')

    # At this point either 1D or 2D square array. Now to check the
    # number of elements.
    #
    if scales.shape[0] != mpar:
        raise ModelErr('numthawed', mpar, scales.shape[0])

    if correlated:
        sampler = NormalParameterSampleFromScaleMatrix()
    else:
        sampler = NormalParameterSampleFromScaleVector()

    samples = sampler.get_sample(fit, scales, num=num)
    clipped = sampler.clip(fit, samples, clip=clip)
    return samples, clipped
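
A quick check of the covariance-to-sigma conversion used in the correlated=False branch:

import numpy

cov = numpy.array([[4.0, 0.3],
                   [0.3, 9.0]])      # 2D input: a covariance matrix
sigmas = numpy.sqrt(cov.diagonal())  # per-parameter gaussian sigmas
print(sigmas)                        # [2. 3.]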
Example #15
def test5():
    """verify the user provided example, which exercises a derived class"""
    err = ModelErr("Unable to frobnicate model %s" % 'modelname')
    assert 'Unable to frobnicate model modelname' == str(err)