Example 1
def test_arithmeticconstantmodel_dimerr():

    x = np.ones(6).reshape(2, 3)
    with pytest.raises(ModelErr) as exc:
        ArithmeticConstantModel(x)

    assert str(exc.value) == 'The constant must be a scalar or 1D, not 2D'
Example 2
def test_arithmeticconstantmodel(val):
    """Does ArithmeticConstantModel handle ndim?

    With the current design we cannot distinguish the model
    dimensionality from the value to be wrapped, so ndim is
    set to None for all arguments.
    """

    mdl = ArithmeticConstantModel(val)
    assert mdl.ndim is None
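The docstring above makes the point that the wrapped value does not determine the model dimensionality, so ndim is left as None. A minimal sketch of the same behaviour outside pytest (assuming the import path sherpa.models.model.ArithmeticConstantModel; the names here are illustrative only):

import numpy as np
from sherpa.models.model import ArithmeticConstantModel

# Both a scalar and a 1D array are accepted as the constant, and
# neither choice fixes the model dimensionality.
for val in [2.3, np.asarray([1.0, 2.0, 3.0])]:
    mdl = ArithmeticConstantModel(val)
    print(mdl.name, mdl.ndim)   # ndim is None in both cases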
Example 3
def get_response_for_pha(data, model, bkg_srcs={}, pileup_model=None, id=None):
    """Create the response model describing the source and model.

    Include any background components and apply the response
    model for the dataset.

    This is essentially the object-oriented version of
    `sherpa.astro.background.add_response`.

    Parameters
    ----------
    data : sherpa.astro.data.DataPHA instance
        The dataset (may be a background dataset).
    model : sherpa.models.model.ArithmeticModel instance
        The model (without response or background components)
        to match to data.
    bkg_srcs : dict
        Keys in the dictionary need to be the background ids in the dataset
        ``data``, and the values are the corresponding source models.
    pileup_model : None or `sherpa.astro.models.JDPileup` instance
        Pileup model for the dataset if needed, or ``None`` for no pileup
        model.
    id : string or None
        A string to label the dataset in warning messages. If this is set
        to ``None`` then the name of the dataset is used. This parameter is
        mainly needed if this function is called from the UI layer, where
        datasets and models have ids that are not stored in an attribute of
        the dataset itself.

    Returns
    -------
    fullmodel : sherpa.models.model.ArithmeticModel
        The model including the necessary response models and
        background components.

    """
    if id is None:
        id = data.name

    resp = data.get_full_response(pileup_model)
    if data.subtracted or (len(bkg_srcs) == 0):
        return resp(model)
    # At this point we have one or more background components
    # that need to be added to the overall model.
    # If the scale factors are all scalars then we can return
    #
    #   resp(model + sum (scale_i * bgnd_i))        [1]
    #
    # but if any are arrays then we have to apply the scale factor
    # after applying the response, that is
    #
    #   resp(model) + sum(scale_i * resp(bgnd_i))   [2]
    #
    # This is because the scale values are in channel space,
    # and not the instrument response (i.e. the values used inside
    # the resp call).
    #
    # Note that if resp is not a linear response - for instance,
    # it is a pileup model - then we will not get the correct
    # answer if there's an array value for the scale factor
    # (i.e. equation [2] above). A warning message is created in this
    # case, but it is not treated as an error.
    #
    # For multiple background datasets we can have different models -
    # that is, one for each dataset - but it is expected that the
    # same model is used for all backgrounds (i.e. this is what
    # we 'optimise' for).

    # Identify the scalar and vector scale values for each
    # background dataset, and combine using the model as a key.
    #
    scales_scalar = defaultdict(list)
    scales_vector = defaultdict(list)
    for bkg_id in data.background_ids:
        try:
            bmdl = bkg_srcs[bkg_id]
        except KeyError:
            raise ModelErr('nobkg', bkg_id, id)

        scale = data.get_background_scale(bkg_id, units='rate', group=False)

        if np.isscalar(scale):
            store = scales_scalar
        else:
            store = scales_vector

        store[bmdl].append(scale)

    # Combine the scalar terms, grouping by the model.
    #
    for mdl, scales in scales_scalar.items():
        scale = sum(scales)
        model += scale * mdl

    # Apply the instrument response.
    #
    model = resp(model)

    if len(scales_vector) == 0:
        return model

    # Warn if a pileup model is being used. The error message here
    # is terrible.
    #
    # Should this be a Python Warning rather than a logged message?
    #
    if isinstance(resp, PileupResponse1D):
        wmsg = "model results for dataset {} ".format(id) + \
                "likely wrong: use of pileup model and array scaling " + \
                "for the background"

        # warnings.warn(wmsg)
        warning(wmsg)

    # Combine the vector terms, grouping by the model. A trick here
    # is that, to make the string version of the model readable,
    # we add a model to contain the scale values, using the
    # ArithmeticConstantModel.
    #
    # Note that the model is given a name, to make it "easy" to
    # read in the model expression, but this name is not registered
    # anywhere. An alternative would be to use the default naming
    # convention of the model, which will use 'float64[n]' as a label.
    #
    nvectors = len(scales_vector)
    for i, (mdl, scales) in enumerate(scales_vector.items(), 1):

        # special case the single-vector case
        if nvectors == 1:
            name = 'scale{}'.format(id)
        else:
            name = 'scale{}_{}'.format(id, i)

        # We sum up the scale arrays for this model.
        #
        scale = sum(scales)
        tbl = ArithmeticConstantModel(scale, name=name)
        model += tbl * resp(mdl)

    return model
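The comment block in the function above argues that a scalar background scale can be folded through the response, as in equation [1], while a per-channel (array) scale has to be applied after the response, as in equation [2], because the scale values live in channel space. A numpy-only sketch, using a hypothetical 2x3 matrix to stand in for a linear response, illustrates why the two forms agree only for scalar scales:

import numpy as np

# Toy linear "response": maps 3 model bins onto 2 channels.
resp = np.array([[1.0, 0.5, 0.0],
                 [0.0, 0.5, 1.0]])
model = np.array([1.0, 2.0, 3.0])
bkg = np.array([0.5, 0.5, 0.5])

# Scalar scale: it commutes with the (linear) response, so folding it
# into the pre-response sum (equation [1]) is safe.
s = 2.0
assert np.allclose(resp @ (model + s * bkg),
                   resp @ model + s * (resp @ bkg))

# Per-channel scale: it is defined in channel space (length 2 here), so
# it cannot be combined with the background in model space (length 3)
# and must be applied after the response (equation [2]).
s_chan = np.array([1.5, 0.8])
print(resp @ model + s_chan * (resp @ bkg))

This also shows why the code above only warns, rather than errors, for a pileup (non-linear) response combined with array scaling: equation [2] is still computable, it just may not give the answer the user expects.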
Example 4
def test_evaluate_cache_arithmeticconstant():
    """Check we run with cacheing: ArihmeticConstant"""

    mdl = ArithmeticConstantModel(2.3)
    assert not hasattr(mdl, '_use_caching')
Example 5
def test_constant_show(value, name, expected):
    """Does the ArithmeticConstantModel convert names as expected?"""

    m = ArithmeticConstantModel(value, name=name)
    assert m.name == expected
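The comment in Example 3 notes that, when no explicit name is given, the default label is derived from the wrapped value (something like 'float64[n]'). A hedged sketch for inspecting both the default and an explicit name (the exact strings may vary between sherpa versions):

import numpy as np
from sherpa.models.model import ArithmeticConstantModel

auto = ArithmeticConstantModel(np.asarray([1.0, 2.0, 3.0]))
named = ArithmeticConstantModel(np.asarray([1.0, 2.0, 3.0]), name='scale1')

print(auto.name)    # default label derived from the value, e.g. 'float64[3]'
print(named.name)   # the explicit name, possibly normalised by the model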
Example 6
    assert str(
        exc.value
    ) == 'ArithmeticFunctionModel instance cannot be created from another model'

    with pytest.raises(ModelErr) as exc:
        ArithmeticFunctionModel(23)

    assert str(
        exc.value
    ) == 'attempted to create ArithmeticFunctionModel from non-callable object of type int'


@pytest.mark.parametrize(
    'model,mtype',
    [(ArithmeticConstantModel(23, name='the-23'), ArithmeticConstantModel),
     (ArithmeticFunctionModel(numpy.sin), ArithmeticFunctionModel)])
def test_unop_arithmeticxxx(model, mtype):
    """Can we apply a function to an Arithmetic*Model object?

    Unlike the BinOpModel test we can't rely on Python numeric
    types, since there are no slots for __neg__ or __abs__ on
    the ArithmeticXXXModel classes, so we cannot just write
    mneg = -<model>.

    """

    mneg = UnaryOpModel(model, numpy.negative, '-')
    assert isinstance(mneg, UnaryOpModel)
    assert mneg.op == numpy.negative
    assert isinstance(mneg.arg, mtype)
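The docstring explains that unary operations have to be built explicitly via UnaryOpModel because the Arithmetic*Model classes do not provide __neg__ or __abs__. Binary composition, by contrast, goes through the usual model arithmetic (Example 3 relies on this when it writes tbl * resp(mdl)). A short sketch of both, assuming the import path sherpa.models.model:

import numpy
from sherpa.models.model import (ArithmeticConstantModel,
                                 ArithmeticFunctionModel, UnaryOpModel)

base = ArithmeticConstantModel(23, name='the-23')
func = ArithmeticFunctionModel(numpy.sin)

# Unary operation: constructed explicitly, as in the test above.
mneg = UnaryOpModel(base, numpy.negative, '-')

# Binary operation: the normal operator syntax builds a composite model.
combined = base + func

print(type(mneg).__name__, type(combined).__name__)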