Example #1
    def calc_stat(self,
                  data,
                  model,
                  staterror=None,
                  syserror=None,
                  weight=None):
        if not self._statfuncset:
            raise StatErr('nostat', self.name, 'calc_stat()')
        return self.statfunc(self, data, model, staterror, syserror, weight)
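This variant forwards to a user-supplied statistic function, passing the statistic object itself as the first argument. Below is a minimal sketch of a function that could be plugged in, assuming the common (statistic value, per-bin values) return convention; the name my_chi2 and that convention are illustrative, not taken from the source:

    import numpy

    def my_chi2(stat, data, model, staterror, syserror, weight):
        # Plain chi-squared; syserror and weight are ignored for brevity.
        fvec = (numpy.asarray(data) - model) / staterror
        return (fvec ** 2).sum(), fvec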
Example #2
    def calc_stat(data, model, staterror, *args, **kwargs):
        syserror, weight, extra = get_syserror_weight_extra(kwargs)
        if extra is None or extra['bkg'] is None:
            raise StatErr('usecstat')

        return _statfcts.calc_wstat_stat(data, model, extra['data_size'],
                                         extra['exposure_time'], extra['bkg'],
                                         extra['backscale_ratio'],
                                         truncation_value)
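The helper get_syserror_weight_extra is not shown in this excerpt. A hypothetical reconstruction, inferred only from how its results are used above (the 'extra' key name is an assumption):

    def get_syserror_weight_extra(kwargs):
        # Assumed behaviour: pull the optional pieces out of the keyword
        # arguments; `extra` is taken to be a dict carrying 'bkg',
        # 'data_size', 'exposure_time' and 'backscale_ratio'.
        return (kwargs.get('syserror'),
                kwargs.get('weight'),
                kwargs.get('extra'))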
Example #3
    def calc_stat(self, data, model):
        if not self._statfuncset:
            raise StatErr('nostat', self.name, 'calc_stat()')

        fitdata, modeldata = self._get_fit_model_data(data, model)

        return self.statfunc(fitdata[0],
                             modeldata,
                             staterror=fitdata[1],
                             syserror=fitdata[2],
                             weight=None)  # TODO weights
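The helper _get_fit_model_data is not shown here. Judging from the indexing above, fitdata is presumably the (y, staterror, syserror) triple that Sherpa's Data.to_fit returns, with modeldata the evaluated model; a hypothetical sketch:

    def _get_fit_model_data(self, data, model):
        # Assumed shape, based on how the results are indexed above:
        # to_fit returns (y, staterror, syserror).
        fitdata = data.to_fit(staterrfunc=self.calc_staterror)
        modeldata = data.eval_model_to_fit(model)
        return fitdata, modeldata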
Example #4
    def calc_stat(data,
                  model,
                  staterror=None,
                  syserror=None,
                  weight=None,
                  bkg=None):
        if bkg is None or bkg['bkg'] is None:
            raise StatErr('usecstat')

        return _statfcts.calc_wstat_stat(data, model, bkg['data_size'],
                                         bkg['exposure_time'], bkg['bkg'],
                                         bkg['backscale_ratio'],
                                         truncation_value)
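For illustration, a hypothetical payload for the bkg argument, inferred from the keys accessed above; the variable names are placeholders, not real data:

    bkg = {
        'bkg': bkg_counts,              # per-bin background counts
        'data_size': nelems,            # number of bins per dataset
        'exposure_time': exposures,     # source/background exposure times
        'backscale_ratio': backscales,  # BACKSCAL(bkg) / BACKSCAL(src)
    }
    stat = calc_stat(counts, predicted, bkg=bkg)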
Example #5
    def calc_stat(self,
                  data,
                  model,
                  staterror=None,
                  syserror=None,
                  weight=None,
                  bkg=None):
        if not self._statfuncset:
            raise StatErr('nostat', self.name, 'calc_stat()')

        if bkg is None or bkg['bkg'] is None:
            return self.statfunc(data, model, staterror, syserror, weight)
        else:
            return self.statfunc(data, model, staterror, syserror, weight,
                                 bkg['bkg'])
Example #6
    def _check_sizes_match(data, model):
        """Raise an error if number of datasets and models do not match.

        Parameters
        ----------
        data : a DataSimulFit instance
            The data sets to use.
        model : a SimulFitModel instance
            The model expressions for each data set. It must match
            the data parameter (the models are in the same order
            as the data objects).

        Raises
        ------
        StatErr

        """

        ndata = len(data.datasets)
        nmdl = len(model.parts)
        if ndata != nmdl:
            raise StatErr('mismatch', 'number of data sets', ndata,
                          'model expressions', nmdl)
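A hedged usage sketch, assuming this helper sits alongside Sherpa's DataSimulFit and SimulFitModel classes: a length mismatch raises StatErr.

    from sherpa.data import Data1D, DataSimulFit
    from sherpa.models.basic import Const1D
    from sherpa.models.model import SimulFitModel

    d1 = Data1D('d1', [1, 2, 3], [4, 5, 6])
    d2 = Data1D('d2', [1, 2, 3], [7, 8, 9])
    data = DataSimulFit('both', (d1, d2))
    model = SimulFitModel('one', (Const1D('c'),))  # only one model part

    # Two data sets but one model expression, so this raises
    # StatErr('mismatch', ...).
    _check_sizes_match(data, model)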
Example #7
    def calc_staterror(self, data):
        if not self._staterrfuncset:
            raise StatErr('nostat', self.name, 'calc_staterror()')
        return self.errfunc(data)
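The delegated errfunc computes per-bin statistical errors from the data alone. A minimal illustrative stand-in in the style of a data-variance chi-squared statistic (square root of the counts); this is not necessarily the function used here:

    import numpy

    def errfunc(data):
        # Gaussian approximation to Poisson errors: sigma_i = sqrt(n_i).
        return numpy.sqrt(numpy.asarray(data))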
Example #8
    def calc_staterror(data):
        raise StatErr('chi2noerr')
Example #9
    def calc_stat(self, data, model):

        data, model = self._validate_inputs(data, model)

        # Need access to backscal values and background data filtered
        # and grouped in the same manner as the data. There is no
        # easy access to this via the Data API (in part because the
        # Data class has no knowledge of grouping or backscale values).
        #
        # An alternative approach would be to just calculate the
        # statistic for each dataset individually and then sum the
        # results, but the original code concatenated the arrays, so
        # that approach is kept here.
        #
        data_src = []
        data_model = data.eval_model_to_fit(model)
        data_bkg = []
        nelems = []
        exp_src = []
        exp_bkg = []
        backscales = []

        # Why are we looping over model.parts when mexpr isn't used?
        # Perhaps it is just a way to restrict the loop to those
        # datasets for which we have a model.
        #
        for dset, mexpr in zip(data.datasets, model.parts):

            y = dset.to_fit(staterrfunc=None)[0]
            data_src.append(y)
            nelems.append(y.size)

            try:
                bids = dset.background_ids
            except AttributeError:
                raise StatErr('usecstat') from None

            nbkg = len(bids)
            if nbkg == 0:
                raise StatErr('usecstat')

            elif nbkg > 1:
                # TODO: improve warning
                warnings.warn("Only using first background component for data set {}".format(dset.name))

            bid = bids[0]

            bset = dset.get_background(bid)

            # TODO: the following should be reviewed to see what
            # happens if optional information is missing (e.g. if
            # BACKSCAL is not set we should default to all 1's,
            # but does this code handle it?)
            #

            data_bkg.append(dset.apply_filter(bset.get_dep(False),
                                              groupfunc=numpy.sum))

            # The assumption is that the source and background datasets
            # have the same number of channels (before any grouping or
            # filtering is applied).
            #
            # Since the backscal values can be a scalar or array, it is
            # easiest just to convert everything to an array.
            #
            dummy = numpy.ones(dset.get_dep(False).size)

            # Combine the BACKSCAL values (use the default _middle
            # scheme as this is used elsewhere when combining
            # BACKSCAL values; perhaps there should be an API call
            # for this?).
            #
            src_backscal = dset.apply_filter(dset.backscal * dummy,
                                             groupfunc=dset._middle)
            bkg_backscal = dset.apply_filter(bset.backscal * dummy,
                                             groupfunc=dset._middle)

            backscales.append(bkg_backscal / src_backscal)

            # The AREASCAL values are applied to the exposure
            # times, since this is how XSPEC handles this (at
            # least that's my understanding of a conversation with
            # Keith Arnaud, for XSPEC ~ version 12.9). This requires
            # turning an exposure into an array if there's no
            # AREASCAL value.
            #
            # For now we follow the same approach as the BACKSCAL
            # values if the data is grouped.
            #
            if dset.areascal is None:
                ascal = dummy[:dset.get_dep(True).size]
            else:
                ascal = dset.apply_filter(dset.areascal * dummy,
                                          groupfunc=dset._middle)

            exp_src.append(dset.exposure * ascal)

            if bset.areascal is None:
                ascal = dummy[:dset.get_dep(True).size]
            else:
                ascal = dset.apply_filter(bset.areascal * dummy,
                                          groupfunc=dset._middle)

            exp_bkg.append(bset.exposure * ascal)

        data_src = numpy.concatenate(data_src)
        exp_src = numpy.concatenate(exp_src)
        exp_bkg = numpy.concatenate(exp_bkg)
        data_bkg = numpy.concatenate(data_bkg)
        backscales = numpy.concatenate(backscales)

        return self._calc(data_src, data_model, nelems,
                          exp_src, exp_bkg,
                          data_bkg, backscales,
                          truncation_value)
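The per-dataset arrays are concatenated before being handed to self._calc, with nelems recording each original length. A small sketch, not from the source, of how such a flat array can be split back per dataset with numpy:

    import numpy

    flat = numpy.arange(10.0)
    nelems = [4, 6]  # sizes of the two concatenated datasets

    # numpy.split wants the cut points: the cumulative sizes without
    # the final total.
    chunks = numpy.split(flat, numpy.cumsum(nelems)[:-1])
    assert chunks[0].size == 4 and chunks[1].size == 6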
Example #10
    def calc_stat(self, data, model, staterror, *args, **kwargs):
        if not self._statfuncset:
            raise StatErr('nostat', self.name, 'calc_stat()')
        # The excerpt is truncated here; presumably it forwards to the
        # user-supplied statistic function, as in Example #1:
        return self.statfunc(data, model, staterror, *args, **kwargs)
Example #11
    def calc_stat(self, data, model):

        data, model = self._validate_inputs(data, model)

        # Need access to backscal values and background data filtered
        # and grouped in the same manner as the data. There is no
        # easy access to this via the Data API (in part because the
        # Data class has no knowledge of grouping or backscale values).
        #
        # An alternative approach would be to just calculate the
        # statistic for each dataset individually and then sum the
        # results, but the original code concatenated the arrays, so
        # that approach is kept here.
        #
        data_src = []
        data_model = []
        data_bkg = []
        nelems = []
        exposures = []
        backscales = []

        for dset, mexpr in zip(data.datasets, model.parts):

            y = dset.to_fit(staterrfunc=None)[0]
            data_src.append(y)
            data_model.append(dset.eval_model_to_fit(mexpr))
            nelems.append(y.size)

            try:
                bids = dset.background_ids
            except AttributeError:
                raise StatErr('usecstat') from None

            nbkg = len(bids)
            if nbkg == 0:
                raise StatErr('usecstat')

            elif nbkg > 1:
                # TODO: improve warning
                warnings.warn("Only using first background component "
                              "for data set {}".format(dset.name))

            bid = bids[0]

            bset = dset.get_background(bid)

            data_bkg.append(
                dset.apply_filter(bset.get_dep(False), groupfunc=numpy.sum))

            exposures.extend([dset.exposure, bset.exposure])

            # The assumption is that the source and background datasets
            # have the same number of channels (before any grouping or
            # filtering is applied).
            #
            # Since the backscal values can be a scalar or array, it is
            # easiest just to convert everything to an array.
            #
            dummy = numpy.ones(dset.get_dep(False).size)

            # Combine the BACKSCAL values (use the default _middle
            # scheme as this is used elsewhere when combining
            # BACKSCAL values; perhaps there should be an API call
            # for this?).
            #
            src_backscal = dset.apply_filter(dset.backscal * dummy,
                                             groupfunc=dset._middle)
            bkg_backscal = dset.apply_filter(bset.backscal * dummy,
                                             groupfunc=dset._middle)

            backscales.append(bkg_backscal / src_backscal)

        data_src = numpy.concatenate(data_src)
        data_model = numpy.concatenate(data_model)
        data_bkg = numpy.concatenate(data_bkg)
        backscales = numpy.concatenate(backscales)

        return self._calc(data_src, data_model, nelems, exposures, data_bkg,
                          backscales, truncation_value)
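Examples #9 and #11 assemble the inputs for the W statistic. For reference, the form given in the XSPEC statistics appendix, which these routines are understood to follow (truncation_value guards the logarithmic terms when a bin has zero counts):

    W = 2 \sum_i \Big[ t_s m_i + (t_s + t_b) f_i
          - S_i \ln(t_s m_i + t_s f_i) - B_i \ln(t_b f_i)
          - S_i (1 - \ln S_i) - B_i (1 - \ln B_i) \Big]

    f_i = \frac{S_i + B_i - (t_s + t_b) m_i + d_i}{2 (t_s + t_b)},
    \qquad
    d_i = \sqrt{\big[ (t_s + t_b) m_i - S_i - B_i \big]^2 + 4 (t_s + t_b) B_i m_i}

where S_i and B_i are the source and background counts in bin i, t_s and t_b are the source and background exposures (with the AREASCAL and BACKSCAL corrections folded in, as computed above), and m_i is the model prediction.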
Example #12
    def calc_stat(self, data, model):
        raise StatErr('nostat', self.name, 'calc_stat()')