Example #1
def test_dispersion_spectrum_constructor(loaded_response):

    rsp = loaded_response

    pl = Powerlaw()

    ps = PointSource("fake", 0, 0, spectral_shape=pl)

    model = Model(ps)

    obs_spectrum = BinnedSpectrumWithDispersion(counts=np.ones(128),
                                                exposure=1,
                                                response=rsp,
                                                is_poisson=True)
    bkg_spectrum = BinnedSpectrumWithDispersion(counts=np.ones(128),
                                                exposure=1,
                                                response=rsp,
                                                is_poisson=True)

    specLike = DispersionSpectrumLike("fake",
                                      observation=obs_spectrum,
                                      background=bkg_spectrum)
    specLike.set_model(model)
    specLike.get_model()

    specLike.write_pha("test_from_dispersion", overwrite=True)

    assert os.path.exists("test_from_dispersion.pha")
    assert os.path.exists("test_from_dispersion_bak.pha")

    os.remove("test_from_dispersion.pha")
    os.remove("test_from_dispersion_bak.pha")
Example #2
def test_dispersion_spectrum_addition(loaded_response):

    rsp = loaded_response
    ebounds = ChannelSet.from_instrument_response(rsp)

    obs_spectrum_1 = BinnedSpectrumWithDispersion(
        counts=np.ones(len(ebounds)),
        count_errors=np.ones(len(ebounds)),
        exposure=1,
        response=rsp,
        is_poisson=False,
    )
    obs_spectrum_2 = BinnedSpectrumWithDispersion(
        counts=np.ones(len(ebounds)),
        count_errors=np.ones(len(ebounds)),
        exposure=2,
        response=rsp,
        is_poisson=False,
    )
    obs_spectrum_incompatible = None

    spectrum_addition(
        obs_spectrum_1,
        obs_spectrum_2,
        obs_spectrum_incompatible,
        lambda x, y: x + y,
        addition_proof_simple,
    )
    spectrum_addition(
        obs_spectrum_1,
        obs_spectrum_2,
        obs_spectrum_incompatible,
        lambda x, y: x.add_inverse_variance_weighted(y),
        addition_proof_weighted,
    )
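The spectrum_addition helper above exercises the plain `+` operator and the inverse-variance-weighted sum. The basic contract of the simple case can be sketched as follows (a hedged sketch, assuming that adding two compatible dispersion spectra sums their counts and exposures, which is what addition_proof_simple checks):

# Hedged sketch of the simple-addition contract (assumption: counts and
# exposures add for compatible spectra sharing the same response).
summed = obs_spectrum_1 + obs_spectrum_2

assert summed.exposure == obs_spectrum_1.exposure + obs_spectrum_2.exposure
assert np.allclose(summed.counts, obs_spectrum_1.counts + obs_spectrum_2.counts)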
Example #3
def test_dispersion_spectrum_clone(loaded_response):

    rsp = loaded_response

    obs_spectrum = BinnedSpectrumWithDispersion(counts=np.ones(128), exposure=1, response=rsp, is_poisson=True)

    obs_spectrum.clone(new_counts=np.zeros_like(obs_spectrum.counts), new_count_errors=None)

    obs_spectrum.clone()
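clone is expected to return a new spectrum that keeps the original response and exposure while swapping in the supplied counts. A hedged sketch of that behaviour:

# Hedged sketch (assumption: clone() returns a new BinnedSpectrumWithDispersion
# with the same exposure and response but the replacement counts).
cloned = obs_spectrum.clone(new_counts=np.zeros_like(obs_spectrum.counts),
                            new_count_errors=None)

assert cloned.exposure == obs_spectrum.exposure
assert np.all(cloned.counts == 0)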
Example #4
    def set_active_time_interval(self, *intervals, **kwargs):
        """
        Set the time interval to be used during the analysis.
        For now, only one interval can be selected. This may be
        updated in the future to allow for self consistent time
        resolved analysis.
        Specified as 'tmin-tmax'. Intervals are in seconds. Example:

        set_active_time_interval("0.0-10.0")

        which will set the energy range 0-10. seconds.
        :param options:
        :param intervals:
        :return:
        """

        self._time_series.set_active_time_intervals(*intervals)

        # extract a spectrum

        if self._response is None:

            self._observed_spectrum = BinnedSpectrum.from_time_series(
                self._time_series, use_poly=False)

        else:

            if self._rsp_is_weighted:
                self._response = self._weighted_rsp.weight_by_counts(
                    *self._time_series.time_intervals.to_string().split(','))

            self._observed_spectrum = BinnedSpectrumWithDispersion.from_time_series(
                self._time_series, self._response, use_poly=False)

        self._active_interval = intervals

        if self._time_series.poly_fit_exists:

            if self._response is None:

                self._background_spectrum = BinnedSpectrum.from_time_series(
                    self._time_series, use_poly=True)

            else:

                self._background_spectrum = BinnedSpectrumWithDispersion.from_time_series(
                    self._time_series, self._response, use_poly=True)

        self._tstart = self._time_series.time_intervals.absolute_start_time
        self._tstop = self._time_series.time_intervals.absolute_stop_time
Example #5
    def _build_fake_observation(fake_data, channel_set, source_errors,
                                source_sys_errors, is_poisson, **kwargs):
        """
        This is the fake observation builder for SpectrumLike which builds data
        for a binned spectrum without dispersion. It must be overridden in child classes.

        :param fake_data: series of values... they are ignored later
        :param channel_set: a channel set
        :param source_errors:
        :param source_sys_errors:
        :param is_poisson:
        :return:
        """

        assert 'response' in kwargs, 'A response was not provided. Cannot build synthetic observation'

        response = kwargs.pop('response')

        observation = BinnedSpectrumWithDispersion(
            fake_data,
            exposure=1.,
            response=response,
            count_errors=source_errors,
            sys_errors=source_sys_errors,
            quality=None,
            scale_factor=1.,
            is_poisson=is_poisson,
            mission='fake_mission',
            instrument='fake_instrument',
            tstart=0.,
            tstop=1.)

        return observation
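This builder is not normally called by hand; it is invoked when synthetic data are generated. A hedged sketch of that path, reusing rsp from the tests above and assuming DispersionSpectrumLike.from_function takes a plugin name, a spectral shape, and a response keyword (the exact keyword names may differ between 3ML versions):

# Hedged sketch: generate a synthetic observation that passes through the
# builder above (the from_function keyword names are the assumption here).
pl = Powerlaw()

synthetic = DispersionSpectrumLike.from_function("synthetic",
                                                 source_function=pl,
                                                 response=rsp)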
Example #6
def test_dispersion_spectrum_addition_poisson(loaded_response):

    rsp = loaded_response
    ebounds = ChannelSet.from_instrument_response(rsp)

    obs_spectrum_1 = BinnedSpectrumWithDispersion(counts=np.ones(len(ebounds)),
                                                  exposure=1,
                                                  response=rsp,
                                                  is_poisson=True)
    obs_spectrum_2 = BinnedSpectrumWithDispersion(counts=np.ones(len(ebounds)),
                                                  exposure=2,
                                                  response=rsp,
                                                  is_poisson=True)
    obs_spectrum_incompatible = None

    spectrum_addition(obs_spectrum_1, obs_spectrum_2,
                      obs_spectrum_incompatible, lambda x, y: x + y,
                      addition_proof_simple)
Example #7
def test_dispersion_spectrum_clone(loaded_response):

    rsp = loaded_response

    obs_spectrum = BinnedSpectrumWithDispersion(counts=np.ones(128),
                                                exposure=1,
                                                response=rsp,
                                                is_poisson=True)

    obs_spectrum.clone(new_counts=np.zeros_like(obs_spectrum.counts),
                       new_count_errors=None)

    obs_spectrum.clone()
Example #8
    def set_background_interval(self, *intervals, **options):
        """
        Set the time interval to fit the background.
        Multiple intervals can be input as separate arguments
        Specified as 'tmin-tmax'. Intervals are in seconds. Example:

        setBackgroundInterval("-10.0-0.0","10.-15.")


        :param *intervals:
        :param **options:

        :return: none

        """
        if 'unbinned' in options:

            unbinned = options.pop('unbinned')
        else:

            unbinned = self._default_unbinned

        self._time_series.set_polynomial_fit_interval(*intervals,
                                                      unbinned=unbinned)

        # In theory this will automatically get the poly counts if a
        # time interval already exists

        if self._active_interval is not None:

            if self._response is None:

                self._background_spectrum = BinnedSpectrum.from_time_series(
                    self._time_series, use_poly=True)

            else:

                # we do not need to worry about the interval of the response if it is a set. only the ebounds are extracted here

                self._background_spectrum = BinnedSpectrumWithDispersion.from_time_series(
                    self._time_series, self._response, use_poly=True)
Example #9
    def _build_fake_observation(
            fake_data, channel_set, source_errors, source_sys_errors, is_poisson, exposure, scale_factor, **kwargs
    ):
        """
        This is the fake observation builder for SpectrumLike which builds data
        for a binned spectrum without dispersion. It must be overridden in child classes.

        :param fake_data: series of values... they are ignored later
        :param channel_set: a channel set
        :param source_errors:
        :param source_sys_errors:
        :param is_poisson:
        :return:
        """

        if "response" not in kwargs:

            log.error("A response was not provided. Cannot build synthetic observation")

            raise RuntimeError()

        response = kwargs.pop("response")

        observation = BinnedSpectrumWithDispersion(
            fake_data,
            exposure=exposure,
            response=response,
            count_errors=source_errors,
            sys_errors=source_sys_errors,
            quality=None,
            scale_factor=scale_factor,
            is_poisson=is_poisson,
            mission="fake_mission",
            instrument="fake_instrument",
            tstart=0.0,
            tstop=exposure,
        )

        return observation
Example #10
    def _create_timeseries(self):
        """
        create all the time series for each detector
        :return: None
        """

        self._time_series = collections.OrderedDict()

        for det_num in range(14):

            # detectors are arranged [time,det,channel]

            # for now just keep the normal exposure

            # we will create binned spectra for each time slice

            drm_gen = DRMGenTrig(
                self._qauts,
                self._sc_pos,
                det_num,  # det number
                tstart=self._tstart,
                tstop=self._tstop,
                mat_type=2,
                time=0,
                occult=True
            )

            # we will use a single response for each detector

            tmp_drm = BALROG_DRM(drm_gen, 0, 0)

            # extract the counts

            counts = self._rates[:, det_num, :] * self._time_intervals.widths.reshape(
                (len(self._time_intervals), 1)
            )

            # now create a binned spectrum for each interval

            binned_spectrum_list = []

            for c, start, stop in zip(counts, self._tstart, self._tstop):
                binned_spectrum_list.append(
                    BinnedSpectrumWithDispersion(
                        counts=c,
                        exposure=stop - start,
                        response=tmp_drm,
                        tstart=start,
                        tstop=stop,
                    )
                )

            # make a binned spectrum set

            bss = BinnedSpectrumSet(
                binned_spectrum_list,
                reference_time=0.0,
                time_intervals=self._time_intervals,
            )

            # convert that set to a series

            bss2 = BinnedSpectrumSeries(bss, first_channel=0)

            # now we need to get the name of the detector

            name = lu[det_num]

            if self._restore_poly_fit is not None:
                bkg_fit_file = self._restore_poly_fit.get(name, None)
            else:
                bkg_fit_file = None

            # create a time series builder which can produce plugins

            tsb = TimeSeriesBuilder(
                name,
                bss2,
                response=tmp_drm,
                verbose=self._verbose,
                poly_order=self._poly_order,
                restore_poly_fit=bkg_fit_file,
            )

            # attach that to the full list

            self._time_series[name] = tsb
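Each TimeSeriesBuilder stored in _time_series can then be turned into a plugin with the selection methods shown in the other examples. A hedged sketch (balrog_like is a hypothetical instance name, the detector key and interval strings are illustrative, and to_spectrumlike is assumed to be available as in recent 3ML versions):

# Hedged sketch: select intervals on one detector's builder and make a plugin.
# `balrog_like` stands in for the object whose _create_timeseries ran above.
tsb = balrog_like._time_series["n0"]  # "n0" is an illustrative detector key

tsb.set_background_interval("-20--5", "50-100")
tsb.set_active_time_interval("0-10")

plugin = tsb.to_spectrumlike()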
Example #11
    def __init__(self,
                 pha_file_or_instance,
                 file_type='observed',
                 rsp_file=None,
                 arf_file=None):
        """
        A spectrum with dispersion build from an OGIP-compliant PHA FITS file. Both Type I & II files can be read. Type II
        spectra are selected either by specifying the spectrum_number or via the {spectrum_number} file name convention used
        in XSPEC. If the file_type is background, a 3ML InstrumentResponse or subclass must be passed so that the energy
        bounds can be obtained.


        :param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
        :param spectrum_number: (optional) the spectrum number of the TypeII file to be used
        :param file_type: observed or background
        :param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
        :param arf_file: (optional) and ARF filename
        """

        # extract the spectrum number if needed

        assert isinstance(pha_file_or_instance, (str, PHAII)), \
            'Must provide a FITS file name or PHAII instance'

        with fits.open(pha_file_or_instance) as f:

            try:

                HDUidx = f.index_of("SPECTRUM")

            except:

                raise RuntimeError("The input file %s is not in PHA format" %
                                   (pha2_file))

            spectrum = f[HDUidx]
            data = spectrum.data

            if "COUNTS" in data.columns.names:

                has_rates = False
                data_column_name = "COUNTS"

            elif "RATE" in data.columns.names:

                has_rates = True
                data_column_name = "RATE"

            else:

                raise RuntimeError(
                    "This file does not contain a RATE nor a COUNTS column. "
                    "This is not a valid PHA file")

                # Determine if this is a PHA I or PHA II
            if len(data.field(data_column_name).shape) == 2:

                num_spectra = data.field(data_column_name).shape[0]

            else:

                raise RuntimeError(
                    "This appears to be a PHA I and not PHA II file")

        pha_information = _read_pha_or_pha2_file(pha_file_or_instance,
                                                 None,
                                                 file_type,
                                                 rsp_file,
                                                 arf_file,
                                                 treat_as_time_series=True)

        # default the grouping to all open bins
        # this will only be altered if the spectrum is rebinned
        self._grouping = np.ones_like(pha_information['counts'])

        # this saves the extra properties to the class

        self._gathered_keywords = pha_information['gathered_keywords']

        self._file_type = file_type

        # need to see if we have count errors, tstart, tstop
        # if not, we create a list of None

        if pha_information['count_errors'] is None:

            count_errors = [None] * num_spectra

        else:

            count_errors = pha_information['count_errors']

        if pha_information['tstart'] is None:

            tstart = [None] * num_spectra

        else:

            tstart = pha_information['tstart']

        if pha_information['tstop'] is None:

            tstop = [None] * num_spectra

        else:

            tstop = pha_information['tstop']

        # now build the list of binned spectra

        list_of_binned_spectra = []

        with progress_bar(num_spectra, title='Loading PHAII spectra') as p:
            for i in range(num_spectra):

                list_of_binned_spectra.append(
                    BinnedSpectrumWithDispersion(
                        counts=pha_information['counts'][i],
                        exposure=pha_information['exposure'][i, 0],
                        response=pha_information['rsp'],
                        count_errors=count_errors[i],
                        sys_errors=pha_information['sys_errors'][i],
                        is_poisson=pha_information['is_poisson'],
                        quality=pha_information['quality'].get_slice(i),
                        mission=pha_information['gathered_keywords']
                        ['mission'],
                        instrument=pha_information['gathered_keywords']
                        ['instrument'],
                        tstart=tstart[i],
                        tstop=tstop[i]))

                p.increase()

        # now get the time intervals

        start_times = data.field('TIME')
        stop_times = data.field('ENDTIME')

        time_intervals = TimeIntervalSet.from_starts_and_stops(
            start_times, stop_times)

        reference_time = 0

        # see if there is a reference time in the file

        if 'TRIGTIME' in spectrum.header:
            reference_time = spectrum.header['TRIGTIME']

        for t_number in range(spectrum.header['TFIELDS']):

            if 'TZERO%d' % t_number in spectrum.header:
                reference_time = spectrum.header['TZERO%d' % t_number]

        super(PHASpectrumSet, self).__init__(list_of_binned_spectra,
                                             reference_time=reference_time,
                                             time_intervals=time_intervals)
Example #12
    def __init__(self,
                 pha_file_or_instance: Union[str, Path, PHAII],
                 file_type: str = "observed",
                 rsp_file: Optional[str] = None,
                 arf_file: Optional[str] = None):
        """
        A spectrum with dispersion build from an OGIP-compliant PHA FITS file. Both Type I & II files can be read. Type II
        spectra are selected either by specifying the spectrum_number or via the {spectrum_number} file name convention used
        in XSPEC. If the file_type is background, a 3ML InstrumentResponse or subclass must be passed so that the energy
        bounds can be obtained.


        :param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
        :param spectrum_number: (optional) the spectrum number of the TypeII file to be used
        :param file_type: observed or background
        :param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
        :param arf_file: (optional) and ARF filename
        """

        # extract the spectrum number if needed

        for t in _valid_input_types:

            if isinstance(pha_file_or_instance, t):
                break

        else:

            log.error(
                f"Must provide a FITS file name or PHAII instance. Got {type(pha_file_or_instance)}"
            )

            raise RuntimeError()

        with fits.open(pha_file_or_instance) as f:

            try:

                HDUidx = f.index_of("SPECTRUM")

            except:

                raise RuntimeError("The input file %s is not in PHA format" %
                                   (pha_file_or_instance))

            spectrum = f[HDUidx]
            data = spectrum.data

            if "COUNTS" in data.columns.names:

                has_rates = False
                data_column_name = "COUNTS"

            elif "RATE" in data.columns.names:

                has_rates = True
                data_column_name = "RATE"

            else:

                log.error(
                    "This file does not contain a RATE nor a COUNTS column. "
                    "This is not a valid PHA file")

                raise RuntimeError()

                # Determine if this is a PHA I or PHA II
            if len(data.field(data_column_name).shape) == 2:

                num_spectra = data.field(data_column_name).shape[0]

            else:

                log.error("This appears to be a PHA I and not PHA II file")

                raise RuntimeError()

        pha_information = _read_pha_or_pha2_file(
            pha_file_or_instance,
            None,
            file_type,
            rsp_file,
            arf_file,
            treat_as_time_series=True,
        )

        # default the grouping to all open bins
        # this will only be altered if the spectrum is rebinned
        self._grouping = np.ones_like(pha_information["counts"])

        # this saves the extra properties to the class

        self._gathered_keywords = pha_information["gathered_keywords"]

        self._file_type = file_type

        # need to see if we have count errors, tstart, tstop
        # if not, we create a list of None

        if pha_information["count_errors"] is None:

            count_errors = [None] * num_spectra

        else:

            count_errors = pha_information["count_errors"]

        if pha_information["tstart"] is None:

            tstart = [None] * num_spectra

        else:

            tstart = pha_information["tstart"]

        if pha_information["tstop"] is None:

            tstop = [None] * num_spectra

        else:

            tstop = pha_information["tstop"]

        # now build the list of binned spectra

        list_of_binned_spectra = []

        for i in trange(num_spectra, desc="Loading PHAII Spectra"):

            list_of_binned_spectra.append(
                BinnedSpectrumWithDispersion(
                    counts=pha_information["counts"][i],
                    exposure=pha_information["exposure"][i, 0],
                    response=pha_information["rsp"],
                    count_errors=count_errors[i],
                    sys_errors=pha_information["sys_errors"][i],
                    is_poisson=pha_information["is_poisson"],
                    quality=pha_information["quality"].get_slice(i),
                    mission=pha_information["gathered_keywords"]["mission"],
                    instrument=pha_information["gathered_keywords"]
                    ["instrument"],
                    tstart=tstart[i],
                    tstop=tstop[i],
                ))

        # now get the time intervals

        _allowed_time_keys = (("TIME", "ENDTIME"), ("TSTART", "TSTOP"))

        for keys in _allowed_time_keys:

            try:

                start_times = data.field(keys[0])
                stop_times = data.field(keys[1])
                break

            except KeyError:

                pass

        else:

            log.error(
                f"Could not find times in {pha_file_or_instance}. Tried: {_allowed_time_keys}"
            )

            raise RuntimeError()

        time_intervals = TimeIntervalSet.from_starts_and_stops(
            start_times, stop_times)

        reference_time = 0

        # see if there is a reference time in the file

        if "TRIGTIME" in spectrum.header:
            reference_time = spectrum.header["TRIGTIME"]

        for t_number in range(spectrum.header["TFIELDS"]):

            if "TZERO%d" % t_number in spectrum.header:
                reference_time = spectrum.header["TZERO%d" % t_number]

        super(PHASpectrumSet, self).__init__(
            list_of_binned_spectra,
            reference_time=reference_time,
            time_intervals=time_intervals,
        )
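A hedged usage sketch for this constructor (the file names are illustrative placeholders, not files shipped with 3ML):

# Hedged sketch: build a PHASpectrumSet from a Type II PHA file and its response
# (file names are placeholders).
spectrum_set = PHASpectrumSet("grb.pha2",
                              file_type="observed",
                              rsp_file="grb.rsp")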