Example 1
    def __getitem__(self, index):
        """
        Return the corresponding count value at the index or a new Lightcurve
        object upon slicing.

        This method adds functionality to retrieve the count value at
        a particular index. It can also be used for slicing and generating
        a new Lightcurve object. GTIs are recalculated based on the new light
        curve segment.

        If the slice object is of kind start:stop:step, GTIs are also sliced,
        and rewritten as zip(time - self.dt / 2, time + self.dt / 2).

        Parameters
        ----------
        index : int or slice instance
            Index value of the time array or a slice object.

        Examples
        --------
        >>> time = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        >>> count = [11, 22, 33, 44, 55, 66, 77, 88, 99]
        >>> lc = Lightcurve(time, count)
        >>> lc[2]
        33
        >>> lc[:2].counts
        array([11, 22])
        """
        if isinstance(index, int):
            return self.counts[index]
        elif isinstance(index, slice):
            start = assign_value_if_none(index.start, 0)
            stop = assign_value_if_none(index.stop, len(self.counts))
            step = assign_value_if_none(index.step, 1)

            new_counts = self.counts[start:stop:step]
            new_time = self.time[start:stop:step]

            new_gti = [[
                self.time[start] - 0.5 * self.dt,
                self.time[stop - 1] + 0.5 * self.dt
            ]]
            new_gti = np.asarray(new_gti)
            if step > 1:
                new_gti1 = np.array(
                    list(zip(new_time - self.dt / 2, new_time + self.dt / 2)))
                new_gti = cross_two_gtis(new_gti, new_gti1)

            new_gti = cross_two_gtis(self.gti, new_gti)

            return Lightcurve(new_time,
                              new_counts,
                              mjdref=self.mjdref,
                              gti=new_gti,
                              dt=self.dt)
        else:
            raise IndexError("The index must be either an integer or a slice "
                             "object !")
Example 2
    def __getitem__(self, index):
        """
        Return the corresponding count value at the index or a new Lightcurve
        object upon slicing.

        This method adds functionality to retrieve the count value at
        a particular index. It can also be used for slicing and generating
        a new Lightcurve object. GTIs are recalculated based on the new light
        curve segment.

        If the slice object is of kind start:stop:step, GTIs are also sliced,
        and rewritten as zip(time - self.dt / 2, time + self.dt / 2).

        Parameters
        ----------
        index : int or slice instance
            Index value of the time array or a slice object.

        Examples
        --------
        >>> time = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        >>> count = [11, 22, 33, 44, 55, 66, 77, 88, 99]
        >>> lc = Lightcurve(time, count)
        >>> lc[2]
        33
        >>> lc[:2].counts
        array([11, 22])
        """
        if isinstance(index, int):
            return self.counts[index]
        elif isinstance(index, slice):
            start = assign_value_if_none(index.start, 0)
            stop = assign_value_if_none(index.stop, len(self.counts))
            step = assign_value_if_none(index.step, 1)

            new_counts = self.counts[start:stop:step]
            new_time = self.time[start:stop:step]

            new_gti = [[self.time[start] - 0.5 * self.dt,
                        self.time[stop - 1] + 0.5 * self.dt]]
            new_gti = np.asarray(new_gti)
            if step > 1:
                new_gti1 = np.array(list(zip(new_time - self.dt / 2,
                                             new_time + self.dt / 2)))
                new_gti = cross_two_gtis(new_gti, new_gti1)

            new_gti = cross_two_gtis(self.gti, new_gti)

            return Lightcurve(new_time, new_counts, mjdref=self.mjdref,
                              gti=new_gti, dt=self.dt)
        else:
            raise IndexError("The index must be either an integer or a slice "
                             "object !")
Example 3
def sum_lc(lc_1, lc_2):
    # Add up two Lightcurve instances. Must be simultaneous and bins must line up.

    common_gti = sting_gti.cross_two_gtis(lc_1.gti, lc_2.gti)
    lc_1.gti = common_gti
    lc_2.gti = common_gti
    lc_1.apply_gtis()
    lc_2.apply_gtis()
    if np.sum(lc_1.time) != np.sum(lc_2.time):
        print('Lightcurves don\'t line up. Exiting')
        return None

    area_ratio = lc_1.calc_area() / lc_2.calc_area()
    summed_err = np.sqrt(
        np.square(lc_1.counts_err) + np.square(lc_2.counts_err * area_ratio))

    # Handle edge case where counts=0. TODO: more accurate error estimation.
    summed_err[summed_err == 0.0] = 1.0

    # New Lightcurve
    summed_lc = Lightcurve_ext(lc_1.time,
                               lc_1.counts + (lc_2.counts * area_ratio),
                               err=summed_err,
                               gti=common_gti,
                               mjdref=lc_1.mjdref,
                               dt=lc_1.dt,
                               input_counts=True,
                               skip_checks=True,
                               centroid=None,
                               radius=lc_1.radius)
    return summed_lc
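
The error array above is standard quadrature propagation for the sum counts_1 + area_ratio * counts_2. A small stand-alone check with plain numpy follows; all numbers are made up, and calc_area and Lightcurve_ext are helpers specific to this codebase that are not reproduced here.

import numpy as np

area_ratio = 0.8
err_1 = np.array([3.0, 4.0, 5.0])
err_2 = np.array([2.0, 2.0, 2.0])

# sigma of (counts_1 + area_ratio * counts_2), bin by bin
summed_err = np.sqrt(np.square(err_1) + np.square(err_2 * area_ratio))
print(summed_err)   # approximately [3.4, 4.31, 5.25]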
Example 4
def extract_events(file_A, file_B, buff=0):
    # Extracts events for FPMA and FPMB from .evt files (assumed clean).
    # The gtis are crossed to make sure they are the same.
    # Can also use stingray.io.load_events_and_gtis, but this takes in X, Y, and PRIOR.

    ev_files = [fits.open(file_A), fits.open(file_B)]

    ev_data_A = ev_files[0][1].data
    ev_data_B = ev_files[1][1].data

    ev_gti_A = [[x, y] for x, y in ev_files[0][2].data]
    ev_gti_B = [[x, y] for x, y in ev_files[1][2].data]
    common_gti = sting_gti.cross_two_gtis(ev_gti_A, ev_gti_B)
    buffered_gti = []
    if buff > 0:
        for x, y in common_gti:
            if np.abs(y - x) > 2 * buff:
                buffered_gti.append([x + buff, y - buff])
    else:
        buffered_gti = common_gti


    events = [EventList_ext(time=ev_data_A['TIME'], gti=buffered_gti,
                            pi=ev_data_A['PI'],
                            mjdref=(ev_files[0][0].header['MJDREFI'] +
                                    ev_files[0][0].header['MJDREFF']),
                            prior=ev_data_A['PRIOR'],
                            x=ev_data_A['X'], y=ev_data_A['Y']),
              EventList_ext(time=ev_data_B['TIME'], gti=buffered_gti,
                            pi=ev_data_B['PI'],
                            mjdref=(ev_files[1][0].header['MJDREFI'] +
                                    ev_files[1][0].header['MJDREFF']),
                            prior=ev_data_B['PRIOR'],
                            x=ev_data_B['X'], y=ev_data_B['Y'])]
    ev_files[0].close()
    ev_files[1].close()
    return events
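
A stand-alone sketch of the GTI "buffering" done above: trim buff seconds from both ends of every common interval and drop intervals that would shrink to nothing. The interval values are made up for illustration.

import numpy as np

def buffer_gtis(gtis, buff):
    # Keep only intervals longer than 2 * buff, trimmed by buff on each side
    return [[start + buff, stop - buff]
            for start, stop in gtis
            if np.abs(stop - start) > 2 * buff]

print(buffer_gtis([[0, 100], [150, 160], [200, 400]], buff=10))
# [[10, 90], [210, 390]]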
Example 5
    def _operation_with_other_lc(self, other, operation):
        """
        Helper method to codify an operation of one light curve with another (e.g. add, subtract, ...).
        Takes into account the GTIs correctly, and returns a new :class:`Lightcurve` object.

        Parameters
        ----------
        other : :class:`Lightcurve` object
            A second light curve object

        operation : function
            An operation between the :class:`Lightcurve` object calling this method, and ``other``,
            operating on the ``counts`` attribute in each :class:`Lightcurve` object

        Returns
        -------
        lc_new : Lightcurve object
            The new light curve calculated in ``operation``
        """
        if self.mjdref != other.mjdref:
            raise ValueError("MJDref is different in the two light curves")

        common_gti = cross_two_gtis(self.gti, other.gti)
        mask_self = create_gti_mask(self.time, common_gti, dt=self.dt)
        mask_other = create_gti_mask(other.time, common_gti, dt=other.dt)

        # ValueError is raised by Numpy while asserting np.equal over arrays
        # with different dimensions.
        try:
            diff = np.abs((self.time[mask_self] - other.time[mask_other]))
            assert np.all(diff < self.dt / 100)
        except (ValueError, AssertionError):
            raise ValueError("GTI-filtered time arrays of both light curves "
                             "must be of same dimension and equal.")

        new_time = self.time[mask_self]
        new_counts = operation(self.counts[mask_self],
                               other.counts[mask_other])

        if self.err_dist.lower() != other.err_dist.lower():
            simon("Lightcurves have different statistics!"
                  "We are setting the errors to zero to avoid complications.")
            new_counts_err = np.zeros_like(new_counts)
        elif self.err_dist.lower() in valid_statistics:
            new_counts_err = \
                np.sqrt(np.add(self.counts_err[mask_self]**2,
                               other.counts_err[mask_other]**2))
        # More conditions can be implemented for other statistics
        else:
            raise StingrayError("Statistics not recognized."
                                " Please use one of these: "
                                "{}".format(valid_statistics))

        lc_new = Lightcurve(new_time,
                            new_counts,
                            err=new_counts_err,
                            gti=common_gti,
                            mjdref=self.mjdref)

        return lc_new
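
A hedged usage sketch for the helper above: adding two stingray Lightcurve objects with different GTIs keeps only the bins inside the common GTI. The numbers are illustrative, and the exact constructor behaviour can vary slightly between stingray versions.

import numpy as np
from stingray import Lightcurve

time = np.arange(0.5, 4.5)                       # bin centres, dt = 1
lc_a = Lightcurve(time, [10, 20, 30, 40], gti=[[0, 4]])
lc_b = Lightcurve(time, [1, 2, 3, 4], gti=[[0, 2]])

lc_sum = lc_a + lc_b        # __add__ dispatches to _operation_with_other_lc
print(lc_sum.time)          # only the bins inside the common GTI [0, 2]
print(lc_sum.counts)        # element-wise sums of those bins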
Example 6
    def _decide_ref_intervals(self, channel_band, ref_band):
        """
        Ensures that the ``channel_band`` (i.e. the band of interest) is
        not contained within the ``ref_band`` (i.e. the reference band)

        Parameters
        ----------
        channel_band : iterable of type ``[elow, ehigh]``
            The lower/upper limits of the energies to be contained in the band
            of interest

        ref_band : iterable
            The lower/upper limits of the energies in the reference band

        Returns
        -------
        ref_intervals : iterable
            The channels that are in the reference band and not in the
            band of interest
        """
        channel_band = np.asarray(channel_band)
        ref_band = np.asarray(ref_band)
        if len(ref_band.shape) <= 1:
            ref_band = np.asarray([ref_band])
        if check_separate(ref_band, [channel_band]):
            return np.asarray(ref_band)
        not_channel_band = [[0, channel_band[0]],
                            [channel_band[1], np.max([np.max(ref_band),
                                                      channel_band[1] + 1])]]

        return cross_two_gtis(ref_band, not_channel_band)
Example 7
    def _operation_with_other_lc(self, other, operation):
        """
        Helper method to codify an operation of one light curve with another (e.g. add, subtract, ...).
        Takes into account the GTIs correctly, and returns a new :class:`Lightcurve` object.

        Parameters
        ----------
        other : :class:`Lightcurve` object
            A second light curve object

        operation : function
            An operation between the :class:`Lightcurve` object calling this method, and ``other``,
            operating on the ``counts`` attribute in each :class:`Lightcurve` object

        Returns
        -------
        lc_new : Lightcurve object
            The new light curve calculated in ``operation``
        """
        if self.mjdref != other.mjdref:
            raise ValueError("MJDref is different in the two light curves")

        common_gti = cross_two_gtis(self.gti, other.gti)
        mask_self = create_gti_mask(self.time, common_gti, dt=self.dt)
        mask_other = create_gti_mask(other.time, common_gti, dt=other.dt)

        # ValueError is raised by Numpy while asserting np.equal over arrays
        # with different dimensions.
        try:
            diff = np.abs((self.time[mask_self] - other.time[mask_other]))
            assert np.all(diff < self.dt / 100)
        except (ValueError, AssertionError):
            raise ValueError("GTI-filtered time arrays of both light curves "
                             "must be of same dimension and equal.")

        new_time = self.time[mask_self]
        new_counts = operation(self.counts[mask_self],
                               other.counts[mask_other])

        if self.err_dist.lower() != other.err_dist.lower():
            simon("Lightcurves have different statistics! "
                  "We are setting the errors to zero to avoid complications.")
            new_counts_err = np.zeros_like(new_counts)
        elif self.err_dist.lower() in valid_statistics:
            new_counts_err = \
                np.sqrt(np.add(self.counts_err[mask_self]**2,
                               other.counts_err[mask_other]**2))
        # More conditions can be implemented for other statistics
        else:
            raise StingrayError("Statistics not recognized."
                                " Please use one of these: "
                                "{}".format(valid_statistics))

        lc_new = Lightcurve(new_time, new_counts,
                            err=new_counts_err, gti=common_gti,
                            mjdref=self.mjdref)

        return lc_new
Example 8
    def _make_segment_spectrum(self, lc1, lc2, segment_size):

        # TODO: need to update this for making cross spectra.
        assert isinstance(lc1, Lightcurve)
        assert isinstance(lc2, Lightcurve)

        if lc1.tseg != lc2.tseg:
            raise ValueError("Lightcurves do not have same tseg.")

        # If dt differs slightly, its propagated error must not be more than
        # 1/100th of the bin
        if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
            raise ValueError("Light curves do not have same time binning dt.")

        # In case a small difference exists, ignore it
        lc1.dt = lc2.dt

        if self.gti is None:
            self.gti = cross_two_gtis(lc1.gti, lc2.gti)
            lc1.gti = lc2.gti = self.gti
            lc1._apply_gtis()
            lc2._apply_gtis()

        check_gtis(self.gti)

        cs_all = []
        nphots1_all = []
        nphots2_all = []

        start_inds, end_inds = \
            bin_intervals_from_gtis(self.gti, segment_size, lc1.time,
                                    dt=lc1.dt)

        for start_ind, end_ind in zip(start_inds, end_inds):
            time_1 = lc1.time[start_ind:end_ind]
            counts_1 = lc1.counts[start_ind:end_ind]
            counts_1_err = lc1.counts_err[start_ind:end_ind]
            time_2 = lc2.time[start_ind:end_ind]
            counts_2 = lc2.counts[start_ind:end_ind]
            counts_2_err = lc2.counts_err[start_ind:end_ind]
            gti1 = np.array([[time_1[0] - lc1.dt / 2,
                             time_1[-1] + lc1.dt / 2]])
            gti2 = np.array([[time_2[0] - lc2.dt / 2,
                             time_2[-1] + lc2.dt / 2]])
            lc1_seg = Lightcurve(time_1, counts_1, err=counts_1_err,
                                 err_dist=lc1.err_dist,
                                 gti=gti1,
                                 dt=lc1.dt)
            lc2_seg = Lightcurve(time_2, counts_2, err=counts_2_err,
                                 err_dist=lc2.err_dist,
                                 gti=gti2,
                                 dt=lc2.dt)
            cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm)
            cs_all.append(cs_seg)
            nphots1_all.append(np.sum(lc1_seg.counts))
            nphots2_all.append(np.sum(lc2_seg.counts))
        return cs_all, nphots1_all, nphots2_all
Example 9
    def _make_segment_spectrum(self, lc1, lc2, segment_size):

        # TODO: need to update this for making cross spectra.
        assert isinstance(lc1, Lightcurve)
        assert isinstance(lc2, Lightcurve)

        if lc1.tseg != lc2.tseg:
            raise ValueError("Lightcurves do not have same tseg.")

        # If dt differs slightly, its propagated error must not be more than
        # 1/100th of the bin
        if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
            raise ValueError("Light curves do not have same time binning dt.")

        # In case a small difference exists, ignore it
        lc1.dt = lc2.dt

        if self.gti is None:
            self.gti = cross_two_gtis(lc1.gti, lc2.gti)
            lc1.gti = lc2.gti = self.gti
            lc1._apply_gtis()
            lc2._apply_gtis()

        check_gtis(self.gti)

        cs_all = []
        nphots1_all = []
        nphots2_all = []

        start_inds, end_inds = \
            bin_intervals_from_gtis(self.gti, segment_size, lc1.time,
                                    dt=lc1.dt)

        for start_ind, end_ind in zip(start_inds, end_inds):
            time_1 = lc1.time[start_ind:end_ind]
            counts_1 = lc1.counts[start_ind:end_ind]
            counts_1_err = lc1.counts_err[start_ind:end_ind]
            time_2 = lc2.time[start_ind:end_ind]
            counts_2 = lc2.counts[start_ind:end_ind]
            counts_2_err = lc2.counts_err[start_ind:end_ind]
            gti1 = np.array([[time_1[0] - lc1.dt / 2,
                             time_1[-1] + lc1.dt / 2]])
            gti2 = np.array([[time_2[0] - lc2.dt / 2,
                             time_2[-1] + lc2.dt / 2]])
            lc1_seg = Lightcurve(time_1, counts_1, err=counts_1_err,
                                 err_dist=lc1.err_dist,
                                 gti=gti1,
                                 dt=lc1.dt)
            lc2_seg = Lightcurve(time_2, counts_2, err=counts_2_err,
                                 err_dist=lc2.err_dist,
                                 gti=gti2,
                                 dt=lc2.dt)
            cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm)
            cs_all.append(cs_seg)
            nphots1_all.append(np.sum(lc1_seg.counts))
            nphots2_all.append(np.sum(lc2_seg.counts))
        return cs_all, nphots1_all, nphots2_all
Example 10
    def _truncate_by_index(self, start, stop):
        """Private method for truncation using index values."""
        time_new = self.time[start:stop]
        counts_new = self.counts[start:stop]
        gti = \
            cross_two_gtis(self.gti,
                           np.asarray([[self.time[start] - 0.5 * self.dt,
                                        time_new[-1] + 0.5 * self.dt]]))

        return Lightcurve(time_new, counts_new, gti=gti)
Example 11
    def _truncate_by_index(self, start, stop):
        """Private method for truncation using index values."""
        time_new = self.time[start:stop]
        counts_new = self.counts[start:stop]
        gti = \
            cross_two_gtis(self.gti,
                           np.asarray([[self.time[start] - 0.5 * self.dt,
                                        time_new[-1] + 0.5 * self.dt]]))

        return Lightcurve(time_new, counts_new, gti=gti)
Example 12
    def _make_crossspectrum(self, lc1, lc2):

        # make sure the inputs work!
        if not isinstance(lc1, lightcurve.Lightcurve):
            raise TypeError("lc1 must be a lightcurve.Lightcurve object")

        if not isinstance(lc2, lightcurve.Lightcurve):
            raise TypeError("lc2 must be a lightcurve.Lightcurve object")

        # Then check that GTIs make sense
        if self.gti is None:
            self.gti = cross_two_gtis(lc1.gti, lc2.gti)

        check_gtis(self.gti)

        if self.gti.shape[0] != 1:
            raise TypeError("Non-averaged Cross Spectra need "
                            "a single Good Time Interval")

        lc1 = lc1.split_by_gti()[0]
        lc2 = lc2.split_by_gti()[0]

        # total number of photons is the sum of the
        # counts in the light curve
        self.nphots1 = np.float64(np.sum(lc1.counts))
        self.nphots2 = np.float64(np.sum(lc2.counts))

        self.meancounts1 = np.mean(lc1.counts)
        self.meancounts2 = np.mean(lc2.counts)

        # the number of data points in the light curve

        if lc1.n != lc2.n:
            raise StingrayError("Light curves do not have same number "
                                "of time bins per segment.")

        if lc1.dt != lc2.dt:
            raise StingrayError("Light curves do not have "
                                "same time binning dt.")

        self.n = lc1.n

        # the frequency resolution
        self.df = 1.0 / lc1.tseg

        # the number of averaged periodograms in the final output
        # This should *always* be 1 here
        self.m = 1

        # make the actual Fourier transform and compute cross spectrum
        self.freq, self.unnorm_power = self._fourier_cross(lc1, lc2)

        # If co-spectrum is desired, normalize here. Otherwise, get raw back
        # with the imaginary part still intact.
        self.power = self._normalize_crossspectrum(self.unnorm_power, lc1.tseg)
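
The frequency resolution set above, df = 1 / tseg, is just the spacing of the FFT frequency grid for a segment of length tseg. A quick numpy check with made-up values of n and dt (the actual Fourier cross product is computed in _fourier_cross and is not reproduced here):

import numpy as np

n, dt = 16, 0.5
tseg = n * dt                          # segment length
freq = np.fft.rfftfreq(n, d=dt)        # positive FFT frequencies
print(freq[1] - freq[0], 1.0 / tseg)   # both give df = 0.125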
Example 13
    def _make_crossspectrum(self, lc1, lc2):

        ## make sure the inputs work!
        if not isinstance(lc1, lightcurve.Lightcurve):
            raise TypeError("lc1 must be a lightcurve.Lightcurve object")

        if not isinstance(lc2, lightcurve.Lightcurve):
            raise TypeError("lc2 must be a lightcurve.Lightcurve object")

        # Then check that GTIs make sense
        if self.gti is None:
            self.gti = cross_two_gtis(lc1.gti, lc2.gti)

        check_gtis(self.gti)

        if self.gti.shape[0] != 1:
            raise TypeError("Non-averaged Cross Spectra need "
                            "a single Good Time Interval")

        lc1 = lc1.split_by_gti()[0]
        lc2 = lc2.split_by_gti()[0]

        ## total number of photons is the sum of the
        ## counts in the light curve
        self.nphots1 = np.float64(np.sum(lc1.counts))
        self.nphots2 = np.float64(np.sum(lc2.counts))

        self.meancounts1 = np.mean(lc1.counts)
        self.meancounts2 = np.mean(lc2.counts)

        ## the number of data points in the light curve

        if lc1.counts.shape[0] != lc2.counts.shape[0]:
            raise StingrayError("Light curves do not have same number "
                                "of time bins per segment.")

        if lc1.dt != lc2.dt:
            raise StingrayError("Light curves do not have "
                                "same time binning dt.")

        self.n = lc1.counts.shape[0]

        ## the frequency resolution
        self.df = 1.0/lc1.tseg

        ## the number of averaged periodograms in the final output
        ## This should *always* be 1 here
        self.m = 1

        ## make the actual Fourier transform and compute cross spectrum
        self.freq, self.unnorm_power = self._fourier_cross(lc1, lc2)

        ## If co-spectrum is desired, normalize here. Otherwise, get raw back
        ## with the imaginary part still intact.
        self.power = self._normalize_crossspectrum(self.unnorm_power, lc1.tseg)
Example 14
    def __sub__(self, other):
        """
        Subtract two light curves element by element having the same time array.

        This magic method subtracts two Lightcurve objects having the same
        time array such that the corresponding counts arrays are subtracted
        element by element.

        GTIs are crossed, so that only common intervals are saved.

        Example
        -------
        >>> time = [10, 20, 30]
        >>> count1 = [600, 1200, 800]
        >>> count2 = [300, 100, 400]
        >>> gti1 = [[0, 20]]
        >>> gti2 = [[0, 25]]
        >>> lc1 = Lightcurve(time, count1, gti=gti1)
        >>> lc2 = Lightcurve(time, count2, gti=gti2)
        >>> lc = lc1 - lc2
        >>> lc.counts
        array([ 300, 1100,  400])
        """

        # ValueError is raised by Numpy while asserting np.equal over arrays
        # with different dimensions.
        try:
            assert np.all(np.equal(self.time, other.time))
        except (ValueError, AssertionError):
            raise ValueError("Time arrays of both light curves must be "
                             "of same dimension and equal.")

        new_counts = np.subtract(self.counts, other.counts)

        if self.err_dist.lower() != other.err_dist.lower():
            simon("Lightcurves have different statistics!"
                  "We are setting the errors to zero to avoid complications.")
            new_counts_err = np.zeros_like(new_counts)
        elif self.err_dist.lower() in valid_statistics:
            new_counts_err = np.sqrt(np.add(self.counts_err**2,
                                            other.counts_err**2))
            # More conditions can be implemented for other statistics
        else:
            raise StingrayError("Statistics not recognized."
                                " Please use one of these: "
                                "{}".format(valid_statistics))

        common_gti = cross_two_gtis(self.gti, other.gti)

        lc_new = Lightcurve(self.time, new_counts,
                            err=new_counts_err, gti=common_gti)

        return lc_new
Example 15
    def _construct_lightcurves(self,
                               channel_band,
                               tstart=None,
                               tstop=None,
                               exclude=True,
                               only_base=False):
        if self.use_pi:
            energies1 = self.events1.pi
            energies2 = self.events2.pi
        else:
            energies2 = self.events2.energy
            energies1 = self.events1.energy

        gti = cross_two_gtis(self.events1.gti, self.events2.gti)

        tstart = assign_value_if_none(tstart, gti[0, 0])
        tstop = assign_value_if_none(tstop, gti[-1, -1])

        good = (energies1 >= channel_band[0]) & (energies1 < channel_band[1])
        base_lc = Lightcurve.make_lightcurve(self.events1.time[good],
                                             self.bin_time,
                                             tstart=tstart,
                                             tseg=tstop - tstart,
                                             gti=gti,
                                             mjdref=self.events1.mjdref)

        if only_base:
            return base_lc

        if exclude:
            ref_intervals = self._decide_ref_intervals(channel_band,
                                                       self.ref_band)
        else:
            ref_intervals = self.ref_band

        ref_lc = Lightcurve(base_lc.time,
                            np.zeros_like(base_lc.counts),
                            gti=base_lc.gti,
                            mjdref=base_lc.mjdref,
                            err_dist='gauss')

        for i in ref_intervals:
            good = (energies2 >= i[0]) & (energies2 < i[1])
            new_lc = Lightcurve.make_lightcurve(self.events2.time[good],
                                                self.bin_time,
                                                tstart=tstart,
                                                tseg=tstop - tstart,
                                                gti=base_lc.gti,
                                                mjdref=self.events2.mjdref)
            ref_lc = ref_lc + new_lc

        ref_lc.err_dist = base_lc.err_dist
        return base_lc, ref_lc
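
A tiny sketch of the half-open energy selection used above for both the band of interest and each reference interval; the energy values are made up.

import numpy as np

energies = np.array([1.0, 2.0, 2.9, 3.0, 7.5])
channel_band = (2.0, 3.0)

# Half-open cut [elow, ehigh): the upper edge is excluded
good = (energies >= channel_band[0]) & (energies < channel_band[1])
print(energies[good])   # [2.  2.9]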
Example 16
    def _decide_ref_intervals(self, channel_band, ref_band):
        """Eliminate channel_band from ref_band."""
        channel_band = np.asarray(channel_band)
        ref_band = np.asarray(ref_band)
        if len(ref_band.shape) <= 1:
            ref_band = np.asarray([ref_band])
        if check_separate(ref_band, [channel_band]):
            return np.asarray(ref_band)
        not_channel_band = [[0, channel_band[0]],
                            [channel_band[1], np.max([np.max(ref_band),
                                                      channel_band[1] + 1])]]

        return cross_two_gtis(ref_band, not_channel_band)
Example 17
def get_non_overlapping_ref_band(channel_band, ref_band):
    """
    Ensures that the ``channel_band`` (i.e. the band of interest) is
    not contained within the ``ref_band`` (i.e. the reference band)

    Parameters
    ----------
    channel_band : iterable of type ``[elow, ehigh]``
        The lower/upper limits of the energies to be contained in the band
        of interest

    ref_band : iterable
        The lower/upper limits of the energies in the reference band

    Returns
    -------
    ref_intervals : iterable
        The channels that are in the reference band and not in the
        band of interest

    Examples
    --------
    >>> channel_band = [2, 3]
    >>> ref_band = [[0, 10]]
    >>> new_ref = get_non_overlapping_ref_band(channel_band, ref_band)
    >>> np.allclose(new_ref, [[0, 2], [3, 10]])
    True

    Test that this also works with a 1-D ref. band
    >>> new_ref = get_non_overlapping_ref_band(channel_band, [0, 10])
    >>> np.allclose(new_ref, [[0, 2], [3, 10]])
    True
    >>> new_ref = get_non_overlapping_ref_band([0, 1], [[2, 3]])
    >>> np.allclose(new_ref, [[2, 3]])
    True
    """
    channel_band = np.asarray(channel_band)
    ref_band = np.asarray(ref_band)
    if len(ref_band.shape) <= 1:
        ref_band = np.asarray([ref_band])
    if check_separate(ref_band, [channel_band]):
        return np.asarray(ref_band)
    not_channel_band = [
        [0, channel_band[0]],
        [channel_band[1],
         np.max([np.max(ref_band), channel_band[1] + 1])],
    ]

    return cross_two_gtis(ref_band, not_channel_band)
Example 18
    def _decide_ref_intervals(self, channel_band, ref_band):
        """Eliminate channel_band from ref_band."""
        channel_band = np.asarray(channel_band)
        ref_band = np.asarray(ref_band)
        if len(ref_band.shape) <= 1:
            ref_band = np.asarray([ref_band])
        if check_separate(ref_band, [channel_band]):
            return np.asarray(ref_band)
        not_channel_band = [[0, channel_band[0]],
                            [
                                channel_band[1],
                                np.max([np.max(ref_band), channel_band[1] + 1])
                            ]]

        return cross_two_gtis(ref_band, not_channel_band)
Example 19
    def to_lc(self,
              dt,
              pi_low=35,
              pi_high=1909,
              centroid=None,
              radius=None,
              tstart=None,
              tseg=None,
              gti=None,
              buff=False,
              buffersize=100.0):
        # Bin this Events instance into a Lightcurve object.
        # Unlike the built-in version, this includes region and energy filtering, and you can introduce a new gti.

        if gti is None:
            gti = self.gti

        else:
            gti = sting_gti.cross_two_gtis(self.gti, gti)

        if buff:
            buffered_gti = []
            for x, y in gti:
                if np.abs(y - x) > 2 * buffersize:
                    buffered_gti.append([x + buffersize, y - buffersize])
            gti = buffered_gti

        if tstart is None and gti is not None:
            tstart = gti[0][0]
            tseg = gti[-1][1] - tstart

        reg_mask = np.ones(np.shape(self.time)).astype(bool)
        if (centroid is not None) and (radius is not None):
            reg_mask = (np.sqrt(
                np.square(self.x - centroid[0]) +
                np.square(self.y - centroid[1])) < radius).astype(bool)

        pi_mask = (self.pi > pi_low) * (self.pi <= pi_high)
        tot_mask = reg_mask * pi_mask
        return Lightcurve_ext.make_lightcurve(self.time[tot_mask],
                                              dt,
                                              tstart=tstart,
                                              gti=gti,
                                              tseg=tseg,
                                              mjdref=self.mjdref,
                                              centroid=centroid,
                                              radius=radius)
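
A stand-alone sketch of the two masks combined above: a circular region cut around a hypothetical centroid and a PI (energy channel) cut. All values are made up for illustration.

import numpy as np

x = np.array([10.0, 12.0, 30.0])
y = np.array([10.0, 11.0, 30.0])
pi = np.array([100, 2000, 500])

centroid, radius = (10.0, 10.0), 5.0
pi_low, pi_high = 35, 1909

reg_mask = np.sqrt(np.square(x - centroid[0]) +
                   np.square(y - centroid[1])) < radius
pi_mask = (pi > pi_low) & (pi <= pi_high)
print(reg_mask & pi_mask)   # [ True False False]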
Example 20
    def _construct_lightcurves(self, channel_band, tstart=None, tstop=None,
                               exclude=True, only_base=False):
        if self.use_pi:
            energies1 = self.events1.pi
            energies2 = self.events2.pi
        else:
            energies2 = self.events2.energy
            energies1 = self.events1.energy

        gti = cross_two_gtis(self.events1.gti, self.events2.gti)

        tstart = assign_value_if_none(tstart, gti[0, 0])
        tstop = assign_value_if_none(tstop, gti[-1, -1])

        good = (energies1 >= channel_band[0]) & (energies1 < channel_band[1])
        base_lc = Lightcurve.make_lightcurve(self.events1.time[good],
                                             self.bin_time,
                                             tstart=tstart,
                                             tseg=tstop - tstart,
                                             gti=gti,
                                             mjdref=self.events1.mjdref)

        if only_base:
            return base_lc

        if exclude:
            ref_intervals = self._decide_ref_intervals(channel_band,
                                                       self.ref_band)
        else:
            ref_intervals = self.ref_band

        ref_lc = Lightcurve(base_lc.time, np.zeros_like(base_lc.counts),
                            gti=base_lc.gti, mjdref=base_lc.mjdref,
                            err_dist='gauss')

        for i in ref_intervals:
            good = (energies2 >= i[0]) & (energies2 < i[1])
            new_lc = Lightcurve.make_lightcurve(self.events2.time[good],
                                                self.bin_time,
                                                tstart=tstart,
                                                tseg=tstop - tstart,
                                                gti=base_lc.gti,
                                                mjdref=self.events2.mjdref)
            ref_lc = ref_lc + new_lc

        ref_lc.err_dist = base_lc.err_dist
        return base_lc, ref_lc
Example 21
    def _make_segment_spectrum(self, lc1, lc2, segment_size):

        # TODO: need to update this for making cross spectra.
        assert isinstance(lc1, Lightcurve)
        assert isinstance(lc2, Lightcurve)

        if lc1.dt != lc2.dt:
            raise ValueError("Light curves do not have same time binning dt.")

        if lc1.tseg != lc2.tseg:
            raise ValueError("Lightcurves do not have same tseg.")

        if self.gti is None:
            self.gti = cross_two_gtis(lc1.gti, lc2.gti)

        check_gtis(self.gti)

        cs_all = []
        nphots1_all = []
        nphots2_all = []

        start_inds, end_inds = \
            bin_intervals_from_gtis(self.gti, segment_size, lc1.time)

        for start_ind, end_ind in zip(start_inds, end_inds):
            time_1 = lc1.time[start_ind:end_ind]
            counts_1 = lc1.counts[start_ind:end_ind]
            counts_1_err = lc1.counts_err[start_ind:end_ind]
            time_2 = lc2.time[start_ind:end_ind]
            counts_2 = lc2.counts[start_ind:end_ind]
            counts_2_err = lc2.counts_err[start_ind:end_ind]
            lc1_seg = Lightcurve(time_1,
                                 counts_1,
                                 err=counts_1_err,
                                 err_dist=lc1.err_dist)
            lc2_seg = Lightcurve(time_2,
                                 counts_2,
                                 err=counts_2_err,
                                 err_dist=lc2.err_dist)
            cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm)
            cs_all.append(cs_seg)
            nphots1_all.append(np.sum(lc1_seg.counts))
            nphots2_all.append(np.sum(lc2_seg.counts))

        return cs_all, nphots1_all, nphots2_all
Example 22
    def split_by_time(self, bintime=100, gti=None):
        # Split up the Events object into multiple Events instances based on the input time.

        split_ev = []
        if gti is None:
            gti = self.gti
        else:
            gti = sting_gti.cross_two_gtis(self.gti, gti)
        for g in gti:
            g_len = g[1] - g[0]
            for i in range(int(np.floor(g_len / bintime))):
                g_mask = ((self.time >= g[0] + i * bintime) *
                          (self.time <= g[0] + (i + 1) * bintime))
                split_ev.append(EventList_ext(
                    time=self.time[g_mask], mjdref=self.mjdref,
                    dt=self.dt, notes=self.notes,
                    gti=[[g[0] + i * bintime, g[0] + (i + 1) * bintime]],
                    pi=self.pi[g_mask], prior=self.prior[g_mask],
                    x=self.x[g_mask], y=self.y[g_mask],
                    xy_weights=self.xy_weights[g_mask]))
        return split_ev
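
A sketch of the time windowing above: each GTI is cut into floor(length / bintime) consecutive windows of length bintime, and any remainder at the end of the GTI is dropped. The values are made up for illustration.

import numpy as np

gti = [(0.0, 350.0)]
bintime = 100.0

windows = []
for g0, g1 in gti:
    n_win = int(np.floor((g1 - g0) / bintime))
    windows.extend([(g0 + i * bintime, g0 + (i + 1) * bintime)
                    for i in range(n_win)])

print(windows)   # [(0.0, 100.0), (100.0, 200.0), (200.0, 300.0)]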
Example 23
    def _analyze_inputs(self):
        """Make some checks on the inputs and set some internal variable.

        If the object of events1 is the same as events2, set `same_events` to True.
        This will, for example, tell the methods to use events1 for the subject bands
        and events2 for the reference band (useful in deadtime-affected data).

        Also, if the event lists are distinct, calculate common GTIs.
        """
        events1 = self.events1
        events2 = self.events2
        common_gti = events1.gti
        if events2 is None or events2 is events1:
            self.events2 = self.events1
            self.same_events = True
        else:
            common_gti = cross_two_gtis(events1.gti, events2.gti)
            self.same_events = False
        self.gti = common_gti
Example 24
    def _operation_with_other_lc(self, other, operation):
        if self.mjdref != other.mjdref:
            raise ValueError("MJDref is different in the two light curves")

        common_gti = cross_two_gtis(self.gti, other.gti)
        mask_self = create_gti_mask(self.time, common_gti)
        mask_other = create_gti_mask(other.time, common_gti)

        # ValueError is raised by Numpy while asserting np.equal over arrays
        # with different dimensions.
        try:
            assert np.all(
                np.equal(self.time[mask_self], other.time[mask_other]))
        except (ValueError, AssertionError):
            raise ValueError("GTI-filtered time arrays of both light curves "
                             "must be of same dimension and equal.")

        new_time = self.time[mask_self]
        new_counts = operation(self.counts[mask_self],
                               other.counts[mask_other])

        if self.err_dist.lower() != other.err_dist.lower():
            simon("Lightcurves have different statistics!"
                  "We are setting the errors to zero to avoid complications.")
            new_counts_err = np.zeros_like(new_counts)
        elif self.err_dist.lower() in valid_statistics:
            new_counts_err = np.sqrt(
                np.add(self.counts_err[mask_self]**2,
                       other.counts_err[mask_other]**2))
        # More conditions can be implemented for other statistics
        else:
            raise StingrayError("Statistics not recognized."
                                " Please use one of these: "
                                "{}".format(valid_statistics))

        lc_new = Lightcurve(new_time,
                            new_counts,
                            err=new_counts_err,
                            gti=common_gti,
                            mjdref=self.mjdref)

        return lc_new
Example 25
    def _operation_with_other_lc(self, other, operation):
        if self.mjdref != other.mjdref:
            raise ValueError("MJDref is different in the two light curves")

        common_gti = cross_two_gtis(self.gti, other.gti)
        mask_self = create_gti_mask(self.time, common_gti)
        mask_other = create_gti_mask(other.time, common_gti)

        # ValueError is raised by Numpy while asserting np.equal over arrays
        # with different dimensions.
        try:
            assert np.all(np.equal(self.time[mask_self],
                                   other.time[mask_other]))
        except (ValueError, AssertionError):
            raise ValueError("GTI-filtered time arrays of both light curves "
                             "must be of same dimension and equal.")

        new_time = self.time[mask_self]
        new_counts = operation(self.counts[mask_self],
                               other.counts[mask_other])

        if self.err_dist.lower() != other.err_dist.lower():
            simon("Lightcurves have different statistics! "
                  "We are setting the errors to zero to avoid complications.")
            new_counts_err = np.zeros_like(new_counts)
        elif self.err_dist.lower() in valid_statistics:
            new_counts_err = np.sqrt(np.add(self.counts_err[mask_self]**2,
                                            other.counts_err[mask_other]**2))
        # More conditions can be implemented for other statistics
        else:
            raise StingrayError("Statistics not recognized."
                                " Please use one of these: "
                                "{}".format(valid_statistics))

        lc_new = Lightcurve(new_time, new_counts,
                            err=new_counts_err, gti=common_gti,
                            mjdref=self.mjdref)

        return lc_new
Example 26
    def __sub__(self, other):
        """
        Subtract two light curves element by element having the same time array.

        This magic method subtracts two Lightcurve objects having the same
        time array such that the corresponding counts arrays are subtracted
        element by element.

        GTIs are crossed, so that only common intervals are saved.

        Example
        -------
        >>> time = [10, 20, 30]
        >>> count1 = [600, 1200, 800]
        >>> count2 = [300, 100, 400]
        >>> gti1 = [[0, 20]]
        >>> gti2 = [[0, 25]]
        >>> lc1 = Lightcurve(time, count1, gti=gti1)
        >>> lc2 = Lightcurve(time, count2, gti=gti2)
        >>> lc = lc1 - lc2
        >>> lc.counts
        array([ 300, 1100,  400])
        """

        # ValueError is raised by Numpy while asserting np.equal over arrays
        # with different dimensions.
        try:
            assert np.all(np.equal(self.time, other.time))
        except (ValueError, AssertionError):
            raise ValueError("Time arrays of both light curves must be "
                             "of same dimension and equal.")

        new_counts = np.subtract(self.counts, other.counts)
        common_gti = cross_two_gtis(self.gti, other.gti)

        lc_new = Lightcurve(self.time, new_counts, gti=common_gti)

        return lc_new
Example 27
    def __sub__(self, other):
        """
        Subtract two light curves element by element having the same time array.

        This magic method subtracts two Lightcurve objects having the same
        time array such that the corresponding counts arrays are subtracted
        element by element.

        GTIs are crossed, so that only common intervals are saved.

        Example
        -------
        >>> time = [10, 20, 30]
        >>> count1 = [600, 1200, 800]
        >>> count2 = [300, 100, 400]
        >>> gti1 = [[0, 20]]
        >>> gti2 = [[0, 25]]
        >>> lc1 = Lightcurve(time, count1, gti=gti1)
        >>> lc2 = Lightcurve(time, count2, gti=gti2)
        >>> lc = lc1 - lc2
        >>> lc.counts
        array([ 300, 1100,  400])
        """

        # ValueError is raised by Numpy while asserting np.equal over arrays
        # with different dimensions.
        try:
            assert np.all(np.equal(self.time, other.time))
        except (ValueError, AssertionError):
            raise ValueError("Time arrays of both light curves must be "
                             "of same dimension and equal.")

        new_counts = np.subtract(self.counts, other.counts)
        common_gti = cross_two_gtis(self.gti, other.gti)

        lc_new = Lightcurve(self.time, new_counts, gti=common_gti)

        return lc_new
Example 28
    def _make_segment_spectrum(self, lc1, lc2, segment_size):

        # TODO: need to update this for making cross spectra.
        assert isinstance(lc1, lightcurve.Lightcurve)
        assert isinstance(lc2, lightcurve.Lightcurve)

        if lc1.dt != lc2.dt:
            raise ValueError("Light curves do not have same time binning dt.")

        if lc1.tseg != lc2.tseg:
            raise ValueError("Lightcurves do not have same tseg.")

        if self.gti is None:
            self.gti = cross_two_gtis(lc1.gti, lc2.gti)

        check_gtis(self.gti)

        cs_all = []
        nphots1_all = []
        nphots2_all = []

        start_inds, end_inds = \
            bin_intervals_from_gtis(self.gti, segment_size, lc1.time)

        for start_ind, end_ind in zip(start_inds, end_inds):
            time_1 = lc1.time[start_ind:end_ind]
            counts_1 = lc1.counts[start_ind:end_ind]
            time_2 = lc2.time[start_ind:end_ind]
            counts_2 = lc2.counts[start_ind:end_ind]
            lc1_seg = lightcurve.Lightcurve(time_1, counts_1)
            lc2_seg = lightcurve.Lightcurve(time_2, counts_2)
            cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm)
            cs_all.append(cs_seg)
            nphots1_all.append(np.sum(lc1_seg.counts))
            nphots2_all.append(np.sum(lc2_seg.counts))

        return cs_all, nphots1_all, nphots2_all
Example 29
    def _make_segment_spectrum(self, lc1, lc2, segment_size):
        """
        Split the light curves into segments of size ``segment_size``, and calculate a cross spectrum for
        each.

        Parameters
        ----------
        lc1, lc2 : :class:`stingray.Lightcurve` objects
            Two light curves used for computing the cross spectrum.

        segment_size : ``numpy.float``
            Size of each light curve segment to use for averaging.

        Returns
        -------
        cs_all : list of :class:`Crossspectrum` objects
            A list of cross spectra calculated independently from each light curve segment

        nphots1_all, nphots2_all : ``numpy.ndarray`` for each of ``lc1`` and ``lc2``
            Two lists containing the number of photons for all segments calculated from ``lc1`` and ``lc2``.

        """

        # TODO: need to update this for making cross spectra.
        assert isinstance(lc1, Lightcurve)
        assert isinstance(lc2, Lightcurve)

        if lc1.tseg != lc2.tseg:
            raise ValueError("Lightcurves do not have same tseg.")

        # If dt differs slightly, its propagated error must not be more than
        # 1/100th of the bin
        if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
            raise ValueError("Light curves do not have same time binning dt.")

        # In case a small difference exists, ignore it
        lc1.dt = lc2.dt

        if self.gti is None:
            self.gti = cross_two_gtis(lc1.gti, lc2.gti)
            lc1.gti = lc2.gti = self.gti
            lc1._apply_gtis()
            lc2._apply_gtis()

        check_gtis(self.gti)

        cs_all = []
        nphots1_all = []
        nphots2_all = []

        start_inds, end_inds = \
            bin_intervals_from_gtis(self.gti, segment_size, lc1.time,
                                    dt=lc1.dt)

        for start_ind, end_ind in zip(start_inds, end_inds):
            time_1 = lc1.time[start_ind:end_ind]
            counts_1 = lc1.counts[start_ind:end_ind]
            counts_1_err = lc1.counts_err[start_ind:end_ind]
            time_2 = lc2.time[start_ind:end_ind]
            counts_2 = lc2.counts[start_ind:end_ind]
            counts_2_err = lc2.counts_err[start_ind:end_ind]
            gti1 = np.array([[time_1[0] - lc1.dt / 2,
                              time_1[-1] + lc1.dt / 2]])
            gti2 = np.array([[time_2[0] - lc2.dt / 2,
                              time_2[-1] + lc2.dt / 2]])
            lc1_seg = Lightcurve(time_1, counts_1, err=counts_1_err,
                                 err_dist=lc1.err_dist,
                                 gti=gti1,
                                 dt=lc1.dt)
            lc2_seg = Lightcurve(time_2, counts_2, err=counts_2_err,
                                 err_dist=lc2.err_dist,
                                 gti=gti2,
                                 dt=lc2.dt)
            cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm, power_type=self.power_type)
            cs_all.append(cs_seg)
            nphots1_all.append(np.sum(lc1_seg.counts))
            nphots2_all.append(np.sum(lc2_seg.counts))

        return cs_all, nphots1_all, nphots2_all
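
A simplified sketch of how segment boundaries can be derived from GTIs, in the spirit of bin_intervals_from_gtis (this is not the actual stingray helper, only an illustration with made-up numbers): only segments of exactly segment_size that fit entirely inside a GTI are kept.

import numpy as np

dt, segment_size = 1.0, 4.0
time = np.arange(0.5, 20.5, dt)          # bin centres
gti = [(0.0, 10.0), (12.0, 20.0)]

nbin = int(segment_size / dt)             # bins per segment
start_inds, end_inds = [], []
for g0, g1 in gti:
    first = np.searchsorted(time, g0)     # first bin centre inside the GTI
    last = np.searchsorted(time, g1)      # one past the last bin centre inside
    for s in range(first, last - nbin + 1, nbin):
        start_inds.append(s)
        end_inds.append(s + nbin)

print(list(zip(start_inds, end_inds)))    # [(0, 4), (4, 8), (12, 16), (16, 20)]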
Example 30
    def _make_segment_spectrum(self, lc1, lc2, segment_size):
        """
        Split the light curves into segments of size ``segment_size``, and calculate a cross spectrum for
        each.

        Parameters
        ----------
        lc1, lc2 : :class:`stingray.Lightcurve` objects
            Two light curves used for computing the cross spectrum.

        segment_size : ``numpy.float``
            Size of each light curve segment to use for averaging.

        Returns
        -------
        cs_all : list of :class:`Crossspectrum` objects
            A list of cross spectra calculated independently from each light curve segment

        nphots1_all, nphots2_all : ``numpy.ndarray`` for each of ``lc1`` and ``lc2``
            Two lists containing the number of photons for all segments calculated from ``lc1`` and ``lc2``.

        """

        # TODO: need to update this for making cross spectra.
        assert isinstance(lc1, Lightcurve)
        assert isinstance(lc2, Lightcurve)

        if lc1.tseg != lc2.tseg:
            simon("Lightcurves do not have same tseg. This means that the data"
                  "from the two channels are not completely in sync. This "
                  "might or might not be an issue. Keep an eye on it.")

        # If dt differs slightly, its propagated error must not be more than
        # 1/100th of the bin
        if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
            raise ValueError("Light curves do not have same time binning dt.")

        # In case a small difference exists, ignore it
        lc1.dt = lc2.dt

        gti = cross_two_gtis(lc1.gti, lc2.gti)
        lc1.apply_gtis()
        lc2.apply_gtis()
        if self.gti is None:
            self.gti = gti
        else:
            if not np.all(self.gti == gti):
                self.gti = np.vstack([self.gti, gti])

        check_gtis(self.gti)

        cs_all = []
        nphots1_all = []
        nphots2_all = []


        start_inds, end_inds = \
            bin_intervals_from_gtis(gti, segment_size, lc1.time,
                                    dt=lc1.dt)
        simon("Errorbars on cross spectra are not thoroughly tested. "
              "Please report any inconsistencies.")
        for start_ind, end_ind in zip(start_inds, end_inds):
            time_1 = lc1.time[start_ind:end_ind]
            counts_1 = lc1.counts[start_ind:end_ind]
            counts_1_err = lc1.counts_err[start_ind:end_ind]
            time_2 = lc2.time[start_ind:end_ind]
            counts_2 = lc2.counts[start_ind:end_ind]
            counts_2_err = lc2.counts_err[start_ind:end_ind]
            gti1 = np.array([[time_1[0] - lc1.dt / 2,
                              time_1[-1] + lc1.dt / 2]])
            gti2 = np.array([[time_2[0] - lc2.dt / 2,
                              time_2[-1] + lc2.dt / 2]])
            lc1_seg = Lightcurve(time_1, counts_1, err=counts_1_err,
                                 err_dist=lc1.err_dist,
                                 gti=gti1,
                                 dt=lc1.dt, skip_checks=True)
            lc2_seg = Lightcurve(time_2, counts_2, err=counts_2_err,
                                 err_dist=lc2.err_dist,
                                 gti=gti2,
                                 dt=lc2.dt, skip_checks=True)
            with warnings.catch_warnings(record=True) as w:
                cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm, power_type=self.power_type)

            cs_all.append(cs_seg)
            nphots1_all.append(np.sum(lc1_seg.counts))
            nphots2_all.append(np.sum(lc2_seg.counts))

        return cs_all, nphots1_all, nphots2_all
Example 31
    def _make_crossspectrum(self, lc1, lc2):
        """
        Auxiliary method computing the normalized cross spectrum from two
        light curves. This includes checking for the presence of and
        applying Good Time Intervals, computing the unnormalized Fourier
        cross-amplitude, and then renormalizing using the required
        normalization. Also computes an uncertainty estimate on the cross
        spectral powers.

        Parameters
        ----------
        lc1, lc2 : :class:`stingray.Lightcurve` objects
            Two light curves used for computing the cross spectrum.

        """
        # make sure the inputs work!
        if not isinstance(lc1, Lightcurve):
            raise TypeError("lc1 must be a lightcurve.Lightcurve object")

        if not isinstance(lc2, Lightcurve):
            raise TypeError("lc2 must be a lightcurve.Lightcurve object")

        if self.lc2.mjdref != self.lc1.mjdref:
            raise ValueError("MJDref is different in the two light curves")

        # Then check that GTIs make sense
        if self.gti is None:
            self.gti = cross_two_gtis(lc1.gti, lc2.gti)

        check_gtis(self.gti)

        if self.gti.shape[0] != 1:
            raise TypeError("Non-averaged Cross Spectra need "
                            "a single Good Time Interval")

        lc1 = lc1.split_by_gti()[0]
        lc2 = lc2.split_by_gti()[0]

        # total number of photons is the sum of the
        # counts in the light curve
        self.nphots1 = np.float64(np.sum(lc1.counts))
        self.nphots2 = np.float64(np.sum(lc2.counts))

        self.meancounts1 = lc1.meancounts
        self.meancounts2 = lc2.meancounts

        # the number of data points in the light curve

        if lc1.n != lc2.n:
            raise StingrayError("Light curves do not have same number "
                                "of time bins per segment.")

        # If dt differs slightly, its propagated error must not be more than
        # 1/100th of the bin
        if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
            raise StingrayError("Light curves do not have same time binning "
                                "dt.")

        # In case a small difference exists, ignore it
        lc1.dt = lc2.dt

        self.n = lc1.n

        # the frequency resolution
        self.df = 1.0 / lc1.tseg

        # the number of averaged periodograms in the final output
        # This should *always* be 1 here
        self.m = 1

        # make the actual Fourier transform and compute cross spectrum
        self.freq, self.unnorm_power = self._fourier_cross(lc1, lc2)

        # If co-spectrum is desired, normalize here. Otherwise, get raw back
        # with the imaginary part still intact.
        self.power = self._normalize_crossspectrum(self.unnorm_power, lc1.tseg)

        if lc1.err_dist.lower() != lc2.err_dist.lower():
            simon("Your lightcurves have different statistics."
                  "The errors in the Crossspectrum will be incorrect.")
        elif lc1.err_dist.lower() != "poisson":
            simon("Looks like your lightcurve statistic is not poisson."
                  "The errors in the Powerspectrum will be incorrect.")

        if self.__class__.__name__ in ['Powerspectrum',
                                       'AveragedPowerspectrum']:
            self.power_err = self.power / np.sqrt(self.m)
        elif self.__class__.__name__ in ['Crossspectrum',
                                         'AveragedCrossspectrum']:
            # This is clearly a wild approximation.
            simon("Errorbars on cross spectra are not thoroughly tested. "
                  "Please report any inconsistencies.")
            unnorm_power_err = np.sqrt(2) / np.sqrt(self.m)  # Leahy-like
            unnorm_power_err /= (2 / np.sqrt(self.nphots1 * self.nphots2))
            unnorm_power_err += np.zeros_like(self.power)

            self.power_err = \
                self._normalize_crossspectrum(unnorm_power_err, lc1.tseg)
        else:
            self.power_err = np.zeros(len(self.power))
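
Worked numbers for the Leahy-like error estimate above (m = 1 for a single, non-averaged cross spectrum; the photon counts are made up):

import numpy as np

m = 1
nphots1, nphots2 = 1.0e4, 2.0e4

unnorm_power_err = np.sqrt(2) / np.sqrt(m)             # ~1.414 for m = 1
unnorm_power_err /= (2 / np.sqrt(nphots1 * nphots2))   # i.e. * sqrt(N1 * N2) / 2
print(unnorm_power_err)                                # ~1.0e4 before normalization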
Example 32
def treat_event_file(filename,
                     noclobber=False,
                     gti_split=False,
                     min_length=4,
                     gtistring=None,
                     length_split=None):
    """Read data from an event file, with no external GTI information.

    Parameters
    ----------
    filename : str
        Name of the input event file.

    Other Parameters
    ----------------
    noclobber: bool
        if an output file is already present, do not overwrite it
    gtistring: str
        comma-separated set of GTI strings to consider
    gti_split: bool
        split the file in multiple chunks, containing one GTI each
    length_split: float, default None
        split the file in multiple chunks, with approximately this length
    min_length: float
        minimum length of GTIs accepted (only if gti_split is True or
        length_split is not None)
    """
    gtistring = assign_value_if_none(gtistring, 'GTI,STDGTI')
    logging.info('Opening %s' % filename)

    instr = read_header_key(filename, 'INSTRUME')
    mission = read_header_key(filename, 'TELESCOP')

    data = load_events_and_gtis(filename, gtistring=gtistring)

    events = data.ev_list
    gtis = events.gti
    detector_id = data.detector_id

    if detector_id is not None:
        detectors = np.array(list(set(detector_id)))
    else:
        detectors = [None]
    outfile_root = \
        hen_root(filename) + '_' + mission.lower() + '_' + instr.lower()

    for d in detectors:
        if d is not None:
            good_det = d == data.detector_id
            outroot_local = \
                '{0}_det{1:02d}'.format(outfile_root, d)

        else:
            good_det = np.ones_like(events.time, dtype=bool)
            outroot_local = outfile_root

        outfile = outroot_local + '_ev' + HEN_FILE_EXTENSION
        if noclobber and os.path.exists(outfile) and (not (gti_split
                                                           or length_split)):
            warnings.warn(
                '{0} exists and using noclobber. Skipping'.format(outfile))
            return

        if gti_split or (length_split is not None):
            lengths = np.array([g1 - g0 for (g0, g1) in gtis])
            gtis = gtis[lengths >= min_length]

            if length_split:
                gti0 = np.arange(gtis[0, 0], gtis[-1, 1], length_split)
                gti1 = gti0 + length_split
                gti_chunks = np.array([[g0, g1]
                                       for (g0, g1) in zip(gti0, gti1)])
                label = 'chunk'
            else:
                gti_chunks = gtis
                label = 'gti'

            for ig, g in enumerate(gti_chunks):
                outfile_local = \
                    '{0}_{1}{2:03d}_ev'.format(outroot_local, label,
                                               ig) + HEN_FILE_EXTENSION

                good_gtis = cross_two_gtis([g], gtis)
                if noclobber and os.path.exists(outfile_local):
                    warnings.warn('{0} exists, '.format(outfile_local) +
                                  'and noclobber option used. Skipping')
                    return
                good = np.logical_and(events.time >= g[0], events.time < g[1])
                all_good = good_det & good
                if len(events.time[all_good]) < 1:
                    continue
                events_filt = EventList(events.time[all_good],
                                        pi=events.pi[all_good],
                                        gti=good_gtis,
                                        mjdref=events.mjdref)
                events_filt.instr = events.instr
                events_filt.header = events.header
                save_events(events_filt, outfile_local)
        else:
            events_filt = EventList(events.time[good_det],
                                    pi=events.pi[good_det],
                                    gti=events.gti,
                                    mjdref=events.mjdref)
            events_filt.instr = events.instr
            events_filt.header = events.header

            save_events(events_filt, outfile)
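
# Hedged usage sketch for treat_event_file as defined above. The file name
# below is purely hypothetical; any event file with GTI extensions readable
# by load_events_and_gtis should behave the same way.
if __name__ == "__main__":
    # Split the observation into ~500 s chunks, keep only GTIs of at least
    # 4 s, and skip files whose output already exists.
    treat_event_file("example_obs_cl.evt",
                     noclobber=True,
                     length_split=500,
                     min_length=4)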
Example no. 33
    def _make_crossspectrum(self, lc1, lc2):

        # make sure the inputs work!
        if not isinstance(lc1, Lightcurve):
            raise TypeError("lc1 must be a lightcurve.Lightcurve object")

        if not isinstance(lc2, Lightcurve):
            raise TypeError("lc2 must be a lightcurve.Lightcurve object")

        if self.lc2.mjdref != self.lc1.mjdref:
            raise ValueError("MJDref is different in the two light curves")

        # Then check that GTIs make sense
        if self.gti is None:
            self.gti = cross_two_gtis(lc1.gti, lc2.gti)

        check_gtis(self.gti)

        if self.gti.shape[0] != 1:
            raise TypeError("Non-averaged Cross Spectra need "
                            "a single Good Time Interval")

        lc1 = lc1.split_by_gti()[0]
        lc2 = lc2.split_by_gti()[0]

        # total number of photons is the sum of the
        # counts in the light curve
        self.nphots1 = np.float64(np.sum(lc1.counts))
        self.nphots2 = np.float64(np.sum(lc2.counts))

        self.meancounts1 = lc1.meancounts
        self.meancounts2 = lc2.meancounts

        # the number of data points in the light curve

        if lc1.n != lc2.n:
            raise StingrayError("Light curves do not have same number "
                                "of time bins per segment.")

        # If dt differs slightly, its propagated error must not be more than
        # 1/100th of the bin
        if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
            raise StingrayError("Light curves do not have same time binning "
                                "dt.")

        # In case a small difference exists, ignore it
        lc1.dt = lc2.dt

        self.n = lc1.n

        # the frequency resolution
        self.df = 1.0 / lc1.tseg

        # the number of averaged periodograms in the final output
        # This should *always* be 1 here
        self.m = 1

        # make the actual Fourier transform and compute cross spectrum
        self.freq, self.unnorm_power = self._fourier_cross(lc1, lc2)

        # If co-spectrum is desired, normalize here. Otherwise, get raw back
        # with the imaginary part still intact.
        self.power = self._normalize_crossspectrum(self.unnorm_power, lc1.tseg)

        if lc1.err_dist.lower() != lc2.err_dist.lower():
            simon("Your lightcurves have different statistics."
                  "The errors in the Crossspectrum will be incorrect.")
        elif lc1.err_dist.lower() != "poisson":
            simon("Looks like your lightcurve statistic is not poisson."
                  "The errors in the Powerspectrum will be incorrect.")

        if self.__class__.__name__ in ['Powerspectrum',
                                       'AveragedPowerspectrum']:
            self.power_err = self.power / np.sqrt(self.m)
        elif self.__class__.__name__ in ['Crossspectrum',
                                         'AveragedCrossspectrum']:
            # This is clearly a wild approximation.
            simon("Errorbars on cross spectra are not thoroughly tested. "
                  "Please report any inconsistencies.")
            unnorm_power_err = np.sqrt(2) / np.sqrt(self.m)  # Leahy-like
            unnorm_power_err /= (2 / np.sqrt(self.nphots1 * self.nphots2))
            unnorm_power_err += np.zeros_like(self.power)

            self.power_err = \
                self._normalize_crossspectrum(unnorm_power_err, lc1.tseg)
        else:
            self.power_err = np.zeros(len(self.power))
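
# A small numerical check of the dt tolerance used in the np.isclose call
# above (illustrative numbers only): a relative difference `rtol` between
# the two bin sizes accumulates to rtol * tseg over the whole segment, so
# rtol = 0.01 * dt / tseg keeps the drift of the last bin below 1/100 of a bin.
import numpy as np

dt, tseg = 0.01, 1000.0              # hypothetical bin size and segment length
rtol = 0.01 * dt / tseg              # tolerance passed to np.isclose above

dt_other = dt * (1 + rtol)           # second light curve right at the limit
drift = (dt_other - dt) * (tseg / dt)    # shift accumulated over all bins
print(drift <= 0.01 * dt + 1e-12)    # True: total drift stays below dt / 100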
Example no. 34
def efold_search_AandB(events_A,
                       events_B,
                       f_min,
                       f_max,
                       f_steps,
                       fdots=None,
                       time_intervals=None,
                       nbin=32,
                       pi_min=35,
                       pi_max=260,
                       return_peak=False,
                       z_n=2):
    # Scan over frequency and do epoch folding.

    A_mask = np.sqrt(
        np.square(events_A.x - events_A.centroid[0]) +
        np.square(events_A.y - events_A.centroid[1])) <= events_A.radius
    B_mask = np.sqrt(
        np.square(events_B.x - events_B.centroid[0]) +
        np.square(events_B.y - events_B.centroid[1])) <= events_B.radius

    temp_time = np.concatenate([events_A.time[A_mask], events_B.time[B_mask]])
    sorted_arg = np.argsort(temp_time)
    temp_time = temp_time[sorted_arg]
    temp_pi = np.concatenate([events_A.pi[A_mask],
                              events_B.pi[B_mask]])[sorted_arg]

    joined_ev = EventList_ext(time=temp_time,
                              gti=sting_gti.cross_two_gtis(
                                  events_A.gti, events_B.gti),
                              pi=temp_pi)
    ref_time = joined_ev.time[0]
    f_arr = np.linspace(f_min, f_max, num=f_steps)

    # if fdots:
    #     fgrid, fdgrid, z_stats = z_n_search(joined_ev.time, f_arr, nharm=z_n, nbin=nbin, gti=joined_ev.gti, fdots=fdots, segment_size=1e6)
    # else:
    #     fgrid, z_stats = z_n_search(joined_ev.time, f_arr, nharm=z_n, nbin=nbin, gti=joined_ev.gti, fdots=fdots, segment_size=1e6)

    pi_mask = ((joined_ev.pi > pi_min) * (joined_ev.pi < pi_max)).astype(bool)
    # The times to actually fold into a profile
    fold_times = joined_ev.time[pi_mask] - ref_time
    z_stats = []
    if fdots is not None:
        f_grid, fd_grid = np.meshgrid(f_arr, fdots)
        z_stats = np.zeros(f_grid.shape)
        for x in tqdm(range(f_steps)):
            for y in range(len(fdots)):
                # The phase of each folded event
                fold_phases = plsr.pulse_phase(fold_times,
                                               *[f_grid[y, x], fd_grid[y, x]])
                z_stats[y, x] = plsr.z_n(fold_phases, n=z_n)

        z_prob = stats.z2_n_logprobability(z_stats,
                                           ntrial=(f_steps * len(fdots)),
                                           n=z_n)

        if return_peak:
            max_yx = np.unravel_index(np.argmax(z_stats, axis=None),
                                      z_stats.shape)
            phase_bins, profile, profile_err, _ = \
                joined_ev.fold_events(*[f_grid[max_yx], fd_grid[max_yx]],
                                      time_intervals=time_intervals, nbin=nbin,
                                      ref_time=ref_time, region_filter=False,
                                      pi_min=pi_min, pi_max=pi_max,
                                      weight_pos=False, z_n=z_n)

            return f_grid, fd_grid, z_prob, z_stats, phase_bins, profile, profile_err

        else:
            return f_grid, fd_grid, z_prob, z_stats

    else:
        for f in tqdm(f_arr):
            # The phase of each folded event
            fold_phases = plsr.pulse_phase(fold_times, f)

            z_stat = plsr.z_n(fold_phases, n=z_n)

            # _, _, _, z_stat = \
            #     joined_ev.fold_events(f, time_intervals = time_intervals, \
            #                             nbin = nbin, ref_time = joined_ev.time[0], region_filter=False, pi_min=pi_min, pi_max=pi_max, weight_pos=False, z_n=z_n)

            z_stats.append(z_stat)

        z_stats = np.array(z_stats)
        z_prob = stats.z2_n_logprobability(z_stats, ntrial=len(f_arr), n=z_n)

        if return_peak:
            phase_bins, profile, profile_err, _ = \
                joined_ev.fold_events(f_arr[np.argmax(z_stats)],
                                      time_intervals=time_intervals, nbin=nbin,
                                      ref_time=ref_time, region_filter=False,
                                      pi_min=pi_min, pi_max=pi_max,
                                      weight_pos=False, z_n=z_n)

            return f_arr, z_prob, z_stats, phase_bins, profile, profile_err

        else:
            return f_arr, z_prob, z_stats
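
# Minimal sketch of the Z^2_n statistic evaluated inside the loops above,
# written out explicitly instead of calling plsr.z_n (standard
# Buccheri et al. 1983 form; the inputs are pulse phases in [0, 1)).
import numpy as np

def z_n_stat(phases, n=2):
    """Z^2_n = (2 / N) * sum_k [(sum cos(2 pi k phi))^2 + (sum sin(2 pi k phi))^2]."""
    phases = np.asarray(phases)
    total = 0.0
    for k in range(1, n + 1):
        total += (np.sum(np.cos(2 * np.pi * k * phases)) ** 2 +
                  np.sum(np.sin(2 * np.pi * k * phases)) ** 2)
    return 2.0 / phases.size * total

# For uniformly random phases the statistic should scatter around 2 * n.
print(z_n_stat(np.random.uniform(size=10000), n=2))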
Example no. 35
    def _make_multitaper_periodogram(self, lc, NW=4, adaptive=False,
                                     jackknife=True, low_bias=True,
                                     lombscargle=False):
        """Compute the normalized multitaper spectral estimate.

        This includes checking for the presence of and applying Good Time Intervals,
        computing a nitime-inspired normalized power spectrum, unnormalizing it,
        and then renormalizing it using the required normalization.

        Parameters
        ----------
        lc : :class:`stingray.Lightcurve` object
            The light curve used for computing the power spectrum.

        NW: float, optional, default ``4``
            The normalized half-bandwidth of the data tapers, indicating a
            multiple of the fundamental frequency of the DFT (Fs/N).
            Common choices are n/2, for n >= 4.

        adaptive: boolean, optional, default ``False``
            Use an adaptive weighting routine to combine the PSD estimates of
            different tapers.

        jackknife: boolean, optional, default ``True``
            Use the jackknife method to make an estimate of the PSD variance
            at each point.

        low_bias: boolean, optional, default ``True``
            Rather than use 2NW tapers, only use the tapers that have better than
            90% spectral concentration within the bandwidth (still using
            a maximum of 2NW tapers)
        """

        if not isinstance(lc, Lightcurve):
            raise TypeError("lc must be a lightcurve.Lightcurve object")

        if self.gti is None:
            self.gti = cross_two_gtis(lc.gti, lc.gti)

        check_gtis(self.gti)

        if self.gti.shape[0] != 1:
            raise TypeError("Non-averaged Spectra need "
                            "a single Good Time Interval")

        lc = lc.split_by_gti()[0]

        self.meancounts = lc.meancounts
        self.nphots = np.float64(np.sum(lc.counts))

        self.err_dist = 'poisson'
        if lc.err_dist == 'poisson':
            self.var = lc.meancounts
        else:
            self.var = np.mean(lc.counts_err) ** 2
            self.err_dist = 'gauss'

        self.dt = lc.dt
        self.n = lc.n

        # the frequency resolution
        self.df = 1.0 / lc.tseg

        # the number of averaged periodograms in the final output
        # This should *always* be 1 here
        self.m = 1

        if lombscargle:
            self.freq, self.multitaper_norm_power = \
                self._fourier_multitaper_lomb_scargle(lc, NW=NW,
                                                      low_bias=low_bias)

            self.unnorm_power = self.multitaper_norm_power * lc.n * 2

        else:

            self.freq, self.multitaper_norm_power = \
                self._fourier_multitaper(lc, NW=NW, adaptive=adaptive,
                                         jackknife=jackknife, low_bias=low_bias)

            self.unnorm_power = self.multitaper_norm_power * lc.n / lc.dt

        self.power = \
            self._normalize_multitaper(self.unnorm_power, lc.tseg)

        if lc.err_dist.lower() != "poisson":
            simon("Looks like your lightcurve statistic is not poisson."
                  "The errors in the Powerspectrum will be incorrect.")

        self.power_err = self.power / np.sqrt(self.m)
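
# A minimal multitaper sketch, independent of the class's own
# _fourier_multitaper: average the periodograms of DPSS-tapered copies of the
# mean-subtracted counts, keeping only tapers with more than 90% spectral
# concentration when low_bias is requested, as the docstring above describes.
# Uses scipy's dpss window; names and data are illustrative.
import numpy as np
from scipy.signal.windows import dpss

def multitaper_psd_sketch(counts, dt, NW=4, low_bias=True):
    n = counts.size
    tapers, ratios = dpss(n, NW, Kmax=int(2 * NW), return_ratios=True)
    if low_bias:
        tapers = tapers[ratios > 0.9]          # keep well-concentrated tapers
    freq = np.fft.rfftfreq(n, d=dt)[1:]        # drop the zero frequency
    spectra = [np.abs(np.fft.rfft(t * (counts - counts.mean()))[1:]) ** 2
               for t in tapers]
    return freq, np.mean(spectra, axis=0)

freq, power = multitaper_psd_sketch(np.random.poisson(10.0, size=1024), dt=0.1)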
Example no. 36
    def _spectrum_function(self):
        # Extract the photon arrival times from the reference band
        ref_events = self._get_times_from_energy_range(self.events2,
                                                       self.ref_band[0])
        ref_power_noise = poisson_level(norm="none", n_ph=ref_events.size)

        # Calculate the PDS in the reference band. Needed to calculate errors.
        results = avg_pds_from_events(ref_events,
                                      self.gti,
                                      self.segment_size,
                                      self.bin_time,
                                      silent=True,
                                      norm="none")
        freq = results["freq"]
        ref_power = results["power"]
        m_ave = results.meta["m"]

        # Get the frequency bins to be averaged in the final results.
        good = self._get_good_frequency_bins(freq)
        mean_ref_power = np.mean(ref_power[good])
        n_ave_bin = np.count_nonzero(good)

        m_tot = n_ave_bin * m_ave

        f = (self.freq_interval[0] + self.freq_interval[1]) / 2
        for i, eint in enumerate(show_progress(self.energy_intervals)):
            # Extract the photon arrival times from the subject band
            sub_events = self._get_times_from_energy_range(self.events1, eint)
            sub_power_noise = poisson_level(norm="none", n_ph=sub_events.size)

            results_cross = avg_cs_from_events(sub_events,
                                               ref_events,
                                               self.gti,
                                               self.segment_size,
                                               self.bin_time,
                                               silent=True,
                                               norm="none")

            results_ps = avg_pds_from_events(sub_events,
                                             self.gti,
                                             self.segment_size,
                                             self.bin_time,
                                             silent=True,
                                             norm="none")

            if results_cross is None or results_ps is None:
                continue

            cross = results_cross["power"]
            sub_power = results_ps["power"]

            Cmean = np.mean(cross[good])

            mean_sub_power = np.mean(sub_power[good])

            # Is the subject band overlapping with the reference band?
            # This will be used to correct the error bars, following
            # Ingram 2019.
            common_ref = self.same_events and len(
                cross_two_gtis([eint], self.ref_band)) > 0

            _, _, phi_e, _ = error_on_averaged_cross_spectrum(
                Cmean,
                mean_sub_power,
                mean_ref_power,
                m_tot,
                sub_power_noise,
                ref_power_noise,
                common_ref=common_ref)

            lag = np.mean((np.angle(cross[good]) / (2 * np.pi * freq[good])))

            lag_e = phi_e / (2 * np.pi * f)
            self.spectrum[i] = lag
            self.spectrum_error[i] = lag_e
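
# Tiny worked example of the phase-to-lag conversion applied above: a
# cross-spectral phase phi at frequency f maps to a time lag
# phi / (2 * pi * f). Values are made up for illustration.
import numpy as np

phi = np.pi / 4      # phase in radians
f = 2.0              # frequency in Hz
print(phi / (2 * np.pi * f))    # 0.0625 s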
Example no. 37
    def _make_crossspectrum(self, lc1, lc2):

        # make sure the inputs work!
        if not isinstance(lc1, Lightcurve):
            raise TypeError("lc1 must be a lightcurve.Lightcurve object")

        if not isinstance(lc2, Lightcurve):
            raise TypeError("lc2 must be a lightcurve.Lightcurve object")

        # Then check that GTIs make sense
        if self.gti is None:
            self.gti = cross_two_gtis(lc1.gti, lc2.gti)

        check_gtis(self.gti)

        if self.gti.shape[0] != 1:
            raise TypeError("Non-averaged Cross Spectra need "
                            "a single Good Time Interval")

        lc1 = lc1.split_by_gti()[0]
        lc2 = lc2.split_by_gti()[0]

        # total number of photons is the sum of the
        # counts in the light curve
        self.nphots1 = np.float64(np.sum(lc1.counts))
        self.nphots2 = np.float64(np.sum(lc2.counts))

        self.meancounts1 = lc1.meancounts
        self.meancounts2 = lc2.meancounts

        # the number of data points in the light curve

        if lc1.n != lc2.n:
            raise StingrayError("Light curves do not have same number "
                                "of time bins per segment.")

        if lc1.dt != lc2.dt:
            raise StingrayError("Light curves do not have "
                                "same time binning dt.")

        self.n = lc1.n

        # the frequency resolution
        self.df = 1.0 / lc1.tseg

        # the number of averaged periodograms in the final output
        # This should *always* be 1 here
        self.m = 1

        # make the actual Fourier transform and compute cross spectrum
        self.freq, self.unnorm_power = self._fourier_cross(lc1, lc2)

        # If co-spectrum is desired, normalize here. Otherwise, get raw back
        # with the imaginary part still intact.
        self.power = self._normalize_crossspectrum(self.unnorm_power, lc1.tseg)

        if lc1.err_dist.lower() != lc2.err_dist.lower():
            simon("Your lightcurves have different statistics."
                  "The errors in the Crossspectrum will be incorrect.")
        elif lc1.err_dist.lower() != "poisson":
            simon("Looks like your lightcurve statistic is not poisson."
                  "The errors in the Powerspectrum will be incorrect.")

        if self.__class__.__name__ in [
                'Powerspectrum', 'AveragedPowerspectrum'
        ]:
            self.power_err = self.power / np.sqrt(self.m)
        elif self.__class__.__name__ in [
                'Crossspectrum', 'AveragedCrossspectrum'
        ]:
            # This is clearly a wild approximation.
            simon("Errorbars on cross spectra are not thoroughly tested. "
                  "Please report any inconsistencies.")
            unnorm_power_err = np.sqrt(2) / np.sqrt(self.m)  # Leahy-like
            unnorm_power_err /= (2 / np.sqrt(self.nphots1 * self.nphots2))
            unnorm_power_err += np.zeros_like(self.power)

            self.power_err = \
                self._normalize_crossspectrum(unnorm_power_err, lc1.tseg)
        else:
            self.power_err = np.zeros(len(self.power))
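
# Quick check of the frequency resolution set above: for n bins of width dt,
# the Fourier frequencies are spaced by 1 / (n * dt) = 1 / tseg. Numbers are
# illustrative.
import numpy as np

n, dt = 1024, 0.5
tseg = n * dt
freq = np.fft.rfftfreq(n, d=dt)
print(np.allclose(np.diff(freq), 1.0 / tseg))    # True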
Example no. 38
    def _construct_lightcurves(self,
                               channel_band,
                               tstart=None,
                               tstop=None,
                               exclude=True,
                               only_base=False):
        """
        Construct light curves from event data, for each band of interest.

        Parameters
        ----------
        channel_band : iterable of type ``[elow, ehigh]``
            The lower/upper limits of the energies to be contained in the band
            of interest

        tstart : float, optional, default ``None``
            A common start time (if start of observation is different from
            the first recorded event)

        tstop : float, optional, default ``None``
            A common stop time (if the end of the observation is different
            from the last recorded event)

        exclude : bool, optional, default ``True``
            if ``True``, exclude the band of interest from the reference band

        only_base : bool, optional, default ``False``
            if ``True``, only return the light curve of the channel of interest, not
            that of the reference band

        Returns
        -------
        base_lc : :class:`Lightcurve` object
            The light curve of the channels of interest

        ref_lc : :class:`Lightcurve` object (only returned if ``only_base`` is ``False``)
            The reference light curve for comparison with ``base_lc``
        """
        if self.use_pi:
            energies1 = self.events1.pi
            energies2 = self.events2.pi
        else:
            energies2 = self.events2.energy
            energies1 = self.events1.energy

        gti = cross_two_gtis(self.events1.gti, self.events2.gti)

        tstart = assign_value_if_none(tstart, gti[0, 0])
        tstop = assign_value_if_none(tstop, gti[-1, -1])

        good = (energies1 >= channel_band[0]) & (energies1 < channel_band[1])
        base_lc = Lightcurve.make_lightcurve(
            self.events1.time[good],
            self.bin_time,
            tstart=tstart,
            tseg=tstop - tstart,
            gti=gti,
            mjdref=self.events1.mjdref,
        )

        if only_base:
            return base_lc

        if exclude:
            ref_intervals = get_non_overlapping_ref_band(
                channel_band, self.ref_band)
        else:
            ref_intervals = self.ref_band

        ref_lc = Lightcurve(
            base_lc.time,
            np.zeros_like(base_lc.counts),
            gti=base_lc.gti,
            mjdref=base_lc.mjdref,
            dt=base_lc.dt,
            err_dist=base_lc.err_dist,
            skip_checks=True,
        )

        for i in ref_intervals:
            good = (energies2 >= i[0]) & (energies2 < i[1])
            new_lc = Lightcurve.make_lightcurve(
                self.events2.time[good],
                self.bin_time,
                tstart=tstart,
                tseg=tstop - tstart,
                gti=base_lc.gti,
                mjdref=self.events2.mjdref,
            )
            ref_lc = ref_lc + new_lc

        ref_lc.err_dist = base_lc.err_dist
        return base_lc, ref_lc
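
# Illustration of the exclude=True branch above: the channel of interest is
# carved out of the reference band before the reference light curve is built.
# This is a hand-rolled stand-in for get_non_overlapping_ref_band that only
# handles a single reference interval; the energies (keV) are made up.
def exclude_channel_from_ref(channel_band, ref_band):
    elow, ehigh = channel_band
    rlow, rhigh = ref_band
    intervals = []
    if rlow < elow:
        intervals.append([rlow, min(elow, rhigh)])
    if rhigh > ehigh:
        intervals.append([max(ehigh, rlow), rhigh])
    return intervals

print(exclude_channel_from_ref([2.0, 3.0], [0.3, 10.0]))   # [[0.3, 2.0], [3.0, 10.0]]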
Example no. 39
    def _spectrum_function(self):
        # Extract events from the reference band and calculate the PDS and
        # the Poisson noise level.
        ref_events = self._get_times_from_energy_range(self.events2,
                                                       self.ref_band[0])
        countrate_ref = get_average_ctrate(ref_events, self.gti,
                                           self.segment_size)
        ref_power_noise = poisson_level(norm="abs", meanrate=countrate_ref)

        results = avg_pds_from_events(ref_events,
                                      self.gti,
                                      self.segment_size,
                                      self.bin_time,
                                      silent=True,
                                      norm="abs")
        freq = results["freq"]
        ref_power = results["power"]
        m_ave = results.meta["m"]

        # Select the frequency range to be averaged for the measurement.
        good = (freq >= self.freq_interval[0]) & (freq < self.freq_interval[1])
        n_ave_bin = np.count_nonzero(good)
        mean_ref_power = np.mean(ref_power[good])

        m_tot = m_ave * n_ave_bin
        # Frequency resolution
        delta_nu = n_ave_bin * self.delta_nu

        for i, eint in enumerate(show_progress(self.energy_intervals)):
            # Extract events from the subject band
            sub_events = self._get_times_from_energy_range(self.events1, eint)
            countrate_sub = get_average_ctrate(sub_events, self.gti,
                                               self.segment_size)
            sub_power_noise = poisson_level(norm="abs", meanrate=countrate_sub)

            results_cross = avg_cs_from_events(
                sub_events,
                ref_events,
                self.gti,
                self.segment_size,
                self.bin_time,
                silent=True,
                norm="abs",
            )

            results_ps = avg_pds_from_events(sub_events,
                                             self.gti,
                                             self.segment_size,
                                             self.bin_time,
                                             silent=True,
                                             norm="abs")

            if results_cross is None or results_ps is None:
                continue

            cross = results_cross["power"]
            sub_power = results_ps["power"]
            mean = results_ps.meta["mean"]

            # Is the subject band overlapping with the reference band?
            # This will be used to correct the error bars, following
            # Ingram 2019.
            common_ref = self.same_events and len(
                cross_two_gtis([eint], self.ref_band)) > 0
            Cmean = np.mean(cross[good])
            if common_ref:
                # Equation 6 from Ingram+2019
                Cmean -= sub_power_noise

            Cmean_real = np.abs(Cmean)

            mean_sub_power = np.mean(sub_power[good])

            _, _, _, Ce = error_on_averaged_cross_spectrum(
                Cmean,
                mean_sub_power,
                mean_ref_power,
                m_tot,
                sub_power_noise,
                ref_power_noise,
                common_ref=common_ref)
            if not self.return_complex:
                Cmean = Cmean_real

            # Convert the cross spectrum to a covariance.
            cov, cov_e = cross_to_covariance(np.asarray([Cmean,
                                                         Ce]), mean_ref_power,
                                             ref_power_noise, delta_nu)

            meanrate = mean / self.bin_time

            if self.norm == "frac":
                cov, cov_e = cov / meanrate, cov_e / meanrate

            self.spectrum[i] = cov
            self.spectrum_error[i] = cov_e
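
# A minimal sketch of the Poisson noise levels requested from poisson_level
# in this and the earlier examples, following the standard relations
# (e.g. van der Klis 1995): 2 in Leahy normalization, 2 / meanrate in
# fractional rms, 2 * meanrate in absolute rms, and roughly the number of
# photons when unnormalized. An independent illustration, not the library
# implementation.
def poisson_noise_level(norm, meanrate=None, n_ph=None):
    if norm == "leahy":
        return 2.0
    if norm == "frac":
        return 2.0 / meanrate
    if norm == "abs":
        return 2.0 * meanrate
    if norm == "none":
        return n_ph
    raise ValueError("unknown normalization: {}".format(norm))

print(poisson_noise_level("abs", meanrate=150.0))    # 300.0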