Example #1
    def _operation_with_other_lc(self, other, operation):
        """
        Helper method to codify an operation of one light curve with another (e.g. add, subtract, ...).
        Takes into account the GTIs correctly, and returns a new :class:`Lightcurve` object.

        Parameters
        ----------
        other : :class:`Lightcurve` object
            A second light curve object

        operation : function
            An operation between the :class:`Lightcurve` object calling this method, and ``other``,
            operating on the ``counts`` attribute in each :class:`Lightcurve` object

        Returns
        -------
        lc_new : Lightcurve object
            The new light curve calculated in ``operation``
        """
        if self.mjdref != other.mjdref:
            raise ValueError("MJDref is different in the two light curves")

        common_gti = cross_two_gtis(self.gti, other.gti)
        mask_self = create_gti_mask(self.time, common_gti, dt=self.dt)
        mask_other = create_gti_mask(other.time, common_gti, dt=other.dt)

        # ValueError is raised by Numpy while asserting np.equal over arrays
        # with different dimensions.
        try:
            diff = np.abs((self.time[mask_self] - other.time[mask_other]))
            assert np.all(diff < self.dt / 100)
        except (ValueError, AssertionError):
            raise ValueError("GTI-filtered time arrays of both light curves "
                             "must be of same dimension and equal.")

        new_time = self.time[mask_self]
        new_counts = operation(self.counts[mask_self],
                               other.counts[mask_other])

        if self.err_dist.lower() != other.err_dist.lower():
            simon("Lightcurves have different statistics!"
                  "We are setting the errors to zero to avoid complications.")
            new_counts_err = np.zeros_like(new_counts)
        elif self.err_dist.lower() in valid_statistics:
            new_counts_err = \
                np.sqrt(np.add(self.counts_err[mask_self]**2,
                               other.counts_err[mask_other]**2))
        # More conditions can be implemented for other statistics
        else:
            raise StingrayError("Statistics not recognized."
                                " Please use one of these: "
                                "{}".format(valid_statistics))

        lc_new = Lightcurve(new_time,
                            new_counts,
                            err=new_counts_err,
                            gti=common_gti,
                            mjdref=self.mjdref)

        return lc_new
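
For orientation, a minimal usage sketch (not part of the example above): in stingray the arithmetic operators of Lightcurve delegate to this helper, so adding two light curves with different GTIs keeps only the bins inside their intersection. The data below are made up.

import numpy as np
from stingray import Lightcurve

# Two light curves with identical 1-s binning but only partly overlapping GTIs.
dt = 1.0
time = np.arange(0.5, 20.5, dt)
lc1 = Lightcurve(time, np.random.poisson(100, time.size), gti=[[0, 20]], dt=dt)
lc2 = Lightcurve(time, np.random.poisson(50, time.size), gti=[[5, 20]], dt=dt)

# The '+' operator delegates to _operation_with_other_lc with np.add.
lc_sum = lc1 + lc2

print(lc_sum.gti)   # the common GTI, [[5, 20]]
print(lc_sum.n)     # only bins fully inside the common GTI survive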
Example #2
    def _operation_with_other_lc(self, other, operation):
        """
        Helper method to codify an operation of one light curve with another (e.g. add, subtract, ...).
        Takes into account the GTIs correctly, and returns a new :class:`Lightcurve` object.

        Parameters
        ----------
        other : :class:`Lightcurve` object
            A second light curve object

        operation : function
            An operation between the :class:`Lightcurve` object calling this method, and ``other``,
            operating on the ``counts`` attribute in each :class:`Lightcurve` object

        Returns
        -------
        lc_new : Lightcurve object
            The new light curve calculated in ``operation``
        """
        if self.mjdref != other.mjdref:
            raise ValueError("MJDref is different in the two light curves")

        common_gti = cross_two_gtis(self.gti, other.gti)
        mask_self = create_gti_mask(self.time, common_gti, dt=self.dt)
        mask_other = create_gti_mask(other.time, common_gti, dt=other.dt)

        # ValueError is raised by Numpy while asserting np.equal over arrays
        # with different dimensions.
        try:
            diff = np.abs((self.time[mask_self] - other.time[mask_other]))
            assert np.all(diff < self.dt / 100)
        except (ValueError, AssertionError):
            raise ValueError("GTI-filtered time arrays of both light curves "
                             "must be of same dimension and equal.")

        new_time = self.time[mask_self]
        new_counts = operation(self.counts[mask_self],
                               other.counts[mask_other])

        if self.err_dist.lower() != other.err_dist.lower():
            simon("Lightcurves have different statistics!"
                  "We are setting the errors to zero to avoid complications.")
            new_counts_err = np.zeros_like(new_counts)
        elif self.err_dist.lower() in valid_statistics:
            new_counts_err = \
                np.sqrt(np.add(self.counts_err[mask_self]**2,
                               other.counts_err[mask_other]**2))
        # More conditions can be implemented for other statistics
        else:
            raise StingrayError("Statistics not recognized."
                                " Please use one of these: "
                                "{}".format(valid_statistics))

        lc_new = Lightcurve(new_time, new_counts,
                            err=new_counts_err, gti=common_gti,
                            mjdref=self.mjdref)

        return lc_new
Example #3
def filter_lc_gtis(lc,
                   safe_interval=None,
                   delete=False,
                   min_length=0,
                   return_borders=False):
    """Filter a light curve for GTIs.

    Parameters
    ----------
    lc : :class:`Lightcurve` object
        The input light curve

    Returns
    -------
    newlc : :class:`Lightcurve` object
        The output light curve
    borders : [[i0_0, i0_1], [i1_0, i1_1], ...], optional
        The indexes of the light curve corresponding to the borders of the
        GTIs. Returned if return_borders is set to True

    Other Parameters
    ----------------
    safe_interval : float or [float, float]
        Seconds to filter out at the start and end of each GTI. If single
        float, these safe windows are equal, otherwise the two numbers refer
        to the start and end of the GTI respectively
    delete : bool
        If delete is True, the intervals outside of GTIs are filtered out from
        the light curve. Otherwise, they are set to zero.
    min_length : float
        Minimum length of GTI. GTIs below this length will be removed.
    return_borders : bool
        If True, return also the indexes of the light curve corresponding to
        the borders of the GTIs
    """
    mask, newgtis = create_gti_mask(lc.time,
                                    lc.gti,
                                    return_new_gtis=True,
                                    safe_interval=safe_interval,
                                    min_length=min_length)

    nomask = np.logical_not(mask)

    newlc = copy.copy(lc)
    newlc.counts[nomask] = 0
    newlc.gti = newgtis

    if return_borders:
        mask = create_gti_mask(lc.time, newgtis)
        borders = contiguous_regions(mask)
        return newlc, borders
    else:
        return newlc
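
A hypothetical call of the function above, assuming filter_lc_gtis is importable into the current namespace and that Lightcurve comes from stingray; all numbers are invented for illustration.

import numpy as np
from stingray import Lightcurve

# A toy light curve with two GTIs separated by a gap.
times = np.arange(0.5, 100.5, 1.0)
counts = np.random.poisson(50, times.size)
lc = Lightcurve(times, counts, gti=[[0, 40], [60, 100]])

# Shrink each GTI by 1 s at the start and 5 s at the end, drop GTIs shorter
# than 10 s, and also return the index ranges of the surviving intervals.
newlc, borders = filter_lc_gtis(lc, safe_interval=[1, 5], min_length=10,
                                return_borders=True)

# Bins outside the new GTIs are zeroed, not removed, by this implementation.
print(newlc.gti)
print(borders)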
Example #4
    def baseline(self, lam, p, niter=10, offset_correction=False):
        """Calculate the baseline of the light curve, accounting for GTIs.

        Parameters
        ----------
        lam : float
            "smoothness" parameter. Larger values make the baseline stiffer
            Typically ``1e2 < lam < 1e9``
        p : float
            "asymmetry" parameter. Smaller values make the baseline more
            "horizontal". Typically ``0.001 < p < 0.1``, but not necessary.

        Other parameters
        ----------------
        offset_correction : bool, default False
            By default, this method does not align the baseline to the running
            mean of the light curve; the baseline lies below the light curve.
            If ``offset_correction`` is True, an additional step shifts the
            baseline to the middle of the light-curve noise distribution.


        Returns
        -------
        baseline : numpy.ndarray
            An array with the baseline of the light curve
        """
        baseline = np.zeros_like(self.time)
        for g in self.gti:
            good = create_gti_mask(self.time, [g], dt=self.dt)
            _, baseline[good] = \
                baseline_als(self.time[good], self.counts[good], lam, p,
                             niter, offset_correction=offset_correction,
                             return_baseline=True)

        return baseline
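
A short usage sketch for this method (illustrative only; the light curve and the lam/p values are made up, picked from the ranges quoted in the docstring).

import numpy as np
from stingray import Lightcurve

# A noisy light curve with a slow upward drift and a gap between two GTIs.
t = np.arange(0.05, 100.0, 0.1)
counts = np.random.normal(100.0, 1.0, t.size) + 0.05 * t
lc = Lightcurve(t, counts, gti=[[0, 50], [60, 100]], dt=0.1)

# The plain baseline hugs the lower envelope of the noise; offset_correction
# shifts it to the middle of the noise distribution, as the docstring says.
bl = lc.baseline(lam=1e6, p=0.01)
bl_centered = lc.baseline(lam=1e6, p=0.01, offset_correction=True)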
Example #5
 def test_gti_mask(self):
     arr = np.array([0, 1, 2, 3, 4, 5, 6])
     gti = np.array([[0, 2.1], [3.9, 5]])
     mask, new_gtis = create_gti_mask(arr, gti, return_new_gtis=True)
     # NOTE: the time bin has to be fully inside the GTI. That is why the
     # bin at times 0, 2, 4 and 5 are not in.
     assert np.allclose(mask, np.array([0, 1, 0, 0, 0, 0, 0], dtype=bool))
Example #6
 def test_gti_mask_none_longer_than_minlen(self):
     arr = np.array([0, 1, 2, 3, 4, 5, 6])
     gti = np.array([[0, 2.1], [3.9, 5]])
     with pytest.warns(UserWarning) as record:
         mask = create_gti_mask(arr, gti, min_length=10)
     assert np.any(
         ["No GTIs longer than" in r.message.args[0] for r in record])
     assert np.all(~mask)
Example #7
def plot_lc(lcfiles,
            figname=None,
            fromstart=False,
            xlog=None,
            ylog=None,
            output_data_file=None):
    """Plot a list of light curve files, or a single one."""
    if is_string(lcfiles):
        lcfiles = [lcfiles]

    figlabel = lcfiles[0]

    plt.figure('LC ' + figlabel)
    for lcfile in lcfiles:
        logging.info('Loading %s...' % lcfile)
        lcdata = load_data(lcfile)

        time = lcdata['time']
        lc = lcdata['counts']
        gti = lcdata['gti']
        instr = lcdata['instr']

        if fromstart:
            time -= lcdata['Tstart']
            gti -= lcdata['Tstart']

        if instr == 'PCA':
            # If RXTE, plot per PCU count rate
            npcus = lcdata['nPCUs']
            lc /= npcus

        for g in gti:
            plt.axvline(g[0], ls='-', color='red')
            plt.axvline(g[1], ls='--', color='red')

        good = create_gti_mask(time, gti)
        plt.plot(time, lc, drawstyle='steps-mid', color='grey')
        plt.plot(time[good], lc[good], drawstyle='steps-mid', label=lcfile)
        if 'base' in lcdata:
            plt.plot(time, lcdata['base'], color='r')

        if output_data_file is not None:
            outqdpdata = [time[good], lc[good]]
            if 'base' in lcdata:
                outqdpdata.append(lcdata['base'][good])
            save_as_qdp(outqdpdata, filename=output_data_file, mode='a')

    plt.xlabel('Time (s)')
    if instr == 'PCA':
        plt.ylabel('light curve (Ct/bin/PCU)')
    else:
        plt.ylabel('light curve (Ct/bin)')

    plt.legend()
    if figname is not None:
        plt.savefig(figname)
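
A hypothetical invocation of plot_lc (the function is assumed to be imported from the package's plotting module); the file names are placeholders for light curves saved beforehand by the same package.

# Plot two previously saved light-curve files on one figure, zeroing the time
# axis at the observation start and dumping the plotted points to a QDP file.
plot_lc(['obsA_lc.nc', 'obsB_lc.nc'],
        figname='lightcurves.png',
        fromstart=True,
        output_data_file='lightcurves.qdp')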
Example #8
 def test_lc_baseline(self):
     times = np.arange(0, 100, 0.01)
     counts = np.random.normal(100, 0.1, len(times)) + \
         0.001 * times
     gti = [[-0.005, 50.005], [59.005, 100.005]]
     good = create_gti_mask(times, gti)
     counts[np.logical_not(good)] = 0
     lc = Lightcurve(times, counts, gti=gti)
     baseline = lc.baseline(10000, 0.01)
     assert np.all(lc.counts - baseline < 1)
Example #9
 def test_gti_mask_compare2(self):
     arr = np.array([0.5, 1.5, 2.5, 3.5])
     gti = np.array([[0, 4]])
     mask_c, new_gtis_c = \
         create_gti_mask_complete(arr, gti, return_new_gtis=True,
                                  safe_interval=[1, 1])
     mask, new_gtis = create_gti_mask(arr, gti, return_new_gtis=True,
                                      safe_interval=[1, 1])
     assert np.allclose(mask, mask_c)
     assert np.allclose(new_gtis, new_gtis_c)
Example #10
    def _operation_with_other_lc(self, other, operation):
        if self.mjdref != other.mjdref:
            raise ValueError("MJDref is different in the two light curves")

        common_gti = cross_two_gtis(self.gti, other.gti)
        mask_self = create_gti_mask(self.time, common_gti)
        mask_other = create_gti_mask(other.time, common_gti)

        # ValueError is raised by Numpy while asserting np.equal over arrays
        # with different dimensions.
        try:
            assert np.all(
                np.equal(self.time[mask_self], other.time[mask_other]))
        except (ValueError, AssertionError):
            raise ValueError("GTI-filtered time arrays of both light curves "
                             "must be of same dimension and equal.")

        new_time = self.time[mask_self]
        new_counts = operation(self.counts[mask_self],
                               other.counts[mask_other])

        if self.err_dist.lower() != other.err_dist.lower():
            simon("Lightcurves have different statistics!"
                  "We are setting the errors to zero to avoid complications.")
            new_counts_err = np.zeros_like(new_counts)
        elif self.err_dist.lower() in valid_statistics:
            new_counts_err = np.sqrt(
                np.add(self.counts_err[mask_self]**2,
                       other.counts_err[mask_other]**2))
        # More conditions can be implemented for other statistics
        else:
            raise StingrayError("Statistics not recognized."
                                " Please use one of these: "
                                "{}".format(valid_statistics))

        lc_new = Lightcurve(new_time,
                            new_counts,
                            err=new_counts_err,
                            gti=common_gti,
                            mjdref=self.mjdref)

        return lc_new
Example #11
 def test_lc_baseline_offset(self):
     times = np.arange(0, 100, 0.01)
     input_stdev = 0.1
     counts = np.random.normal(100, input_stdev, len(times)) + \
         0.001 * times
     gti = [[-0.005, 50.005], [59.005, 100.005]]
     good = create_gti_mask(times, gti)
     counts[np.logical_not(good)] = 0
     lc = Lightcurve(times, counts, gti=gti)
     baseline = lc.baseline(10000, 0.01, offset_correction=True)
     assert np.isclose(np.std(lc.counts - baseline), input_stdev, rtol=0.1)
Example #12
def _plot_corrected_light_curve(time, lc, expo, gti=None, outroot="expo"):
    import matplotlib.pyplot as plt
    good = create_gti_mask(time, gti)
    fig = plt.figure("Exposure-corrected lc")
    plt.plot(time[good], expo[good] / np.max(expo) * np.max(lc[good]),
             label="Exposure (arbitrary units)", zorder=10)
    plt.plot(time[good], lc[good], label="Light curve", zorder=20)
    plt.plot(time[good], lc[good] / expo[good],
             label="Exposure-corrected Light curve")
    plt.legend()
    fig.savefig(outroot + "_corr_lc.png")
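
A toy call sketch (illustrative; it assumes _plot_corrected_light_curve is defined or imported in the current namespace).

import numpy as np

# Fake data: a constant source observed with an exposure fraction that drops
# to 50% in the middle of the observation.
time = np.arange(0.5, 100.5, 1.0)
expo = np.ones_like(time)
expo[40:60] = 0.5
lc = np.random.poisson(100 * expo)

# Writes "expo_demo_corr_lc.png" with the raw, exposure and corrected curves.
_plot_corrected_light_curve(time, lc, expo, gti=[[0, 100]], outroot="expo_demo")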
Example #13
 def test_gti_mask_compare(self):
     arr = np.array([0.5, 1.5, 2.5, 3.5])
     gti = np.array([[0, 4]])
     mask_c, new_gtis_c = \
         create_gti_mask_complete(arr, gti, return_new_gtis=True,
                                  safe_interval=1)
     mask, new_gtis = create_gti_mask(arr,
                                      gti,
                                      return_new_gtis=True,
                                      safe_interval=1)
     assert np.all(mask == mask_c)
     assert np.all(new_gtis == new_gtis_c)
Example #14
    def _operation_with_other_lc(self, other, operation):
        if self.mjdref != other.mjdref:
            raise ValueError("MJDref is different in the two light curves")

        common_gti = cross_two_gtis(self.gti, other.gti)
        mask_self = create_gti_mask(self.time, common_gti)
        mask_other = create_gti_mask(other.time, common_gti)

        # ValueError is raised by Numpy while asserting np.equal over arrays
        # with different dimensions.
        try:
            assert np.all(np.equal(self.time[mask_self],
                                   other.time[mask_other]))
        except (ValueError, AssertionError):
            raise ValueError("GTI-filtered time arrays of both light curves "
                             "must be of same dimension and equal.")

        new_time = self.time[mask_self]
        new_counts = operation(self.counts[mask_self],
                               other.counts[mask_other])

        if self.err_dist.lower() != other.err_dist.lower():
            simon("Lightcurves have different statistics!"
                  "We are setting the errors to zero to avoid complications.")
            new_counts_err = np.zeros_like(new_counts)
        elif self.err_dist.lower() in valid_statistics:
            new_counts_err = np.sqrt(np.add(self.counts_err[mask_self]**2,
                                            other.counts_err[mask_other]**2))
        # More conditions can be implemented for other statistics
        else:
            raise StingrayError("Statistics not recognized."
                                " Please use one of these: "
                                "{}".format(valid_statistics))

        lc_new = Lightcurve(new_time, new_counts,
                            err=new_counts_err, gti=common_gti,
                            mjdref=self.mjdref)

        return lc_new
Example #15
    def test_rebin_with_gtis(self):
        times = np.arange(0, 100, 0.1)

        counts = np.random.normal(100, 0.1, size=times.shape[0])
        gti = [[0, 40], [60, 100]]

        good = create_gti_mask(times, gti)

        counts[np.logical_not(good)] = 0
        lc = Lightcurve(times, counts, gti=gti)

        lc_rebin = lc.rebin(1.0)

        assert (lc_rebin.time[39] - lc_rebin.time[38]) > 1.0
Example #16
    def _apply_gtis(self):
        """Apply GTIs to a light curve after modification."""
        check_gtis(self.gti)

        good = create_gti_mask(self.time, self.gti)

        self.time = self.time[good]
        self.counts = self.counts[good]
        self.counts_err = self.counts_err[good]
        self.countrate = self.countrate[good]
        self.countrate_err = self.countrate_err[good]

        self.meanrate = np.mean(self.countrate)
        self.meancounts = np.mean(self.counts)
        self.n = self.counts.shape[0]
Example #17
    def baseline(self, lam, p, niter=10):
        """Calculate the baseline of the light curve, accounting for GTIs.

        Parameters
        ----------
        lam : float
            "smoothness" parameter. Larger values make the baseline stiffer
            Typically 1e2 < lam < 1e9
        p : float
            "asymmetry" parameter. Smaller values make the baseline more 
            "horizontal". Typically 0.001 < p < 0.1, but not necessary.
        """
        baseline = np.zeros_like(self.time)
        for g in self.gti:
            good = create_gti_mask(self.time, [g])
            baseline[good] = baseline_als(self.counts[good], lam, p, niter)

        return baseline
Example #18
    def _apply_gtis(self):
        """
        Apply GTIs to a light curve. Filters the ``time``, ``counts``, ``countrate``, ``counts_err`` and
        ``countrate_err`` arrays for all bins that fall into Good Time Intervals and recalculates mean
        count(rate) and the number of bins.
        """
        check_gtis(self.gti)

        good = create_gti_mask(self.time, self.gti, dt=self.dt)

        self.time = self.time[good]
        self.counts = self.counts[good]

        self.counts_err = self.counts_err[good]
        self.countrate = self.countrate[good]
        self.countrate_err = self.countrate_err[good]

        self.meanrate = np.mean(self.countrate)
        self.meancounts = np.mean(self.counts)
        self.n = self.counts.shape[0]
Example #19
def apply_gti(fname, gti, outname=None, minimum_length=0):
    """Apply a GTI list to the data contained in a file.

    File MUST have a GTI extension already, and an extension called `time`.
    """
    ftype, data = get_file_type(fname, raw_data=True)

    try:
        datagti = data['gti']
        newgtis = cross_gtis([gti, datagti])
    except Exception:  # pragma: no cover
        logging.warning('Data have no GTI extension')
        newgtis = gti

    newgtis = filter_gti_by_length(newgtis, minimum_length)

    data['__sr__class__type__'] = 'gti'
    data['gti'] = newgtis
    good = create_gti_mask(data['time'], newgtis)

    data['time'] = data['time'][good]
    if ftype == 'lc':
        data['counts'] = data['counts'][good]
        data['counts_err'] = data['counts_err'][good]
    elif ftype == 'events':
        data['PI'] = data['PI'][good]
        if data['instr'] == 'PCA':  # pragma: no cover
            data['PCU'] = data['PCU'][good]

    newext = '_gtifilt' + HEN_FILE_EXTENSION
    outname = _assign_value_if_none(
        outname,
        fname.replace(HEN_FILE_EXTENSION, '') + newext)
    save_data(data, outname)

    return newgtis
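
A hypothetical call of apply_gti, assuming HEN_FILE_EXTENSION is in scope as in the module above; the file name and the GTI values are invented.

import numpy as np

# Keep only the data between MET 80000000 and 80002000 s, discarding any
# resulting GTI shorter than 100 s. 'obs_ev' stands for a file previously
# saved by the same package.
new_gtis = apply_gti('obs_ev' + HEN_FILE_EXTENSION,
                     gti=np.array([[80000000.0, 80002000.0]]),
                     minimum_length=100)
print(new_gtis)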
Example #20
 def test_gti_mask_fails_empty_gti(self):
     arr = np.array([0, 1, 2, 3, 4, 5, 6])
     gti = np.array([])
     with pytest.raises(ValueError) as excinfo:
         create_gti_mask(arr, gti, return_new_gtis=True)
     assert 'empty GTI array' in str(excinfo.value)
Example #21
    def __init__(self, time, counts, err=None, input_counts=True,
                 gti=None, err_dist='poisson', mjdref=0, dt=None):
        """
        Make a light curve object from an array of time stamps and an
        array of counts.

        Parameters
        ----------
        time: iterable
            A list or array of time stamps for a light curve

        counts: iterable, optional, default None
            A list or array of the counts in each bin corresponding to the
            bins defined in `time` (note: use `input_counts=False` to
            input the count rate, i.e. counts/second, otherwise use
            counts/bin).

        err: iterable, optional, default None:
            A list or array of the uncertainties in each bin corresponding to
            the bins defined in `time` (note: use `input_counts=False` to
            input the count rate, i.e. counts/second, otherwise use
            counts/bin). If None, we assume the data is Poisson distributed
            and calculate the error from the average of the lower and upper
            1-sigma confidence intervals for the Poissonian distribution with
            mean equal to `counts`.

        input_counts: bool, optional, default True
            If True, the code assumes that the input data in 'counts'
            is in units of counts/bin. If False, it assumes the data
            in 'counts' is in counts/second.

        gti: 2-d float array, default None
            [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
            Good Time Intervals. Bins falling outside these intervals are
            filtered out when the light curve is created. The GTIs are also
            used by other methods as an indication of the "safe" time
            intervals to use during analysis.

        err_dist: str, optional, default='poisson'
            Statistic of the Lightcurve, it is used to calculate the
            uncertainties and other statistical values appropriately.
            If `err` is not given and `err_dist` is not 'poisson', the
            errors are set to zero.

        mjdref: float
            MJD reference (useful in most high-energy mission data)


        Attributes
        ----------
        time: numpy.ndarray
            The array of midpoints of time bins.

        bin_lo: numpy.ndarray
            The array of lower time stamps of the time bins.

        bin_hi: numpy.ndarray
            The array of upper time stamps of the time bins.

        counts: numpy.ndarray
            The counts per bin corresponding to the bins in `time`.

        counts_err: numpy.ndarray
            The uncertainties corresponding to `counts`

        countrate: numpy.ndarray
            The counts per second in each of the bins defined in `time`.

        countrate_err: numpy.ndarray
            The uncertainties corresponding to `countrate`

        meanrate: float
            The mean count rate of the light curve.

        meancounts: float
            The mean counts of the light curve.

        n: int
            The number of data points in the light curve.

        dt: float
            The time resolution of the light curve.

        mjdref: float
            MJD reference date (tstart / 86400 gives the date in MJD at the
            start of the observation)

        tseg: float
            The total duration of the light curve.

        tstart: float
            The start time of the light curve.

        gti: 2-d float array
            [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
            Good Time Intervals. They indicate the "safe" time intervals
            to be used during the analysis of the light curve.

        err_dist: string
            Statistic of the Lightcurve, it is used to calculate the
            uncertainties and other statistical values appropriately.
            It propagates to Spectrum classes.

        """

        if not np.all(np.isfinite(time)):
            raise ValueError("There are inf or NaN values in "
                             "your time array!")

        if not np.all(np.isfinite(counts)):
            raise ValueError("There are inf or NaN values in "
                             "your counts array!")

        if len(time) != len(counts):

            raise StingrayError("time and counts array are not "
                                "of the same length!")

        if len(time) <= 1:
            raise StingrayError("A single or no data points can not create "
                                "a lightcurve!")

        if err is not None:
            if not np.all(np.isfinite(err)):
                raise ValueError("There are inf or NaN values in "
                                 "your err array")
        else:
            if err_dist.lower() not in valid_statistics:
                # err_dist set can be increased with other statistics
                raise StingrayError("Statistic not recognized."
                                    "Please select one of these: ",
                                    "{}".format(valid_statistics))
            if err_dist.lower() == 'poisson':
                # Instead of the simple square root, we use confidence
                # intervals (should be valid for low fluxes too)
                err_low, err_high = poisson_conf_interval(np.asarray(counts),
                    interval='frequentist-confidence', sigma=1)
                # calculate approximately symmetric uncertainties
                err_low -= np.asarray(counts)
                err_high -= np.asarray(counts)
                err = (np.absolute(err_low) + np.absolute(err_high))/2.0
                # other estimators can be implemented for other statistics
            else:
                simon("Stingray only uses poisson err_dist at the moment, "
                      "We are setting your errors to zero. "
                      "Sorry for the inconvenience.")
                err = np.zeros_like(counts)

        self.mjdref = mjdref
        self.time = np.asarray(time)
        if dt is None:
            self.dt = np.median(self.time[1:] - self.time[:-1])
        else:
            self.dt = dt

        self.bin_lo = self.time - 0.5 * self.dt
        self.bin_hi = self.time + 0.5 * self.dt

        self.err_dist = err_dist

        self.tstart = self.time[0] - 0.5*self.dt
        self.tseg = self.time[-1] - self.time[0] + self.dt

        self.gti = \
            np.asarray(assign_value_if_none(gti,
                                            [[self.tstart,
                                              self.tstart + self.tseg]]))
        check_gtis(self.gti)

        good = create_gti_mask(self.time, self.gti)

        self.time = self.time[good]
        if input_counts:
            self.counts = np.asarray(counts)[good]
            self.countrate = self.counts / self.dt
            self.counts_err = np.asarray(err)[good]
            self.countrate_err = np.asarray(err)[good] / self.dt
        else:
            self.countrate = np.asarray(counts)[good]
            self.counts = self.countrate * self.dt
            self.counts_err = np.asarray(err)[good] * self.dt
            self.countrate_err = np.asarray(err)[good]

        self.meanrate = np.mean(self.countrate)
        self.meancounts = np.mean(self.counts)
        self.n = self.counts.shape[0]

        # Issue a warning if the input time iterable isn't regularly spaced,
        # i.e. the bin sizes aren't equal throughout.
        dt_array = np.diff(self.time)
        if not (np.allclose(dt_array, np.repeat(self.dt, dt_array.shape[0]))):
            simon("Bin sizes in input time array aren't equal throughout! "
                  "This could cause problems with Fourier transforms. "
                  "Please make the input time evenly sampled.")
Example #22
    def __init__(self, time, counts, err=None, input_counts=True,
                 gti=None, err_dist='poisson', mjdref=0, dt=None):

        if not np.all(np.isfinite(time)):
            raise ValueError("There are inf or NaN values in "
                             "your time array!")

        if not np.all(np.isfinite(counts)):
            raise ValueError("There are inf or NaN values in "
                             "your counts array!")

        if len(time) != len(counts):

            raise StingrayError("time and counts array are not "
                                "of the same length!")

        if len(time) <= 1:
            raise StingrayError("A single or no data points can not create "
                                "a lightcurve!")

        if err is not None:
            if not np.all(np.isfinite(err)):
                raise ValueError("There are inf or NaN values in "
                                 "your err array")
        else:
            if err_dist.lower() not in valid_statistics:
                # err_dist set can be increased with other statistics
                raise StingrayError("Statistic not recognized."
                                    "Please select one of these: ",
                                    "{}".format(valid_statistics))
            if err_dist.lower() == 'poisson':
                # Instead of the simple square root, we use confidence
                # intervals (should be valid for low fluxes too)
                err = poisson_symmetrical_errors(counts)
            else:
                simon("Stingray only uses poisson err_dist at the moment, "
                      "We are setting your errors to zero. "
                      "Sorry for the inconvenience.")
                err = np.zeros_like(counts)

        self.mjdref = mjdref
        self.time = np.asarray(time)
        dt_array = np.diff(np.sort(self.time))
        dt_array_unsorted = np.diff(self.time)
        unsorted = np.any(dt_array_unsorted < 0)

        if dt is None:
            if unsorted:
                logging.warning("The light curve is unordered! This may cause "
                                "unexpected behaviour in some methods! Use "
                                "sort() to order the light curve in time and "
                                "check that the time resolution `dt` is "
                                "calculated correctly!")

            self.dt = np.median(dt_array)
        else:
            self.dt = dt

        self.bin_lo = self.time - 0.5 * self.dt
        self.bin_hi = self.time + 0.5 * self.dt

        self.err_dist = err_dist

        if unsorted:
            self.tstart = np.min(self.time) - 0.5 * self.dt
            self.tseg = np.max(self.time) - np.min(self.time) + self.dt
        else:
            self.tstart = self.time[0] - 0.5 * self.dt
            self.tseg = self.time[-1] - self.time[0] + self.dt

        self.gti = \
            np.asarray(assign_value_if_none(gti,
                                            [[self.tstart,
                                              self.tstart + self.tseg]]))

        check_gtis(self.gti)

        good = create_gti_mask(self.time, self.gti, dt=self.dt)

        self.time = self.time[good]

        if input_counts:
            self.counts = np.asarray(counts)[good]
            self.countrate = self.counts / self.dt
            self.counts_err = np.asarray(err)[good]
            self.countrate_err = np.asarray(err)[good] / self.dt
        else:
            self.countrate = np.asarray(counts)[good]
            self.counts = self.countrate * self.dt
            self.counts_err = np.asarray(err)[good] * self.dt
            self.countrate_err = np.asarray(err)[good]

        self.meanrate = np.mean(self.countrate)
        self.meancounts = np.mean(self.counts)
        self.n = self.counts.shape[0]

        # Issue a warning if the input time iterable isn't regularly spaced,
        # i.e. the bin sizes aren't equal throughout.
        dt_array = []
        for g in self.gti:
            mask = create_gti_mask(self.time, [g], dt=self.dt)
            t = self.time[mask]
            dt_array.extend(np.diff(t))
        dt_array = np.asarray(dt_array)

        if not (np.allclose(dt_array, np.repeat(self.dt, dt_array.shape[0]))):
            simon("Bin sizes in input time array aren't equal throughout! "
                  "This could cause problems with Fourier transforms. "
                  "Please make the input time evenly sampled.")
Example #23
 def test_gti_mask_fails_empty_time(self):
     arr = np.array([])
     gti = np.array([[0, 2.1], [3.9, 5]])
     with pytest.raises(ValueError) as excinfo:
         create_gti_mask(arr, gti, return_new_gtis=True)
     assert 'empty time array' in str(excinfo.value)
Example #24
 def apply_gtis(self):
     good = create_gti_mask(self.time, self.gti)