Example #1
    def _spectrum_function(self):

        lag_spec = np.zeros(len(self.energy_intervals))
        lag_spec_err = np.zeros_like(lag_spec)
        for i, eint in enumerate(self.energy_intervals):
            base_lc, ref_lc = self._construct_lightcurves(eint)
            try:
                xspect = AveragedCrossspectrum(base_lc, ref_lc,
                                               segment_size=self.segment_size)
            except AssertionError as e:
                # Avoid assertions in AveragedCrossspectrum.
                simon("AssertionError: " + str(e))
            else:
                good = (xspect.freq >= self.freq_interval[0]) & \
                       (xspect.freq < self.freq_interval[1])
                lag, lag_err = xspect.time_lag()
                good_lag, good_lag_err = lag[good], lag_err[good]
                coh, coh_err = xspect.coherence()
                lag_spec[i] = np.mean(good_lag)
                coh_check = coh > 1.2 / (1 + 0.2 * xspect.m)
                if not np.all(coh_check[good]):
                    simon("Coherence is not ideal over the specified energy "
                          "range. Lag values and uncertainties might be "
                          "underestimated. See Epitropakis and Papadakis, "
                          "A\&A 591, 1113, 2016")

                # Root squared sum of errors of the spectrum
                # Verified!
                lag_spec_err[i] = \
                    np.sqrt(np.sum(good_lag_err**2) / len(good_lag))

        return lag_spec, lag_spec_err
Example #2
    def _spectrum_function(self):

        lag_spec = np.zeros(len(self.energy_intervals))
        lag_spec_err = np.zeros_like(lag_spec)
        for i, eint in enumerate(self.energy_intervals):
            base_lc, ref_lc = self._construct_lightcurves(eint)
            xspect = AveragedCrossspectrum(base_lc,
                                           ref_lc,
                                           segment_size=self.segment_size)
            good = (xspect.freq >= self.freq_interval[0]) & \
                   (xspect.freq < self.freq_interval[1])
            lag, lag_err = xspect.time_lag()
            good_lag, good_lag_err = lag[good], lag_err[good]
            coh, coh_err = xspect.coherence()
            lag_spec[i] = np.mean(good_lag)
            coh_check = coh > 1.2 / (1 + 0.2 * xspect.m)
            if not np.all(coh_check[good]):
                simon("Coherence is not ideal over the specified energy range."
                      " Lag values and uncertainties might be underestimated. "
                      "See Epitropakis and Papadakis, A\&A 591, 1113, 2016")

            # Root squared sum of errors of the spectrum
            # Verified!
            lag_spec_err[i] = np.sqrt(np.sum(good_lag_err**2) / len(good_lag))

        return lag_spec, lag_spec_err
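Examples #1 and #2 both wrap the same pattern: build an AveragedCrossspectrum from a band-of-interest light curve and a reference light curve, mask a frequency interval, and combine the lags returned by time_lag(). A minimal stand-alone sketch of that pattern follows, assuming the stingray Lightcurve and AveragedCrossspectrum classes used throughout these examples; the synthetic Poisson light curves, segment size and frequency band are illustrative assumptions, not values taken from the examples.

# Minimal sketch of the lag-extraction pattern in Examples #1 and #2.
# The light curves are synthetic stand-ins for _construct_lightcurves(eint).
import numpy as np
from stingray import Lightcurve, AveragedCrossspectrum

dt = 1 / 256
time = np.arange(0, 512, dt)
base_lc = Lightcurve(time, np.random.poisson(200, time.size), dt=dt)
ref_lc = Lightcurve(time, np.random.poisson(200, time.size), dt=dt)

xspect = AveragedCrossspectrum(base_lc, ref_lc, segment_size=64)
lag, lag_err = xspect.time_lag()

freq_interval = (0.1, 10.0)  # Hz, illustrative band
good = (xspect.freq >= freq_interval[0]) & (xspect.freq < freq_interval[1])
mean_lag = np.mean(lag[good])
# Same error combination as in the examples above
mean_lag_err = np.sqrt(np.sum(lag_err[good] ** 2) / len(lag[good]))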
Example #3
    def test_calc_cpds(self):
        cs_normal = AveragedCrossspectrum(
            self.lc1, self.lc2, segment_size=8192, silent=True, legacy=True
        )
        with pytest.warns(UserWarning) as record:
            cs_large = AveragedCrossspectrum(
                self.lc1, self.lc2, segment_size=8192, large_data=True, silent=True
            )
            assert np.any(['The large_data option and the save_all' in r.message.args[0]
                           for r in record])

        attrs = [
            "freq",
            "power",
            "unnorm_power",
            "df",
            "n",
            "nphots1",
            "nphots2",
            "m",
            "gti",
        ]
        assert cs_normal.freq.size == cs_large.freq.size

        allgood = True
        for attr in attrs:
            if not np.allclose(
                getattr(cs_normal, attr),
                getattr(cs_large, attr),
                rtol=0.1,
                atol=0.1,
            ):
                print(f"Attribute = {attr} ")
                print(
                    f"Raw Array: \nOriginal: {getattr(cs_normal, attr)}, \n"
                    f"Large: {getattr(cs_large, attr)}"
                )
                maxdev = np.amax(
                    getattr(cs_normal, attr) - getattr(cs_large, attr)
                )
                maxdev_percent = np.abs(
                    np.max(getattr(cs_normal, attr) - getattr(cs_large, attr))
                    * 100
                ) / np.max(getattr(cs_normal, attr))
                print(f"Max Deviation: {maxdev}, as %: {maxdev_percent}")
                print("\n")
                allgood = False
        assert allgood
Example #4
    def _spectrum_function(self):

        rms_spec = np.zeros(len(self.energy_intervals))
        rms_spec_err = np.zeros_like(rms_spec)
        for i, eint in enumerate(self.energy_intervals):
            base_lc, ref_lc = self._construct_lightcurves(eint,
                                                          exclude=False)
            try:
                xspect = AveragedCrossspectrum(base_lc, ref_lc,
                                               segment_size=self.segment_size,
                                               norm='frac')
            except AssertionError as e:
                # Avoid "Mean count rate is <= 0. Something went wrong" assertion.
                simon("AssertionError: " + str(e))
            else:
                good = (xspect.freq >= self.freq_interval[0]) & \
                       (xspect.freq < self.freq_interval[1])
                rms_spec[i] = np.sqrt(np.sum(xspect.power[good] * xspect.df))

                # Root squared sum of errors of the spectrum
                root_sq_err_sum = \
                    np.sqrt(np.sum((xspect.power_err[good] * xspect.df) ** 2))
                # But the rms is the squared root. So,
                # Error propagation
                rms_spec_err[i] = 1 / (2 * rms_spec[i]) * root_sq_err_sum

        return rms_spec, rms_spec_err
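The propagation step at the end of Example #4 is the derivative rule for a square root: with the band-integrated variance var = sum(P_k * df) and rms = sqrt(var), the uncertainty is sigma_rms = sigma_var / (2 * rms). A small numeric sketch follows; the powers and errors are made up, only the propagation formula comes from the example.

import numpy as np

# Illustrative band powers and errors in 'frac' normalization (made up)
power = np.array([0.020, 0.015, 0.010])
power_err = np.array([0.002, 0.002, 0.001])
df = 0.125  # Hz

var = np.sum(power * df)                          # integrated variance = rms**2
rms = np.sqrt(var)
var_err = np.sqrt(np.sum((power_err * df) ** 2))  # root squared sum of errors
rms_err = var_err / (2 * rms)                     # d(sqrt(x)) = dx / (2 * sqrt(x))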
Example #5
 def test_calc_cpds_zarr_not_installed(self):
     with pytest.raises(ImportError) as excinfo:
         AveragedCrossspectrum(
             self.lc1, self.lc2, segment_size=8192, large_data=True, silent=True,
             legacy=True
         )
     assert "The large_data option requires zarr" in str(excinfo.value)
Example #6
    def test_calc_cpds(self):
        cs_normal = AveragedCrossspectrum(self.lc1,
                                          self.lc2,
                                          segment_size=8192,
                                          silent=True)
        cs_large = AveragedCrossspectrum(self.lc1,
                                         self.lc2,
                                         segment_size=8192,
                                         large_data=True,
                                         silent=True)

        attrs = [
            "freq",
            "power",
            "power_err",
            "unnorm_power",
            "df",
            "n",
            "nphots1",
            "nphots2",
            "m",
            "gti",
        ]
        assert cs_normal.freq.size == cs_large.freq.size

        allgood = True
        for attr in attrs:
            if not np.allclose(
                    getattr(cs_normal, attr),
                    getattr(cs_large, attr),
                    rtol=0.1,
                    atol=0.1,
            ):
                print(f"Attribute = {attr} ")
                print(f"Raw Array: \nOriginal: {getattr(cs_normal, attr)}, \n"
                      f"Large: {getattr(cs_large, attr)}")
                maxdev = np.amax(
                    getattr(cs_normal, attr) - getattr(cs_large, attr))
                maxdev_percent = np.abs(
                    np.max(getattr(cs_normal, attr) - getattr(cs_large, attr))
                    * 100) / np.max(getattr(cs_normal, attr))
                print(f"Max Deviation: {maxdev}, as %: {maxdev_percent}")
                print("\n")
                allgood = False
        assert allgood
Example #7
 def test_invalid_data_to_cpds(self):
     with pytest.raises(ValueError) as excinfo:
         AveragedCrossspectrum(
             "sdfasfsa",
             "sdfasfsa",
             segment_size=4096,
             large_data=True,
             silent=True,
         )
     assert "Invalid input data type: str" in str(excinfo.value)
Example #8
 def test_invalid_data_to_cpds(self):
     with pytest.raises(ValueError) as excinfo:
         AveragedCrossspectrum(
             [self.lc1, self.lc1],
             [self.lc2, self.lc2],
             segment_size=4096,
             large_data=True,
             silent=True,
         )
     assert "Invalid input data type: list" in str(excinfo.value)
Example #9
 def test_events_to_cpds_unimplemented(self):
     """Large memory option not implemented for events (and maybe never will)"""
     with pytest.raises(NotImplementedError) as excinfo:
         ev1 = EventList(np.random.uniform(0, 10, 10))
         AveragedCrossspectrum(
             ev1,
             ev1,
             dt=0.01,
             segment_size=5,
             large_data=True,
             silent=True,
         )
Example #10
def load_pds(fname):
    """Load PDS from a file."""
    if get_file_format(fname) == 'pickle':
        data = _load_data_pickle(fname)
    elif get_file_format(fname) == 'nc':
        data = _load_data_nc(fname)

    type_string = data['__sr__class__type__']
    if 'AveragedPowerspectrum' in type_string:
        cpds = AveragedPowerspectrum()
    elif 'Powerspectrum' in type_string:
        cpds = Powerspectrum()
    elif 'AveragedCrossspectrum' in type_string:
        cpds = AveragedCrossspectrum()
    elif 'Crossspectrum' in type_string:
        cpds = Crossspectrum()
    else:
        raise ValueError('Unrecognized data type in file')

    data.pop('__sr__class__type__')
    for key in data.keys():
        setattr(cpds, key, data[key])

    lc1_name = fname.replace(HEN_FILE_EXTENSION,
                             '__lc1__' + HEN_FILE_EXTENSION)
    lc2_name = fname.replace(HEN_FILE_EXTENSION,
                             '__lc2__' + HEN_FILE_EXTENSION)
    pds1_name = fname.replace(HEN_FILE_EXTENSION,
                              '__pds1__' + HEN_FILE_EXTENSION)
    pds2_name = fname.replace(HEN_FILE_EXTENSION,
                              '__pds2__' + HEN_FILE_EXTENSION)
    cs_all_names = glob.glob(
        fname.replace(HEN_FILE_EXTENSION,
                      '__cs__[0-9]__' + HEN_FILE_EXTENSION))

    if os.path.exists(lc1_name):
        cpds.lc1 = load_lcurve(lc1_name)
    if os.path.exists(lc2_name):
        cpds.lc2 = load_lcurve(lc2_name)
    if os.path.exists(pds1_name):
        cpds.pds1 = load_pds(pds1_name)
    if os.path.exists(pds2_name):
        cpds.pds2 = load_pds(pds2_name)
    if len(cs_all_names) > 0:
        cs_all = []
        for c in cs_all_names:
            cs_all.append(load_pds(c))
        cpds.cs_all = cs_all

    return cpds
Example #11
    def _spectrum_function(self):

        rms_spec = np.zeros(len(self.energy_intervals))
        rms_spec_err = np.zeros_like(rms_spec)
        for i, eint in enumerate(self.energy_intervals):
            base_lc, ref_lc = self._construct_lightcurves(eint, exclude=False)
            xspect = AveragedCrossspectrum(base_lc,
                                           ref_lc,
                                           segment_size=self.segment_size,
                                           norm='frac')
            good = (xspect.freq >= self.freq_interval[0]) & \
                   (xspect.freq < self.freq_interval[1])
            rms_spec[i] = np.sqrt(np.sum(xspect.power[good] * xspect.df))

            # Root squared sum of errors of the spectrum
            root_sq_err_sum = (np.sqrt(np.sum(xspect.power[good] ** 2))
                               * xspect.df)
            # But the rms is the squared root. So,
            # Error propagation
            rms_spec_err[i] = 1 / (2 * rms_spec[i]) * root_sq_err_sum

        return rms_spec, rms_spec_err
Example #12
 def initial_checks(self, *args, **kwargs):
     return AveragedCrossspectrum.initial_checks(self, *args, **kwargs)
Example #13
def calc_cpds(lcfile1,
              lcfile2,
              fftlen,
              save_dyn=False,
              bintime=1,
              pdsrebin=1,
              outname='cpds' + HEN_FILE_EXTENSION,
              normalization='leahy',
              back_ctrate=0.,
              noclobber=False):
    """Calculate the CPDS from a pair of input light curve files.

    Parameters
    ----------
    lcfile1 : str
        The first light curve file
    lcfile2 : str
        The second light curve file
    fftlen : float
        The length of the chunks over which FFTs will be calculated, in seconds

    Other Parameters
    ----------------
    save_dyn : bool
        If True, save the dynamical power spectrum
    bintime : float
        The bin time. If different from that of the light curve, a rebinning is
        performed
    pdsrebin : int
        Rebin the PDS of this factor.
    normalization : str
        'Leahy' or 'rms'. Default 'Leahy'
    back_ctrate : float
        The non-source count rate
    noclobber : bool
        If True, do not overwrite existing files
    outname : str
        Output file name for the cpds. Default: cpds.[nc|p]
    """
    if noclobber and os.path.exists(outname):
        print('File exists, and noclobber option used. Skipping')
        return

    logging.info("Loading file %s..." % lcfile1)
    lc1 = load_lcurve(lcfile1)
    logging.info("Loading file %s..." % lcfile2)
    lc2 = load_lcurve(lcfile2)
    instr1 = lc1.instr
    instr2 = lc2.instr

    assert lc1.dt == lc2.dt, 'Light curves are sampled differently'
    dt = lc1.dt

    if bintime > dt:
        lcrebin = np.rint(bintime / dt)
        logging.info("Rebinning lcs by a factor %d" % lcrebin)
        lc1 = lc1.rebin(lcrebin)
        lc1.instr = instr1
        lc2 = lc2.rebin(lcrebin)
        lc2.instr = instr2

    if lc1.mjdref != lc2.mjdref:
        lc2 = lc2.change_mjdref(lc1.mjdref)

    ctrate = np.sqrt(lc1.meanrate * lc2.meanrate)

    cpds = AveragedCrossspectrum(lc1,
                                 lc2,
                                 segment_size=fftlen,
                                 norm=normalization.lower())

    if pdsrebin is not None and pdsrebin != 1:
        cpds = cpds.rebin(pdsrebin)

    cpds.instrs = instr1 + ',' + instr2
    cpds.fftlen = fftlen
    cpds.back_phots = back_ctrate * fftlen
    cpds.mjdref = lc1.mjdref
    lags, lags_err = cpds.time_lag()
    cpds.lag = lags
    cpds.lag_err = lags_err

    logging.info('Saving CPDS to %s' % outname)
    save_pds(cpds, outname)
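A hedged usage sketch for the function above; the input and output file names are hypothetical placeholders for light curves already saved in HENDRICS format.

# Hypothetical file names; fftlen and normalization chosen for illustration
calc_cpds('lc_instrA' + HEN_FILE_EXTENSION,
          'lc_instrB' + HEN_FILE_EXTENSION,
          fftlen=512,
          normalization='leahy',
          outname='cpds_AB' + HEN_FILE_EXTENSION)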
Example #14
def load_pds(fname, nosub=False):
    """Load PDS from a file."""
    if get_file_format(fname) == 'pickle':
        data = _load_data_pickle(fname)
    elif get_file_format(fname) == 'nc':
        data = _load_data_nc(fname)

    type_string = data['__sr__class__type__']
    if 'AveragedPowerspectrum' in type_string:
        cpds = AveragedPowerspectrum()
    elif 'Powerspectrum' in type_string:
        cpds = Powerspectrum()
    elif 'AveragedCrossspectrum' in type_string:
        cpds = AveragedCrossspectrum()
    elif 'Crossspectrum' in type_string:
        cpds = Crossspectrum()
    else:
        raise ValueError('Unrecognized data type in file')

    data.pop('__sr__class__type__')
    for key in data.keys():
        setattr(cpds, key, data[key])

    if 'amplitude' in list(data.keys()):
        cpds.amplitude = bool(data["amplitude"])

    outdir = fname.replace(HEN_FILE_EXTENSION, "")
    modelfiles = glob.glob(
        os.path.join(outdir, fname.replace(HEN_FILE_EXTENSION, '__mod*__.p')))
    cpds.best_fits = None
    if len(modelfiles) >= 1:
        bmodels = []
        for mfile in modelfiles:
            if os.path.exists(mfile):
                bmodels.append(load_model(mfile)[0])
        cpds.best_fits = bmodels

    if nosub:
        return cpds

    lc1_name = os.path.join(outdir, '__lc1__' + HEN_FILE_EXTENSION)
    lc2_name = os.path.join(outdir, '__lc2__' + HEN_FILE_EXTENSION)
    pds1_name = os.path.join(outdir, '__pds1__' + HEN_FILE_EXTENSION)
    pds2_name = os.path.join(outdir, '__pds2__' + HEN_FILE_EXTENSION)
    cs_all_names = glob.glob(
        os.path.join(outdir, '__cs__[0-9]__' + HEN_FILE_EXTENSION))

    if os.path.exists(lc1_name):
        cpds.lc1 = load_lcurve(lc1_name)
    if os.path.exists(lc2_name):
        cpds.lc2 = load_lcurve(lc2_name)
    if os.path.exists(pds1_name):
        cpds.pds1 = load_pds(pds1_name)
    if os.path.exists(pds2_name):
        cpds.pds2 = load_pds(pds2_name)
    if len(cs_all_names) > 0:
        cs_all = []
        for c in cs_all_names:
            cs_all.append(load_pds(c))
        cpds.cs_all = cs_all

    return cpds
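And a short usage sketch for the loader above, with a placeholder file name; nosub=True skips reloading the auxiliary light curves and per-channel PDSs.

# Hypothetical file name, matching the outname used in the calc_cpds sketch
cpds = load_pds('cpds_AB' + HEN_FILE_EXTENSION, nosub=True)
print(type(cpds).__name__)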