def test_calc_pds(self):
    """Check that a PDS computed with large_data=True matches the in-memory one.

    The large_data path must emit a UserWarning and produce attributes
    numerically close (rtol/atol = 0.1) to the standard computation.
    """
    ps_normal = AveragedPowerspectrum(
        self.lc1, segment_size=8192, silent=True, norm="leahy"
    )
    with pytest.warns(UserWarning) as record:
        ps_large = AveragedPowerspectrum(
            self.lc1,
            segment_size=8192,
            large_data=True,
            silent=True,
            norm="leahy",
        )
    assert np.any(
        ["The large_data option " in r.message.args[0] for r in record]
    )

    attrs = [
        "freq",
        "power",
        "power_err",
        "unnorm_power",
        "df",
        "n",
        "nphots",
        "gti",
        "m",
    ]

    assert ps_normal.freq.size == ps_large.freq.size

    allgood = True
    for attr in attrs:
        normal_val = getattr(ps_normal, attr)
        large_val = getattr(ps_large, attr)
        if np.allclose(normal_val, large_val, rtol=0.1, atol=0.1):
            continue
        # Mismatch: keep going so every bad attribute is reported,
        # then fail at the end.
        allgood = False
        print(f"Attribute = {attr} ")
        print(
            f"Raw Array: \nOriginal: {normal_val}, "
            f"\nLarge: {large_val}"
        )
        maxdev = np.amax(normal_val - large_val)
        maxdev_percent = np.abs(
            np.max(normal_val - large_val) * 100
        ) / np.max(normal_val)
        print(f"Max Deviation: {maxdev}, as %: {maxdev_percent}")
        print("\n")
    assert allgood
def load_pds(fname):
    """Load a power/cross spectrum and its ancillary products from a file.

    Parameters
    ----------
    fname : str
        Name of the input file, in pickle or netCDF format.

    Returns
    -------
    cpds : Powerspectrum, AveragedPowerspectrum, Crossspectrum or AveragedCrossspectrum
        The reconstructed spectrum, with any companion light curves
        (``lc1``/``lc2``), partial spectra (``pds1``/``pds2``) and
        per-segment cross spectra (``cs_all``) attached when the
        corresponding sibling files exist on disk.

    Raises
    ------
    ValueError
        If the file format or the stored data type is not recognized.
    """
    fmt = get_file_format(fname)
    if fmt == 'pickle':
        data = _load_data_pickle(fname)
    elif fmt == 'nc':
        data = _load_data_nc(fname)
    else:
        # Previously an unknown format fell through with `data` unbound,
        # raising a confusing NameError below. Fail explicitly instead.
        raise ValueError(f'Unrecognized file format: {fmt}')

    # The stored class name decides which spectrum object to rebuild.
    # Order matters: 'AveragedPowerspectrum' also contains 'Powerspectrum'.
    type_string = data['__sr__class__type__']
    if 'AveragedPowerspectrum' in type_string:
        cpds = AveragedPowerspectrum()
    elif 'Powerspectrum' in type_string:
        cpds = Powerspectrum()
    elif 'AveragedCrossspectrum' in type_string:
        cpds = AveragedCrossspectrum()
    elif 'Crossspectrum' in type_string:
        cpds = Crossspectrum()
    else:
        raise ValueError('Unrecognized data type in file')

    data.pop('__sr__class__type__')
    for key, value in data.items():
        setattr(cpds, key, value)

    # Companion products are stored as sibling files sharing the root name.
    lc1_name = fname.replace(HEN_FILE_EXTENSION,
                             '__lc1__' + HEN_FILE_EXTENSION)
    lc2_name = fname.replace(HEN_FILE_EXTENSION,
                             '__lc2__' + HEN_FILE_EXTENSION)
    pds1_name = fname.replace(HEN_FILE_EXTENSION,
                              '__pds1__' + HEN_FILE_EXTENSION)
    pds2_name = fname.replace(HEN_FILE_EXTENSION,
                              '__pds2__' + HEN_FILE_EXTENSION)
    cs_all_names = glob.glob(
        fname.replace(HEN_FILE_EXTENSION,
                      '__cs__[0-9]__' + HEN_FILE_EXTENSION))

    if os.path.exists(lc1_name):
        cpds.lc1 = load_lcurve(lc1_name)
    if os.path.exists(lc2_name):
        cpds.lc2 = load_lcurve(lc2_name)
    if os.path.exists(pds1_name):
        cpds.pds1 = load_pds(pds1_name)
    if os.path.exists(pds2_name):
        cpds.pds2 = load_pds(pds2_name)
    if len(cs_all_names) > 0:
        cpds.cs_all = [load_pds(c) for c in cs_all_names]

    return cpds
def test_invalid_data_to_pds(self):
    """A non-lightcurve input must raise a descriptive ValueError."""
    with pytest.raises(ValueError) as excinfo:
        AveragedPowerspectrum(
            "sdfasfsa", segment_size=2048, large_data=True, silent=True
        )

    assert "Invalid input data type: str" in str(excinfo.value)
def test_zhang_model_accurate():
    """The Zhang+ dead-time model must reproduce a simulated, dead-time-affected PDS.

    The ratio between the measured Leahy power and the model prediction
    should average to 1, with scatter consistent with the number of
    averaged segments.
    """
    bintime = 1 / 4096
    deadtime = 2.5e-3
    length = 2000
    fftlen = 5
    ctrate = 300

    # Only the dead-time-filtered event list is needed here.
    _, events_dt = simulate_events(ctrate, length, deadtime=deadtime)
    lc_dt = Lightcurve.make_lightcurve(
        events_dt, bintime, tstart=0, tseg=length
    )
    pds = AveragedPowerspectrum(lc_dt, fftlen, norm='leahy')

    zh_f, zh_p = pds_model_zhang(
        1000, ctrate, deadtime, bintime, limit_k=100
    )
    deadtime_fun = interp1d(
        zh_f, zh_p, bounds_error=False, fill_value="extrapolate"
    )

    ratio = pds.power / deadtime_fun(pds.freq)
    assert np.isclose(np.mean(ratio), 1, atol=0.001)
    assert np.isclose(np.std(ratio), 1 / np.sqrt(pds.m), atol=0.001)
def calc_pds(lcfile, fftlen, save_dyn=False, bintime=1, pdsrebin=1,
             normalization='leahy', back_ctrate=0., noclobber=False,
             outname=None):
    """Calculate the PDS from an input light curve file.

    Parameters
    ----------
    lcfile : str
        The light curve file
    fftlen : float
        The length of the chunks over which FFTs will be calculated, in
        seconds

    Other Parameters
    ----------------
    save_dyn : bool
        If True, save the dynamical power spectrum
    bintime : float
        The bin time. If different from that of the light curve, a rebinning
        is performed
    pdsrebin : int
        Rebin the PDS of this factor.
    normalization : str
        'Leahy' or 'rms'
    back_ctrate : float
        The non-source count rate
    noclobber : bool
        If True, do not overwrite existing files
    outname : str
        If specified, output file name. If not specified or None, the new
        file will have the same root as the input light curve and the
        '_pds' suffix
    """
    # Bug fix: previously `outname` was unconditionally overwritten, so the
    # user-supplied output name was silently ignored. Only derive the
    # default when the caller did not provide one.
    if outname is None:
        root = hen_root(lcfile)
        outname = root + '_pds' + HEN_FILE_EXTENSION

    if noclobber and os.path.exists(outname):
        print('File exists, and noclobber option used. Skipping')
        return

    logging.info("Loading file %s..." % lcfile)
    lc = load_lcurve(lcfile)
    instr = lc.instr

    # Rebin the light curve if a coarser bin time was requested; re-attach
    # the instrument tag, which rebinning does not preserve.
    if bintime > lc.dt:
        lcrebin = np.rint(bintime / lc.dt)
        logging.info("Rebinning lcs by a factor %d" % lcrebin)
        lc = lc.rebin(lcrebin)
        lc.instr = instr

    pds = AveragedPowerspectrum(lc, segment_size=fftlen,
                                norm=normalization.lower())

    if pdsrebin is not None and pdsrebin != 1:
        pds = pds.rebin(pdsrebin)

    pds.instr = instr
    pds.fftlen = fftlen
    pds.back_phots = back_ctrate * fftlen
    pds.mjdref = lc.mjdref

    logging.info('Saving PDS to %s' % outname)
    save_pds(pds, outname)
def load_pds(fname, nosub=False):
    """Load a power/cross spectrum and its ancillary products from a file.

    Parameters
    ----------
    fname : str
        Name of the input file, in pickle or netCDF format.

    Other Parameters
    ----------------
    nosub : bool
        If True, return the spectrum alone, without loading companion
        light curves, partial spectra, or per-segment cross spectra.

    Returns
    -------
    cpds : Powerspectrum, AveragedPowerspectrum, Crossspectrum or AveragedCrossspectrum
        The reconstructed spectrum, with best-fit models attached when
        model files are found, and (unless ``nosub``) any companion
        products found next to the file.

    Raises
    ------
    ValueError
        If the file format or the stored data type is not recognized.
    """
    fmt = get_file_format(fname)
    if fmt == 'pickle':
        data = _load_data_pickle(fname)
    elif fmt == 'nc':
        data = _load_data_nc(fname)
    else:
        # Previously an unknown format fell through with `data` unbound,
        # raising a confusing NameError below. Fail explicitly instead.
        raise ValueError(f'Unrecognized file format: {fmt}')

    # The stored class name decides which spectrum object to rebuild.
    # Order matters: 'AveragedPowerspectrum' also contains 'Powerspectrum'.
    type_string = data['__sr__class__type__']
    if 'AveragedPowerspectrum' in type_string:
        cpds = AveragedPowerspectrum()
    elif 'Powerspectrum' in type_string:
        cpds = Powerspectrum()
    elif 'AveragedCrossspectrum' in type_string:
        cpds = AveragedCrossspectrum()
    elif 'Crossspectrum' in type_string:
        cpds = Crossspectrum()
    else:
        raise ValueError('Unrecognized data type in file')

    data.pop('__sr__class__type__')
    for key, value in data.items():
        setattr(cpds, key, value)

    if 'amplitude' in list(data.keys()):
        cpds.amplitude = bool(data["amplitude"])

    outdir = fname.replace(HEN_FILE_EXTENSION, "")
    # NOTE(review): if `fname` carries a directory component, this join
    # produces outdir/<dir>/<base>__mod*__.p — confirm against the layout
    # used when the model files are saved.
    modelfiles = glob.glob(
        os.path.join(outdir,
                     fname.replace(HEN_FILE_EXTENSION, '__mod*__.p')))
    cpds.best_fits = None
    if len(modelfiles) >= 1:
        bmodels = []
        for mfile in modelfiles:
            if os.path.exists(mfile):
                bmodels.append(load_model(mfile)[0])
        cpds.best_fits = bmodels

    if nosub:
        return cpds

    # Companion products live inside a directory named after the file root.
    lc1_name = os.path.join(outdir, '__lc1__' + HEN_FILE_EXTENSION)
    lc2_name = os.path.join(outdir, '__lc2__' + HEN_FILE_EXTENSION)
    pds1_name = os.path.join(outdir, '__pds1__' + HEN_FILE_EXTENSION)
    pds2_name = os.path.join(outdir, '__pds2__' + HEN_FILE_EXTENSION)
    cs_all_names = glob.glob(
        os.path.join(outdir, '__cs__[0-9]__' + HEN_FILE_EXTENSION))

    if os.path.exists(lc1_name):
        cpds.lc1 = load_lcurve(lc1_name)
    if os.path.exists(lc2_name):
        cpds.lc2 = load_lcurve(lc2_name)
    if os.path.exists(pds1_name):
        cpds.pds1 = load_pds(pds1_name)
    if os.path.exists(pds2_name):
        cpds.pds2 = load_pds(pds2_name)
    if len(cs_all_names) > 0:
        cpds.cs_all = [load_pds(c) for c in cs_all_names]

    return cpds
def test_calc_pds_zarr_not_installed(self):
    """Without zarr installed, large_data=True must fail with ImportError."""
    with pytest.raises(ImportError) as excinfo:
        AveragedPowerspectrum(
            self.lc1, segment_size=8192, large_data=True, silent=True
        )

    assert "The large_data option requires zarr" in str(excinfo.value)