Example #1
 def test_make_empty_periodogram(self):
     ps = Powerspectrum()
     assert ps.norm == "frac"
     assert ps.freq is None
     assert ps.power is None
     assert ps.power_err is None
     assert ps.df is None
     assert ps.m == 1
     assert ps.n is None
Example #2
 def test_make_periodogram_from_lightcurve(self):
     ps = Powerspectrum(lc=self.lc)
     assert ps.freq is not None
     assert ps.ps is not None
     assert ps.df == 1.0 / self.lc.tseg
     assert ps.norm == "rms"
     assert ps.m == 1
     assert ps.n == self.lc.time.shape[0]
     assert ps.nphots == np.sum(self.lc.counts)
Example #3
 def test_rebin_uses_mean(self):
     """
     Make sure the rebin-method uses "mean" to average instead of summing
     powers by default, and that this is not changed in the future!
     Note: function defaults come as a tuple, so this check relies on
     'method' being the third keyword argument with a default value
     """
     ps = Powerspectrum(self.lc, norm="Leahy")
     assert ps.rebin.__defaults__[2] == "mean"
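The `__defaults__[2]` check above is positional and will silently break if the signature of `rebin` is ever reordered. A minimal sketch of a name-based alternative, assuming only the standard-library `inspect` module and that `rebin` takes a defaulted `method` keyword as above:

    import inspect

    from stingray import Powerspectrum

    # Read the default of the 'method' keyword by name instead of by its
    # position in the __defaults__ tuple; a sketch, not the original test.
    ps = Powerspectrum()
    method_default = inspect.signature(ps.rebin).parameters["method"].default
    assert method_default == "mean"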
Example #4
    def test_leahy_norm_correct(self):
        time = np.linspace(0, 10.0, 10**6)
        counts = np.random.poisson(1000, size=time.shape[0])

        lc = Lightcurve(time, counts)
        ps = Powerspectrum(lc, norm="leahy")
        print(np.mean(ps.power[1:]))

        assert np.isclose(np.mean(ps.power[1:]), 2.0, atol=0.01, rtol=0.01)
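For reference, the expected mean of 2.0 in this test follows from the Leahy normalization, P_j = 2 |a_j|^2 / N_ph (Leahy et al. 1983): for a pure Poisson-noise light curve the powers are chi-squared distributed with 2 degrees of freedom, and that distribution has mean 2. A numpy/scipy sketch of just the distributional fact (seed and sample size are arbitrary):

    import numpy as np
    from scipy import stats

    # Leahy-normalized powers of pure Poisson noise follow chi^2 with
    # 2 degrees of freedom, whose mean is exactly 2.
    rng = np.random.default_rng(0)
    fake_powers = stats.chi2.rvs(df=2, size=500_000, random_state=rng)
    print(fake_powers.mean())  # ~2.0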
Example #5
 def test_classical_significances_trial_correction(self):
     ps = Powerspectrum(lc=self.lc, norm="leahy")
     # change the powers so that just one exceeds the threshold
     ps.power = np.zeros_like(ps.power) + 2.0
     index = 1
     ps.power[index] = 10.0
     threshold = 0.01
     pval = ps.classical_significances(threshold=threshold,
                                       trial_correction=True)
     assert np.size(pval) == 0
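The empty result is expected: for a Leahy power P the single-trial tail probability under the chi-squared(2) noise distribution is exp(-P/2), and the trial correction scales it by the number of frequencies searched, pushing the corrected p-value above the 0.01 threshold. A hedged sketch of that arithmetic, where the trial count of 100 is a made-up stand-in for the actual number of powers and the Bonferroni-style product is only an approximation of stingray's correction:

    import numpy as np
    from scipy import stats

    power = 10.0
    n_trials = 100  # hypothetical number of frequencies searched
    # Single-trial p-value under the chi^2(2) noise distribution:
    p_single = stats.chi2.sf(power, df=2)        # == np.exp(-power / 2), ~0.0067
    # Bonferroni-style trial correction (approximate):
    p_corrected = min(p_single * n_trials, 1.0)  # ~0.67, well above 0.01
    print(p_single, p_corrected)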
Example #6
 def test_rms_normalization_correct(self):
     """
     In rms normalization, the integral of the powers should be
     equal to the variance of the light curve divided by the mean
     of the light curve squared.
     """
     ps = Powerspectrum(lc=self.lc, norm="rms")
     ps_int = np.sum(ps.ps[:-1]*ps.df) + ps.ps[-1]*ps.df/2.
     std_lc = np.var(self.lc.counts)/np.mean(self.lc.counts)**2.
     assert np.isclose(ps_int, std_lc, atol=0.01, rtol=0.01)
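The identity behind this test is Parseval's theorem in fractional-rms normalization: summing P_j Δν over the positive frequencies recovers Var(x)/⟨x⟩², the squared fractional rms amplitude ("rms" is the older name of the normalization called "frac" in current stingray). A self-contained numerical check on a pure-noise light curve, with an arbitrary seed and bin size:

    import numpy as np
    from stingray import Lightcurve, Powerspectrum

    # Integrated "frac"-normalized periodogram vs. variance over squared
    # mean of the light curve; they should agree to within a few percent.
    np.random.seed(42)
    time = np.arange(0.0, 100.0, 0.01)
    counts = np.random.poisson(100, size=time.size)
    lc = Lightcurve(time, counts)
    ps = Powerspectrum(lc, norm="frac")
    ps_int = np.sum(ps.power[:-1] * ps.df) + ps.power[-1] * ps.df / 2.0
    frac_var = np.var(lc.counts) / np.mean(lc.counts) ** 2
    print(ps_int, frac_var)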
Example #7
 def test_rebin(self, df):
     """
     TODO: Not sure how to write tests for the rebin method!
     """
     ps = Powerspectrum(lc=self.lc, norm="Leahy")
     bin_ps = ps.rebin(df)
     assert np.isclose(bin_ps.freq[1] - bin_ps.freq[0], bin_ps.df,
                       atol=1e-4, rtol=1e-4)
     assert np.isclose(bin_ps.freq[0], (ps.freq[0] - ps.df * 0.5 + bin_ps.df * 0.5),
                       atol=1e-4, rtol=1e-4)
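The second assertion encodes the geometry of the rebinned grid: frequencies label bin centres, so the first rebinned centre sits half a new bin width above the lower edge of the old first bin, i.e. ν₀_new = ν₀ − Δν/2 + Δν_new/2. A sketch with illustrative numbers only:

    # Bin-centre bookkeeping behind the second assertion (made-up numbers).
    df_old, df_new = 0.1, 0.25
    f0_old = 0.1                       # centre of the old first bin
    lower_edge = f0_old - df_old / 2   # = 0.05, lower edge of the old grid
    f0_new = lower_edge + df_new / 2   # = 0.175, centre of the new first bin
    print(f0_new)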
Example #8
 def test_make_periodogram_from_lightcurve(self, legacy):
     ps = Powerspectrum(self.lc, legacy=legacy)
     assert ps.freq is not None
     assert ps.power is not None
     assert ps.power_err is not None
     assert np.isclose(ps.df, 1.0 / self.lc.tseg)
     assert ps.norm == "frac"
     assert ps.m == 1
     assert ps.n == self.lc.time.shape[0]
     assert ps.nphots == np.sum(self.lc.counts)
Example #9
    def test_calibrate_lrt_works_with_sampling(self):
        m = 1
        nfreq = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)

        p_alpha_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
                amplitude)


        priors = {"amplitude": p_amplitude_1}

        priors2 = {"amplitude_1": p_amplitude_1,
                   "amplitude_0": p_amplitude_0,
                   "alpha_0": p_alpha_0}


        lpost.logprior = set_logprior(lpost, priors)

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        lpost2 = PSDPosterior(ps.freq, ps.power, model2, 1)
        lpost2.logprior = set_logprior(lpost2, priors2)

        pe = PSDParEst(ps)

        with catch_warnings(RuntimeWarning):
            pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
                                    [2.0, 1.0, 2.0], sample=None,
                                    max_post=True, nsim=10, nwalkers=10,
                                    burnin=10, niter=10,
                                    seed=100)

        assert pval > 0.001
Example #10
        def setup_class(cls):
            m = 1
            nfreq = 100
            freq = np.arange(nfreq)
            noise = np.random.exponential(size=nfreq)
            power = noise * 2.0

            ps = Powerspectrum()
            ps.freq = freq
            ps.power = power
            ps.m = m
            ps.df = freq[1] - freq[0]
            ps.norm = "leahy"

            cls.ps = ps
            cls.a_mean, cls.a_var = 2.0, 1.0

            cls.model = models.Const1D()

            p_amplitude = lambda amplitude: \
                scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(
                    amplitude)

            cls.priors = {"amplitude": p_amplitude}
            cls.lpost = PSDPosterior(cls.ps.freq,
                                     cls.ps.power,
                                     cls.model,
                                     m=cls.ps.m)
            cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

            cls.fitmethod = "BFGS"
            cls.max_post = True
            cls.t0 = [2.0]
            cls.neg = True

            pe = ParameterEstimation()
            res = pe.fit(cls.lpost, cls.t0)

            cls.nwalkers = 50
            cls.niter = 100

            np.random.seed(200)
            p0 = np.array([
                np.random.multivariate_normal(res.p_opt, res.cov)
                for i in range(cls.nwalkers)
            ])

            cls.sampler = emcee.EnsembleSampler(cls.nwalkers,
                                                len(res.p_opt),
                                                cls.lpost,
                                                args=[False],
                                                threads=1)

            with catch_warnings(RuntimeWarning):
                _, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
Example #11
    def test_leahy_norm_total_variance(self):
        """
        In Leahy normalization, the total variance should be the sum of
        powers multiplied by the number of counts and divided by the
        square of the number of data points in the light curve
        """
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        ps_var = (np.sum(self.lc.counts)/ps.n**2.) * \
            (np.sum(ps.power[:-1]) + ps.power[-1]/2.)

        assert np.isclose(ps_var, np.var(self.lc.counts), atol=0.01)
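The N_ph/N² prefactor follows from combining the Leahy definition P_j = 2|a_j|²/N_ph with Parseval's theorem, Σ_{j≠0}|a_j|² = N² Var(x), of which the positive-frequency half contributes N² Var(x)/2. A numpy-only check of the same bookkeeping on synthetic Poisson counts (arbitrary seed and size):

    import numpy as np

    np.random.seed(0)
    counts = np.random.poisson(1000, size=4096)
    nphots, n = np.sum(counts), counts.size
    a = np.fft.rfft(counts)[1:]              # positive frequencies only
    leahy = 2.0 * np.abs(a) ** 2 / nphots    # Leahy-normalized powers
    # (N_ph / N^2) * sum(P_j) should approximate the variance of the counts:
    print((nphots / n**2) * np.sum(leahy), np.var(counts))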
Example #12
    def test_leahy_norm_Poisson_noise(self):
        """
        In Leahy normalization, the poisson noise level (so, in the absence of
        a signal, the average power) should be equal to 2.
        """
        time = np.linspace(0, 10.0, 10**5)
        counts = np.random.poisson(1000, size=time.shape[0])

        lc = Lightcurve(time, counts)
        ps = Powerspectrum(lc, norm="leahy")

        assert np.isclose(np.mean(ps.power[1:]), 2.0, atol=0.01, rtol=0.01)
Example #13
    def test_fractional_rms_in_leahy_norm(self):
        """
        fractional rms should only be *approximately* equal to the standard
        deviation divided by the mean of the light curve. Therefore, we allow
        for a larger tolerance in np.isclose()
        """
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[0],
                                         max_freq=ps.freq[-1])

        rms_lc = np.std(self.lc.counts) / np.mean(self.lc.counts)
        assert np.isclose(rms_ps, rms_lc, atol=0.01)
Example #14
    def test_load_and_save_pds(self):
        pds = Powerspectrum()
        pds.freq = np.linspace(0, 10, 15)
        pds.power = np.random.poisson(30, 15)
        pds.mjdref = 54385.3254923845
        pds.gti = np.longdouble([[-0.5, 3.5]])

        save_to_intermediate_file(pds, self.dum)
        pds2 = load_from_intermediate_file(self.dum)
        assert np.allclose(pds.gti, pds2.gti)
        assert np.allclose(pds.mjdref, pds2.mjdref)
        assert pds.m == pds2.m
Example #15
def get_lc_onesource(k, src_index, num_trials=2):
    k_trial = 0
    FP = []
    period = []
    cts_num = []
    peakP = []
    power_P = []
    path = '/Users/baotong/Desktop/CDFS/txt_all_obs_0.5_8_ep{0}/'.format(k)
    epoch_file = np.loadtxt(path + 'CDFS_epoch_ep{0}.txt'.format(k))
    tstart = epoch_file[:, 0]
    tstop = epoch_file[:, 1]
    ID = epoch_file[:, 2]
    exptime = epoch_file[:, 3]
    evt_file = np.loadtxt(path + '{0}.txt'.format(src_index))
    bkgevt_file = np.loadtxt(path + '{0}_bkg.txt'.format(src_index))
    for i in range(len(ID)):
        index = len(np.where(evt_file[:, 2] == ID[i])[0])
        index_b = len(np.where(bkgevt_file[:, 2] == ID[i])[0])
        cts_num.append(index - index_b / 12.)
    dt = 100
    T_exp = 11000154.981141508
    freq = np.arange(1 / T_exp, 0.5 / dt, 1 / (5 * T_exp))
    freq = freq[np.where(freq > 1 / 20000.)]
    if os.path.exists(path+'/simulation/{0}_LS_simP.csv'.format(src_index)):
        print('caution! file exists')
        return None
    with open(path + '/simulation/{0}_LS_simP.csv'.format(src_index), 'a+') as csvfile:
        header = freq
        header = header.astype('str')
        writer = csv.writer(csvfile)
        while k_trial < num_trials:
            ev_all = EventList()
            for i in range(len(exptime)):
                cts_rate = cts_num[i] / (2 * exptime[i]) * dt  # the actual cts rate should be twice this value
                num_bins = int(exptime[i] / dt)
                sim = simulator.Simulator(N=num_bins, mean=cts_rate, dt=dt)
                w = np.arange(1 / exptime[i], 0.5 / dt, 1 / exptime[i])
                spectrum = bending_po(w, [2.3e-3, 3.4, 0.40, 4.3e-4])
                # spectrum = bending_po(w, [2.3e-3, 3.4, 0.40, 4.3e-4]) + generalized_lorentzian(w, [1.0518215e-3,1.0518215e-3/16,200,2])
                lc = sim.simulate(spectrum)
                # lc.counts += cts_rate
                lc.counts[np.where(lc.counts < 0)] = 0
                ps = Powerspectrum(lc, norm='abs')
                ev = EventList()
                ev.time = sim_evtlist(lc) + tstart[i]
                ev_all = ev_all.join(ev)
            # print(len(ev_all.time))
            lc_new = ev_all.to_lc(dt=dt, tstart=ev_all.time[0]-0.5*dt, tseg=ev_all.time[-1]-ev_all.time[0])
            # T_exp=lc_new.time[-1]-lc_new.time[0]
            temp = get_LS(lc_new.time, lc_new.counts, freq=freq, trial=k_trial)
            writer.writerows([temp[-1]])
            k_trial += 1
Example #16
    def test_rebin_makes_right_attributes(self):
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        # replace powers
        ps.ps = np.ones_like(ps.ps) * 2.0
        rebin_factor = 2.0
        bin_ps = ps.rebin(rebin_factor*ps.df)

        assert bin_ps.freq is not None
        assert bin_ps.ps is not None
        assert bin_ps.df == rebin_factor*1.0/self.lc.tseg
        assert bin_ps.norm.lower() == "leahy"
        assert bin_ps.m == 2
        assert bin_ps.n == self.lc.time.shape[0]
        assert bin_ps.nphots == np.sum(self.lc.counts)
Example #17
    def test_total_variance(self):
        """
        the integral of powers (or Riemann sum) should be close
        to the variance divided by twice the length of the light curve.

        Note: make sure the factors of ncounts match!
        Also, make sure to *exclude* the zeroth power!
        """
        ps = Powerspectrum(lc=self.lc)
        nn = ps.n
        pp = ps.unnorm_power / float(nn) ** 2
        p_int = np.sum(pp[:-1] * ps.df) + (pp[-1] * ps.df) / 2
        var_lc = np.var(self.lc.counts) / (2. * self.lc.tseg)
        assert np.isclose(p_int, var_lc, atol=0.01, rtol=0.01)
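For reference, the relation this test checks is Parseval's theorem for the unnormalized periodogram. With the DFT convention a_j = Σ_k x_k e^{-2πijk/N} and frequency step Δν = 1/T,

    \sum_{j \neq 0} |a_j|^2 = N \sum_k x_k^2 - |a_0|^2 = N^2 \, \mathrm{Var}(x),

and keeping only the positive frequencies (half of that sum),

    \sum_{j > 0} \frac{|a_j|^2}{N^2} \, \Delta\nu \approx \frac{\mathrm{Var}(x)}{2} \cdot \frac{1}{T} = \frac{\mathrm{Var}(x)}{2T},

which is exactly the comparison of p_int against var_lc above.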
Example #18
def plot_psd(lc):
    ps = Powerspectrum(lc, norm='frac')
    fig, ax1 = plt.subplots(1, 1, figsize=(9, 6), sharex=True)
    ax1.loglog()
    ax1.step(ps.freq, ps.power, lw=2, color='blue')
    ax1.set_xlabel("Frequency (Hz)", fontproperties=font1)
    ax1.set_ylabel("Power ", fontproperties=font1)
    ax1.tick_params(axis='x', labelsize=16)
    ax1.tick_params(axis='y', labelsize=16)
    ax1.tick_params(which='major', width=1.5, length=7)
    ax1.tick_params(which='minor', width=1.5, length=4)
    for axis in ['top', 'bottom', 'left', 'right']:
        ax1.spines[axis].set_linewidth(1.5)
    plt.show()
    return ps
Example #19
    def test_classical_significances_threshold(self):
        ps = Powerspectrum(lc=self.lc, norm="leahy")

        # change the powers so that just one exceeds the threshold
        ps.ps = np.zeros(ps.ps.shape[0]) + 2.0

        index = 1
        ps.ps[index] = 10.0

        threshold = 0.01

        pval = ps.classical_significances(threshold=threshold,
                                          trial_correction=False)
        assert pval[0,0] < threshold
        assert pval[1,0] == index
Example #20
    def test_multitaper_lombscargle_consistency(self, norm):

        mtp = Multitaper(self.lc, adaptive=False, norm=norm)
        mtp_ls = Multitaper(self.lc, lombscargle=True, adaptive=False, norm=norm)

        # Check if 99% of the points in the PSDs are within the set tolerance
        assert np.sum(np.isclose(mtp.power, mtp_ls.power,
                      atol=0.022*np.max(mtp_ls.power))) >= 0.99*mtp_ls.power.size

        # Check if the freq vals are the same
        ps = Powerspectrum(self.lc, norm=norm)

        assert np.allclose(mtp.freq, mtp_ls.freq)
        assert np.allclose(mtp.freq, ps.freq)
        assert mtp.power.shape == ps.power.shape
Example #21
    def test_pvals_is_numpy_array(self):
        ps = Powerspectrum(lc=self.lc, norm="leahy")
        # change the powers so that just one exceeds the threshold
        ps.power = np.zeros_like(ps.power) + 2.0

        index = 1
        ps.power[index] = 10.0

        threshold = 1.0

        pval = ps.classical_significances(threshold=threshold,
                                          trial_correction=True)

        assert isinstance(pval, np.ndarray)
        assert pval.shape[0] == 2
Example #22
    def test_fitting_with_ties_and_bounds(self, capsys):
        double_f = lambda model: model.x_0_0 * 2
        model = self.model.copy()
        model = self.model + models.Lorentz1D(amplitude=model.amplitude_0,
                                              x_0=model.x_0_0 * 2,
                                              fwhm=model.fwhm_0)
        model.x_0_0 = self.model.x_0_0
        model.amplitude_0 = self.model.amplitude_0
        model.amplitude_1 = self.model.amplitude_1
        model.fwhm_0 = self.model.fwhm_0
        model.x_0_2.tied = double_f
        model.fwhm_0.bounds = [0, 10]
        model.amplitude_0.fixed = True

        p = model(self.ps.freq)

        noise = np.random.exponential(size=len(p))
        power = noise * p

        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "leahy"

        pe = PSDParEst(ps)
        llike = PSDLogLikelihood(ps.freq, ps.power, model)

        true_pars = [
            self.amplitude_0, self.x_0_0, self.fwhm_0, self.amplitude_1,
            model.amplitude_2.value, model.x_0_2.value, model.fwhm_2.value
        ]
        res = pe.fit(llike, true_pars)

        res.print_summary(llike)
        out, err = capsys.readouterr()
        assert "100.00000            (Fixed)" in out
        pattern = \
            re.compile(r"5\) Parameter x_0_2\s+: [0-9]\.[0-9]{5}\s+\(Tied\)")
        assert pattern.search(out)

        compare_pars = [
            self.x_0_0, self.fwhm_0, self.amplitude_1, model.amplitude_2.value,
            model.fwhm_2.value
        ]

        assert np.all(np.isclose(compare_pars, res.p_opt, rtol=0.5))
Example #23
def plot_pds(time, flux):
    ## Plot the light curve, i.e. the photon flux as a function of time ##
    lc = Lightcurve(time, flux)
    fig, ax = plt.subplots(1, 1, figsize=(10, 6))
    ax.plot(lc.time, lc.counts, lw=2, color='blue')
    ax.set_xlabel("Time (s)", fontproperties=font1)
    ax.set_ylabel("Counts (cts)/bin", fontproperties=font1)
    ax.tick_params(axis='x', labelsize=16)
    ax.tick_params(axis='y', labelsize=16)
    ax.tick_params(which='major', width=1.5, length=7)
    ax.tick_params(which='minor', width=1.5, length=4)
    plt.show()

    ## Plot the power spectrum ##
    ps = Powerspectrum(lc, norm='leahy')
    fig, ax1 = plt.subplots(1, 1, figsize=(9, 6), sharex=True)
    ax1.loglog()
    ax1.step(ps.freq, ps.power, lw=2, color='blue')
    ax1.set_ylabel("Frequency (Hz)", fontproperties=font1)
    ax1.set_ylabel("Power ", fontproperties=font1)
    ax1.set_yscale('log')
    ax1.tick_params(axis='x', labelsize=16)
    ax1.tick_params(axis='y', labelsize=16)
    ax1.tick_params(which='major', width=1.5, length=7)
    ax1.tick_params(which='minor', width=1.5, length=4)
    for axis in ['top', 'bottom', 'left', 'right']:
        ax1.spines[axis].set_linewidth(1.5)
    plt.show()

    ## Plot the averaged power spectrum; for most sources this plot is not needed, unless the cited reference includes an averaged power density spectrum ###
    avg_ps = AveragedPowerspectrum(lc,
                                   500,
                                   dt=lc.time[1] - lc.time[0],
                                   norm='leahy')
    print("Number of segments: %d" % avg_ps.m)
    fig, ax1 = plt.subplots(1, 1, figsize=(9, 6))
    ax1.loglog()
    ax1.step(avg_ps.freq, avg_ps.power, lw=2, color='blue')
    ax1.set_xlabel("Frequency (Hz)", fontproperties=font1)
    ax1.set_ylabel("Power ", fontproperties=font1)
    ax1.set_yscale('log')
    ax1.tick_params(axis='x', labelsize=16)
    ax1.tick_params(axis='y', labelsize=16)
    ax1.tick_params(which='major', width=1.5, length=7)
    ax1.tick_params(which='minor', width=1.5, length=4)
    for axis in ['top', 'bottom', 'left', 'right']:
        ax1.spines[axis].set_linewidth(1.5)
    plt.show()
Example #24
    def test_plotfits_log_pow(self):
        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = self.ps.power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "none"
        pe = PSDParEst(ps)

        t0 = [2.0, 1, 1, 1]
        res = pe.fit(self.lpost, t0)

        pe.plotfits(res, res2=res, save_plot=True, log=True)

        assert os.path.exists("test_ps_fit.png")
        os.unlink("test_ps_fit.png")
Example #25
    def test_rebin_makes_right_attributes(self, legacy):
        ps = Powerspectrum(self.lc, norm="Leahy", legacy=legacy)
        # replace powers
        ps.power = np.ones_like(ps.power) * 2.0

        rebin_factor = 2
        bin_ps = ps.rebin(rebin_factor*ps.df)

        assert bin_ps.freq is not None
        assert bin_ps.power is not None
        assert np.isclose(bin_ps.df, rebin_factor * 1.0 / self.lc.tseg)
        assert bin_ps.norm.lower() == "leahy"
        assert bin_ps.m == 2
        assert bin_ps.n == self.lc.time.shape[0]
        assert bin_ps.nphots == np.sum(self.lc.counts)
Example #26
    def test_abs_norm_Poisson_noise(self):
        """
        Poisson noise level for a light curve with absolute rms-squared
        normalization should be approximately 2 * the mean count rate of the
        light curve.
        """
        np.random.seed(101)

        time = np.linspace(0, 1., 10**4)
        counts = np.random.poisson(0.01, size=time.shape[0])

        lc = Lightcurve(time, counts)
        ps = Powerspectrum(lc, norm="abs")
        abs_noise = 2. * 100  # expected Poisson noise level;
        # hardcoded value from above
        assert np.isclose(np.mean(ps.power[1:]), abs_noise, atol=50)
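The hardcoded 2 × 100 unpacks as simple arithmetic: the light curve spans 1 s in 10⁴ bins (dt ≈ 10⁻⁴ s) with on average 0.01 counts per bin, i.e. a mean count rate of 100 counts/s, and in absolute-rms normalization the Poisson noise level is twice the mean count rate:

    # Arithmetic behind abs_noise = 2. * 100 in the test above.
    mean_counts_per_bin = 0.01
    dt = 1.0 / 10**4                            # bin width in seconds (approx.)
    mean_count_rate = mean_counts_per_bin / dt  # = 100 counts/s
    abs_noise = 2.0 * mean_count_rate           # = 200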
Example #27
    def setup_class(cls):
        tstart = 0.0
        tend = 10.0
        cls.dt = 0.0001
        cls.segment_size = tend - tstart

        times = np.sort(np.random.uniform(tstart, tend, 1000))
        gti = np.array([[tstart, tend]])

        cls.events = EventList(times, gti=gti)

        cls.lc = cls.events
        cls.leahy_pds = AveragedPowerspectrum(
            cls.lc, segment_size=cls.segment_size, dt=cls.dt, norm="leahy", silent=True)

        cls.leahy_pds_sng = Powerspectrum(
            cls.lc, dt=cls.dt, norm="leahy")
Example #28
    def setup_class(cls):
        np.random.seed(1000)
        m = 1
        nfreq = 100
        freq = np.arange(nfreq)
        noise = np.random.exponential(size=nfreq)
        power = noise * 2.0

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.n = freq.shape[0]
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude": p_amplitude}
        cls.lpost = PSDPosterior(cls.ps.freq,
                                 cls.ps.power,
                                 cls.model,
                                 m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "powell"
        cls.max_post = True
        cls.t0 = np.array([2.0])
        cls.neg = True

        cls.opt = scipy.optimize.minimize(cls.lpost,
                                          cls.t0,
                                          method=cls.fitmethod,
                                          args=cls.neg,
                                          tol=1.e-10)

        cls.opt.x = np.atleast_1d(cls.opt.x)
        cls.optres = OptimizationResultsSubclassDummy(cls.lpost,
                                                      cls.opt,
                                                      neg=True)
Example #29
    def test_plotfits_pow(self):
        t0 = [2.0, 1, 1, 1]
        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = self.ps.power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "none"
        pe = PSDParEst(ps)
        lpost = PSDPosterior(self.ps, self.model, self.priors)

        res = pe.fit(self.lpost, t0)

        pe.plotfits(res, res2=res, save_plot=True)

        assert os.path.exists("test_ps_fit.png")
        os.unlink("test_ps_fit.png")
Example #30
def get_lc_byspec_1hr(k, j, num_trials=1000):
    k_trial = 0
    FP = []
    period = []
    while k_trial < num_trials:
        path = '/Users/baotong/Desktop/CDFS/txt_all_obs_0.5_8_ep{0}/'.format(k)
        epoch_file = np.loadtxt(path + 'CDFS_epoch_ep{0}.txt'.format(k))
        # path = '/Users/baotong/Desktop/CDFS/txt_all_obs_0.5_8/'
        # epoch_file = np.loadtxt(path + 'CDFS_epoch.txt')
        tstart = epoch_file[:, 0]
        tstop = epoch_file[:, 1]
        exptime = epoch_file[:, 3]
        ev_all = EventList()
        for i in range(len(exptime)):
            dt = 100
            cts_rate = cr[j] * dt  # the actual cts rate should be twice this value
            num_bins = int(exptime[i] / dt)
            sim = simulator.Simulator(N=num_bins, mean=cts_rate, dt=dt)
            w = np.arange(1 / exptime[i], 0.5 / dt, 1 / exptime[i])
            # w = np.fft.rfftfreq(sim.N, d=sim.dt)[1:]
            # spectrum = smoothbknpo(w, [0.01, 2, 1e-2, 1e-3])
            # spectrum = bending_po(w, [2.3e-3, 3.4, 0.40, 4.3e-4])  # parameters for RE J1034+396
            # spectrum = bending_po(w, [2.3e-3, 3.4, 0.40, 4.3e-4]) + generalized_lorentzian(w, [6.68e-4 ,6.68e-4 /16,200,2])
            spectrum = bending_po(w, [2.3e-3, 3.4, 0., 4.3e-4]) + generalized_lorentzian(w,[5.6e-4, 5.6e-4 / 16, 200,2])
            # spectrum =powerlaw + generalized_lorentzian(w, [1 / 1000., 1 / 10000., 0.5*np.max(powerlaw), 2])
            lc = sim.simulate(spectrum)
            lc.counts += cts_rate
            lc.counts[np.where(lc.counts < 0)] = 0
            ps = Powerspectrum(lc, norm='abs')
            ev = EventList()
            # ev.simulate_times(use_spline=False, lc=lc, bin_time=dt)
            # ev.time += tstart[i]
            ev.time = sim_evtlist(lc) + tstart[i]
            # print(ev.time)  # RX J1301.9+2747
            ev_all = ev_all.join(ev)
        print('cts={0}'.format(len(ev_all.time)))
        lc_new = ev_all.to_lc(dt=dt, tstart=ev_all.time[0] - 0.5*dt, tseg=ev_all.time[-1] - ev_all.time[0])
        T_exp = lc_new.time[-1] - lc_new.time[0]
        freq = np.arange(1 / T_exp, 0.5 / dt, 1 / (5 * T_exp))
        freq = freq[np.where(freq > 1 / 20000.)]
        # print(len(freq))
        temp = get_LS(lc_new.time, lc_new.counts, freq=freq)
        FP.append(temp[0])
        period.append(temp[1])
        k_trial += 1
    result = np.column_stack((FP, period))
    np.savetxt(path + 'simulation/' + 'trial_out_1hr_{0}_REJ1034+396_test_noC.txt'.format(cr_str[j]), result, fmt="%10.5f %10.5f")
    return ev_all