Example #1
def theoretical_spectrum_func(channel, scale=1.0):
    """

    Parameters
    ----------
    channel : str
        channel in {A, E, T}
    scale : float
        scale factor applied to the data, such that
        data_rescaled = data * scale

    Returns
    -------
    psd_fn : callable
        PSD function in the requested channel
        [rescaled Fractional Frequency / Hz]

    """

    if channel == 'A':
        psd_fn = lambda x: tdi.noisepsd_AE(x, model='SciRDv1') * scale ** 2
    elif channel == 'E':
        psd_fn = lambda x: tdi.noisepsd_AE(x, model='SciRDv1') * scale ** 2
    elif channel == 'T':
        psd_fn = lambda x: tdi.noisepsd_T(x, model='SciRDv1') * scale ** 2
    else:
        raise ValueError("Unknown channel: " + str(channel))

    return psd_fn
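
A minimal usage sketch of the function above, assuming the LDC-style `tdi` module used throughout these examples is importable; the frequency grid is purely illustrative:

import numpy as np

freq = np.logspace(-4, -1, 500)                           # hypothetical frequency grid [Hz]
psd_a = theoretical_spectrum_func('A')(freq)              # A-channel PSD
psd_t = theoretical_spectrum_func('T', scale=2.0)(freq)   # T-channel PSD for rescaled data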
Example #2
    def psd_fn(self, x):

        if self.channel == 'A':
            return tdi.noisepsd_AE(x, model='SciRDv1') * self.scale ** 2
        elif self.channel == 'E':
            return tdi.noisepsd_AE(x, model='SciRDv1') * self.scale ** 2
        elif self.channel == 'T':
            return tdi.noisepsd_T(x, model='SciRDv1') * self.scale ** 2
Example #3
def generate_noise_AE(freq):

    psd_j = tdi.noisepsd_AE(freq[1:])
    # Generate a random realisation of the noise
    n_real = np.random.normal(loc=0.0, scale=np.sqrt(psd_j / 2))
    n_imag = np.random.normal(loc=0.0, scale=np.sqrt(psd_j / 2))
    n_real = np.insert(n_real, 0, 0., axis=0)
    n_imag = np.insert(n_imag, 0, 0., axis=0)

    return n_real + 1j * n_imag
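
A quick sanity-check sketch for generate_noise_AE (numpy and the `tdi` module assumed; the sampling rate and record length below are illustrative only). With this draw, each positive-frequency bin satisfies E[|n(f)|^2] = PSD(f), so the printed ratio should be close to one:

import numpy as np

fs = 0.1                                          # hypothetical sampling rate [Hz]
n_samples = 2**14                                 # hypothetical record length
freq = np.fft.rfftfreq(n_samples, d=1.0 / fs)     # positive frequencies, freq[0] = 0
noise_f = generate_noise_AE(freq)
print(np.mean(np.abs(noise_f[1:]) ** 2) / np.mean(tdi.noisepsd_AE(freq[1:])))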
Example #4
    def test_waveform_freq(self,
                           config_file="../configs/config_ldc.ini",
                           plot=True):

        config = configparser.ConfigParser()
        config.read(config_file)
        fftwisdom.load_wisdom()

        # Unpacking the hdf5 file and getting data and source parameters
        p, td = loadings.load_ldc_data(config["InputData"]["FilePath"])

        # Pre-processing data: anti-aliasing and filtering
        tm, xd, yd, zd, q, t_offset, tobs, del_t, p_sampl = preprocess.preprocess_ldc_data(
            p, td, config)

        # Frequencies
        freq_d = np.fft.fftfreq(len(tm), del_t * q)
        # Restrict the frequency band to high SNR region
        inds = np.where(
            (float(config['Model']['MinimumFrequency']) <= freq_d)
            & (freq_d <= float(config['Model']['MaximumFrequency'])))[0]
        # Theoretical PSD
        sa = tdi.noisepsd_AE(freq_d[inds], model='Proposal', includewd=None)

        # Convert Michelson TDI to A, E, T (time domain)
        ad, ed, td = ldctools.convert_XYZ_to_AET(xd, yd, zd)

        # Transform to frequency domain
        wd = np.ones(ad.shape[0])
        wd_full = wd[:]
        mask = wd[:]
        a_df, e_df, t_df = preprocess.time_to_frequency(ad,
                                                        ed,
                                                        td,
                                                        wd,
                                                        del_t,
                                                        q,
                                                        compensate_window=True)

        # Instantiate likelihood class
        ll_cls = likelihoodmodel.LogLike(
            [mask * ad, mask * ed],
            sa,
            inds,
            tobs,
            del_t * q,
            normalized=config['Model'].getboolean('normalized'),
            t_offset=t_offset,
            channels=[1, 2],
            scale=config["InputData"].getfloat("rescale"),
            model_cls=None,
            psd_cls=None,
            wd=wd,
            wd_full=wd_full)

        t1 = time.time()
        if config['Model'].getboolean('reduced'):
            i_sampl_intr = [0, 1, 2, 3, 4, 7, 8]
            aft, eft = ll_cls.compute_signal_reduced(p_sampl[i_sampl_intr])
        else:
            aft, eft = ll_cls.compute_signal(p_sampl)
        t2 = time.time()
        print('Waveform Calculated in ' + str(t2 - t1) + ' seconds.')

        rms = rms_error(aft, a_df[inds], relative=True)
        print("Cumulative relative error is " + str(rms))

        self.assertLess(
            rms, 1e-3,
            "Cumulative relative error should be less than 1e-3 (0.1 percent)")

        # Plotting
        if plot:
            from plottools import presets
            presets.plotconfig(ctype='time', lbsize=16, lgsize=14)

            # Frequency plot
            fig1, ax1 = plt.subplots(nrows=2, sharex=True, sharey=True)
            ax1[0].semilogx(freq_d[inds], np.real(a_df[inds]))
            ax1[0].semilogx(freq_d[inds], np.real(aft), '--')
            ax1[0].set_ylabel("Fractional frequency")
            # ax1[0].legend()
            ax1[1].semilogx(freq_d[inds], np.imag(a_df[inds]))
            ax1[1].semilogx(freq_d[inds], np.imag(aft), '--')
            ax1[1].set_xlabel("Frequency [Hz]")
            ax1[1].set_ylabel("Fractional frequency")
            # ax1[1].legend()
            plt.show()
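
The tests in Examples #4 and #6 rely on an rms_error helper that is not shown in these excerpts; a plausible implementation, consistent with how it is called (cumulative relative RMS error between a computed series and a reference), could be:

import numpy as np

def rms_error(x_est, x_ref, relative=True):
    # Cumulative RMS deviation between two (possibly complex) series,
    # optionally normalised by the RMS of the reference series.
    err = np.sqrt(np.sum(np.abs(x_est - x_ref) ** 2))
    if relative:
        err = err / np.sqrt(np.sum(np.abs(x_ref) ** 2))
    return err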
Example #5
                                                   ldc=False,
                                                   tref=0)
    freq_signal_sym3 = [
        np.concatenate(
            (sf[0:n_pos3 + 1], np.conjugate(sf[1:n_pos3 + 2][::-1])))
        for sf in [af3, ef3, tf3]
    ]
    time_signal3 = [ifft(sf_sym)[0:n3] for sf_sym in freq_signal_sym3]
    t_vect3 = np.arange(t_start, t_end, del_t)

    # ==================================================================================================================
    # Generate noise
    # ==================================================================================================================
    freq_psd = np.fft.fftfreq(2 * N) * fs
    freq_psd[0] = freq_psd[1]
    psd_ae = tdi.noisepsd_AE(np.abs(freq_psd), model='SciRDv1')
    psd_t = tdi.noisepsd_T(np.abs(freq_psd), model='SciRDv1')
    a_noise = datamodel.generate_noise_from_psd(np.sqrt(psd_ae),
                                                fs,
                                                myseed=1234)[0:N]
    e_noise = datamodel.generate_noise_from_psd(np.sqrt(psd_ae),
                                                fs,
                                                myseed=5678)[0:N]
    t_noise = datamodel.generate_noise_from_psd(np.sqrt(psd_t),
                                                fs,
                                                myseed=9101)[0:N]

    # ==================================================================================================================
    # Form data
    # ==================================================================================================================
    a_meas = np.real(a_noise + time_signal[0])
Example #6
    def test_likelihood_with_gaps(
            self,
            config_file="../configs/config_ldc_single_gap.ini",
            plot=True):

        config = configparser.ConfigParser()
        config.read(config_file)
        fftwisdom.load_wisdom()

        # Unpacking the hdf5 file and getting data and source parameters
        p, td = loadings.load_ldc_data(config["InputData"]["FilePath"])

        # Pre-processing data: anti-aliasing and filtering
        tm, xd, yd, zd, q, t_offset, tobs, del_t, p_sampl = preprocess.preprocess_ldc_data(
            p, td, config)

        # Introducing gaps if requested
        wd, wd_full, mask = loadings.load_gaps(config, tm)
        print("Ideal decay number: " + str(
            np.int((config["InputData"].getfloat("EndTime") - p_sampl[2]) /
                   (2 * del_t))))

        # Now we extract the data and transform it to the frequency domain
        freq_d = np.fft.fftfreq(len(tm), del_t * q)
        # Convert Michelson TDI to A, E, T (time domain)
        ad, ed, td = ldctools.convert_XYZ_to_AET(xd, yd, zd)

        # Restrict the frequency band to high SNR region, and exclude distorted frequencies due to gaps
        if (config["TimeWindowing"].getboolean('gaps')) & (
                not config["Imputation"].getboolean('imputation')):
            f1, f2 = physics.find_distorted_interval(mask,
                                                     p_sampl,
                                                     t_offset,
                                                     del_t,
                                                     margin=0.4)
            f1 = np.max([f1, 0])
            f2 = np.min([f2, 1 / (2 * del_t)])
            inds = np.where(
                (float(config['Model']['MinimumFrequency']) <= freq_d)
                & (freq_d <= float(config['Model']['MaximumFrequency']))
                & ((freq_d <= f1) | (freq_d >= f2)))[0]
        else:
            inds = np.where(
                (float(config['Model']['MinimumFrequency']) <= freq_d)
                & (freq_d <= float(config['Model']['MaximumFrequency'])))[0]

        # Restriction of sampling parameters to intrinsic ones: Mc, q, tc, chi1, chi2, np.sin(bet), lam
        i_sampl_intr = [0, 1, 2, 3, 4, 7, 8]
        print(
            "================================================================="
        )
        fftwisdom.save_wisdom()

        # Auxiliary parameter classes
        psd_cls = None
        # One-sided PSD
        sa = tdi.noisepsd_AE(freq_d[inds], model='Proposal', includewd=None)

        data_cls = None

        # Instantiate likelihood class
        ll_cls = likelihoodmodel.LogLike(
            [mask * ad, mask * ed],
            sa,
            inds,
            tobs,
            del_t * q,
            normalized=config['Model'].getboolean('normalized'),
            t_offset=t_offset,
            channels=[1, 2],
            scale=config["InputData"].getfloat("rescale"),
            model_cls=data_cls,
            psd_cls=psd_cls,
            wd=wd,
            wd_full=wd_full)
        # # Waveform generation in the Frequency domain
        # t1 = time.time()
        # if config['Model'].getboolean('reduced'):
        #     i_sampl_intr = [0, 1, 2, 3, 4, 7, 8]
        #     aft, eft = ll_cls.compute_signal_reduced(p_sampl[i_sampl_intr])
        # else:
        #     aft, eft = ll_cls.compute_signal(p_sampl)
        # t2 = time.time()
        # print('Waveform Calculated in ' + str(t2 - t1) + ' seconds.')

        # Get the windowed discrete Fourier transform used in the likelihood
        a_df_like, e_df_like = ll_cls.data_dft
        # Compare to the DFT of the complete data set
        a_df = fft(
            ad * wd_full) * del_t * q * wd_full.shape[0] / np.sum(wd_full)
        e_df = fft(
            ed * wd_full) * del_t * q * wd_full.shape[0] / np.sum(wd_full)

        # Plotting
        if plot:
            from plottools import presets
            presets.plotconfig(ctype='time', lbsize=16, lgsize=14)

            # Time plot
            fig0, ax0 = plt.subplots(nrows=1)
            ax0.semilogx(tm, ad, 'k')
            ax0.semilogx(tm, wd * np.max(ad), 'r')
            ax0.semilogx(tm, mask * np.max(ad), 'gray')

            # Frequency plot
            fig1, ax1 = plt.subplots(nrows=2, sharex=True, sharey=True)
            ax1[0].semilogx(freq_d[inds], np.real(a_df[inds]))
            ax1[0].semilogx(freq_d[inds], np.real(a_df_like), '--')
            ax1[0].set_ylabel("Fractional frequency", fontsize=16)
            # ax1[0].legend()
            ax1[1].semilogx(freq_d[inds], np.imag(a_df[inds]))
            ax1[1].semilogx(freq_d[inds], np.imag(a_df_like), '--')
            ax1[1].set_xlabel("Frequency [Hz]", fontsize=16)
            ax1[1].set_ylabel("Fractional frequency", fontsize=16)
            # ax1[1].legend()
            for i in range(len(ax1)):
                ax1[i].axvline(x=f1,
                               ymin=0,
                               ymax=np.max(np.real(a_df[inds])),
                               color='r',
                               linestyle='--')
                ax1[i].axvline(x=f2,
                               ymin=0,
                               ymax=np.max(np.real(a_df[inds])),
                               color='r',
                               linestyle='--')
            plt.show()

        rms = rms_error(a_df_like, a_df[inds], relative=True)

        print("Cumulative relative error is " + str(rms))
        self.assertLess(
            rms, 5e-2,
            "Cumulative relative error should be less than 0.05 (5 percent)")
Example #7
    print("Reduced-model signal computation time: " + str(t2 - t1))
    print("=================================================================")

    # Verification of parameter compatibility: m1, m2, chi1, chi2, Deltat, dist, inc, phi, lambd, beta, psi
    params0 = physics.like_to_waveform(p_sampl)
    # # Testing the right offset
    # offsets = np.linspace(50.60, 53, 50)
    # results = [lisabeta_template(params, freqD, tobs, tref=0, toffset=toffset) for toffset in offsets]
    # rms = np.array([np.sum(np.abs(ADf - res[0])**2) for res in results])
    fftwisdom.save_wisdom()

    # ==================================================================================================================
    # Comparing log-likelihoods
    # ==================================================================================================================
    # One-sided PSD
    SA = tdi.noisepsd_AE(freqD[inds], model='Proposal', includewd=None)
    # Consider only A and E TDI data in frequency domain
    dataAE = [ADf[inds], EDf[inds]]
    # And in time domain
    data_ae_time = [ad, ed]
    templateAE = [aft, eft]
    llA1, llE1 = SimpleLogLik(dataAE, templateAE, SA, df, tdi='AET')
    llA2, llE2 = SimpleLogLik(dataAE, dataAE, SA, df, tdi='AET')
    llA3, llE3 = SimpleLogLik(templateAE, templateAE, SA, df, tdi='AET')
    print('compare A', llA1, llA2, llA3)
    print('compare E', llE1, llE2, llE3)
    print('total loglik', llA1 + llE1, llA2 + llE2, llA3 + llE3)
    # Full computation of likelihood
    ll_cls = likelihoodmodel.LogLike(data_ae_time,
                                     SA,
                                     freqD[inds],
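
SimpleLogLik itself is not shown in this excerpt; a plausible per-channel form, consistent with how it is called on frequency-domain data, a template, a one-sided PSD SA and a bin width df (normalisation conventions vary between codes), might look like:

import numpy as np

def simple_loglik_channel(d, h, s_one_sided, df):
    # Gaussian log-likelihood term -(1/2) <d - h, d - h>, using the usual
    # frequency-domain inner product <a, b> = 4 df Re sum(a * conj(b) / S).
    r = d - h
    return -2.0 * df * np.real(np.sum(r * np.conjugate(r) / s_one_sided))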
Example #8

if __name__ == '__main__':

    from matplotlib import pyplot as plt
    import tdi
    fftwisdom.load_wisdom()

    N = 2**19

    t = np.arange(0, N)
    f = np.fft.fftfreq(N)

    f[0] = f[1]
    scale = 1e-21
    S = tdi.noisepsd_AE(np.abs(f), model='Proposal') / scale**2
    f2N = np.fft.fftfreq(2 * N)
    f2N[0] = f2N[1]
    S_2N = tdi.noisepsd_AE(np.abs(f2N), model='Proposal') / scale**2

    # ==========================================================================
    # Generate data
    print("Generating data...")
    b, S, Noise_TF = noise.generateNoiseFromDSP(np.sqrt(S[0:N]),
                                                fs,
                                                myseed=1354561)
    y = np.real(b[0:N])
    print("Data generated")

    N_est = 100
    PSD = PSD_estimate(N_est, N)