def test_compare_ecorr_likelihood(self):
        """Compare basis and kernel ecorr methods."""

        selection = Selection(selections.nanograv_backends)
        ef = white_signals.MeasurementNoise()
        ec = white_signals.EcorrKernelNoise(selection=selection)
        ec2 = gp_signals.EcorrBasisModel(selection=selection)
        tm = gp_signals.TimingModel()
        m = ef + ec + tm
        m2 = ef + ec2 + tm

        pta1 = signal_base.PTA([m(p) for p in self.psrs])
        pta2 = signal_base.PTA([m2(p) for p in self.psrs])

        params = parameter.sample(pta1.params)
        l1 = pta1.get_lnlikelihood(params)

        # need to translate some names for EcorrBasis
        basis_params = {}
        for parname, parval in params.items():
            if "log10_ecorr" in parname:
                toks = parname.split("_")
                basisname = toks[0] + "_basis_ecorr_" + "_".join(toks[1:])
                basis_params[basisname] = parval
        params.update(basis_params)
        l2 = pta2.get_lnlikelihood(params)

        msg = "Likelihood mismatch between ECORR methods"
        assert np.allclose(l1, l2), msg
Example 2
    def test_efac_backend(self):
        """Test that backend-efac signal returns correct covariance."""
        # set up signal and parameters
        efac = parameter.Uniform(0.1, 5)
        selection = Selection(selections.by_backend)
        ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
        efm = ef(self.psr)

        # parameters
        efacs = [1.3, 1.4, 1.5, 1.6]
        params = {
            "B1855+09_430_ASP_efac": efacs[0],
            "B1855+09_430_PUPPI_efac": efacs[1],
            "B1855+09_L-wide_ASP_efac": efacs[2],
            "B1855+09_L-wide_PUPPI_efac": efacs[3],
        }

        # correct value
        flags = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
        nvec0 = np.zeros_like(self.psr.toas)
        for ct, flag in enumerate(np.unique(flags)):
            ind = flag == self.psr.backend_flags
            nvec0[ind] = efacs[ct]**2 * self.psr.toaerrs[ind]**2

        # test
        msg = "EFAC covariance incorrect."
        assert np.all(efm.get_ndiag(params) == nvec0), msg
Example 3
    def __init__(self, psrs, params=None):

        print('Initializing the model...')

        efac = parameter.Constant()
        equad = parameter.Constant()
        ef = white_signals.MeasurementNoise(efac=efac)
        eq = white_signals.EquadNoise(log10_equad=equad)

        tm = gp_signals.TimingModel(use_svd=True)

        s = eq + ef + tm

        model = []
        for p in psrs:
            model.append(s(p))
        self.pta = signal_base.PTA(model)

        # set white noise parameters
        if params is None:
            print('No noise dictionary provided!...')
        else:
            self.pta.set_default_params(params)

        self.psrs = psrs
        self.params = params

        self.Nmats = None
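
A brief usage sketch for the constructor above. The wrapper class name (here
`FixedNoiseModel`), the pulsar list, and the dictionary keys/values are
assumptions for illustration; real keys follow enterprise's
"<psrname>_<parameter>" naming.

# hypothetical fixed white-noise values for one pulsar
params = {
    "B1855+09_efac": 1.05,
    "B1855+09_log10_equad": -6.9,
}
model = FixedNoiseModel(psrs, params=params)

# all white-noise parameters are Constants, so the likelihood
# can be evaluated with an empty parameter dictionary
lnlike = model.pta.get_lnlikelihood({})
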
Example 4
def white_noise_block(vary=False):
    """
    Returns the white noise block of the model:
        1. EFAC per backend/receiver system
        2. EQUAD per backend/receiver system
        3. ECORR per backend/receiver system
    :param vary:
        If set to true we vary these parameters
        with uniform priors. Otherwise they are set to constants
        with values to be set later.
    """

    # define selection by observing backend
    selection = selections.Selection(selections.by_backend)

    # white noise parameters
    if vary:
        efac = parameter.Uniform(0.01, 10.0)
        equad = parameter.Uniform(-8.5, -5)
        ecorr = parameter.Uniform(-8.5, -5)
    else:
        efac = parameter.Constant()
        equad = parameter.Constant()
        ecorr = parameter.Constant()

    # white noise signals
    ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
    eq = white_signals.EquadNoise(log10_equad=equad, selection=selection)
    ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection)

    # combine signals
    s = ef + eq + ec

    return s
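
A minimal usage sketch for the block above, assuming `psr` is an
enterprise.pulsar.Pulsar instance loaded elsewhere and that the usual
enterprise imports (gp_signals, signal_base) are also available.

# combine the white-noise block with a timing model and build a PTA
wn = white_noise_block(vary=True)
tm = gp_signals.TimingModel()
pta = signal_base.PTA([(wn + tm)(psr)])

# draw one point from the priors and evaluate the likelihood
x0 = {p.name: p.sample() for p in pta.params}
print(pta.get_lnlikelihood(x0))
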
Example 5
    def add_efac(self):
        """Return dictionary containing EFAC white noise signal
        attributes

        :return: OrderedDict of EFAC signal
        """
        efac_dct = dict()
        efac = parameter.Uniform(0.001, 5.0)
        ef = white_signals.MeasurementNoise(efac=efac,
                                            selection=self.selection)
        self.efac_sig = ef(self.psr)
        for ii, param in enumerate(self.efac_sig.param_names):
            Nvec = self.psr.toaerrs**2 * self.efac_sig._masks[ii]
            newsignal = OrderedDict({
                'type': 'efac',
                'name': param,
                'pmin': [0.001],
                'pmax': [5.0],
                'pstart': [1.0],
                'interval': [True],
                'numpars': 1,
                'Nvec': Nvec
            })
            efac_dct.update({param: newsignal})

        return efac_dct
Example 6
        def test_ephemeris(self):
            """Test physical-ephemeris delay, made three ways: from
            marginalized GP, from coefficient-based GP, from
            deterministic model."""

            ef = white_signals.MeasurementNoise(
                efac=parameter.Uniform(0.1, 5.0))

            eph = gp_signals.FourierBasisCommonGP_physicalephem(
                sat_orb_elements=None)

            ephc = gp_signals.FourierBasisCommonGP_physicalephem(
                sat_orb_elements=None, coefficients=True)

            ephd = deterministic_signals.PhysicalEphemerisSignal(
                sat_orb_elements=False)

            model = ef + eph
            modelc = ef + ephc
            modeld = ef + ephd

            pta = signal_base.PTA([model(self.psr), model(self.psr2)])
            ptac = signal_base.PTA([modelc(self.psr), modelc(self.psr2)])
            ptad = signal_base.PTA([modeld(self.psr), modeld(self.psr2)])

            cf = 1e-3 * np.random.randn(11)
            cf[0] = 1e-5  # this is more sensitive to linearity

            bs = pta.get_basis()
            da = [np.dot(bs[0], cf), np.dot(bs[1], cf)]

            params = {
                "B1855+09_efac": 1,
                "B1937+21_efac": 1,
                "B1855+09_phys_ephem_gp_coefficients": cf,
                "B1937+21_phys_ephem_gp_coefficients": cf,
            }
            db = ptac.get_delay(params=params)

            dparams = {
                "B1855+09_efac": 1,
                "B1937+21_efac": 1,
                "frame_drift_rate": cf[0],
                "d_jupiter_mass": cf[1],
                "d_saturn_mass": cf[2],
                "d_uranus_mass": cf[3],
                "d_neptune_mass": cf[4],
                "jup_orb_elements": cf[5:],
            }
            dc = ptad.get_delay(params=dparams)

            msg = "Reconstructed ephemeris signals differ!"

            assert np.allclose(da[0], db[0]), msg
            assert np.allclose(da[1], db[1]), msg

            # we don't expect an exact match since we are linearizing
            assert np.allclose(da[0], dc[0], atol=1e-3), msg
            assert np.allclose(da[1], dc[1], atol=1e-3), msg
Example 7
    def efac(self, option="by_backend"):
        """
        EFAC signal: multiplies the ToA variances by EFAC**2; the ToA
        variances are the diagonal components of the likelihood
        covariance matrix.
        """
        if option not in selections.__dict__.keys():
            raise ValueError('EFAC option must be an Enterprise selection function name')
        se = selections.Selection(selections.__dict__[option])
        efacpr = interpret_white_noise_prior(self.params.efac)
        efs = white_signals.MeasurementNoise(efac=efacpr, selection=se)
        return efs
Example 8
    def test_vector_parameter_like(self):
        """Test vector parameter in a likelihood"""

        # white noise
        efac = parameter.Uniform(0.5, 2)
        ef = white_signals.MeasurementNoise(efac=efac)

        # red noise
        nf = 3
        spec = free_spectrum(log10_rho=parameter.Uniform(-20, -10, size=nf))
        rn = gp_signals.FourierBasisGP(spec, components=nf)

        # timing model
        tm = gp_signals.TimingModel()

        # combined signal
        s = ef + rn + tm

        # PTA
        pta = signal_base.PTA([s(self.psr)])

        # parameters
        xs = np.hstack([p.sample() for p in pta.params])
        params = {
            "B1855+09_red_noise_log10_rho": xs[1:],
            "B1855+09_efac": xs[0]
        }

        # test log likelihood
        msg = "Likelihoods do not match"
        assert pta.get_lnlikelihood(xs) == pta.get_lnlikelihood(params), msg

        # test log prior
        msg = "Priors do not match"
        assert pta.get_lnprior(xs) == pta.get_lnprior(params), msg

        # test prior value
        prior = 1 / (2 - 0.5) * (1 / 10)**3
        msg = "Prior value incorrect."
        assert np.allclose(pta.get_lnprior(xs), np.log(prior)), msg

        # test PTA level parameter names
        pnames = [
            "B1855+09_efac",
            "B1855+09_red_noise_log10_rho_0",
            "B1855+09_red_noise_log10_rho_1",
            "B1855+09_red_noise_log10_rho_2",
        ]
        msg = "Incorrect parameter names"
        assert pta.param_names == pnames, msg
Example 9
        def test_formalism(self):
            # create marginalized model
            ef = white_signals.MeasurementNoise(
                efac=parameter.Uniform(0.1, 5.0))
            tm = gp_signals.TimingModel()
            ec = gp_signals.EcorrBasisModel(
                log10_ecorr=parameter.Uniform(-10, -5))
            pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12),
                                gamma=parameter.Uniform(1, 7))
            rn = gp_signals.FourierBasisGP(spectrum=pl, components=10)
            model = ef + tm + ec + rn
            pta = signal_base.PTA([model(self.psr)])

            # create hierarchical model
            tmc = gp_signals.TimingModel(coefficients=True)
            ecc = gp_signals.EcorrBasisModel(log10_ecorr=parameter.Uniform(
                -10, -5),
                                             coefficients=True)
            rnc = gp_signals.FourierBasisGP(spectrum=pl,
                                            components=10,
                                            coefficients=True)
            modelc = ef + tmc + ecc + rnc
            ptac = signal_base.PTA([modelc(self.psr)])

            ps = {
                "B1855+09_efac": 1,
                "B1855+09_basis_ecorr_log10_ecorr": -6,
                "B1855+09_red_noise_log10_A": -14,
                "B1855+09_red_noise_gamma": 3,
            }
            psc = utils.get_coefficients(pta, ps)

            d1 = ptac.get_delay(psc)[0]
            d2 = (np.dot(pta.pulsarmodels[0].signals[1].get_basis(ps),
                         psc["B1855+09_linear_timing_model_coefficients"]) +
                  np.dot(pta.pulsarmodels[0].signals[2].get_basis(ps),
                         psc["B1855+09_basis_ecorr_coefficients"]) +
                  np.dot(pta.pulsarmodels[0].signals[3].get_basis(ps),
                         psc["B1855+09_red_noise_coefficients"]))

            msg = "Implicit and explicit PTA delays are different."
            assert np.allclose(d1, d2), msg

            l1 = pta.get_lnlikelihood(ps)
            l2 = ptac.get_lnlikelihood(psc)

            # I don't know how to integrate l2 to match l1...
            msg = "Marginal and hierarchical likelihoods should be different."
            assert l1 != l2, msg
Example 10
def initialize_pta_sim(psrs, fgw):

    # continuous GW signal
    s = models.cw_block_circ(log10_fgw=np.log10(fgw), psrTerm=True)

    # white noise
    efac = parameter.Constant(1.0)
    s += white_signals.MeasurementNoise(efac=efac)

    # linearized timing model
    s += gp_signals.TimingModel(use_svd=True)

    model = [s(psr) for psr in psrs]
    pta = signal_base.PTA(model)

    return pta
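
A short sketch of how this helper might be called; `psrs` (a list of
enterprise Pulsar objects) and the GW frequency value are assumptions.

# hypothetical call with a 10 nHz continuous wave
pta = initialize_pta_sim(psrs, fgw=1e-8)

# evaluate the likelihood at a random draw from the CW priors
x0 = np.hstack([p.sample() for p in pta.params])
print(pta.get_lnlikelihood(x0))
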
Example 11
    def test_efac(self):
        """Test that efac signal returns correct covariance."""
        # set up signal and parameters
        efac = parameter.Uniform(0.1, 5)
        ef = white_signals.MeasurementNoise(efac=efac)
        efm = ef(self.psr)

        # parameters
        efac = 1.5
        params = {"B1855+09_efac": efac}

        # correct value
        nvec0 = efac**2 * self.psr.toaerrs**2

        # test
        msg = "EFAC covariance incorrect."
        assert np.all(efm.get_ndiag(params) == nvec0), msg
Example 12
def initialize_pta_sim(psrs, fgw,
                       inc_efac=True, inc_equad=False, inc_ecorr=False,
                       selection=None,
                       inc_red_noise=False, noisedict=None):
    
    # continuous GW signal
    s = models.cw_block_circ(log10_fgw=np.log10(fgw), psrTerm=True)
    
    # linearized timing model
    s += gp_signals.TimingModel(use_svd=True)

    # white noise
    if selection == 'backend':
        selection = selections.Selection(selections.by_backend)

    if inc_efac:
        efac = parameter.Constant()
        s += white_signals.MeasurementNoise(efac=efac, selection=selection)
    
    if inc_equad:
        equad = parameter.Constant()
        s += white_signals.EquadNoise(log10_equad=equad,
                                      selection=selection)
    if inc_ecorr:
        ecorr = parameter.Constant()
        s += gp_signals.EcorrBasisModel(log10_ecorr=ecorr,
                                        selection=selection)

    if inc_red_noise:
        log10_A = parameter.Constant()
        gamma = parameter.Constant()
        pl = utils.powerlaw(log10_A=log10_A, gamma=gamma)
        s += gp_signals.FourierBasisGP(pl, components=30)

    model = [s(psr) for psr in psrs]
    pta = signal_base.PTA(model)

    # set white noise parameters
    if noisedict is None:
        print('No noise dictionary provided!...')
    else:
        pta.set_default_params(noisedict)
    
    return pta
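
A hedged example call for the version above with backend-selected white
noise and a fixed noise dictionary; the key and value shown are purely
illustrative.

noisedict = {"J1909-3744_L-wide_PUPPI_efac": 1.02}  # hypothetical entry
pta = initialize_pta_sim(psrs, fgw=2e-8,
                         inc_efac=True, inc_equad=False, inc_ecorr=False,
                         selection='backend', noisedict=noisedict)
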
Example 13
    def test_add_efac_equad(self):
        """Test that addition of efac and equad signal returns
        correct covariance.
        """
        # set up signals
        efac = parameter.Uniform(0.1, 5)
        ef = white_signals.MeasurementNoise(efac=efac)
        equad = parameter.Uniform(-10, -5)
        eq = white_signals.EquadNoise(log10_equad=equad)
        s = ef + eq
        m = s(self.psr)

        # set parameters
        efac = 1.5
        equad = -6.4
        params = {"B1855+09_efac": efac, "B1855+09_log10_equad": equad}

        # correct value
        nvec0 = efac**2 * self.psr.toaerrs**2
        nvec0 += 10**(2 * equad) * np.ones_like(self.psr.toas)

        # test
        msg = "EFAC/EQUAD covariance incorrect."
        assert np.all(m.get_ndiag(params) == nvec0), msg
Example 14
    def test_wideband(self):
        ms = white_signals.MeasurementNoise(selection=Selection(selections.by_backend))

        dm = gp_signals.WidebandTimingModel(
            dmefac=parameter.Uniform(0.9, 1.1),
            dmefac_selection=Selection(selections.by_backend),
            dmjump=parameter.Normal(0, 1),
            dmjump_selection=Selection(selections.by_frontend),
        )

        model = ms + dm

        pta = signal_base.PTA([model(self.psr)])

        ps = parameter.sample(pta.params)

        pta.get_lnlikelihood(ps)

        dmtiming = pta.pulsarmodels[0].signals[1]

        msg = "DMEFAC masks do not cover the data."
        assert np.all(sum(dmtiming._dmefac_masks) == 1), msg

        msg = "DMJUMP masks do not cover the data."
        assert np.all(sum(dmtiming._dmjump_masks) == 1), msg

        # start with zero DMEFAC and DMJUMP
        p0 = {par.name: (1 if "dmefac" in par.name else 0) for par in dmtiming.params}

        pta.get_lnlikelihood(params=p0)

        phi0 = dmtiming.get_phi(params=p0)
        dl0 = dmtiming.get_delay(params=p0)

        dm_flags, dme_flags = np.array(self.psr.flags["pp_dm"], "d"), np.array(self.psr.flags["pp_dme"], "d")

        delays = np.zeros_like(self.psr.toas)

        check = 0
        for index, par in enumerate(self.psr.fitpars):
            if "DMX" not in par:
                msg = "Problem with unbound timing parameters"
                assert phi0[index] == 1e40, msg
            else:
                dmx = self.psr.dmx[par]
                which = (dmx["DMXR1"] <= (self.psr.stoas / 86400)) & ((self.psr.stoas / 86400) < dmx["DMXR2"])
                check += which

                avgdm = np.sum(dm_flags[which] / dme_flags[which] ** 2) / np.sum(1.0 / dme_flags[which] ** 2)
                vardm = 1.0 / np.sum(1.0 / dme_flags[which] ** 2)

                msg = "Priors do not match"
                assert np.allclose(vardm, phi0[index]), msg

                delays[which] = (avgdm - self.psr.dm - dmx["DMX"]) / (2.41e-4 * self.psr.freqs[which] ** 2)

        msg = "Not all TOAs are covered by DMX"
        assert np.all(check == 1), msg

        msg = "Delays do not match"
        assert np.allclose(dl0, delays), msg

        # sample DMEFACs randomly
        p1 = {par.name: (parameter.sample(par)[par.name] if "dmefac" in par.name else 0) for par in dmtiming.params}

        pta.get_lnlikelihood(params=p1)

        phi1 = dmtiming.get_phi(params=p1)
        dl1 = dmtiming.get_delay(params=p1)

        sel = Selection(selections.by_backend)(self.psr)
        msg = "Problem making selection"
        assert np.all(sum(m for m in sel.masks.values()) == 1), msg

        dme_flags_var = dme_flags.copy()

        for key, mask in sel.masks.items():
            dmefac = p1["J1832-0836_" + key + "_dmefac"]
            dme_flags_var[mask] *= dmefac

        for index, par in enumerate(self.psr.fitpars):
            if "DMX" not in par:
                msg = "Problem with unbound timing parameters"
                assert phi1[index] == 1e40, msg
            else:
                dmx = self.psr.dmx[par]
                which = (dmx["DMXR1"] <= (self.psr.stoas / 86400)) & ((self.psr.stoas / 86400) < dmx["DMXR2"])

                avgdm = np.sum(dm_flags[which] / dme_flags_var[which] ** 2) / np.sum(1.0 / dme_flags_var[which] ** 2)
                vardm = 1.0 / np.sum(1.0 / dme_flags_var[which] ** 2)

                msg = "Priors do not match"
                assert np.allclose(vardm, phi1[index]), msg

                delays[which] = (avgdm - self.psr.dm - dmx["DMX"]) / (2.41e-4 * self.psr.freqs[which] ** 2)

        msg = "Delays do not match"
        assert np.allclose(dl1, delays), msg
Example 15
# GW parameters (initialize with names here to use parameters in common across pulsars)
log10_A_gw_1 = parameter.Uniform(-18, -13)('zlog10_A_gw')
gamma_gw_1 = parameter.Constant(13 / 3)('zgamma_gw')

# Second GW parameters
log10_A_gw_2 = parameter.Uniform(-18, -13)('zlog10_A_other_gw')
gamma_gw_2 = parameter.Constant(7 / 3)('zgamma_other_gw')

##### Set up signals #####

# timing model
tm = gp_signals.TimingModel()

# white noise
ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
eq = white_signals.EquadNoise(log10_equad=log10_equad, selection=selection)
ec = white_signals.EcorrKernelNoise(log10_ecorr=log10_ecorr,
                                    selection=selection)

# red noise (powerlaw with 30 frequencies)
pl = utils.powerlaw(log10_A=red_noise_log10_A, gamma=red_noise_gamma)
rn = gp_signals.FourierBasisGP(spectrum=pl, components=30, Tspan=Tspan)

cpl_1 = utils.powerlaw(log10_A=log10_A_gw_1, gamma=gamma_gw_1)
cpl_2 = utils.powerlaw(log10_A=log10_A_gw_2, gamma=gamma_gw_2)

# Common red noise process with no correlations
crn_1 = gp_signals.FourierBasisGP(spectrum=cpl_1,
                                  components=30,
                                  Tspan=Tspan,
Example 16
parfiles = sorted(glob.glob('data/*.par'))
timfiles = sorted(glob.glob('data/*.tim'))

psrs = []
for p, t in zip(parfiles, timfiles):
    psr = Pulsar(p, t)
    psrs.append(psr)

##### parameters and priors #####

# Uniform prior on EFAC
# efac = parameter.Uniform(0.1, 5.0)
efac = parameter.LinearExp(0.1, 5.0)

# white noise
ef = white_signals.MeasurementNoise(efac=efac)

# timing model
tm = gp_signals.TimingModel()

# full model is sum of components
model = ef + tm

# initialize PTA
pta = signal_base.PTA([model(psrs[0])])

priors = bilby_warp.get_bilby_prior_dict(pta)
print(priors)
parameters = dict.fromkeys(priors.keys())
likelihood = bilby_warp.PTABilbyLikelihood(pta, parameters)
label = 'test_bilby'
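
The snippet stops just before the sampler call. A hedged continuation using
standard bilby usage (the sampler choice, nlive and outdir are illustrative
assumptions, not taken from the original script):

import bilby

# run the sampler on the PTA likelihood wrapped above
result = bilby.run_sampler(likelihood=likelihood, priors=priors,
                           sampler='dynesty', nlive=500,
                           outdir='outdir', label=label)
result.plot_corner()
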
Example 17
def gwb_ul(psrs_cut, num_points):
    # find the maximum time span to set GW frequency sampling
    tmin = [p.toas.min() for p in psrs_cut]
    tmax = [p.toas.max() for p in psrs_cut]
    Tspan = np.max(tmax) - np.min(tmin)
    # define selection by observing backend
    selection = selections.Selection(selections.by_backend)
    # white noise parameters
    # we set these ourselves so we know the most likely values!
    efac = parameter.Constant(1)
    # equad = parameter.Constant(0)
    # ecorr = parameter.Constant(0)

    # red noise parameters
    log10_A = parameter.LinearExp(-20, -11)
    gamma = parameter.Uniform(0, 7)

    # GW parameters (initialize with names here to use parameters in common across pulsars)
    log10_A_gw = parameter.LinearExp(-18, -12)('log10_A_gw')
    gamma_gw = parameter.Constant(4.33)('gamma_gw')
    # white noise
    ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
    # eq = white_signals.EquadNoise(log10_equad=equad, selection=selection)
    # ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection)

    # red noise (powerlaw with 30 frequencies)
    pl = utils.powerlaw(log10_A=log10_A, gamma=gamma)
    rn = gp_signals.FourierBasisGP(spectrum=pl, components=30, Tspan=Tspan)

    # gwb (no spatial correlations)
    cpl = utils.powerlaw(log10_A=log10_A_gw, gamma=gamma_gw)
    gw = gp_signals.FourierBasisGP(spectrum=cpl,
                                   components=30,
                                   Tspan=Tspan,
                                   name='gw')

    # timing model
    tm = gp_signals.TimingModel(
        use_svd=True)  # stabilizing timing model design matrix with SVD
    s = ef + rn + gw + tm

    # intialize PTA
    models = []

    for p in psrs_cut:
        models.append(s(p))

    pta = signal_base.PTA(models)
    outDir = './chains/psrs/{0}'.format(psrs_cut[0].name)
    sample = sampler.setup_sampler(pta, outdir=outDir)
    x0 = np.hstack([p.sample() for p in pta.params])

    # sampler for N steps
    N = int(
        num_points)  # normally, we would use 5e6 samples (this will save time)
    sample.sample(
        x0,
        N,
        SCAMweight=30,
        AMweight=15,
        DEweight=50,
    )

    chain = np.loadtxt(os.path.join(outDir, 'chain_1.txt'))
    pars = np.loadtxt(outDir + '/pars.txt', dtype=np.unicode_)
    ind = list(pars).index('log10_A_gw')

    UL, unc = model_utils.ul(chain[:, ind])
    return UL, unc
Example 18
    def test_conditional_gp(self):
        ef = white_signals.MeasurementNoise(efac=parameter.Uniform(0.1, 5.0))
        tm = gp_signals.TimingModel()
        ec = gp_signals.EcorrBasisModel(log10_ecorr=parameter.Uniform(-10, -5))
        pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12),
                            gamma=parameter.Uniform(1, 7))
        rn = gp_signals.FourierBasisGP(spectrum=pl,
                                       components=10,
                                       combine=False)

        model = ef + tm + ec + rn
        pta = signal_base.PTA([model(self.psr), model(self.psr2)])

        p0 = {
            "B1855+09_basis_ecorr_log10_ecorr": -6.051740765663904,
            "B1855+09_efac": 2.9027266737466095,
            "B1855+09_red_noise_gamma": 6.9720332277819725,
            "B1855+09_red_noise_log10_A": -16.749192700991543,
            "B1937+21_basis_ecorr_log10_ecorr": -9.726747733721872,
            "B1937+21_efac": 3.959178240268702,
            "B1937+21_red_noise_gamma": 2.9030772884814797,
            "B1937+21_red_noise_log10_A": -17.978562921948992,
        }

        c = utils.ConditionalGP(pta)
        cmean = c.get_mean_coefficients(p0)

        # build index for the global coefficient vector
        idx, ntot = {}, 0
        for l, v in cmean.items():
            idx[l] = slice(ntot, ntot + len(v))
            ntot = ntot + len(v)

        # repeat the computation using the common-signal formalism
        TNrs = pta.get_TNr(p0)
        TNTs = pta.get_TNT(p0)
        phiinvs = pta.get_phiinv(p0, logdet=False, method="cliques")

        TNr = np.concatenate(TNrs)
        Sigma = sps.block_diag(TNTs, "csc") + sps.block_diag(
            [np.diag(phiinvs[0]), np.diag(phiinvs[1])])

        ch = cholesky(Sigma)
        mn = ch(TNr)
        iSigma = sps.linalg.inv(Sigma)

        # check mean values
        msg = "Conditional GP coefficient value does not match"
        for l, v in cmean.items():
            assert np.allclose(mn[idx[l]], v, atol=1e-4, rtol=1e-4), msg

        # check variances
        par = "B1937+21_linear_timing_model_coefficients"
        c1 = np.cov(
            np.array([cs[par] for cs in c.sample_coefficients(p0, n=10000)]).T)
        c2 = iSigma[idx[par], idx[par]].toarray().T
        msg = "Conditional GP coefficient variance does not match"
        assert np.allclose(c1, c2, atol=1e-4, rtol=1e-4), msg

        # check mean processes
        proc = "B1937+21_linear_timing_model"
        p1 = c.get_mean_processes(p0)[proc]
        p2 = np.dot(pta["B1937+21"]["linear_timing_model"].get_basis(),
                    mn[idx[par]])
        msg = "Conditional GP time series does not match"
        assert np.allclose(p1, p2, atol=1e-4, rtol=1e-4), msg

        # check mean of sampled processes
        p2 = np.mean(np.array(
            [pc[proc] for pc in c.sample_processes(p0, n=1000)]),
                     axis=0)
        msg = "Mean of sampled conditional GP processes does not match"
        assert np.allclose(p1, p2, atol=1e-4, rtol=1e-4), msg

        # now try with a common process

        crn = gp_signals.FourierBasisCommonGP(spectrum=pl,
                                              orf=utils.hd_orf(),
                                              components=10,
                                              combine=False)

        model = ef + tm + ec + crn
        pta = signal_base.PTA([model(self.psr), model(self.psr2)])

        p0 = {
            "B1855+09_basis_ecorr_log10_ecorr": -5.861847220080768,
            "B1855+09_efac": 4.588342210948306,
            "B1937+21_basis_ecorr_log10_ecorr": -9.151872649912377,
            "B1937+21_efac": 0.8947815819783302,
            "common_fourier_gamma": 6.638289750637263,
            "common_fourier_log10_A": -15.68180643904114,
        }

        c = utils.ConditionalGP(pta)
        cmean = c.get_mean_coefficients(p0)

        idx, ntot = {}, 0
        for l, v in cmean.items():
            idx[l] = slice(ntot, ntot + len(v))
            ntot = ntot + len(v)

        TNrs = pta.get_TNr(p0)
        TNTs = pta.get_TNT(p0)
        phiinvs = pta.get_phiinv(p0, logdet=False, method="cliques")

        TNr = np.concatenate(TNrs)
        Sigma = sps.block_diag(TNTs, "csc") + sps.csc_matrix(phiinvs)

        ch = cholesky(Sigma)
        mn = ch(TNr)

        msg = "Conditional GP coefficient value does not match for common GP"
        for l, v in cmean.items():
            assert np.allclose(mn[idx[l]], v), msg
Example 19
def model_simple(psrs, psd='powerlaw', efac=False, n_gwbfreqs=30,
                 components=30, freqs=None,
                 vary_gamma=False, upper_limit=False, bayesephem=False,
                 select='backend', red_noise=False, Tspan=None, hd_orf=False,
                 rn_dropout=False, dp_threshold=0.5):
    """
    Reads in a list of enterprise Pulsar instances and returns a PTA
    instantiated with the simplest model allowable for enterprise:
    per pulsar:
        1. fixed EFAC per backend/receiver system at 1.0
        2. Linear timing model.
        3. Red noise modeled as a power-law with
            30 sampling frequencies. Default=False
    global:
        1. Common red noise modeled with a user-defined PSD with
        30 sampling frequencies. Available PSDs are
        ['powerlaw', 'turnover', 'spectrum']
        2. Optional physical ephemeris modeling.
    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the
        default value.
    :param vary_gamma:
        If True, vary the common red process spectral index with a
        Uniform(0, 7) prior; otherwise it is fixed to 4.33.
    :param upper_limit:
        Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
        is recommended that the spectral index also be fixed to a specific
        value.
    :param bayesephem:
        Include BayesEphem model. Set to False by default
    """

    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # find the maximum time span to set GW frequency sampling
    if Tspan is None:
        Tspan = model_utils.get_tspan(psrs)

    # timing model
    model = gp_signals.TimingModel()

    # The only white noise is EFAC (fixed to 1.0 unless efac=True)
    selection = selections.Selection(selections.by_backend)
    if efac:
        ef = parameter.Uniform(0.1,10.0)
    else:
        ef = parameter.Constant(1.00)

    model += white_signals.MeasurementNoise(efac=ef, selection=selection)

    # common red noise block
    if upper_limit:
        log10_A_gw = parameter.LinearExp(-18,-12)('gw_log10_A')
    else:
        log10_A_gw = parameter.Uniform(-18,-12)('gw_log10_A')

    if vary_gamma:
        gamma_gw = parameter.Uniform(0,7)('gw_gamma')
    else:
        gamma_gw = parameter.Constant(4.33)('gw_gamma')

    pl = signal_base.Function(utils.powerlaw, log10_A=log10_A_gw,
                              gamma=gamma_gw)


    if hd_orf:
        if freqs is None:
            gw = gp_signals.FourierBasisCommonGP(spectrum=pl,
                                                 orf=utils.hd_orf(),
                                                 components=n_gwbfreqs,
                                                 Tspan=Tspan,
                                                 name='gw')
        else:
            gw = gp_signals.FourierBasisCommonGP(spectrum=pl,
                                                 orf=utils.hd_orf(),
                                                 modes=freqs,
                                                 name='gw')
        model += gw
    else:
        if freqs is None:
            crn = gp_signals.FourierBasisGP(spectrum=pl, components=n_gwbfreqs,
                                            Tspan=Tspan, name='gw')
        else:
            crn = gp_signals.FourierBasisGP(spectrum=pl, modes=freqs,
                                            name='gw')
        model += crn

    if red_noise and rn_dropout:
        if amp_prior == 'uniform':
            log10_A = parameter.LinearExp(-20, -11)
        elif amp_prior == 'log-uniform':
            log10_A = parameter.Uniform(-20, -11)
        else:
            log10_A = parameter.Uniform(-20, -11)

        gamma = parameter.Uniform(0, 7)
        k_drop = parameter.Uniform(0, 1)
        if dp_threshold == 6.0:
            dp_threshold = parameter.Uniform(0,1)('k_threshold')
        pl = dropout.dropout_powerlaw(log10_A=log10_A, gamma=gamma,
                                     k_drop=k_drop, k_threshold=dp_threshold)
        rn = gp_signals.FourierBasisGP(pl, components=components,
                                       Tspan=Tspan, name='red_noise')
        model += rn
    elif red_noise:
        # red noise
        model += models.red_noise_block(prior=amp_prior, Tspan=Tspan,
                                        components=components)

    # ephemeris model
    if bayesephem:
        model += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)

    # set up PTA
    pta = signal_base.PTA([model(p) for p in psrs])

    return pta
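
A brief usage sketch, assuming `psrs` is a list of enterprise Pulsar
objects prepared elsewhere and numpy is imported as np.

# hypothetical call: fixed EFAC=1, per-pulsar red noise, HD-correlated GWB
pta = model_simple(psrs, red_noise=True, hd_orf=True)
print(pta.param_names)

# one likelihood evaluation at a random prior draw
x0 = np.hstack([p.sample() for p in pta.params])
print(pta.get_lnlikelihood(x0))
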
Example 20
def white_noise_block(vary=False,
                      inc_ecorr=False,
                      gp_ecorr=False,
                      efac1=False,
                      select='backend',
                      name=None):
    """
    Returns the white noise block of the model:

        1. EFAC per backend/receiver system
        2. EQUAD per backend/receiver system
        3. ECORR per backend/receiver system

    :param vary:
        If set to true we vary these parameters
        with uniform priors. Otherwise they are set to constants
        with values to be set later.
    :param inc_ecorr:
        include ECORR, needed for NANOGrav channelized TOAs
    :param gp_ecorr:
        whether to use the Gaussian process model for ECORR
    :param efac1:
        use a strong prior on EFAC = Normal(mu=1, stdev=0.1)
    """

    if select == 'backend':
        # define selection by observing backend
        backend = selections.Selection(selections.by_backend)
        # define selection by nanograv backends
        backend_ng = selections.Selection(selections.nanograv_backends)
    else:
        # define no selection
        backend = selections.Selection(selections.no_selection)
        # also apply no selection to ECORR so inc_ecorr=True does not
        # reference an undefined backend_ng
        backend_ng = selections.Selection(selections.no_selection)

    # white noise parameters
    if vary:
        if efac1:
            efac = parameter.Normal(1.0, 0.1)
        else:
            efac = parameter.Uniform(0.01, 10.0)
        equad = parameter.Uniform(-8.5, -5)
        if inc_ecorr:
            ecorr = parameter.Uniform(-8.5, -5)
    else:
        efac = parameter.Constant()
        equad = parameter.Constant()
        if inc_ecorr:
            ecorr = parameter.Constant()

    # white noise signals
    ef = white_signals.MeasurementNoise(efac=efac,
                                        selection=backend,
                                        name=name)
    eq = white_signals.EquadNoise(log10_equad=equad,
                                  selection=backend,
                                  name=name)
    if inc_ecorr:
        if gp_ecorr:
            if name is None:
                ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr,
                                                selection=backend_ng)
            else:
                ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr,
                                                selection=backend_ng,
                                                name=name)

        else:
            ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr,
                                                selection=backend_ng,
                                                name=name)

    # combine signals
    if inc_ecorr:
        s = ef + eq + ec
    elif not inc_ecorr:
        s = ef + eq

    return s
Example 21
    def test_pta(self):

        # get parameters from PAL2 style noise files
        params = get_noise_from_pal2(datadir + "/B1855+09_noise.txt")
        params2 = get_noise_from_pal2(datadir + "/J1909-3744_noise.txt")
        params.update(params2)

        # setup basic model
        efac = parameter.Constant()
        equad = parameter.Constant()
        ecorr = parameter.Constant()
        log10_A = parameter.Constant()
        gamma = parameter.Constant()

        selection = Selection(selections.by_backend)

        ms = white_signals.MeasurementNoise(efac=efac,
                                            log10_t2equad=equad,
                                            selection=selection)
        ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr,
                                            selection=selection)

        pl = utils.powerlaw(log10_A=log10_A, gamma=gamma)
        rn = gp_signals.FourierBasisGP(pl)

        s = ms + ec + rn
        pta = s(self.psrs[0]) + s(self.psrs[1])

        # set parameters
        pta.set_default_params(params)

        # get parameters
        efacs, equads, ecorrs, log10_A, gamma = [], [], [], [], []
        for pname in [p.name for p in self.psrs]:
            efacs.append([
                params[key] for key in sorted(params.keys())
                if "efac" in key and pname in key
            ])
            equads.append([
                params[key] for key in sorted(params.keys())
                if "equad" in key and pname in key
            ])
            ecorrs.append([
                params[key] for key in sorted(params.keys())
                if "ecorr" in key and pname in key
            ])
            log10_A.append(params["{}_red_noise_log10_A".format(pname)])
            gamma.append(params["{}_red_noise_gamma".format(pname)])

        # correct value
        tflags = [sorted(list(np.unique(p.backend_flags))) for p in self.psrs]
        cfs, logdets, phis = [], [], []
        for ii, (psr, flags) in enumerate(zip(self.psrs, tflags)):
            nvec0 = np.zeros_like(psr.toas)
            for ct, flag in enumerate(flags):
                ind = psr.backend_flags == flag
                nvec0[ind] = efacs[ii][ct]**2 * (
                    psr.toaerrs[ind]**2 +
                    10**(2 * equads[ii][ct]) * np.ones(np.sum(ind)))

            # get the basis
            bflags = psr.backend_flags
            Umats = []
            for flag in np.unique(bflags):
                mask = bflags == flag
                Umats.append(
                    utils.create_quantization_matrix(psr.toas[mask])[0])
            nepoch = sum(U.shape[1] for U in Umats)
            U = np.zeros((len(psr.toas), nepoch))
            jvec = np.zeros(nepoch)
            netot = 0
            for ct, flag in enumerate(np.unique(bflags)):
                mask = bflags == flag
                nn = Umats[ct].shape[1]
                U[mask, netot:nn + netot] = Umats[ct]
                jvec[netot:nn + netot] = 10**(2 * ecorrs[ii][ct])
                netot += nn

            # get covariance matrix
            cov = np.diag(nvec0) + np.dot(U * jvec[None, :], U.T)
            cf = sl.cho_factor(cov)
            logdet = np.sum(2 * np.log(np.diag(cf[0])))
            cfs.append(cf)
            logdets.append(logdet)

            F, f2 = utils.createfourierdesignmatrix_red(psr.toas, nmodes=20)
            phi = utils.powerlaw(f2, log10_A=log10_A[ii], gamma=gamma[ii])
            phis.append(phi)

        # tests
        Ns = pta.get_ndiag(params)
        pphis = pta.get_phi(params)
        pphiinvs = pta.get_phiinv(params)
        Ts = pta.get_basis(params)
        zipped = zip(logdets, cfs, phis, self.psrs, Ns, pphis, pphiinvs, Ts)
        for logdet, cf, phi, psr, N, pphi, pphiinv, T in zipped:
            msg = "EFAC/ECORR logdet incorrect."
            assert np.allclose(N.solve(psr.residuals, logdet=True)[1],
                               logdet,
                               rtol=1e-10), msg

            msg = "EFAC/ECORR D1 solve incorrect."
            assert np.allclose(N.solve(psr.residuals),
                               sl.cho_solve(cf, psr.residuals),
                               rtol=1e-10), msg

            msg = "EFAC/ECORR 1D1 solve incorrect."
            assert np.allclose(
                N.solve(psr.residuals, left_array=psr.residuals),
                np.dot(psr.residuals, sl.cho_solve(cf, psr.residuals)),
                rtol=1e-10,
            ), msg

            msg = "EFAC/ECORR 2D1 solve incorrect."
            assert np.allclose(N.solve(psr.residuals, left_array=T),
                               np.dot(T.T, sl.cho_solve(cf, psr.residuals)),
                               rtol=1e-10), msg

            msg = "EFAC/ECORR 2D2 solve incorrect."
            assert np.allclose(N.solve(T, left_array=T),
                               np.dot(T.T, sl.cho_solve(cf, T)),
                               rtol=1e-10), msg

            # spectrum test
            msg = "Spectrum incorrect for GP Fourier signal."
            assert np.all(pphi == phi), msg

            # inverse spectrum test
            msg = "Spectrum inverse incorrect for GP Fourier signal."
            assert np.all(pphiinv == 1 / phi), msg
Example 22
    def test_single_pulsar(self):

        # get parameters from PAL2 style noise files
        params = get_noise_from_pal2(datadir + "/B1855+09_noise.txt")

        # setup basic model
        efac = parameter.Constant()
        equad = parameter.Constant()
        ecorr = parameter.Constant()
        log10_A = parameter.Constant()
        gamma = parameter.Constant()

        selection = Selection(selections.by_backend)

        ms = white_signals.MeasurementNoise(efac=efac,
                                            log10_t2equad=equad,
                                            selection=selection)
        ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr,
                                            selection=selection)

        pl = utils.powerlaw(log10_A=log10_A, gamma=gamma)
        rn = gp_signals.FourierBasisGP(pl)

        s = ms + ec + rn
        m = s(self.psrs[0])

        # set parameters
        m.set_default_params(params)

        # get parameters
        efacs = [params[key] for key in sorted(params.keys()) if "efac" in key]
        equads = [
            params[key] for key in sorted(params.keys()) if "equad" in key
        ]
        ecorrs = [
            params[key] for key in sorted(params.keys()) if "ecorr" in key
        ]
        log10_A = params["B1855+09_red_noise_log10_A"]
        gamma = params["B1855+09_red_noise_gamma"]

        # correct value
        flags = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
        nvec0 = np.zeros_like(self.psrs[0].toas)
        for ct, flag in enumerate(np.unique(flags)):
            ind = flag == self.psrs[0].backend_flags
            nvec0[ind] = efacs[ct]**2 * (
                self.psrs[0].toaerrs[ind]**2 +
                10**(2 * equads[ct]) * np.ones(np.sum(ind)))

        # get the basis
        bflags = self.psrs[0].backend_flags
        Umats = []
        for flag in np.unique(bflags):
            mask = bflags == flag
            Umats.append(
                utils.create_quantization_matrix(self.psrs[0].toas[mask])[0])
        nepoch = sum(U.shape[1] for U in Umats)
        U = np.zeros((len(self.psrs[0].toas), nepoch))
        jvec = np.zeros(nepoch)
        netot = 0
        for ct, flag in enumerate(np.unique(bflags)):
            mask = bflags == flag
            nn = Umats[ct].shape[1]
            U[mask, netot:nn + netot] = Umats[ct]
            jvec[netot:nn + netot] = 10**(2 * ecorrs[ct])
            netot += nn

        # get covariance matrix
        cov = np.diag(nvec0) + np.dot(U * jvec[None, :], U.T)
        cf = sl.cho_factor(cov)
        logdet = np.sum(2 * np.log(np.diag(cf[0])))

        # test
        msg = "EFAC/ECORR logdet incorrect."
        N = m.get_ndiag(params)
        assert np.allclose(N.solve(self.psrs[0].residuals, logdet=True)[1],
                           logdet,
                           rtol=1e-10), msg

        msg = "EFAC/ECORR D1 solve incorrect."
        assert np.allclose(N.solve(self.psrs[0].residuals),
                           sl.cho_solve(cf, self.psrs[0].residuals),
                           rtol=1e-10), msg

        msg = "EFAC/ECORR 1D1 solve incorrect."
        assert np.allclose(
            N.solve(self.psrs[0].residuals, left_array=self.psrs[0].residuals),
            np.dot(self.psrs[0].residuals,
                   sl.cho_solve(cf, self.psrs[0].residuals)),
            rtol=1e-10,
        ), msg

        msg = "EFAC/ECORR 2D1 solve incorrect."
        T = m.get_basis(params)
        assert np.allclose(
            N.solve(self.psrs[0].residuals, left_array=T),
            np.dot(T.T, sl.cho_solve(cf, self.psrs[0].residuals)),
            rtol=1e-10,
        ), msg

        msg = "EFAC/ECORR 2D2 solve incorrect."
        assert np.allclose(N.solve(T, left_array=T),
                           np.dot(T.T, sl.cho_solve(cf, T)),
                           rtol=1e-10), msg

        F, f2 = utils.createfourierdesignmatrix_red(self.psrs[0].toas,
                                                    nmodes=20)

        # spectrum test
        phi = utils.powerlaw(f2, log10_A=log10_A, gamma=gamma)
        msg = "Spectrum incorrect for GP Fourier signal."
        assert np.all(m.get_phi(params) == phi), msg

        # inverse spectrum test
        msg = "Spectrum inverse incorrect for GP Fourier signal."
        assert np.all(m.get_phiinv(params) == 1 / phi), msg
Example 23
            gw_log10_A = parameter.Uniform(-18, -11)('gw_log10_A')
        gw_gamma = parameter.Constant(args.gamma_gw)('gw_gamma')

    # White noise parameter priors
    efac = parameter.Constant()
    equad = parameter.Constant()
    ecorr = parameter.Constant()

    Nf = args.nfreqs
    freqs = np.linspace(1 / Tspan, Nf / Tspan, Nf)

    # # white noise
    selection = selections.Selection(selections.nanograv_backends)

    ef = white_signals.MeasurementNoise(efac=efac,
                                        log10_t2equad=equad,
                                        selection=selection)
    ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection)

    # red noise (powerlaw with 30 frequencies)
    pl = utils.powerlaw(log10_A=log10_A, gamma=gamma)
    rn = gp_signals.FourierBasisGP(spectrum=pl, modes=freqs)

    # timing model
    tm = gp_signals.TimingModel()

    # gw (powerlaw with 5 frequencies)

    gw_pl = utils.powerlaw(log10_A=gw_log10_A, gamma=gw_gamma)
    gw_pshift = gp_signals.FourierBasisGP(
        spectrum=gw_pl,
Example 24
    def compute_like(self, npsrs=1, inc_corr=False, inc_kernel=False):

        # get parameters from PAL2 style noise files
        params = get_noise_from_pal2(datadir + "/B1855+09_noise.txt")
        params2 = get_noise_from_pal2(datadir + "/J1909-3744_noise.txt")
        params.update(params2)

        psrs = self.psrs if npsrs == 2 else [self.psrs[0]]

        if inc_corr:
            params.update({"GW_gamma": 4.33, "GW_log10_A": -15.0})

        # find the maximum time span to set GW frequency sampling
        tmin = [p.toas.min() for p in psrs]
        tmax = [p.toas.max() for p in psrs]
        Tspan = np.max(tmax) - np.min(tmin)

        # setup basic model
        efac = parameter.Constant()
        equad = parameter.Constant()
        ecorr = parameter.Constant()
        log10_A = parameter.Constant()
        gamma = parameter.Constant()

        selection = Selection(selections.by_backend)

        ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
        eq = white_signals.EquadNoise(log10_equad=equad, selection=selection)
        ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr,
                                            selection=selection)

        pl = utils.powerlaw(log10_A=log10_A, gamma=gamma)
        rn = gp_signals.FourierBasisGP(pl)

        orf = utils.hd_orf()
        crn = gp_signals.FourierBasisCommonGP(pl,
                                              orf,
                                              components=20,
                                              name="GW",
                                              Tspan=Tspan)

        tm = gp_signals.TimingModel()

        log10_sigma = parameter.Uniform(-10, -5)
        log10_lam = parameter.Uniform(np.log10(86400), np.log10(1500 * 86400))
        basis = create_quant_matrix(dt=7 * 86400)
        prior = se_kernel(log10_sigma=log10_sigma, log10_lam=log10_lam)
        se = gp_signals.BasisGP(prior, basis, name="se")

        # set up kernel stuff
        if isinstance(inc_kernel, bool):
            inc_kernel = [inc_kernel] * npsrs

        if inc_corr:
            s = ef + eq + ec + rn + crn + tm
        else:
            s = ef + eq + ec + rn + tm

        models = []
        for ik, psr in zip(inc_kernel, psrs):
            snew = s + se if ik else s
            models.append(snew(psr))

        pta = signal_base.PTA(models)

        # set parameters
        pta.set_default_params(params)

        # SE kernel parameters
        log10_sigmas, log10_lams = [-7.0, -6.5], [7.0, 6.5]
        params.update({
            "B1855+09_se_log10_lam": log10_lams[0],
            "B1855+09_se_log10_sigma": log10_sigmas[0],
            "J1909-3744_se_log10_lam": log10_lams[1],
            "J1909-3744_se_log10_sigma": log10_sigmas[1],
        })

        # get parameters
        efacs, equads, ecorrs, log10_A, gamma = [], [], [], [], []
        lsig, llam = [], []
        for pname in [p.name for p in psrs]:
            efacs.append([
                params[key] for key in sorted(params.keys())
                if "efac" in key and pname in key
            ])
            equads.append([
                params[key] for key in sorted(params.keys())
                if "equad" in key and pname in key
            ])
            ecorrs.append([
                params[key] for key in sorted(params.keys())
                if "ecorr" in key and pname in key
            ])
            log10_A.append(params["{}_red_noise_log10_A".format(pname)])
            gamma.append(params["{}_red_noise_gamma".format(pname)])
            lsig.append(params["{}_se_log10_sigma".format(pname)])
            llam.append(params["{}_se_log10_lam".format(pname)])
        GW_gamma = 4.33
        GW_log10_A = -15.0

        # correct value
        tflags = [sorted(list(np.unique(p.backend_flags))) for p in psrs]
        cfs, logdets, phis, Ts = [], [], [], []
        for ii, (ik, psr, flags) in enumerate(zip(inc_kernel, psrs, tflags)):
            nvec0 = np.zeros_like(psr.toas)
            for ct, flag in enumerate(flags):
                ind = psr.backend_flags == flag
                nvec0[ind] = efacs[ii][ct]**2 * psr.toaerrs[ind]**2
                nvec0[ind] += 10**(2 * equads[ii][ct]) * np.ones(np.sum(ind))

            # get the basis
            bflags = psr.backend_flags
            Umats = []
            for flag in np.unique(bflags):
                mask = bflags == flag
                Umats.append(
                    utils.create_quantization_matrix(psr.toas[mask])[0])
            nepoch = sum(U.shape[1] for U in Umats)
            U = np.zeros((len(psr.toas), nepoch))
            jvec = np.zeros(nepoch)
            netot = 0
            for ct, flag in enumerate(np.unique(bflags)):
                mask = bflags == flag
                nn = Umats[ct].shape[1]
                U[mask, netot:nn + netot] = Umats[ct]
                jvec[netot:nn + netot] = 10**(2 * ecorrs[ii][ct])
                netot += nn

            # get covariance matrix
            cov = np.diag(nvec0) + np.dot(U * jvec[None, :], U.T)
            cf = sl.cho_factor(cov)
            logdet = np.sum(2 * np.log(np.diag(cf[0])))
            cfs.append(cf)
            logdets.append(logdet)

            F, f2 = utils.createfourierdesignmatrix_red(psr.toas,
                                                        nmodes=20,
                                                        Tspan=Tspan)
            Mmat = psr.Mmat.copy()
            norm = np.sqrt(np.sum(Mmat**2, axis=0))
            Mmat /= norm
            U2, avetoas = create_quant_matrix(psr.toas, dt=7 * 86400)
            if ik:
                T = np.hstack((F, Mmat, U2))
            else:
                T = np.hstack((F, Mmat))
            Ts.append(T)
            phi = utils.powerlaw(f2, log10_A=log10_A[ii], gamma=gamma[ii])
            if inc_corr:
                phigw = utils.powerlaw(f2, log10_A=GW_log10_A, gamma=GW_gamma)
            else:
                phigw = np.zeros(40)
            K = se_kernel(avetoas,
                          log10_sigma=log10_sigmas[ii],
                          log10_lam=log10_lams[ii])
            k = np.diag(
                np.concatenate((phi + phigw, np.ones(Mmat.shape[1]) * 1e40)))
            if ik:
                k = sl.block_diag(k, K)
            phis.append(k)

        # manually compute loglike
        loglike = 0
        TNrs, TNTs = [], []
        for ct, psr in enumerate(psrs):
            TNrs.append(np.dot(Ts[ct].T, sl.cho_solve(cfs[ct], psr.residuals)))
            TNTs.append(np.dot(Ts[ct].T, sl.cho_solve(cfs[ct], Ts[ct])))
            loglike += -0.5 * (
                np.dot(psr.residuals, sl.cho_solve(cfs[ct], psr.residuals)) +
                logdets[ct])

        TNr = np.concatenate(TNrs)
        phi = sl.block_diag(*phis)

        if inc_corr:
            hd = utils.hd_orf(psrs[0].pos, psrs[1].pos)
            phi[len(phis[0]):len(phis[0]) + 40, :40] = np.diag(phigw * hd)
            phi[:40, len(phis[0]):len(phis[0]) + 40] = np.diag(phigw * hd)

        cf = sl.cho_factor(phi)
        phiinv = sl.cho_solve(cf, np.eye(phi.shape[0]))
        logdetphi = np.sum(2 * np.log(np.diag(cf[0])))
        Sigma = sl.block_diag(*TNTs) + phiinv

        cf = sl.cho_factor(Sigma)
        expval = sl.cho_solve(cf, TNr)
        logdetsigma = np.sum(2 * np.log(np.diag(cf[0])))

        loglike -= 0.5 * (logdetphi + logdetsigma)
        loglike += 0.5 * np.dot(TNr, expval)

        method = ["partition", "sparse", "cliques"]
        for mth in method:
            eloglike = pta.get_lnlikelihood(params, phiinv_method=mth)
            msg = "Incorrect like for npsr={}, phiinv={}".format(npsrs, mth)
            assert np.allclose(eloglike, loglike), msg
Example 25
    def _ecorr_test_ipta(self, method="sparse"):
        """Test of sparse/sherman-morrison ecorr signal and solve methods."""
        selection = Selection(selections.nanograv_backends)

        efac = parameter.Uniform(0.1, 5)
        ecorr = parameter.Uniform(-10, -5)
        ef = white_signals.MeasurementNoise(efac=efac)
        ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr,
                                            selection=selection,
                                            method=method)
        tm = gp_signals.TimingModel()
        s = ef + ec + tm
        m = s(self.ipsr)

        # set parameters
        efacs = [1.3]
        ecorrs = [-6.1, -6.2, -6.3, -6.4, -7.2, -8.4, -7.1, -7.9]
        params = {
            "J1713+0747_efac": efacs[0],
            "J1713+0747_ASP-L_log10_ecorr": ecorrs[0],
            "J1713+0747_ASP-S_log10_ecorr": ecorrs[1],
            "J1713+0747_GASP-8_log10_ecorr": ecorrs[2],
            "J1713+0747_GASP-L_log10_ecorr": ecorrs[3],
            "J1713+0747_GUPPI-8_log10_ecorr": ecorrs[4],
            "J1713+0747_GUPPI-L_log10_ecorr": ecorrs[5],
            "J1713+0747_PUPPI-L_log10_ecorr": ecorrs[6],
            "J1713+0747_PUPPI-S_log10_ecorr": ecorrs[7],
        }

        # get EFAC Nvec
        nvec0 = efacs[0]**2 * self.ipsr.toaerrs**2

        # get the basis
        flags = [
            "ASP-L", "ASP-S", "GASP-8", "GASP-L", "GUPPI-8", "GUPPI-L",
            "PUPPI-L", "PUPPI-S"
        ]
        bflags = self.ipsr.backend_flags
        Umats = []
        for flag in np.unique(bflags):
            if flag in flags:
                mask = bflags == flag
                Umats.append(
                    utils.create_quantization_matrix(self.ipsr.toas[mask],
                                                     nmin=2)[0])
        nepoch = sum(U.shape[1] for U in Umats)
        U = np.zeros((len(self.ipsr.toas), nepoch))
        jvec = np.zeros(nepoch)
        netot, ct = 0, 0
        for flag in np.unique(bflags):
            if flag in flags:
                mask = bflags == flag
                nn = Umats[ct].shape[1]
                U[mask, netot:nn + netot] = Umats[ct]
                jvec[netot:nn + netot] = 10**(2 * ecorrs[ct])
                netot += nn
                ct += 1

        # get covariance matrix
        wd = Woodbury(nvec0, U, jvec)

        # test
        msg = "EFAC/ECORR {} logdet incorrect.".format(method)
        N = m.get_ndiag(params)
        assert np.allclose(N.solve(self.ipsr.residuals, logdet=True)[1],
                           wd.logdet(),
                           rtol=1e-8), msg

        msg = "EFAC/ECORR {} D1 solve incorrect.".format(method)
        assert np.allclose(N.solve(self.ipsr.residuals),
                           wd.solve(self.ipsr.residuals),
                           rtol=1e-8), msg

        msg = "EFAC/ECORR {} 1D1 solve incorrect.".format(method)
        assert np.allclose(
            N.solve(self.ipsr.residuals, left_array=self.ipsr.residuals),
            np.dot(self.ipsr.residuals, wd.solve(self.ipsr.residuals)),
            rtol=1e-8,
        ), msg

        msg = "EFAC/ECORR {} 2D1 solve incorrect.".format(method)
        T = m.get_basis()
        assert np.allclose(N.solve(self.ipsr.residuals, left_array=T),
                           np.dot(T.T, wd.solve(self.ipsr.residuals)),
                           rtol=1e-8), msg

        msg = "EFAC/ECORR {} 2D2 solve incorrect.".format(method)
        assert np.allclose(N.solve(T, left_array=T),
                           np.dot(T.T, wd.solve(T)),
                           rtol=1e-8), msg
Example 26
    def _ecorr_test(self, method="sparse"):
        """Test of sparse/sherman-morrison ecorr signal and solve methods."""
        selection = Selection(selections.by_backend)

        efac = parameter.Uniform(0.1, 5)
        ecorr = parameter.Uniform(-10, -5)
        ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
        ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr,
                                            selection=selection,
                                            method=method)
        tm = gp_signals.TimingModel()
        s = ef + ec + tm
        m = s(self.psr)

        # set parameters
        efacs = [1.3, 1.4, 1.5, 1.6]
        ecorrs = [-6.1, -6.2, -6.3, -6.4]
        params = {
            "B1855+09_430_ASP_efac": efacs[0],
            "B1855+09_430_PUPPI_efac": efacs[1],
            "B1855+09_L-wide_ASP_efac": efacs[2],
            "B1855+09_L-wide_PUPPI_efac": efacs[3],
            "B1855+09_430_ASP_log10_ecorr": ecorrs[0],
            "B1855+09_430_PUPPI_log10_ecorr": ecorrs[1],
            "B1855+09_L-wide_ASP_log10_ecorr": ecorrs[2],
            "B1855+09_L-wide_PUPPI_log10_ecorr": ecorrs[3],
        }

        # get EFAC Nvec
        flags = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
        nvec0 = np.zeros_like(self.psr.toas)
        for ct, flag in enumerate(np.unique(flags)):
            ind = flag == self.psr.backend_flags
            nvec0[ind] = efacs[ct]**2 * self.psr.toaerrs[ind]**2

        # get the basis
        bflags = self.psr.backend_flags
        Umats = []
        for flag in np.unique(bflags):
            mask = bflags == flag
            Umats.append(
                utils.create_quantization_matrix(self.psr.toas[mask],
                                                 nmin=2)[0])
        nepoch = sum(U.shape[1] for U in Umats)
        U = np.zeros((len(self.psr.toas), nepoch))
        jvec = np.zeros(nepoch)
        netot = 0
        for ct, flag in enumerate(np.unique(bflags)):
            mask = bflags == flag
            nn = Umats[ct].shape[1]
            U[mask, netot:nn + netot] = Umats[ct]
            jvec[netot:nn + netot] = 10**(2 * ecorrs[ct])
            netot += nn

        # get covariance matrix
        wd = Woodbury(nvec0, U, jvec)

        # test
        msg = "EFAC/ECORR {} logdet incorrect.".format(method)
        N = m.get_ndiag(params)
        assert np.allclose(N.solve(self.psr.residuals, logdet=True)[1],
                           wd.logdet(),
                           rtol=1e-10), msg

        msg = "EFAC/ECORR {} D1 solve incorrect.".format(method)
        assert np.allclose(N.solve(self.psr.residuals),
                           wd.solve(self.psr.residuals),
                           rtol=1e-10), msg

        msg = "EFAC/ECORR {} 1D1 solve incorrect.".format(method)
        assert np.allclose(
            N.solve(self.psr.residuals, left_array=self.psr.residuals),
            np.dot(self.psr.residuals, wd.solve(self.psr.residuals)),
            rtol=1e-10,
        ), msg

        msg = "EFAC/ECORR {} 2D1 solve incorrect.".format(method)
        T = m.get_basis()
        assert np.allclose(N.solve(self.psr.residuals, left_array=T),
                           np.dot(T.T, wd.solve(self.psr.residuals)),
                           rtol=1e-10), msg

        msg = "EFAC/ECORR {} 2D2 solve incorrect.".format(method)
        assert np.allclose(N.solve(T, left_array=T),
                           np.dot(T.T, wd.solve(T)),
                           rtol=1e-10), msg
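
These _ecorr_test helpers are presumably driven by thin per-method wrappers living in the same test class; a sketch along these lines (the wrapper names are illustrative, and the method strings assume the sparse and Sherman-Morrison options of EcorrKernelNoise named in the docstrings):

    def test_ecorr_sparse(self):
        # run the backend-ECORR checks with the sparse kernel solver
        self._ecorr_test(method="sparse")

    def test_ecorr_sherman(self):
        # run the backend-ECORR checks with the Sherman-Morrison solver
        self._ecorr_test(method="sherman-morrison")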
Example 27
    def test_add_efac_equad_backend(self):
        """Test that addition of efac-backend and equad-backend signal returns
        correct covariance.
        """
        selection = Selection(selections.by_backend)

        efac = parameter.Uniform(0.1, 5)
        equad = parameter.Uniform(-10, -5)
        ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
        eq = white_signals.EquadNoise(log10_equad=equad, selection=selection)
        s = ef + eq
        m = s(self.psr)

        # set parameters
        efacs = [1.3, 1.4, 1.5, 1.6]
        equads = [-6.1, -6.2, -6.3, -6.4]
        params = {
            "B1855+09_430_ASP_efac": efacs[0],
            "B1855+09_430_PUPPI_efac": efacs[1],
            "B1855+09_L-wide_ASP_efac": efacs[2],
            "B1855+09_L-wide_PUPPI_efac": efacs[3],
            "B1855+09_430_ASP_log10_equad": equads[0],
            "B1855+09_430_PUPPI_log10_equad": equads[1],
            "B1855+09_L-wide_ASP_log10_equad": equads[2],
            "B1855+09_L-wide_PUPPI_log10_equad": equads[3],
        }

        # correct value
        flags = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
        nvec0 = np.zeros_like(self.psr.toas)
        for ct, flag in enumerate(np.unique(flags)):
            ind = flag == self.psr.backend_flags
            nvec0[ind] = efacs[ct]**2 * self.psr.toaerrs[ind]**2
            nvec0[ind] += 10**(2 * equads[ct]) * np.ones(np.sum(ind))

        logdet = np.sum(np.log(nvec0))

        # test
        msg = "EFAC/EQUAD covariance incorrect."
        assert np.all(m.get_ndiag(params) == nvec0), msg

        msg = "EFAC/EQUAD logdet incorrect."
        N = m.get_ndiag(params)
        assert np.allclose(N.solve(self.psr.residuals, logdet=True)[1],
                           logdet,
                           rtol=1e-10), msg

        msg = "EFAC/EQUAD D1 solve incorrect."
        assert np.allclose(N.solve(self.psr.residuals),
                           self.psr.residuals / nvec0,
                           rtol=1e-10), msg

        msg = "EFAC/EQUAD 1D1 solve incorrect."
        assert np.allclose(
            N.solve(self.psr.residuals, left_array=self.psr.residuals),
            np.dot(self.psr.residuals / nvec0, self.psr.residuals),
            rtol=1e-10,
        ), msg

        msg = "EFAC/EQUAD 2D1 solve incorrect."
        T = self.psr.Mmat
        assert np.allclose(N.solve(self.psr.residuals, left_array=T),
                           np.dot(T.T, self.psr.residuals / nvec0),
                           rtol=1e-10), msg

        msg = "EFAC/EQUAD 2D2 solve incorrect."
        assert np.allclose(N.solve(T, left_array=T),
                           np.dot(T.T, T / nvec0[:, None]),
                           rtol=1e-10), msg
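
The expected diagonal built above is the standard EFAC/EQUAD white-noise variance; as a compact reference (the function name is illustrative):

import numpy as np

def efac_equad_variance(toaerrs, efac, log10_equad):
    # EFAC rescales the TOA uncertainties; EQUAD adds white noise in quadrature
    return efac**2 * toaerrs**2 + 10**(2 * log10_equad)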
Example 28
    def test_common_red_noise(self):
        """Test of a coefficient-based common GP."""
        pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12),
                            gamma=parameter.Uniform(1, 7))

        ef = white_signals.MeasurementNoise(
            efac=parameter.Uniform(0.1, 5.0))

        Tspan = max(self.psr.toas.max(), self.psr2.toas.max()) - min(
            self.psr.toas.min(), self.psr2.toas.min())

        rn = gp_signals.FourierBasisCommonGP(spectrum=pl,
                                             orf=utils.hd_orf(),
                                             components=20,
                                             Tspan=Tspan)

        model = ef + rn

        rnc = gp_signals.FourierBasisCommonGP(spectrum=pl,
                                              orf=utils.hd_orf(),
                                              components=20,
                                              Tspan=Tspan,
                                              coefficients=True)

        modelc = ef + rnc

        pta = signal_base.PTA([model(self.psr), model(self.psr2)])
        ptac = signal_base.PTA([modelc(self.psr), modelc(self.psr2)])

        params = {
            "B1855+09_efac": 1.0,
            "B1937+21_efac": 1.0,
            "common_fourier_gamma": 5,
            "common_fourier_log10_A": -15,
        }

        # get GP delays in two different ways

        cf, cf2 = np.random.randn(40), np.random.randn(40)

        bs = pta.get_basis(params)
        da = [np.dot(bs[0], cf), np.dot(bs[1], cf2)]

        params.update({
            "B1855+09_common_fourier_coefficients": cf,
            "B1937+21_common_fourier_coefficients": cf2
        })

        db = ptac.get_delay(params)

        msg = "Implicit and explicit GP delays are different."
        assert np.allclose(da[0], db[0]), msg
        assert np.allclose(da[1], db[1]), msg

        cpar = [p for p in ptac.params if "coefficients" in p.name]

        def shouldfail():
            return cpar[0].get_logpdf(params)

        self.assertRaises(NotImplementedError, shouldfail)
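
The common process above uses utils.hd_orf(); for context, the Hellings-Downs overlap reduction function it encodes is, in the usual convention, the curve below (an illustrative standalone version, not the library routine itself):

import numpy as np

def hellings_downs(zeta):
    # zeta: angular separation between two distinct pulsars, in radians
    # (the zero-separation auto-correlation term is handled separately)
    x = (1.0 - np.cos(zeta)) / 2.0
    return 1.5 * x * np.log(x) - 0.25 * x + 0.5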
Example 29
    def test_pta_phiinv_methods(self):
        ef = white_signals.MeasurementNoise(efac=parameter.Uniform(0.1, 5))

        span = np.max(self.psrs[0].toas) - np.min(self.psrs[0].toas)

        pl = utils.powerlaw(log10_A=parameter.Uniform(-16, -13),
                            gamma=parameter.Uniform(1, 7))

        orf = utils.hd_orf()
        vrf = utils.dipole_orf()

        rn = gp_signals.FourierBasisGP(spectrum=pl, components=30, Tspan=span)

        hdrn = gp_signals.FourierBasisCommonGP(spectrum=pl,
                                               orf=orf,
                                               components=20,
                                               Tspan=span,
                                               name='gw')

        vrn = gp_signals.FourierBasisCommonGP(spectrum=pl,
                                              orf=vrf,
                                              components=20,
                                              Tspan=span,
                                              name='vec')

        vrn2 = gp_signals.FourierBasisCommonGP(spectrum=pl,
                                               orf=vrf,
                                               components=20,
                                               Tspan=span * 1.234,
                                               name='vec2')

        # two common processes, sharing basis partially

        model = ef + rn + hdrn  # + vrn

        pta = signal_base.PTA([model(psr) for psr in self.psrs])

        ps = parameter.sample(pta.params)

        phi = pta.get_phi(ps)
        ldp = np.linalg.slogdet(phi)[1]

        inv1, ld1 = pta.get_phiinv(ps, method='cliques', logdet=True)
        inv2, ld2 = pta.get_phiinv(ps, method='partition', logdet=True)
        inv3, ld3 = pta.get_phiinv(ps, method='sparse', logdet=True)
        if not isinstance(inv3, np.ndarray):
            inv3 = inv3.toarray()

        for ld in [ld1, ld2, ld3]:
            msg = "Wrong phi log determinant for two common processes"
            assert np.allclose(ldp, ld, rtol=1e-15, atol=1e-6), msg

        for inv in [inv1, inv2, inv3]:
            msg = "Wrong phi inverse for two common processes"
            assert np.allclose(np.dot(phi, inv),
                               np.eye(phi.shape[0]),
                               rtol=1e-15,
                               atol=1e-6), msg

        for inva, invb in itertools.combinations([inv1, inv2, inv3], 2):
            assert np.allclose(inva, invb)

        # two common processes, no sharing basis

        model = ef + rn + vrn2

        pta = signal_base.PTA([model(psr) for psr in self.psrs])

        ps = parameter.sample(pta.params)

        phi = pta.get_phi(ps)
        ldp = np.linalg.slogdet(phi)[1]

        inv1, ld1 = pta.get_phiinv(ps, method='cliques', logdet=True)
        inv2, ld2 = pta.get_phiinv(ps, method='partition', logdet=True)
        inv3, ld3 = pta.get_phiinv(ps, method='sparse', logdet=True)
        if not isinstance(inv3, np.ndarray):
            inv3 = inv3.toarray()

        for ld in [ld1, ld2, ld3]:
            msg = "Wrong phi log determinant for two common processes"
            assert np.allclose(ldp, ld, rtol=1e-15, atol=1e-6), msg

        for inv in [inv1, inv2, inv3]:
            msg = "Wrong phi inverse for two processes"
            assert np.allclose(np.dot(phi, inv),
                               np.eye(phi.shape[0]),
                               rtol=1e-15,
                               atol=1e-6), msg

        for inva, invb in itertools.combinations([inv1, inv2, inv3], 2):
            assert np.allclose(inva, invb)

        # three common processes, sharing basis partially

        model = ef + rn + hdrn + vrn

        pta = signal_base.PTA([model(psr) for psr in self.psrs])

        ps = parameter.sample(pta.params)

        phi = pta.get_phi(ps)
        ldp = np.linalg.slogdet(phi)[1]

        inv1, ld1 = pta.get_phiinv(ps, method='cliques', logdet=True)
        inv2, ld2 = pta.get_phiinv(ps, method='partition', logdet=True)
        inv3, ld3 = pta.get_phiinv(ps, method='sparse', logdet=True)
        if not isinstance(inv3, np.ndarray):
            inv3 = inv3.toarray()

        for ld in [ld1, ld3]:
            msg = "Wrong phi log determinant for two common processes"
            assert np.allclose(ldp, ld, rtol=1e-15, atol=1e-6), msg

        for inv in [inv1, inv3]:
            msg = "Wrong phi inverse for three common processes"
            assert np.allclose(np.dot(phi, inv),
                               np.eye(phi.shape[0]),
                               rtol=1e-15,
                               atol=1e-6), msg

        for inva, invb in itertools.combinations([inv1, inv3], 2):
            assert np.allclose(inva, invb)

        # four common processes, three sharing basis partially

        model = ef + rn + hdrn + vrn + vrn2

        pta = signal_base.PTA([model(psr) for psr in self.psrs])

        ps = parameter.sample(pta.params)

        phi = pta.get_phi(ps)
        ldp = np.linalg.slogdet(phi)[1]

        inv1, ld1 = pta.get_phiinv(ps, method='cliques', logdet=True)
        inv2, ld2 = pta.get_phiinv(ps, method='partition', logdet=True)
        inv3, ld3 = pta.get_phiinv(ps, method='sparse', logdet=True)
        if not isinstance(inv3, np.ndarray):
            inv3 = inv3.toarray()

        for ld in [ld1, ld3]:
            msg = "Wrong phi log determinant for two common processes"
            assert np.allclose(ldp, ld, rtol=1e-15, atol=1e-6), msg

        for inv in [inv1, inv3]:
            msg = "Wrong phi inverse for four processes"
            assert np.allclose(np.dot(phi, inv),
                               np.eye(phi.shape[0]),
                               rtol=1e-15,
                               atol=1e-6), msg

        for inva, invb in itertools.combinations([inv1, inv3], 2):
            assert np.allclose(inva, invb)
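
For context, the phi inverse and its log determinant checked here enter the marginalized PTA likelihood in the standard Gaussian-process form. A sketch in the usual notation, with residuals r, white-noise covariance N, combined basis T, and prior covariance \Phi:

-2\ln\mathcal{L} = r^{T} C^{-1} r + \ln\det C + \mathrm{const},
\qquad C = N + T \Phi T^{T},

C^{-1} = N^{-1} - N^{-1} T \left(\Phi^{-1} + T^{T} N^{-1} T\right)^{-1} T^{T} N^{-1},

\ln\det C = \ln\det N + \ln\det \Phi + \ln\det\!\left(\Phi^{-1} + T^{T} N^{-1} T\right).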