def test_parameterized_orf(self):
    T1 = 3.16e8
    pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
    orf = hd_orf_generic(a=parameter.Uniform(0, 5), b=parameter.Uniform(0, 5), c=parameter.Uniform(0, 5))
    rn = gp_signals.FourierBasisGP(spectrum=pl, Tspan=T1, components=30)
    crn = gp_signals.FourierBasisCommonGP(spectrum=pl, orf=orf, components=30, name="gw", Tspan=T1)

    model = rn + crn
    pta = model(self.psrs[0]) + model(self.psrs[1])

    lA1, gamma1 = -13, 1e-15
    lA2, gamma2 = -13.3, 1e-15
    lAc, gammac = -13.1, 1e-15
    a, b, c = 1.9, 0.4, 0.23

    params = {
        "gw_log10_A": lAc,
        "gw_gamma": gammac,
        "gw_a": a,
        "gw_b": b,
        "gw_c": c,
        "B1855+09_red_noise_log10_A": lA1,
        "B1855+09_red_noise_gamma": gamma1,
        "J1909-3744_red_noise_log10_A": lA2,
        "J1909-3744_red_noise_gamma": gamma2,
    }

    phi = pta.get_phi(params)
    phiinv = pta.get_phiinv(params)

    F1, f1 = utils.createfourierdesignmatrix_red(self.psrs[0].toas, nmodes=30, Tspan=T1)
    F2, f2 = utils.createfourierdesignmatrix_red(self.psrs[1].toas, nmodes=30, Tspan=T1)

    msg = "F matrix incorrect"
    assert np.allclose(pta.get_basis(params)[0], F1, rtol=1e-10), msg
    assert np.allclose(pta.get_basis(params)[1], F2, rtol=1e-10), msg

    nftot = 120
    phidiag = np.zeros(nftot)
    phit = np.zeros((nftot, nftot))

    phidiag[:60] = utils.powerlaw(f1, lA1, gamma1)
    phidiag[:60] += utils.powerlaw(f1, lAc, gammac)
    phidiag[60:] = utils.powerlaw(f2, lA2, gamma2)
    phidiag[60:] += utils.powerlaw(f2, lAc, gammac)
    phit[np.diag_indices(nftot)] = phidiag

    orf = hd_orf_generic(self.psrs[0].pos, self.psrs[1].pos, a=a, b=b, c=c)
    spec = utils.powerlaw(f1, log10_A=lAc, gamma=gammac)
    phit[:60, 60:] = np.diag(orf * spec)
    phit[60:, :60] = phit[:60, 60:]

    msg = "{} {}".format(np.diag(phi), np.diag(phit))
    assert np.allclose(phi, phit, rtol=1e-15, atol=1e-17), msg

    msg = "PTA Phi inverse is incorrect {}.".format(params)
    assert np.allclose(phiinv, np.linalg.inv(phit), rtol=1e-15, atol=1e-17), msg
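# NOTE: `hd_orf_generic` is defined elsewhere in this test module.  The sketch
# below is a hedged reconstruction consistent with how it is called above: a
# Hellings-Downs-style overlap reduction function with free coefficients a, b,
# and c.  Treat the exact default values as an assumption; it relies on the
# module-level imports used throughout this file (numpy as np, signal_base).
@signal_base.function
def hd_orf_generic(pos1, pos2, a=1.5, b=-0.25, c=0.5):
    """Overlap reduction function with free coefficients a, b, c."""
    if np.all(pos1 == pos2):
        return 1
    else:
        omc2 = (1 - np.dot(pos1, pos2)) / 2
        return a * omc2 * np.log(omc2) + b * omc2 + c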
def test_pta_phiinv_methods(self):
    ef = white_signals.MeasurementNoise(efac=parameter.Uniform(0.1, 5))

    span = np.max(self.psrs[0].toas) - np.min(self.psrs[0].toas)

    pl = utils.powerlaw(log10_A=parameter.Uniform(-16, -13), gamma=parameter.Uniform(1, 7))

    orf = utils.hd_orf()
    vrf = utils.dipole_orf()

    rn = gp_signals.FourierBasisGP(spectrum=pl, components=30, Tspan=span)

    hdrn = gp_signals.FourierBasisCommonGP(spectrum=pl, orf=orf, components=20, Tspan=span, name="gw")
    vrn = gp_signals.FourierBasisCommonGP(spectrum=pl, orf=vrf, components=20, Tspan=span, name="vec")
    vrn2 = gp_signals.FourierBasisCommonGP(spectrum=pl, orf=vrf, components=20, Tspan=span * 1.234, name="vec2")

    # two common processes, sharing basis partially
    model = ef + rn + hdrn  # + vrn
    pta = signal_base.PTA([model(psr) for psr in self.psrs])

    ps = parameter.sample(pta.params)

    phi = pta.get_phi(ps)
    ldp = np.linalg.slogdet(phi)[1]

    inv1, ld1 = pta.get_phiinv(ps, method="cliques", logdet=True)
    inv2, ld2 = pta.get_phiinv(ps, method="partition", logdet=True)
    inv3, ld3 = pta.get_phiinv(ps, method="sparse", logdet=True)
    if not isinstance(inv3, np.ndarray):
        inv3 = inv3.toarray()

    for ld in [ld1, ld2, ld3]:
        msg = "Wrong phi log determinant for two common processes"
        assert np.allclose(ldp, ld, rtol=1e-15, atol=1e-6), msg

    for inv in [inv1, inv2, inv3]:
        msg = "Wrong phi inverse for two common processes"
        assert np.allclose(np.dot(phi, inv), np.eye(phi.shape[0]), rtol=1e-15, atol=1e-6), msg

    for inva, invb in itertools.combinations([inv1, inv2, inv3], 2):
        assert np.allclose(inva, invb)

    # two common processes, no sharing basis
    model = ef + rn + vrn2
    pta = signal_base.PTA([model(psr) for psr in self.psrs])

    ps = parameter.sample(pta.params)

    phi = pta.get_phi(ps)
    ldp = np.linalg.slogdet(phi)[1]

    inv1, ld1 = pta.get_phiinv(ps, method="cliques", logdet=True)
    inv2, ld2 = pta.get_phiinv(ps, method="partition", logdet=True)
    inv3, ld3 = pta.get_phiinv(ps, method="sparse", logdet=True)
    if not isinstance(inv3, np.ndarray):
        inv3 = inv3.toarray()

    for ld in [ld1, ld2, ld3]:
        msg = "Wrong phi log determinant for two common processes"
        assert np.allclose(ldp, ld, rtol=1e-15, atol=1e-6), msg

    for inv in [inv1, inv2, inv3]:
        msg = "Wrong phi inverse for two common processes"
        assert np.allclose(np.dot(phi, inv), np.eye(phi.shape[0]), rtol=1e-15, atol=1e-6), msg

    for inva, invb in itertools.combinations([inv1, inv2, inv3], 2):
        assert np.allclose(inva, invb)

    # three common processes, sharing basis partially
    model = ef + rn + hdrn + vrn
    pta = signal_base.PTA([model(psr) for psr in self.psrs])

    ps = parameter.sample(pta.params)

    phi = pta.get_phi(ps)
    ldp = np.linalg.slogdet(phi)[1]

    inv1, ld1 = pta.get_phiinv(ps, method="cliques", logdet=True)
    inv2, ld2 = pta.get_phiinv(ps, method="partition", logdet=True)
    inv3, ld3 = pta.get_phiinv(ps, method="sparse", logdet=True)
    if not isinstance(inv3, np.ndarray):
        inv3 = inv3.toarray()

    for ld in [ld1, ld3]:
        msg = "Wrong phi log determinant for three common processes"
        assert np.allclose(ldp, ld, rtol=1e-15, atol=1e-6), msg

    for inv in [inv1, inv3]:
        msg = "Wrong phi inverse for three common processes"
        assert np.allclose(np.dot(phi, inv), np.eye(phi.shape[0]), rtol=1e-15, atol=1e-6), msg

    for inva, invb in itertools.combinations([inv1, inv3], 2):
        assert np.allclose(inva, invb)

    # four common processes, three sharing basis partially
    model = ef + rn + hdrn + vrn + vrn2
    pta = signal_base.PTA([model(psr) for psr in self.psrs])

    ps = parameter.sample(pta.params)

    phi = pta.get_phi(ps)
    ldp = np.linalg.slogdet(phi)[1]

    inv1, ld1 = pta.get_phiinv(ps, method="cliques", logdet=True)
    inv2, ld2 = pta.get_phiinv(ps, method="partition", logdet=True)
    inv3, ld3 = pta.get_phiinv(ps, method="sparse", logdet=True)
    if not isinstance(inv3, np.ndarray):
        inv3 = inv3.toarray()

    for ld in [ld1, ld3]:
        msg = "Wrong phi log determinant for four common processes"
        assert np.allclose(ldp, ld, rtol=1e-15, atol=1e-6), msg

    for inv in [inv1, inv3]:
        msg = "Wrong phi inverse for four common processes"
        assert np.allclose(np.dot(phi, inv), np.eye(phi.shape[0]), rtol=1e-15, atol=1e-6), msg

    for inva, invb in itertools.combinations([inv1, inv3], 2):
        assert np.allclose(inva, invb)
def test_pta_phi(self):
    T1, T2, T3 = 3.16e8, 3.16e8, 3.16e8
    nf1, nf2, nf3 = 2, 2, 1

    pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
    orf = utils.hd_orf()
    rn = gp_signals.FourierBasisGP(spectrum=pl, components=nf1, Tspan=T1)
    crn = gp_signals.FourierBasisCommonGP(spectrum=pl, orf=orf, components=1, name="gw", Tspan=T3)

    model = rn + crn
    pta = model(self.psrs[0]) + model(self.psrs[1])

    lA1, gamma1 = -13, 1e-15
    lA2, gamma2 = -13.3, 1e-15
    lAc, gammac = -13.1, 1e-15

    params = {
        "gw_log10_A": lAc,
        "gw_gamma": gammac,
        "B1855+09_red_noise_log10_A": lA1,
        "B1855+09_red_noise_gamma": gamma1,
        "J1909-3744_red_noise_log10_A": lA2,
        "J1909-3744_red_noise_gamma": gamma2,
    }

    phi = pta.get_phi(params)
    phiinv = pta.get_phiinv(params)

    T1, T2, T3 = 3.16e8, 3.16e8, 3.16e8
    nf1, nf2, nf3 = 2, 2, 1

    F1, f1 = utils.createfourierdesignmatrix_red(self.psrs[0].toas, nf1, Tspan=T1)
    F2, f2 = utils.createfourierdesignmatrix_red(self.psrs[1].toas, nf2, Tspan=T2)
    F1c, fc = utils.createfourierdesignmatrix_red(self.psrs[0].toas, nf3, Tspan=T3)
    F2c, fc = utils.createfourierdesignmatrix_red(self.psrs[1].toas, nf3, Tspan=T3)

    nftot = 2 * 2 * nf1
    phidiag = np.zeros(nftot)
    phit = np.zeros((nftot, nftot))

    phidiag[:4] = utils.powerlaw(f1, lA1, gamma1)
    phidiag[:2] += utils.powerlaw(fc, lAc, gammac)
    phidiag[4:] = utils.powerlaw(f2, lA2, gamma2)
    phidiag[4:6] += utils.powerlaw(fc, lAc, gammac)
    phit[np.diag_indices(nftot)] = phidiag
    phit[:2, 4:6] = np.diag(hd_powerlaw(fc, self.psrs[0].pos, self.psrs[1].pos, lAc, gammac))
    phit[4:6, :2] = np.diag(hd_powerlaw(fc, self.psrs[0].pos, self.psrs[1].pos, lAc, gammac))

    msg = "{} {}".format(np.diag(phi), np.diag(phit))
    assert np.allclose(phi, phit, rtol=1e-15, atol=1e-17), msg

    msg = "PTA Phi inverse is incorrect {}.".format(params)
    assert np.allclose(phiinv, np.linalg.inv(phit), rtol=1e-15, atol=1e-17), msg
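# NOTE: `hd_powerlaw` is also defined elsewhere in this test module.  A minimal
# sketch consistent with how it is called above (a power-law cross-spectrum
# scaled by the Hellings-Downs ORF); treat this as an assumed, not canonical,
# definition.  It uses the enterprise `utils` and `signal_base` imports already
# present in this file.
@signal_base.function
def hd_powerlaw(f, pos1, pos2, log10_A=-15, gamma=4.3):
    """Power-law cross-spectrum weighted by the Hellings-Downs ORF."""
    return utils.powerlaw(f, log10_A, gamma) * utils.hd_orf(pos1, pos2)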
def test_wideband(self):
    ms = white_signals.MeasurementNoise(selection=Selection(selections.by_backend))

    dm = gp_signals.WidebandTimingModel(
        dmefac=parameter.Uniform(0.9, 1.1),
        dmefac_selection=Selection(selections.by_backend),
        log10_dmequad=parameter.Uniform(-7.0, 0.0),
        log10_dmequad_selection=Selection(selections.by_backend),
        dmjump=parameter.Normal(0, 1),
        dmjump_selection=Selection(selections.by_frontend),
        dmjump_ref=None,
        name="wideband_timing_model",
    )

    model = ms + dm
    pta = signal_base.PTA([model(self.psr)])

    ps = parameter.sample(pta.params)
    pta.get_lnlikelihood(ps)

    dmtiming = pta.pulsarmodels[0].signals[1]

    msg = "DMEFAC masks do not cover the data."
    assert np.all(sum(dmtiming._dmefac_masks) == 1), msg

    msg = "DMEQUAD masks do not cover the data."
    assert np.all(sum(dmtiming._log10_dmequad_masks) == 1), msg

    msg = "DMJUMP masks do not cover the data."
    assert np.all(sum(dmtiming._dmjump_masks) == 1), msg

    # start with zero DMEFAC, DMEQUAD, and DMJUMP
    # p0 = {par.name: (1 if "dmefac" in par.name else 0) for par in dmtiming.params}
    p0 = {}
    for par in dmtiming.params:
        if "dmefac" in par.name:
            p0[par.name] = 1.0
        elif "dmequad" in par.name:
            p0[par.name] = -1e40  # np.inf breaks the masking trick
        else:
            p0[par.name] = 0.0

    pta.get_lnlikelihood(params=p0)

    phi0 = dmtiming.get_phi(params=p0)
    dl0 = dmtiming.get_delay(params=p0)

    dm_flags, dme_flags = np.array(self.psr.flags["pp_dm"], "d"), np.array(self.psr.flags["pp_dme"], "d")

    delays = np.zeros_like(self.psr.toas)
    check = 0

    for index, par in enumerate(self.psr.fitpars):
        if "DMX" not in par:
            msg = "Problem with unbound timing parameters"
            assert phi0[index] == 1e40, msg
        else:
            dmx = self.psr.dmx[par]
            which = (dmx["DMXR1"] <= (self.psr.stoas / 86400)) & ((self.psr.stoas / 86400) < dmx["DMXR2"])
            check += which

            avgdm = np.sum(dm_flags[which] / dme_flags[which] ** 2) / np.sum(1.0 / dme_flags[which] ** 2)
            vardm = 1.0 / np.sum(1.0 / dme_flags[which] ** 2)

            msg = "Priors do not match"
            assert np.allclose(vardm, phi0[index]), msg

            delays[which] = (avgdm - self.psr.dm - dmx["DMX"]) / (2.41e-4 * self.psr.freqs[which] ** 2)

    msg = "Not all TOAs are covered by DMX"
    assert np.all(check == 1), msg

    msg = "Delays do not match"
    assert np.allclose(dl0, delays), msg

    # sample DMEFACs and DMEQUADs randomly
    # p1 = {par.name: (parameter.sample(par)[par.name] if "dmefac" in par.name else 0) for par in dmtiming.params}
    p1 = {
        par.name: (parameter.sample(par)[par.name] if "dmefac" in par.name or "dmequad" in par.name else 0)
        for par in dmtiming.params
    }

    pta.get_lnlikelihood(params=p1)

    phi1 = dmtiming.get_phi(params=p1)
    dl1 = dmtiming.get_delay(params=p1)

    sel = Selection(selections.by_backend)(self.psr)

    msg = "Problem making selection"
    assert np.all(sum(m for m in sel.masks.values()) == 1), msg

    dme_flags_var = dme_flags.copy()
    for key, mask in sel.masks.items():
        dmefac = p1["J1832-0836_" + key + "_dmefac"]
        log10_dmequad = p1["J1832-0836_" + key + "_log10_dmequad"]
        dmequad = 10 ** log10_dmequad

        dme_flags_var[mask] *= dmefac
        dme_flags_var[mask] = (dme_flags_var[mask] ** 2 + dmequad ** 2) ** 0.5

    for index, par in enumerate(self.psr.fitpars):
        if "DMX" not in par:
            msg = "Problem with unbound timing parameters"
            assert phi1[index] == 1e40, msg
        else:
            dmx = self.psr.dmx[par]
            which = (dmx["DMXR1"] <= (self.psr.stoas / 86400)) & ((self.psr.stoas / 86400) < dmx["DMXR2"])

            avgdm = np.sum(dm_flags[which] / dme_flags_var[which] ** 2) / np.sum(1.0 / dme_flags_var[which] ** 2)
            vardm = 1.0 / np.sum(1.0 / dme_flags_var[which] ** 2)

            msg = "Priors do not match"
            assert np.allclose(vardm, phi1[index]), msg

            delays[which] = (avgdm - self.psr.dm - dmx["DMX"]) / (2.41e-4 * self.psr.freqs[which] ** 2)

    msg = "Delays do not match"
    assert np.allclose(dl1, delays), msg
def WidebandTimingModel(
    dmefac=parameter.Uniform(pmin=0.1, pmax=10.0),
    log10_dmequad=parameter.Uniform(pmin=-7.0, pmax=0.0),
    dmjump=parameter.Uniform(pmin=-0.01, pmax=0.01),
    dmefac_selection=Selection(selections.no_selection),
    log10_dmequad_selection=Selection(selections.no_selection),
    dmjump_selection=Selection(selections.no_selection),
    dmjump_ref=None,
    name="wideband_timing_model",
):
    """Class factory for marginalized linear timing model signals that
    take wideband TOAs and DMs. Currently assumes DMX for the DM model."""

    basis = utils.unnormed_tm_basis()  # will need to normalize phi otherwise
    prior = utils.tm_prior()  # standard
    BaseClass = BasisGP(prior, basis, coefficients=False, name=name)

    class WidebandTimingModel(BaseClass):
        signal_type = "basis"
        signal_name = "wideband timing model"
        signal_id = name

        basis_combine = False  # should never need to be True

        def __init__(self, psr):
            super(WidebandTimingModel, self).__init__(psr)
            self.name = self.psrname + "_" + self.signal_id

            # make selection for DMEFACs
            dmefac_select = dmefac_selection(psr)
            self._dmefac_keys = list(sorted(dmefac_select.masks.keys()))
            self._dmefac_masks = [dmefac_select.masks[key] for key in self._dmefac_keys]

            # make selection for DMEQUADs
            log10_dmequad_select = log10_dmequad_selection(psr)
            self._log10_dmequad_keys = list(sorted(log10_dmequad_select.masks.keys()))
            self._log10_dmequad_masks = [log10_dmequad_select.masks[key] for key in self._log10_dmequad_keys]

            # make selection for DMJUMPs
            dmjump_select = dmjump_selection(psr)
            self._dmjump_keys = list(sorted(dmjump_select.masks.keys()))
            self._dmjump_masks = [dmjump_select.masks[key] for key in self._dmjump_keys]

            if self._dmjump_keys == [""] and dmjump is not None:
                raise ValueError("WidebandTimingModel: can only do DMJUMP with more than one selection.")

            # collect parameters
            self._params = {}

            self._dmefacs = []
            for key in self._dmefac_keys:
                pname = "_".join([n for n in [psr.name, key, "dmefac"] if n])
                param = dmefac(pname)

                self._dmefacs.append(param)
                self._params[param.name] = param

            self._log10_dmequads = []
            for key in self._log10_dmequad_keys:
                pname = "_".join([n for n in [psr.name, key, "log10_dmequad"] if n])
                param = log10_dmequad(pname)

                self._log10_dmequads.append(param)
                self._params[param.name] = param

            self._dmjumps = []
            if dmjump is not None:
                for key in self._dmjump_keys:
                    pname = "_".join([n for n in [psr.name, key, "dmjump"] if n])

                    if dmjump_ref is not None:
                        if pname == psr.name + "_" + dmjump_ref + "_dmjump":
                            fixed_dmjump = parameter.Constant(val=0.0)
                            param = fixed_dmjump(pname)
                        else:
                            param = dmjump(pname)
                    else:
                        param = dmjump(pname)

                    self._dmjumps.append(param)
                    self._params[param.name] = param

            # copy psr quantities
            self._ntoas = len(psr.toas)
            self._npars = len(psr.fitpars)

            self._freqs = psr.freqs

            # collect DMX information (will be used to make phi and delay)
            self._dmpar = psr.dm
            self._dm = np.array(psr.flags["pp_dm"], "d")
            self._dmerr = np.array(psr.flags["pp_dme"], "d")

            check = np.zeros_like(psr.toas, "i")

            # assign TOAs to DMX bins
            self._dmx, self._dmindex, self._dmwhich = [], [], []
            for index, key in enumerate(sorted(psr.dmx)):
                dmx = psr.dmx[key]

                if not dmx["fit"]:
                    raise ValueError("WidebandTimingModel: all DMX parameters must be estimated.")

                self._dmx.append(dmx["DMX"])
                self._dmindex.append(psr.fitpars.index(key))
                self._dmwhich.append((dmx["DMXR1"] <= psr.stoas / 86400) & (psr.stoas / 86400 < dmx["DMXR2"]))

                check += self._dmwhich[-1]

            if np.sum(check) != self._ntoas:
                raise ValueError("WidebandTimingModel: cannot account for all TOAs in DMX intervals.")

            if "DM" in psr.fitpars:
                raise ValueError("WidebandTimingModel: DM must not be estimated.")

            self._ndmx = len(self._dmx)

        @property
        def delay_params(self):
            # cache parameters are all DMEFACs, DMEQUADs, and DMJUMPs
            return (
                [p.name for p in self._dmefacs]
                + [p.name for p in self._log10_dmequads]
                + [p.name for p in self._dmjumps]
            )

        @signal_base.cache_call(["delay_params"])
        def get_phi(self, params):
            """Return wideband timing-model prior."""

            # get DMEFAC- and DMEQUAD-adjusted DMX errors
            dme = self.get_dme(params)

            # initialize the timing-model "infinite" prior
            phi = KernelMatrix(1e40 * np.ones(self._npars, "d"))

            # fill the DMX slots with weighted errors
            for index, which in zip(self._dmindex, self._dmwhich):
                phi.set(1.0 / np.sum(1.0 / dme[which] ** 2), index)

            return phi

        def get_phiinv(self, params):
            """Return inverse prior (using KernelMatrix inv)."""
            return self.get_phi(params).inv()

        @signal_base.cache_call(["delay_params"])
        def get_delay(self, params):
            """Return the weighted-mean DM correction that applies for each residual.
            (Will be the same across each DM bin, before measurement-frequency weighting.)"""

            dm_delay = np.zeros(self._ntoas, "d")

            avg_dm = self.get_mean_dm(params)

            for dmx, which in zip(self._dmx, self._dmwhich):
                dm_delay[which] = avg_dm[which] - (self._dmpar + dmx)

            return dm_delay / (2.41e-4 * self._freqs ** 2)

        @signal_base.cache_call(["delay_params"])
        def get_dm(self, params):
            """Return DMJUMP-adjusted DM measurements."""

            return (
                sum(
                    (params[jump.name] if jump.name in params else jump.value) * mask
                    for jump, mask in zip(self._dmjumps, self._dmjump_masks)
                )
                + self._dm
            )

        @signal_base.cache_call(["delay_params"])
        def get_dme(self, params):
            """Return EFAC- and EQUAD-weighted DM errors."""

            return (
                sum(
                    (params[efac.name] if efac.name in params else efac.value) * mask
                    for efac, mask in zip(self._dmefacs, self._dmefac_masks)
                ) ** 2
                * self._dmerr ** 2
                + (
                    10 ** sum(
                        (params[equad.name] if equad.name in params else equad.value) * mask
                        for equad, mask in zip(self._log10_dmequads, self._log10_dmequad_masks)
                    )
                ) ** 2
            ) ** 0.5

        @signal_base.cache_call(["delay_params"])
        def get_mean_dm(self, params):
            """Get weighted DMX estimates (distributed to TOAs)."""

            mean_dm = np.zeros(self._ntoas, "d")

            # DMEFAC- and DMJUMP-adjusted
            dm, dme = self.get_dm(params), self.get_dme(params)

            for which in self._dmwhich:
                mean_dm[which] = np.sum(dm[which] / dme[which] ** 2) / np.sum(1.0 / dme[which] ** 2)

            return mean_dm

        @signal_base.cache_call(["delay_params"])
        def get_mean_dme(self, params):
            """Get weighted DMX uncertainties (distributed to TOAs).
            Note that get_phi computes these variances directly."""

            mean_dme = np.zeros(self._ntoas, "d")

            # DMEFAC- and DMJUMP-adjusted
            dme = self.get_dme(params)

            for which in self._dmwhich:
                mean_dme[which] = np.sqrt(1.0 / np.sum(1.0 / dme[which] ** 2))

            return mean_dme

        @signal_base.cache_call(["delay_params"])
        def get_logsignalprior(self, params):
            """Get an additional likelihood/prior term to cover terms that would
            not affect optimization, were they not dependent on DMEFAC, DMEQUAD,
            and DMJUMP."""

            dm, dme = self.get_dm(params), self.get_dme(params)
            mean_dm, mean_dme = self.get_mean_dm(params), self.get_mean_dme(params)

            # now this is a bit wasteful, because it makes copies of the mean DMX and DMXERR
            # and only uses the first value, but it shouldn't cost us too much
            expterm = -0.5 * np.sum(dm ** 2 / dme ** 2)
            expterm += 0.5 * sum(mean_dm[which][0] ** 2 / mean_dme[which][0] ** 2 for which in self._dmwhich)

            # sum_i [-0.5 * log(dmerr**2)] = -sum_i log dmerr; same for mean_dmerr
            logterm = -np.sum(np.log(dme)) + sum(np.log(mean_dme[which][0]) for which in self._dmwhich)

            return expterm + logterm

        # these are for debugging, but should not enter the likelihood computation

        def get_delta_dm(self, params, use_mean_dm=False):
            # DM - DMX
            delta_dm = np.zeros(self._ntoas, "d")

            if use_mean_dm:
                dm = self.get_mean_dm(params)
            else:
                dm = self.get_dm(params)  # DMJUMP-adjusted

            for dmx, which in zip(self._dmx, self._dmwhich):
                delta_dm[which] = dm[which] - (self._dmpar + dmx)

            return delta_dm

        def get_dm_chi2(self, params, use_mean_dm=False):
            # 'DM' chi-squared
            delta_dm = self.get_delta_dm(params, use_mean_dm=use_mean_dm)

            if use_mean_dm:
                dme = self.get_mean_dme(params)

                chi2 = 0.0
                for idmx, which in enumerate(self._dmwhich):
                    chi2 += (delta_dm[which][0] / dme[which][0]) ** 2
            else:
                dme = self.get_dme(params)  # DMEFAC- and DMEQUAD-adjusted

                chi2 = np.sum((delta_dm / dme) ** 2)

            return chi2

    return WidebandTimingModel
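# NOTE: hedged illustration, not part of the factory above.  get_delay()
# converts a DM offset into a TOA delay with the usual dispersion relation,
# delay [s] = delta_dm [pc cm^-3] / (2.41e-4 * f[MHz]**2), which is where the
# 2.41e-4 constant appears in get_delay() and in the tests above.
import numpy as np

delta_dm = 1.0e-3                          # hypothetical DM offset (pc cm^-3)
freqs = np.array([800.0, 1400.0, 3000.0])  # observing frequencies (MHz)
delays = delta_dm / (2.41e-4 * freqs ** 2)  # seconds; largest at the lowest frequency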
        sign_param = parameter.Uniform(-1.0, 1.0)
    elif sign == 'positive':
        sign_param = 1.0
    else:
        sign_param = -1.0

    wf = chrom_exp_decay(log10_Amp=log10_Amp_dmexp, t0=t0_dmexp,
                         log10_tau=log10_tau_dmexp, sign_param=sign_param,
                         idx=idx)
    dmexp = deterministic_signals.Deterministic(wf, name=name)

    return dmexp


index = parameter.Uniform(0, 5)
ppta_dip = dm_exponential_dip(57450, 57560, idx=index, sign='negative', name='exp2')

kwargs = copy.deepcopy(model_kwargs['0'])
kwargs.update({'red_var': True,
               'psd': 'powerlaw',
               'white_vary': False,
               'noisedict': noise})
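# NOTE: `chrom_exp_decay` is the chromatic exponential-dip waveform used by the
# dm_exponential_dip fragment above (it is provided by the chromatic helpers in
# enterprise_extensions).  The sketch below is a hedged reconstruction under
# that assumption, not the canonical implementation: an exponentially decaying
# step at epoch t0 (MJD), scaled by (1400 MHz / f)**idx.
from enterprise import constants as const

@signal_base.function
def chrom_exp_decay(toas, freqs, log10_Amp=-7, sign_param=-1.0,
                    t0=54000, log10_tau=1.7, idx=2):
    """Chromatic exponential-decay dip waveform (hedged sketch)."""
    t0 *= const.day
    tau = 10 ** log10_tau * const.day
    # step on at t0, then decay with time constant tau
    wf = 10 ** log10_Amp * np.heaviside(toas - t0, 1)
    wf *= np.exp(-np.clip(toas - t0, 0, None) / tau)
    return np.sign(sign_param) * wf * (1400 / freqs) ** idx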
def compute_like(self, npsrs=1, inc_corr=False, inc_kernel=False):
    # get parameters from PAL2 style noise files
    params = get_noise_from_pal2(datadir + "/B1855+09_noise.txt")
    params2 = get_noise_from_pal2(datadir + "/J1909-3744_noise.txt")
    params.update(params2)

    psrs = self.psrs if npsrs == 2 else [self.psrs[0]]

    if inc_corr:
        params.update({"GW_gamma": 4.33, "GW_log10_A": -15.0})

    # find the maximum time span to set GW frequency sampling
    tmin = [p.toas.min() for p in psrs]
    tmax = [p.toas.max() for p in psrs]
    Tspan = np.max(tmax) - np.min(tmin)

    # setup basic model
    efac = parameter.Constant()
    equad = parameter.Constant()
    ecorr = parameter.Constant()
    log10_A = parameter.Constant()
    gamma = parameter.Constant()

    selection = Selection(selections.by_backend)

    ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
    eq = white_signals.EquadNoise(log10_equad=equad, selection=selection)
    ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection)
    pl = utils.powerlaw(log10_A=log10_A, gamma=gamma)
    rn = gp_signals.FourierBasisGP(pl)
    orf = utils.hd_orf()
    crn = gp_signals.FourierBasisCommonGP(pl, orf, components=20, name="GW", Tspan=Tspan)
    tm = gp_signals.TimingModel()

    log10_sigma = parameter.Uniform(-10, -5)
    log10_lam = parameter.Uniform(np.log10(86400), np.log10(1500 * 86400))
    basis = create_quant_matrix(dt=7 * 86400)
    prior = se_kernel(log10_sigma=log10_sigma, log10_lam=log10_lam)
    se = gp_signals.BasisGP(prior, basis, name="se")

    # set up kernel stuff
    if isinstance(inc_kernel, bool):
        inc_kernel = [inc_kernel] * npsrs

    if inc_corr:
        s = ef + eq + ec + rn + crn + tm
    else:
        s = ef + eq + ec + rn + tm

    models = []
    for ik, psr in zip(inc_kernel, psrs):
        snew = s + se if ik else s
        models.append(snew(psr))

    pta = signal_base.PTA(models)

    # set parameters
    pta.set_default_params(params)

    # SE kernel parameters
    log10_sigmas, log10_lams = [-7.0, -6.5], [7.0, 6.5]
    params.update(
        {
            "B1855+09_se_log10_lam": log10_lams[0],
            "B1855+09_se_log10_sigma": log10_sigmas[0],
            "J1909-3744_se_log10_lam": log10_lams[1],
            "J1909-3744_se_log10_sigma": log10_sigmas[1],
        }
    )

    # get parameters
    efacs, equads, ecorrs, log10_A, gamma = [], [], [], [], []
    lsig, llam = [], []
    for pname in [p.name for p in psrs]:
        efacs.append([params[key] for key in sorted(params.keys()) if "efac" in key and pname in key])
        equads.append([params[key] for key in sorted(params.keys()) if "equad" in key and pname in key])
        ecorrs.append([params[key] for key in sorted(params.keys()) if "ecorr" in key and pname in key])
        log10_A.append(params["{}_red_noise_log10_A".format(pname)])
        gamma.append(params["{}_red_noise_gamma".format(pname)])
        lsig.append(params["{}_se_log10_sigma".format(pname)])
        llam.append(params["{}_se_log10_lam".format(pname)])

    GW_gamma = 4.33
    GW_log10_A = -15.0  # correct value

    tflags = [sorted(list(np.unique(p.backend_flags))) for p in psrs]

    cfs, logdets, phis, Ts = [], [], [], []
    for ii, (ik, psr, flags) in enumerate(zip(inc_kernel, psrs, tflags)):
        nvec0 = np.zeros_like(psr.toas)
        for ct, flag in enumerate(flags):
            ind = psr.backend_flags == flag
            nvec0[ind] = efacs[ii][ct] ** 2 * psr.toaerrs[ind] ** 2
            nvec0[ind] += 10 ** (2 * equads[ii][ct]) * np.ones(np.sum(ind))

        # get the basis
        bflags = psr.backend_flags
        Umats = []
        for flag in np.unique(bflags):
            mask = bflags == flag
            Umats.append(utils.create_quantization_matrix(psr.toas[mask])[0])

        nepoch = sum(U.shape[1] for U in Umats)
        U = np.zeros((len(psr.toas), nepoch))
        jvec = np.zeros(nepoch)
        netot = 0
        for ct, flag in enumerate(np.unique(bflags)):
            mask = bflags == flag
            nn = Umats[ct].shape[1]
            U[mask, netot : nn + netot] = Umats[ct]
            jvec[netot : nn + netot] = 10 ** (2 * ecorrs[ii][ct])
            netot += nn

        # get covariance matrix
        cov = np.diag(nvec0) + np.dot(U * jvec[None, :], U.T)
        cf = sl.cho_factor(cov)
        logdet = np.sum(2 * np.log(np.diag(cf[0])))
        cfs.append(cf)
        logdets.append(logdet)

        F, f2 = utils.createfourierdesignmatrix_red(psr.toas, nmodes=20, Tspan=Tspan)
        Mmat = psr.Mmat.copy()
        norm = np.sqrt(np.sum(Mmat ** 2, axis=0))
        Mmat /= norm
        U2, avetoas = create_quant_matrix(psr.toas, dt=7 * 86400)
        if ik:
            T = np.hstack((F, Mmat, U2))
        else:
            T = np.hstack((F, Mmat))
        Ts.append(T)

        phi = utils.powerlaw(f2, log10_A=log10_A[ii], gamma=gamma[ii])
        if inc_corr:
            phigw = utils.powerlaw(f2, log10_A=GW_log10_A, gamma=GW_gamma)
        else:
            phigw = np.zeros(40)
        K = se_kernel(avetoas, log10_sigma=log10_sigmas[ii], log10_lam=log10_lams[ii])
        k = np.diag(np.concatenate((phi + phigw, np.ones(Mmat.shape[1]) * 1e40)))
        if ik:
            k = sl.block_diag(k, K)
        phis.append(k)

    # manually compute loglike
    loglike = 0
    TNrs, TNTs = [], []
    for ct, psr in enumerate(psrs):
        TNrs.append(np.dot(Ts[ct].T, sl.cho_solve(cfs[ct], psr.residuals)))
        TNTs.append(np.dot(Ts[ct].T, sl.cho_solve(cfs[ct], Ts[ct])))
        loglike += -0.5 * (np.dot(psr.residuals, sl.cho_solve(cfs[ct], psr.residuals)) + logdets[ct])

    TNr = np.concatenate(TNrs)
    phi = sl.block_diag(*phis)

    if inc_corr:
        hd = utils.hd_orf(psrs[0].pos, psrs[1].pos)
        phi[len(phis[0]) : len(phis[0]) + 40, :40] = np.diag(phigw * hd)
        phi[:40, len(phis[0]) : len(phis[0]) + 40] = np.diag(phigw * hd)

    cf = sl.cho_factor(phi)
    phiinv = sl.cho_solve(cf, np.eye(phi.shape[0]))
    logdetphi = np.sum(2 * np.log(np.diag(cf[0])))

    Sigma = sl.block_diag(*TNTs) + phiinv
    cf = sl.cho_factor(Sigma)
    expval = sl.cho_solve(cf, TNr)
    logdetsigma = np.sum(2 * np.log(np.diag(cf[0])))

    loglike -= 0.5 * (logdetphi + logdetsigma)
    loglike += 0.5 * np.dot(TNr, expval)

    method = ["partition", "sparse", "cliques"]
    for mth in method:
        eloglike = pta.get_lnlikelihood(params, phiinv_method=mth)
        msg = "Incorrect like for npsr={}, phiinv={}".format(npsrs, mth)
        assert np.allclose(eloglike, loglike), msg
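# NOTE: `se_kernel` and `create_quant_matrix` are helpers defined elsewhere in
# this test module.  The sketches below are hedged reconstructions consistent
# with how they are called above: a quantization basis that also returns the
# epoch-averaged TOAs, and a squared-exponential kernel on those epochs.  The
# exact diagonal stabilizer term is an assumption.
@signal_base.function
def create_quant_matrix(toas, dt=1):
    """Quantization matrix plus epoch-averaged TOAs (hedged sketch)."""
    U, _ = utils.create_quantization_matrix(toas, dt=dt, nmin=1)
    avetoas = np.array([toas[idx.astype(bool)].mean() for idx in U.T])
    return U, avetoas


@signal_base.function
def se_kernel(etoas, log10_sigma=-7, log10_lam=np.log10(30 * 86400)):
    """Squared-exponential kernel on epoch-averaged TOAs (hedged sketch)."""
    tm = np.abs(etoas[None, :] - etoas[:, None])
    d = np.eye(tm.shape[0]) * 10 ** (2 * (log10_sigma - 1.5))  # small diagonal stabilizer
    return 10 ** (2 * log10_sigma) * np.exp(-(tm ** 2) / 2 / 10 ** (2 * log10_lam)) + d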
with open(args.model_kwargs_path, 'r') as fin:
    model_kwargs = json.load(fin)

# Add to exponential dips for J1713+0747
# Model, kernel, extra DMGP, Chrom Kernel, Chrom Quad, Index, GWB
model_labels = [
    ['A', 'periodic', True, True, 'sq_exp', False, 4, False],
    ['B', 'periodic', True, True, 'sq_exp', False, 4, True],
]

ptas = {}
all_kwargs = {}

# Periodic GP kernel for DM
log10_sigma = parameter.Uniform(-10, -4.8)
log10_ell = parameter.Uniform(1, 2.4)
log10_p = parameter.Uniform(-2, -1)
log10_gam_p = parameter.Uniform(-2, 2)

dm_basis = gpk.linear_interp_basis_dm(dt=14 * 86400)
dm_prior = gpk.periodic_kernel(log10_sigma=log10_sigma,
                               log10_ell=log10_ell,
                               log10_gam_p=log10_gam_p,
                               log10_p=log10_p)
dmgp = gp_signals.BasisGP(dm_prior, dm_basis, name='dm_gp1')

# Periodic GP kernel for DM
log10_sigma2 = parameter.Uniform(-4.8, -3)
log10_ell2 = parameter.Uniform(2.4, 5)