def test_ecorr_backend(self): """Test that ecorr-backend signal returns correct values.""" # set up signal parameter ecorr = parameter.Uniform(-10, -5) selection = Selection(selections.by_backend) ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr, selection=selection) ecm = ec(self.psr) ecc = gp_signals.EcorrBasisModel(log10_ecorr=ecorr, selection=selection, coefficients=True) eccm = ecc(self.psr) # parameters ecorrs = [-6.1, -6.2, -6.3, -6.4] params = { "B1855+09_basis_ecorr_430_ASP_log10_ecorr": ecorrs[0], "B1855+09_basis_ecorr_430_PUPPI_log10_ecorr": ecorrs[1], "B1855+09_basis_ecorr_L-wide_ASP_log10_ecorr": ecorrs[2], "B1855+09_basis_ecorr_L-wide_PUPPI_log10_ecorr": ecorrs[3], } fmat = ecm.get_basis(params) cf = 1e-6 * np.random.randn(fmat.shape[1]) d1 = np.dot(fmat, cf) for key in ecm._keys: parname = "B1855+09_basis_ecorr_" + key + "_coefficients" params[parname] = cf[ecm._slices[key]] d2 = eccm.get_delay(params) msg = "Implicit and explicit ecorr-basis delays are different." assert np.allclose(d1, d2), msg
def test_efac_backend(self):
    """Verify the EFAC white-noise covariance under a by-backend selection."""
    # build the selected measurement-noise signal and instantiate it
    efac = parameter.Uniform(0.1, 5)
    selection = Selection(selections.by_backend)
    efm = white_signals.MeasurementNoise(efac=efac, selection=selection)(self.psr)

    # per-backend EFAC values, keyed by the auto-generated parameter names
    efacs = [1.3, 1.4, 1.5, 1.6]
    backends = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
    params = {"B1855+09_{}_efac".format(be): fac for be, fac in zip(backends, efacs)}

    # expected covariance: efac^2 * toaerr^2 within each backend mask
    nvec0 = np.zeros_like(self.psr.toas)
    for fac, be in zip(efacs, np.unique(backends)):
        mask = be == self.psr.backend_flags
        nvec0[mask] = fac**2 * self.psr.toaerrs[mask]**2

    msg = "EFAC covariance incorrect."
    assert np.all(efm.get_ndiag(params) == nvec0), msg
def FourierBasisGP(spectrum, coefficients=False, combine=True, components=20,
                   selection=Selection(selections.no_selection),
                   Tspan=None, modes=None, name='red_noise'):
    """Convenience function to return a BasisGP class with a fourier basis."""
    # Fourier design-matrix basis shared by every selection segment
    fourier_basis = utils.createfourierdesignmatrix_red(nmodes=components,
                                                        Tspan=Tspan,
                                                        modes=modes)
    base = BasisGP(spectrum, fourier_basis, coefficients=coefficients,
                   combine=combine, selection=selection, name=name)

    class FourierBasisGP(base):
        signal_type = 'basis'
        signal_name = 'red noise'
        signal_id = name

    return FourierBasisGP
def test_compare_ecorr_likelihood(self):
    """Compare basis and kernel ecorr methods.

    The same ECORR process can be modeled either as a white-noise kernel
    (EcorrKernelNoise) or as a quantized GP basis (EcorrBasisModel); PTAs
    built each way must return the same log-likelihood.
    """
    selection = Selection(selections.nanograv_backends)

    ef = white_signals.MeasurementNoise()
    ec = white_signals.EcorrKernelNoise(selection=selection)
    ec2 = gp_signals.EcorrBasisModel(selection=selection)
    tm = gp_signals.TimingModel()

    m = ef + ec + tm
    m2 = ef + ec2 + tm
    pta1 = signal_base.PTA([m(p) for p in self.psrs])
    pta2 = signal_base.PTA([m2(p) for p in self.psrs])

    # draw one random parameter vector from the kernel-ECORR PTA
    params = parameter.sample(pta1.params)
    l1 = pta1.get_lnlikelihood(params)

    # need to translate some names for EcorrBasis: the basis model inserts
    # "basis_ecorr" after the pulsar name in each log10_ecorr parameter
    basis_params = {}
    for parname, parval in params.items():
        if "log10_ecorr" in parname:
            toks = parname.split("_")
            basisname = toks[0] + "_basis_ecorr_" + "_".join(toks[1:])
            basis_params[basisname] = parval
    params.update(basis_params)

    l2 = pta2.get_lnlikelihood(params)

    msg = "Likelihood mismatch between ECORR methods"
    assert np.allclose(l1, l2), msg
def test_equad_backend(self):
    """Verify the EQUAD white-noise covariance under a by-backend selection."""
    # build the selected equad signal and instantiate it
    equad = parameter.Uniform(-10, -5)
    selection = Selection(selections.by_backend)
    eqm = white_signals.EquadNoise(log10_equad=equad, selection=selection)(self.psr)

    # per-backend EQUAD values, keyed by the auto-generated parameter names
    equads = [-6.1, -6.2, -6.3, -6.4]
    backends = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
    params = {"B1855+09_{}_log10_equad".format(be): eq for be, eq in zip(backends, equads)}

    # expected covariance: 10^(2*log10_equad) within each backend mask
    nvec0 = np.zeros_like(self.psr.toas)
    for eq, be in zip(equads, np.unique(backends)):
        mask = be == self.psr.backend_flags
        nvec0[mask] = 10**(2 * eq) * np.ones(np.sum(mask))

    msg = "EQUAD covariance incorrect."
    assert np.all(eqm.get_ndiag(params) == nvec0), msg
def test_kernel_backend(self):
    """Test a quantized squared-exponential kernel GP with a by-backend
    selection: rebuild the per-backend bases and kernels by hand and
    compare against the selected BasisGP's basis, phi, and phi-inverse."""
    # set up signal parameter
    selection = Selection(selections.by_backend)
    log10_sigma = parameter.Uniform(-10, -5)
    log10_lam = parameter.Uniform(np.log10(86400), np.log10(1500 * 86400))
    basis = create_quant_matrix(dt=7 * 86400)
    prior = se_kernel(log10_sigma=log10_sigma, log10_lam=log10_lam)
    se = gp_signals.BasisGP(prior, basis, selection=selection, name="se")
    sem = se(self.psr)

    # parameters
    log10_sigmas = [-7, -6, -6.4, -8.5]
    log10_lams = [8.3, 7.4, 6.8, 5.6]
    params = {
        "B1855+09_se_430_ASP_log10_lam": log10_lams[0],
        "B1855+09_se_430_ASP_log10_sigma": log10_sigmas[0],
        "B1855+09_se_430_PUPPI_log10_lam": log10_lams[1],
        "B1855+09_se_430_PUPPI_log10_sigma": log10_sigmas[1],
        "B1855+09_se_L-wide_ASP_log10_lam": log10_lams[2],
        "B1855+09_se_L-wide_ASP_log10_sigma": log10_sigmas[2],
        "B1855+09_se_L-wide_PUPPI_log10_lam": log10_lams[3],
        "B1855+09_se_L-wide_PUPPI_log10_sigma": log10_sigmas[3],
    }

    # get the basis: one quantization matrix and one SE kernel per backend
    bflags = self.psr.backend_flags
    Fmats, fs, phis = [], [], []
    for ct, flag in enumerate(np.unique(bflags)):
        mask = bflags == flag
        U, avetoas = create_quant_matrix(self.psr.toas[mask], dt=7 * 86400)
        Fmats.append(U)
        fs.append(avetoas)
        phis.append(se_kernel(avetoas, log10_sigma=log10_sigmas[ct], log10_lam=log10_lams[ct]))

    # assemble the block-structured combined basis and block-diagonal kernel
    nf = sum(F.shape[1] for F in Fmats)
    U = np.zeros((len(self.psr.toas), nf))
    K = sl.block_diag(*phis)
    Kinv = np.linalg.inv(K)
    nftot = 0
    for ct, flag in enumerate(np.unique(bflags)):
        mask = bflags == flag
        nn = Fmats[ct].shape[1]
        U[mask, nftot:nn + nftot] = Fmats[ct]
        nftot += nn

    msg = "Kernel basis incorrect for backend signal."
    assert np.allclose(U, sem.get_basis(params)), msg

    # spectrum test
    msg = "Kernel incorrect for backend signal."
    assert np.allclose(sem.get_phi(params), K), msg

    # inverse spectrum test
    msg = "Kernel inverse incorrect for backend signal."
    assert np.allclose(sem.get_phiinv(params), Kinv), msg
def test_fourier_red_noise_backend(self): """Test that red noise-backend signal returns correct values.""" # set up signal parameter pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7)) selection = Selection(selections.by_backend) rn = gp_signals.FourierBasisGP(spectrum=pl, components=30, selection=selection) rnm = rn(self.psr) # parameters log10_As = [-14, -14.4, -15, -14.8] gammas = [2.3, 4.4, 1.8, 5.6] params = { "B1855+09_red_noise_430_ASP_gamma": gammas[0], "B1855+09_red_noise_430_PUPPI_gamma": gammas[1], "B1855+09_red_noise_L-wide_ASP_gamma": gammas[2], "B1855+09_red_noise_L-wide_PUPPI_gamma": gammas[3], "B1855+09_red_noise_430_ASP_log10_A": log10_As[0], "B1855+09_red_noise_430_PUPPI_log10_A": log10_As[1], "B1855+09_red_noise_L-wide_ASP_log10_A": log10_As[2], "B1855+09_red_noise_L-wide_PUPPI_log10_A": log10_As[3], } # get the basis bflags = self.psr.backend_flags Fmats, fs, phis = [], [], [] for ct, flag in enumerate(np.unique(bflags)): mask = bflags == flag F, f = utils.createfourierdesignmatrix_red(self.psr.toas[mask], 30) Fmats.append(F) fs.append(f) phis.append(utils.powerlaw(f, log10_As[ct], gammas[ct])) nf = sum(F.shape[1] for F in Fmats) F = np.zeros((len(self.psr.toas), nf)) phi = np.hstack([p for p in phis]) nftot = 0 for ct, flag in enumerate(np.unique(bflags)): mask = bflags == flag nn = Fmats[ct].shape[1] F[mask, nftot:nn + nftot] = Fmats[ct] nftot += nn msg = "F matrix incorrect for GP Fourier backend signal." assert np.allclose(F, rnm.get_basis(params)), msg # spectrum test msg = "Spectrum incorrect for GP Fourier backend signal." assert np.all(rnm.get_phi(params) == phi), msg # inverse spectrum test msg = "Spectrum inverse incorrect for GP Fourier backend signal." assert np.all(rnm.get_phiinv(params) == 1 / phi), msg # test shape msg = "F matrix shape incorrect" assert rnm.get_basis(params).shape == F.shape, msg
def MeasurementNoise(efac=parameter.Uniform(0.5, 1.5),
                     selection=Selection(selections.no_selection), name=""):
    """Class factory for EFAC type measurement noise."""
    # per-TOA variance is the TOA error scaled by EFAC
    ndiag_fn = efac_ndiag(efac=efac)
    base = WhiteNoise(ndiag_fn, selection=selection, name=name)

    class MeasurementNoise(base):
        signal_name = "efac"
        signal_id = ("efac_" + name) if name else "efac"

    return MeasurementNoise
def EquadNoise(log10_equad=parameter.Uniform(-10, -5),
               selection=Selection(selections.no_selection), name=""):
    """Class factory for EQUAD type measurement noise."""
    # per-TOA variance added in quadrature, 10^(2 * log10_equad)
    ndiag_fn = equad_ndiag(log10_equad=log10_equad)
    base = WhiteNoise(ndiag_fn, selection=selection, name=name)

    class EquadNoise(base):
        signal_name = "equad"
        signal_id = ("equad_" + name) if name else "equad"

    return EquadNoise
def TNEquadNoise(log10_tnequad=parameter.Uniform(-10, -5),
                 selection=Selection(selections.no_selection), name=""):
    """Class factory for TNEQUAD type measurement noise
    (legacy convention: not multiplied by EFAC)."""
    # per-TOA variance added in quadrature, 10^(2 * log10_tnequad)
    ndiag_fn = tnequad_ndiag(log10_tnequad=log10_tnequad)
    base = WhiteNoise(ndiag_fn, selection=selection, name=name)

    class TNEquadNoise(base):
        signal_name = "tnequad"
        signal_id = ("tnequad_" + name) if name else "tnequad"

    return TNEquadNoise
def test_ecorr_backend(self): """Test that ecorr-backend signal returns correct values.""" # set up signal parameter ecorr = parameter.Uniform(-10, -5) selection = Selection(selections.by_backend) ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr, selection=selection) ecm = ec(self.psr) # parameters ecorrs = [-6.1, -6.2, -6.3, -6.4] params = { "B1855+09_basis_ecorr_430_ASP_log10_ecorr": ecorrs[0], "B1855+09_basis_ecorr_430_PUPPI_log10_ecorr": ecorrs[1], "B1855+09_basis_ecorr_L-wide_ASP_log10_ecorr": ecorrs[2], "B1855+09_basis_ecorr_L-wide_PUPPI_log10_ecorr": ecorrs[3], } # get the basis bflags = self.psr.backend_flags Umats = [] for flag in np.unique(bflags): mask = bflags == flag Umats.append( utils.create_quantization_matrix(self.psr.toas[mask])[0]) nepoch = sum(U.shape[1] for U in Umats) U = np.zeros((len(self.psr.toas), nepoch)) jvec = np.zeros(nepoch) netot = 0 for ct, flag in enumerate(np.unique(bflags)): mask = bflags == flag nn = Umats[ct].shape[1] U[mask, netot:nn + netot] = Umats[ct] jvec[netot:nn + netot] = 10**(2 * ecorrs[ct]) netot += nn # basis matrix test msg = "U matrix incorrect for Basis Ecorr-backend signal." assert np.allclose(U, ecm.get_basis(params)), msg # Jvec test msg = "Prior vector incorrect for Basis Ecorr backend signal." assert np.all(ecm.get_phi(params) == jvec), msg # inverse Jvec test msg = "Prior vector inverse incorrect for Basis Ecorr backend signal." assert np.all(ecm.get_phiinv(params) == 1 / jvec), msg # test shape msg = "U matrix shape incorrect" assert ecm.get_basis(params).shape == U.shape, msg
def test_delay_backend(self): """Same as :meth:`TestDeterministicSignals.test_delay`, but instantiates the Signal with :func:`enterprise.signals.selections.by_backend`, which creates separated named parameters for 430_ASP, 430_PUPPI, L-wide_ASP, L-wide_PUPPI. The parameters are automatically accounted for in `get_delay()`, but they need to be used explicitly when calling the function directly. The tests therefore reconstructs the delay vector by building selection masks from :meth:`enterprise.Pulsar.backend_flags`.""" # set up signal and parameters log10_Ad = parameter.Uniform(-10, -5) log10_fd = parameter.Uniform(-9, -7) waveform = sine_wave(log10_A=log10_Ad, log10_f=log10_fd) selection = Selection(selections.by_backend) dt = deterministic_signals.Deterministic(waveform, selection=selection) m = dt(self.psr) # parameters lAs = [-7.6, -7.1, -6, -6.4] lfs = [-7.6, -8.0, -9, -8.4] params = { "B1855+09_430_ASP_log10_A": lAs[0], "B1855+09_430_PUPPI_log10_A": lAs[1], "B1855+09_L-wide_ASP_log10_A": lAs[2], "B1855+09_L-wide_PUPPI_log10_A": lAs[3], "B1855+09_430_ASP_log10_f": lfs[0], "B1855+09_430_PUPPI_log10_f": lfs[1], "B1855+09_L-wide_ASP_log10_f": lfs[2], "B1855+09_L-wide_PUPPI_log10_f": lfs[3], } # correct value flags = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"] delay = np.zeros_like(self.psr.toas) for ct, flag in enumerate(np.unique(flags)): ind = flag == self.psr.backend_flags delay[ind] = sine_wave(self.psr.toas[ind], log10_A=lAs[ct], log10_f=lfs[ct]) # test msg = "Delay incorrect." assert np.all(m.get_delay(params) == delay), msg
def MeasurementNoise(
    efac=parameter.Uniform(0.5, 1.5),
    log10_t2equad=None,
    selection=Selection(selections.no_selection),
    name="",
):
    """Class factory for EFAC+EQUAD measurement noise, using the
    tempo/tempo2/pint parameter convention
    (variance = efac^2 * (toaerr^2 + t2equad^2)).
    Leave log10_t2equad as None for EFAC-only noise."""
    # pick the variance function according to whether EQUAD is requested
    if log10_t2equad is None:
        ndiag_fn = efac_ndiag(efac=efac)
    else:
        ndiag_fn = combined_ndiag(efac=efac, log10_t2equad=log10_t2equad)
    base = WhiteNoise(ndiag_fn, selection=selection, name=name)

    class MeasurementNoise(base):
        signal_name = "measurement_noise"
        signal_id = ("measurement_noise_" + name) if name else "measurement_noise"

    return MeasurementNoise
def EcorrBasisModel(
    log10_ecorr=parameter.Uniform(-10, -5),
    coefficients=False,
    selection=Selection(selections.no_selection),
    name="basis_ecorr",
):
    """Convenience function to return a BasisGP class with a quantized
    ECORR basis."""
    # epoch-quantization basis with a per-epoch ECORR variance prior
    quant_basis = utils.create_quantization_matrix()
    ecorr_prior = ecorr_basis_prior(log10_ecorr=log10_ecorr)
    base = BasisGP(ecorr_prior, quant_basis, coefficients=coefficients,
                   selection=selection, name=name)

    class EcorrBasisModel(base):
        signal_type = "basis"
        signal_name = "basis ecorr"
        signal_id = name

    return EcorrBasisModel
def Deterministic(waveform, selection=Selection(selections.no_selection), name=''):
    """Class factory for generic deterministic signals.

    :param waveform: ``Function`` object returning the delay time series.
    :param selection: ``Selection`` object that splits TOAs into masked
        subsets, each receiving its own independently-named parameters.
    :param name: base name used when building per-pulsar parameter names.
    :return: a ``Deterministic`` subclass of ``signal_base.Signal``.
    """

    class Deterministic(signal_base.Signal):
        signal_type = 'deterministic'
        signal_name = name
        signal_id = name

        def __init__(self, psr):
            super(Deterministic, self).__init__(psr)
            self.name = self.psrname + '_' + self.signal_id
            self._do_selection(psr, waveform, selection)

        def _do_selection(self, psr, waveform, selection):
            # instantiate one waveform per selection key, collecting every
            # parameter under its fully-qualified name
            sel = selection(psr)
            self._keys = list(sorted(sel.masks.keys()))
            self._masks = [sel.masks[key] for key in self._keys]
            # shared output buffer, refilled on every get_delay call
            self._delay = np.zeros(len(psr.toas))
            self._wf, self._params = {}, {}
            for key, mask in zip(self._keys, self._masks):
                # parameter prefix "<psrname>_<name>_<key>", skipping empties
                pnames = [psr.name, name, key]
                pname = '_'.join([n for n in pnames if n])
                self._wf[key] = waveform(pname, psr=psr)
                params = self._wf[key]._params.values()
                for param in params:
                    self._params[param.name] = param

        @property
        def delay_params(self):
            """Get any varying ndiag parameters."""
            return [pp.name for pp in self.params]

        @signal_base.cache_call('delay_params')
        def get_delay(self, params):
            """Return signal delay."""
            # each selection mask gets its own waveform evaluation
            for key, mask in zip(self._keys, self._masks):
                self._delay[mask] = self._wf[key](params=params, mask=mask)
            return self._delay

    return Deterministic
def WhiteNoise(varianceFunction, selection=Selection(selections.no_selection), name=''):
    """Class factory for generic white noise signals.

    :param varianceFunction: ``Function`` returning the per-TOA variance.
    :param selection: ``Selection`` object that splits TOAs into masked
        subsets, each receiving its own independently-named parameters.
    :param name: base name used when building per-pulsar parameter names.
    :return: a ``WhiteNoise`` subclass of ``signal_base.Signal``.
    """

    class WhiteNoise(signal_base.Signal):
        signal_type = 'white noise'
        signal_name = name
        signal_id = name

        def __init__(self, psr):
            super(WhiteNoise, self).__init__(psr)
            self.name = self.psrname + '_' + self.signal_id
            self._do_selection(psr, varianceFunction, selection)

        def _do_selection(self, psr, vfn, selection):
            # instantiate one variance function per selection key and
            # collect all of their parameters
            sel = selection(psr)
            self._keys = list(sorted(sel.masks.keys()))
            self._masks = [sel.masks[key] for key in self._keys]
            self._ndiag, self._params = {}, {}
            for key, mask in zip(self._keys, self._masks):
                # parameter prefix "<psrname>_<name>_<key>", skipping empties
                pnames = [psr.name, name, key]
                pname = '_'.join([n for n in pnames if n])
                self._ndiag[key] = vfn(pname, psr=psr)
                for param in list(self._ndiag[key]._params.values()):
                    self._params[param.name] = param

        @property
        def ndiag_params(self):
            """Get any varying ndiag parameters."""
            return [pp.name for pp in self.params]

        @signal_base.cache_call('ndiag_params')
        def get_ndiag(self, params):
            # sum masked per-selection variances into one diagonal vector
            ret = 0
            for key, mask in zip(self._keys, self._masks):
                ret += self._ndiag[key](params=params) * mask
            return signal_base.ndarray_alt(ret)

    return WhiteNoise
def test_delay_backend(self):
    """Check a deterministic sine-wave delay under a by-backend selection."""
    # build the selected deterministic signal and instantiate it
    waveform = sine_wave(log10_A=parameter.Uniform(-10, -5),
                         log10_f=parameter.Uniform(-9, -7))
    selection = Selection(selections.by_backend)
    m = deterministic_signals.Deterministic(waveform, selection=selection)(self.psr)

    # one amplitude and frequency per backend, keyed by parameter name
    lAs = [-7.6, -7.1, -6, -6.4]
    lfs = [-7.6, -8.0, -9, -8.4]
    backends = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
    params = {}
    for be, la, lf in zip(backends, lAs, lfs):
        params["B1855+09_{}_log10_A".format(be)] = la
        params["B1855+09_{}_log10_f".format(be)] = lf

    # expected delay: evaluate the waveform separately on each backend mask
    delay = np.zeros_like(self.psr.toas)
    for ct, be in enumerate(np.unique(backends)):
        mask = be == self.psr.backend_flags
        delay[mask] = sine_wave(self.psr.toas[mask], log10_A=lAs[ct], log10_f=lfs[ct])

    msg = "Delay incorrect."
    assert np.all(m.get_delay(params) == delay), msg
def test_add_efac_equad_backend(self):
    """Test that addition of efac-backend and equad-backend signal returns
    correct covariance, and that the resulting ndiag object solves
    correctly against vectors and matrices.
    """
    selection = Selection(selections.by_backend)

    efac = parameter.Uniform(0.1, 5)
    equad = parameter.Uniform(-10, -5)
    ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
    eq = white_signals.EquadNoise(log10_equad=equad, selection=selection)
    s = ef + eq
    m = s(self.psr)

    # set parameters
    efacs = [1.3, 1.4, 1.5, 1.6]
    equads = [-6.1, -6.2, -6.3, -6.4]
    params = {
        "B1855+09_430_ASP_efac": efacs[0],
        "B1855+09_430_PUPPI_efac": efacs[1],
        "B1855+09_L-wide_ASP_efac": efacs[2],
        "B1855+09_L-wide_PUPPI_efac": efacs[3],
        "B1855+09_430_ASP_log10_equad": equads[0],
        "B1855+09_430_PUPPI_log10_equad": equads[1],
        "B1855+09_L-wide_ASP_log10_equad": equads[2],
        "B1855+09_L-wide_PUPPI_log10_equad": equads[3],
    }

    # correct value: efac^2 * toaerr^2 + 10^(2*log10_equad) per backend
    flags = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
    nvec0 = np.zeros_like(self.psr.toas)
    for ct, flag in enumerate(np.unique(flags)):
        ind = flag == self.psr.backend_flags
        nvec0[ind] = efacs[ct]**2 * self.psr.toaerrs[ind]**2
        nvec0[ind] += 10**(2 * equads[ct]) * np.ones(np.sum(ind))

    logdet = np.sum(np.log(nvec0))

    # test
    msg = "EFAC/EQUAD covariance incorrect."
    assert np.all(m.get_ndiag(params) == nvec0), msg

    msg = "EFAC/EQUAD logdet incorrect."
    N = m.get_ndiag(params)
    assert np.allclose(N.solve(self.psr.residuals, logdet=True)[1], logdet, rtol=1e-10), msg

    # N^-1 r
    msg = "EFAC/EQUAD D1 solve incorrect."
    assert np.allclose(N.solve(self.psr.residuals), self.psr.residuals / nvec0, rtol=1e-10), msg

    # r^T N^-1 r
    msg = "EFAC/EQUAD 1D1 solve incorrect."
    assert np.allclose(
        N.solve(self.psr.residuals, left_array=self.psr.residuals),
        np.dot(self.psr.residuals / nvec0, self.psr.residuals),
        rtol=1e-10,
    ), msg

    # T^T N^-1 r
    msg = "EFAC/EQUAD 2D1 solve incorrect."
    T = self.psr.Mmat
    assert np.allclose(N.solve(self.psr.residuals, left_array=T),
                       np.dot(T.T, self.psr.residuals / nvec0), rtol=1e-10), msg

    # T^T N^-1 T
    msg = "EFAC/EQUAD 2D2 solve incorrect."
    assert np.allclose(N.solve(T, left_array=T),
                       np.dot(T.T, T / nvec0[:, None]), rtol=1e-10), msg
def EcorrKernelNoise(
    log10_ecorr=parameter.Uniform(-10, -5),
    selection=Selection(selections.no_selection),
    method="sherman-morrison",
    name="",
):
    r"""Class factory for ECORR type noise.

    :param log10_ecorr: ``Parameter`` type for log10 or ecorr parameter.
    :param selection:
        ``Selection`` object specifying masks for backends, time segments, etc.
    :param method: Method for computing noise covariance matrix.
        Options include `sherman-morrison`, `sparse`, and `block`
    :return: ``EcorrKernelNoise`` class.

    ECORR is a noise signal that is used for data with multi-channel TOAs
    that are nearly simultaneous in time. It is a white noise signal that is
    uncorrelated epoch to epoch but completely correlated for TOAs in a
    given observing epoch. For this implementation we use this covariance
    matrix as part of the white noise covariance matrix :math:`N`. The
    covariance is block diagonal, thus allowing us to exploit special
    methods to make matrix manipulations easier. In this signal
    implementation we offer three methods of performing these matrix
    operations:

    sherman-morrison
        Uses the `Sherman-Morrison`_ formula to compute the matrix inverse
        and other matrix operations. **Note:** this method can only be used
        for covariances that can be constructed by the outer product of two
        vectors, :math:`uv^T`.

    sparse
        Uses `Scipy Sparse`_ matrices to construct the block diagonal
        covariance matrix and perform matrix operations.

    block
        Uses a custom scheme that uses the individual blocks from the block
        diagonal matrix to perform fast matrix inverse and other solve
        operations.

    .. note:: The sherman-morrison method is the fastest, followed by the
        block and then sparse methods, however; the block and sparse methods
        are more general and should be used if sub-classing this signal for
        more complicated blocks.

    .. _Sherman-Morrison: https://en.wikipedia.org/wiki/Sherman-Morrison_formula
    .. _Scipy Sparse: https://docs.scipy.org/doc/scipy-0.18.1/reference/sparse.html
    """  # noqa E501
    if method not in ["sherman-morrison", "block", "sparse"]:
        msg = "EcorrKernelNoise does not support method: {}".format(method)
        raise TypeError(msg)

    class EcorrKernelNoise(signal_base.Signal):
        signal_type = "white noise"
        signal_name = "ecorr_" + method
        signal_id = "_".join(["ecorr", name, method]) if name else "_".join(["ecorr", method])

        def __init__(self, psr):
            super(EcorrKernelNoise, self).__init__(psr)
            self.name = self.psrname + "_" + self.signal_id

            sel = selection(psr)
            self._params, self._masks = sel("log10_ecorr", log10_ecorr)
            keys = sorted(self._masks.keys())
            masks = [self._masks[key] for key in keys]

            # one quantization matrix per selection; epochs need >= 2 TOAs
            Umats = []
            for key, mask in zip(keys, masks):
                Umats.append(utils.create_quantization_matrix(psr.toas[mask], nmin=2)[0])

            # record, per selection key, the list of TOA slices (epochs)
            # covered by that key's columns of the combined matrix
            nepoch = sum(U.shape[1] for U in Umats)
            U = np.zeros((len(psr.toas), nepoch))
            self._slices = {}
            netot = 0
            for ct, (key, mask) in enumerate(zip(keys, masks)):
                nn = Umats[ct].shape[1]
                U[mask, netot:nn + netot] = Umats[ct]
                self._slices.update({key: utils.quant2ind(U[:, netot:nn + netot])})
                netot += nn

            # initialize sparse matrix
            self._setup(psr)

        @property
        def ndiag_params(self):
            """Get any varying ndiag parameters."""
            return [pp.name for pp in self.params]

        @signal_base.cache_call("ndiag_params")
        def get_ndiag(self, params):
            # dispatch on the factory-level method choice
            if method == "sherman-morrison":
                return self._get_ndiag_sherman_morrison(params)
            elif method == "sparse":
                return self._get_ndiag_sparse(params)
            elif method == "block":
                return self._get_ndiag_block(params)

        def _setup(self, psr):
            # only the sparse method needs extra setup
            if method == "sparse":
                self._setup_sparse(psr)

        def _setup_sparse(self, psr):
            # pre-populate the sparsity pattern; values are filled in
            # per-call by _get_ndiag_sparse
            Ns = scipy.sparse.csc_matrix((len(psr.toas), len(psr.toas)))
            for key, slices in self._slices.items():
                for slc in slices:
                    if slc.stop - slc.start > 1:
                        Ns[slc, slc] = 1.0
            self._Ns = signal_base.csc_matrix_alt(Ns)

        def _get_ndiag_sparse(self, params):
            # fill each epoch block with 10^(2 * log10_ecorr)
            for p in self._params:
                for slc in self._slices[p]:
                    if slc.stop - slc.start > 1:
                        self._Ns[slc, slc] = 10**(2 * self.get(p, params))
            return self._Ns

        def _get_ndiag_sherman_morrison(self, params):
            slices, jvec = self._get_jvecs(params)
            return signal_base.ShermanMorrison(jvec, slices)

        def _get_ndiag_block(self, params):
            slices, jvec = self._get_jvecs(params)
            blocks = []
            for jv, slc in zip(jvec, slices):
                nb = slc.stop - slc.start
                blocks.append(np.ones((nb, nb)) * jv)
            return signal_base.BlockMatrix(blocks, slices)

        def _get_jvecs(self, params):
            # flatten per-key epoch-slice lists (sorted by key) and build
            # the matching per-epoch variance vector, 10^(2 * log10_ecorr)
            slices = sum([self._slices[key] for key in sorted(self._slices.keys())], [])
            jvec = np.concatenate([
                np.ones(len(self._slices[key])) * 10**(2 * self.get(key, params))
                for key in sorted(self._slices.keys())
            ])
            return (slices, jvec)

    return EcorrKernelNoise
def WidebandTimingModel(
    dmefac=parameter.Uniform(pmin=0.1, pmax=10.0),
    log10_dmequad=parameter.Uniform(pmin=-7.0, pmax=0.0),
    dmjump=parameter.Uniform(pmin=-0.01, pmax=0.01),
    dmefac_selection=Selection(selections.no_selection),
    log10_dmequad_selection=Selection(selections.no_selection),
    dmjump_selection=Selection(selections.no_selection),
    dmjump_ref=None,
    name="wideband_timing_model",
):
    """Class factory for marginalized linear timing model signals
    that take wideband TOAs and DMs. Currently assumes DMX for DM model.

    :param dmefac: ``Parameter`` type for the DM-error scale factor.
    :param log10_dmequad: ``Parameter`` type for the log10 DM error
        added in quadrature.
    :param dmjump: ``Parameter`` type for per-selection DM offsets;
        pass ``None`` to disable DMJUMPs entirely.
    :param dmefac_selection / log10_dmequad_selection / dmjump_selection:
        independent ``Selection`` objects for each parameter family.
    :param dmjump_ref: selection key whose DMJUMP is fixed to zero
        (reference backend), or ``None``.
    :param name: signal id base name.
    """

    basis = utils.unnormed_tm_basis()  # will need to normalize phi otherwise
    prior = utils.tm_prior()  # standard
    BaseClass = BasisGP(prior, basis, coefficients=False, name=name)

    class WidebandTimingModel(BaseClass):
        signal_type = "basis"
        signal_name = "wideband timing model"
        signal_id = name
        basis_combine = False  # should never need to be True

        def __init__(self, psr):
            super(WidebandTimingModel, self).__init__(psr)
            self.name = self.psrname + "_" + self.signal_id

            # make selection for DMEFACs
            dmefac_select = dmefac_selection(psr)
            self._dmefac_keys = list(sorted(dmefac_select.masks.keys()))
            self._dmefac_masks = [dmefac_select.masks[key] for key in self._dmefac_keys]

            # make selection for DMEQUADs
            log10_dmequad_select = log10_dmequad_selection(psr)
            self._log10_dmequad_keys = list(sorted(log10_dmequad_select.masks.keys()))
            self._log10_dmequad_masks = [log10_dmequad_select.masks[key] for key in self._log10_dmequad_keys]

            # make selection for DMJUMPs
            dmjump_select = dmjump_selection(psr)
            self._dmjump_keys = list(sorted(dmjump_select.masks.keys()))
            self._dmjump_masks = [dmjump_select.masks[key] for key in self._dmjump_keys]

            # a single unnamed selection key means "no selection": a global
            # DMJUMP would be fully degenerate, so it is disallowed
            if self._dmjump_keys == [""] and dmjump is not None:
                raise ValueError("WidebandTimingModel: can only do DMJUMP with more than one selection.")

            # collect parameters
            self._params = {}

            self._dmefacs = []
            for key in self._dmefac_keys:
                pname = "_".join([n for n in [psr.name, key, "dmefac"] if n])
                param = dmefac(pname)
                self._dmefacs.append(param)
                self._params[param.name] = param

            self._log10_dmequads = []
            for key in self._log10_dmequad_keys:
                pname = "_".join([n for n in [psr.name, key, "log10_dmequad"] if n])
                param = log10_dmequad(pname)
                self._log10_dmequads.append(param)
                self._params[param.name] = param

            self._dmjumps = []
            if dmjump is not None:
                for key in self._dmjump_keys:
                    pname = "_".join([n for n in [psr.name, key, "dmjump"] if n])
                    if dmjump_ref is not None:
                        # the reference selection's DMJUMP is pinned to zero
                        if pname == psr.name + "_" + dmjump_ref + "_dmjump":
                            fixed_dmjump = parameter.Constant(val=0.0)
                            param = fixed_dmjump(pname)
                        else:
                            param = dmjump(pname)
                    else:
                        param = dmjump(pname)
                    self._dmjumps.append(param)
                    self._params[param.name] = param

            # copy psr quantities
            self._ntoas = len(psr.toas)
            self._npars = len(psr.fitpars)
            self._freqs = psr.freqs

            # collect DMX information (will be used to make phi and delay)
            self._dmpar = psr.dm
            self._dm = np.array(psr.flags["pp_dm"], "d")
            self._dmerr = np.array(psr.flags["pp_dme"], "d")

            # running count of how many DMX bins claim each TOA
            check = np.zeros_like(psr.toas, "i")

            # assign TOAs to DMX bins
            self._dmx, self._dmindex, self._dmwhich = [], [], []
            for index, key in enumerate(sorted(psr.dmx)):
                dmx = psr.dmx[key]
                if not dmx["fit"]:
                    raise ValueError("WidebandTimingModel: all DMX parameters must be estimated.")
                self._dmx.append(dmx["DMX"])
                self._dmindex.append(psr.fitpars.index(key))
                # DMXR1/DMXR2 are in MJD; psr.stoas are in seconds
                self._dmwhich.append((dmx["DMXR1"] <= psr.stoas / 86400) & (psr.stoas / 86400 < dmx["DMXR2"]))
                check += self._dmwhich[-1]

            # every TOA must fall in exactly one DMX interval
            if np.sum(check) != self._ntoas:
                raise ValueError("WidebandTimingModel: cannot account for all TOAs in DMX intervals.")

            if "DM" in psr.fitpars:
                raise ValueError("WidebandTimingModel: DM must not be estimated.")

            self._ndmx = len(self._dmx)

        @property
        def delay_params(self):
            # cache parameters are all DMEFACS, DMEQUADS, and DMJUMPS
            return (
                [p.name for p in self._dmefacs]
                + [p.name for p in self._log10_dmequads]
                + [p.name for p in self._dmjumps]
            )

        @signal_base.cache_call(["delay_params"])
        def get_phi(self, params):
            """Return wideband timing-model prior."""
            # get DMEFAC- and DMEQUAD-adjusted DMX errors
            dme = self.get_dme(params)

            # initialize the timing-model "infinite" prior
            phi = KernelMatrix(1e40 * np.ones(self._npars, "d"))

            # fill the DMX slots with weighted errors
            for index, which in zip(self._dmindex, self._dmwhich):
                phi.set(1.0 / np.sum(1.0 / dme[which] ** 2), index)

            return phi

        def get_phiinv(self, params):
            """Return inverse prior (using KernelMatrix inv)."""
            return self.get_phi(params).inv()

        @signal_base.cache_call(["delay_params"])
        def get_delay(self, params):
            """Return the weighted-mean DM correction that applies for each
            residual. (Will be the same across each DM bin, before
            measurement-frequency weighting.)"""
            dm_delay = np.zeros(self._ntoas, "d")

            avg_dm = self.get_mean_dm(params)

            for dmx, which in zip(self._dmx, self._dmwhich):
                dm_delay[which] = avg_dm[which] - (self._dmpar + dmx)

            # standard dispersion delay scaling, 1 / (K * nu^2)
            return dm_delay / (2.41e-4 * self._freqs ** 2)

        @signal_base.cache_call(["delay_params"])
        def get_dm(self, params):
            """Return DMJUMP-adjusted DM measurements."""
            return (
                sum(
                    (params[jump.name] if jump.name in params else jump.value) * mask
                    for jump, mask in zip(self._dmjumps, self._dmjump_masks)
                )
                + self._dm
            )

        @signal_base.cache_call(["delay_params"])
        def get_dme(self, params):
            """Return EFAC- and EQUAD-weighted DM errors."""
            # sqrt(dmefac^2 * dmerr^2 + (10^log10_dmequad)^2), with the
            # per-selection parameters distributed by their masks
            return (
                sum(
                    (params[efac.name] if efac.name in params else efac.value) * mask
                    for efac, mask in zip(self._dmefacs, self._dmefac_masks)
                )
                ** 2
                * self._dmerr ** 2
                + (
                    10
                    ** sum(
                        (params[equad.name] if equad.name in params else equad.value) * mask
                        for equad, mask in zip(self._log10_dmequads, self._log10_dmequad_masks)
                    )
                )
                ** 2
            ) ** 0.5

        @signal_base.cache_call(["delay_params"])
        def get_mean_dm(self, params):
            """Get weighted DMX estimates (distributed to TOAs)."""
            mean_dm = np.zeros(self._ntoas, "d")

            # DMEFAC- and DMJUMP-adjusted
            dm, dme = self.get_dm(params), self.get_dme(params)

            # inverse-variance weighted mean within each DMX bin
            for which in self._dmwhich:
                mean_dm[which] = np.sum(dm[which] / dme[which] ** 2) / np.sum(1.0 / dme[which] ** 2)

            return mean_dm

        @signal_base.cache_call(["delay_params"])
        def get_mean_dme(self, params):
            """Get weighted DMX uncertainties (distributed to TOAs).
            Note that get_phi computes these variances directly."""
            mean_dme = np.zeros(self._ntoas, "d")

            # DMEFAC- and DMJUMP-adjusted
            dme = self.get_dme(params)

            for which in self._dmwhich:
                mean_dme[which] = np.sqrt(1.0 / np.sum(1.0 / dme[which] ** 2))

            return mean_dme

        @signal_base.cache_call(["delay_params"])
        def get_logsignalprior(self, params):
            """Get an additional likelihood/prior term to cover terms that
            would not affect optimization, were they not dependent on
            DMEFAC, DMEQUAD, and DMJUMP."""
            dm, dme = self.get_dm(params), self.get_dme(params)
            mean_dm, mean_dme = self.get_mean_dm(params), self.get_mean_dme(params)

            # now this is a bit wasteful, because it makes copies of the
            # mean DMX and DMXERR and only uses the first value, but it
            # shouldn't cost us too much
            expterm = -0.5 * np.sum(dm ** 2 / dme ** 2)
            expterm += 0.5 * sum(mean_dm[which][0] ** 2 / mean_dme[which][0] ** 2 for which in self._dmwhich)

            # sum_i [-0.5 * log(dmerr**2)] = -sum_i log dmerr; same for mean_dmerr
            logterm = -np.sum(np.log(dme)) + sum(np.log(mean_dme[which][0]) for which in self._dmwhich)

            return expterm + logterm

        # these are for debugging, but should not enter the likelihood computation

        def get_delta_dm(self, params, use_mean_dm=False):
            # DM - DMX
            delta_dm = np.zeros(self._ntoas, "d")

            if use_mean_dm:
                dm = self.get_mean_dm(params)
            else:
                dm = self.get_dm(params)  # DMJUMP-adjusted

            for dmx, which in zip(self._dmx, self._dmwhich):
                delta_dm[which] = dm[which] - (self._dmpar + dmx)

            return delta_dm

        def get_dm_chi2(self, params, use_mean_dm=False):
            # 'DM' chi-squared
            delta_dm = self.get_delta_dm(params, use_mean_dm=use_mean_dm)

            if use_mean_dm:
                # one term per DMX bin (values are constant within a bin)
                dme = self.get_mean_dme(params)
                chi2 = 0.0
                for idmx, which in enumerate(self._dmwhich):
                    chi2 += (delta_dm[which][0] / dme[which][0]) ** 2
            else:
                dme = self.get_dme(params)  # DMEFAC- and DMEQUAD-adjusted
                chi2 = np.sum((delta_dm / dme) ** 2)

            return chi2

    return WidebandTimingModel
def BasisGP(
    priorFunction,
    basisFunction,
    coefficients=False,
    combine=True,
    selection=Selection(selections.no_selection),
    name="",
):
    """Class factory for generic GPs with a basis matrix.

    Returns a ``Signal`` subclass whose basis is built by ``basisFunction``
    and whose prior (phi) is built by ``priorFunction``, split per selection
    mask.  If ``coefficients`` is True, the GP coefficients themselves become
    explicit parameters (hierarchical likelihood) and the returned class
    exposes ``get_delay`` instead of ``get_basis``/``get_phi``.
    """

    class BasisGP(signal_base.Signal):
        signal_type = "basis"
        signal_name = name
        signal_id = name
        basis_combine = combine

        def __init__(self, psr):
            super(BasisGP, self).__init__(psr)
            self.name = self.psrname + "_" + self.signal_id
            self._do_selection(psr, priorFunction, basisFunction, coefficients, selection)

        def _do_selection(self, psr, priorfn, basisfn, coefficients, selection):
            """Instantiate one prior/basis pair per selection mask and collect parameters."""
            sel = selection(psr)

            # sorted keys give a deterministic ordering of masks/slices
            self._keys = sorted(sel.masks.keys())
            self._masks = [sel.masks[key] for key in self._keys]
            self._prior, self._bases = {}, {}
            self._params, self._coefficients = {}, {}

            for key, mask in zip(self._keys, self._masks):
                # parameter-name prefix: "<psr>_<signal>_<selection key>", skipping empties
                pnames = [psr.name, name, key]
                pname = "_".join([n for n in pnames if n])

                self._prior[key] = priorfn(pname, psr=psr)
                self._bases[key] = basisfn(pname, psr=psr)

                for par in itertools.chain(self._prior[key]._params.values(), self._bases[key]._params.values()):
                    self._params[par.name] = par

            if coefficients:
                # we can only create GPCoefficients parameters if the basis
                # can be constructed with default arguments
                # (and does not change size)
                self._construct_basis()

                for key in self._keys:
                    pname = "_".join([n for n in [psr.name, name, key] if n])

                    chain = itertools.chain(self._prior[key]._params.values(), self._bases[key]._params.values())
                    priorargs = {par.name: self._params[par.name] for par in chain}

                    logprior = parameter.Function(functools.partial(self._get_coefficient_logprior, key), **priorargs)

                    # number of coefficients equals the width of this key's basis slice
                    size = self._slices[key].stop - self._slices[key].start

                    cpar = parameter.GPCoefficients(logprior=logprior, size=size)(pname + "_coefficients")

                    self._coefficients[key] = cpar
                    self._params[cpar.name] = cpar

        @property
        def basis_params(self):
            """Get any varying basis parameters."""
            ret = []
            for basis in self._bases.values():
                ret.extend([pp.name for pp in basis.params])
            return ret

        # NOTE: the mutable default params={} is the project-wide convention
        # used with cache_call; the dict is never mutated here.
        @signal_base.cache_call("basis_params")
        def _construct_basis(self, params={}):
            """Assemble the block-diagonal basis matrix and per-key column slices."""
            basis, self._labels = {}, {}
            for key, mask in zip(self._keys, self._masks):
                basis[key], self._labels[key] = self._bases[key](params=params, mask=mask)

            nc = sum(F.shape[1] for F in basis.values())
            self._basis = np.zeros((len(self._masks[0]), nc))

            # TODO: should this be defined here? it will cache phi
            self._phi = KernelMatrix(nc)

            self._slices = {}
            nctot = 0
            for key, mask in zip(self._keys, self._masks):
                Fmat = basis[key]
                nn = Fmat.shape[1]
                self._basis[mask, nctot : nn + nctot] = Fmat
                self._slices.update({key: slice(nctot, nn + nctot)})
                nctot += nn

        # this class does different things (and gets different method
        # definitions) if the user wants it to model GP coefficients
        # (e.g., for a hierarchical likelihood) or if they do not
        if coefficients:

            def _get_coefficient_logprior(self, key, c, **params):
                """Gaussian log-prior of coefficient vector ``c`` under this key's phi."""
                self._construct_basis(params)

                phi = self._prior[key](self._labels[key], params=params)

                if phi.ndim == 1:
                    # diagonal phi: independent Gaussians
                    return -0.5 * np.sum(c * c / phi) - 0.5 * np.sum(np.log(phi)) - 0.5 * len(phi) * np.log(2 * np.pi)
                    # note: (2*pi)^(n/2) is not in signal_base likelihood
                else:
                    # TO DO: this code could be embedded in KernelMatrix
                    phiinv, logdet = KernelMatrix(phi).inv(logdet=True)
                    return -0.5 * np.dot(c, np.dot(phiinv, c)) - 0.5 * logdet - 0.5 * phi.shape[0] * np.log(2 * np.pi)

            # MV: could assign this to a data member at initialization
            @property
            def delay_params(self):
                return [pp.name for pp in self.params if "_coefficients" in pp.name]

            @signal_base.cache_call(["basis_params", "delay_params"])
            def get_delay(self, params={}):
                """Deterministic delay F @ c with coefficients taken from params (or defaults)."""
                self._construct_basis(params)

                c = np.zeros(self._basis.shape[1])
                for key, slc in self._slices.items():
                    p = self._coefficients[key]
                    c[slc] = params[p.name] if p.name in params else p.value

                return np.dot(self._basis, c)

            # with explicit coefficients the GP is fully deterministic,
            # so there is no marginalized basis/phi to report
            def get_basis(self, params={}):
                return None

            def get_phi(self, params):
                return None

            def get_phiinv(self, params):
                return None

        else:

            @property
            def delay_params(self):
                return []

            def get_delay(self, params={}):
                return 0

            def get_basis(self, params={}):
                self._construct_basis(params)
                return self._basis

            def get_phi(self, params):
                """Fill the cached KernelMatrix slice-by-slice from each key's prior."""
                self._construct_basis(params)
                for key, slc in self._slices.items():
                    phislc = self._prior[key](self._labels[key], params=params)
                    self._phi = self._phi.set(phislc, slc)
                return self._phi

            def get_phiinv(self, params):
                return self.get_phi(params).inv()

    return BasisGP
noisefiles = sorted(glob.glob(noisepath + '*.txt')) noisefiles = [ x for x in noisefiles if x.split('/')[-1].split('_')[0] in psrlist ] noise_params = {} for nf in noisefiles: noise_params.update(get_noise_from_pal2(nf)) #Load pulsars from pickle file with open(psr_pickle_file, 'rb') as psrfile: psrs = pickle.load(psrfile) psrfile.close() # find the maximum time span to set GW frequency sampling selection = Selection(selections.by_backend) tmin = [p.toas.min() for p in psrs] tmax = [p.toas.max() for p in psrs] Tspan = np.max(tmax) - np.min(tmin) ##### parameters and priors ##### # white noise parameters ''' efac = parameter.Uniform(0.5,4.0) log10_equad = parameter.Uniform(-10,-5) log10_ecorr = parameter.Uniform(-10,-5) ''' efac = parameter.Constant() log10_equad = parameter.Constant()
def _ecorr_test_ipta(self, method="sparse"):
    """Test of sparse/sherman-morrison ecorr signal and solve methods."""
    selection = Selection(selections.nanograv_backends)

    # model: global EFAC, per-(NANOGrav-)backend ECORR, timing model
    efac = parameter.Uniform(0.1, 5)
    ecorr = parameter.Uniform(-10, -5)
    ef = white_signals.MeasurementNoise(efac=efac)
    ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection, method=method)
    tm = gp_signals.TimingModel()
    s = ef + ec + tm
    m = s(self.ipsr)

    # set parameters
    efacs = [1.3]
    ecorrs = [-6.1, -6.2, -6.3, -6.4, -7.2, -8.4, -7.1, -7.9]
    params = {
        "J1713+0747_efac": efacs[0],
        "J1713+0747_ASP-L_log10_ecorr": ecorrs[0],
        "J1713+0747_ASP-S_log10_ecorr": ecorrs[1],
        "J1713+0747_GASP-8_log10_ecorr": ecorrs[2],
        "J1713+0747_GASP-L_log10_ecorr": ecorrs[3],
        "J1713+0747_GUPPI-8_log10_ecorr": ecorrs[4],
        "J1713+0747_GUPPI-L_log10_ecorr": ecorrs[5],
        "J1713+0747_PUPPI-L_log10_ecorr": ecorrs[6],
        "J1713+0747_PUPPI-S_log10_ecorr": ecorrs[7],
    }

    # get EFAC Nvec
    nvec0 = efacs[0] ** 2 * self.ipsr.toaerrs ** 2

    # get the basis: one quantization matrix per recognized backend;
    # non-NANOGrav backends are skipped (they carry no ECORR here)
    flags = ["ASP-L", "ASP-S", "GASP-8", "GASP-L", "GUPPI-8", "GUPPI-L", "PUPPI-L", "PUPPI-S"]
    bflags = self.ipsr.backend_flags

    Umats = []
    for flag in np.unique(bflags):
        if flag in flags:
            mask = bflags == flag
            Umats.append(utils.create_quantization_matrix(self.ipsr.toas[mask], nmin=2)[0])

    nepoch = sum(U.shape[1] for U in Umats)
    U = np.zeros((len(self.ipsr.toas), nepoch))
    jvec = np.zeros(nepoch)
    # ct counts only matched backends, so Umats[ct]/ecorrs[ct] stay aligned
    # with the alphabetical np.unique ordering of the recognized flags
    netot, ct = 0, 0
    for flag in np.unique(bflags):
        if flag in flags:
            mask = bflags == flag
            nn = Umats[ct].shape[1]
            U[mask, netot : nn + netot] = Umats[ct]
            jvec[netot : nn + netot] = 10 ** (2 * ecorrs[ct])
            netot += nn
            ct += 1

    # get covariance matrix (Woodbury helper = reference implementation)
    wd = Woodbury(nvec0, U, jvec)

    # test
    msg = "EFAC/ECORR {} logdet incorrect.".format(method)
    N = m.get_ndiag(params)
    assert np.allclose(N.solve(self.ipsr.residuals, logdet=True)[1], wd.logdet(), rtol=1e-8), msg

    msg = "EFAC/ECORR {} D1 solve incorrect.".format(method)
    assert np.allclose(N.solve(self.ipsr.residuals), wd.solve(self.ipsr.residuals), rtol=1e-8), msg

    msg = "EFAC/ECORR {} 1D1 solve incorrect.".format(method)
    assert np.allclose(
        N.solve(self.ipsr.residuals, left_array=self.ipsr.residuals),
        np.dot(self.ipsr.residuals, wd.solve(self.ipsr.residuals)),
        rtol=1e-8,
    ), msg

    msg = "EFAC/ECORR {} 2D1 solve incorrect.".format(method)
    T = m.get_basis()
    assert np.allclose(
        N.solve(self.ipsr.residuals, left_array=T),
        np.dot(T.T, wd.solve(self.ipsr.residuals)),
        rtol=1e-8,
    ), msg

    msg = "EFAC/ECORR {} 2D2 solve incorrect.".format(method)
    assert np.allclose(N.solve(T, left_array=T), np.dot(T.T, wd.solve(T)), rtol=1e-8), msg
def _ecorr_test(self, method="sparse"):
    """Test of sparse/sherman-morrison ecorr signal and solve methods."""
    backend_sel = Selection(selections.by_backend)

    # white-noise signals (per-backend EFAC + ECORR) plus a timing model
    efac = parameter.Uniform(0.1, 5)
    ecorr = parameter.Uniform(-10, -5)
    wn = white_signals.MeasurementNoise(efac=efac, selection=backend_sel)
    jn = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=backend_sel, method=method)
    model = (wn + jn + gp_signals.TimingModel())(self.psr)

    # fixed parameter values, one per backend (backends listed in sorted order)
    efacs = [1.3, 1.4, 1.5, 1.6]
    ecorrs = [-6.1, -6.2, -6.3, -6.4]
    backends = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
    params = {}
    for be, ef_val, ec_val in zip(backends, efacs, ecorrs):
        params["B1855+09_{}_efac".format(be)] = ef_val
        params["B1855+09_{}_log10_ecorr".format(be)] = ec_val

    # hand-computed EFAC-scaled white-noise variances
    white_var = np.zeros_like(self.psr.toas)
    for ii, be in enumerate(np.unique(backends)):
        sel_idx = be == self.psr.backend_flags
        white_var[sel_idx] = efacs[ii] ** 2 * self.psr.toaerrs[sel_idx] ** 2

    # quantization (epoch) matrices, one per backend flag
    bflags = self.psr.backend_flags
    quant = [
        utils.create_quantization_matrix(self.psr.toas[bflags == be], nmin=2)[0] for be in np.unique(bflags)
    ]

    n_epochs = sum(q.shape[1] for q in quant)
    Umat = np.zeros((len(self.psr.toas), n_epochs))
    jitter = np.zeros(n_epochs)
    col = 0
    for ii, be in enumerate(np.unique(bflags)):
        ncols = quant[ii].shape[1]
        Umat[bflags == be, col : col + ncols] = quant[ii]
        jitter[col : col + ncols] = 10 ** (2 * ecorrs[ii])
        col += ncols

    # reference covariance via the Woodbury helper
    wood = Woodbury(white_var, Umat, jitter)

    ndiag = model.get_ndiag(params)

    msg = "EFAC/ECORR {} logdet incorrect.".format(method)
    assert np.allclose(ndiag.solve(self.psr.residuals, logdet=True)[1], wood.logdet(), rtol=1e-10), msg

    msg = "EFAC/ECORR {} D1 solve incorrect.".format(method)
    assert np.allclose(ndiag.solve(self.psr.residuals), wood.solve(self.psr.residuals), rtol=1e-10), msg

    msg = "EFAC/ECORR {} 1D1 solve incorrect.".format(method)
    assert np.allclose(
        ndiag.solve(self.psr.residuals, left_array=self.psr.residuals),
        np.dot(self.psr.residuals, wood.solve(self.psr.residuals)),
        rtol=1e-10,
    ), msg

    msg = "EFAC/ECORR {} 2D1 solve incorrect.".format(method)
    Tmat = model.get_basis()
    assert np.allclose(
        ndiag.solve(self.psr.residuals, left_array=Tmat),
        np.dot(Tmat.T, wood.solve(self.psr.residuals)),
        rtol=1e-10,
    ), msg

    msg = "EFAC/ECORR {} 2D2 solve incorrect.".format(method)
    assert np.allclose(ndiag.solve(Tmat, left_array=Tmat), np.dot(Tmat.T, wood.solve(Tmat)), rtol=1e-10), msg
def test_pta(self):
    """Compare two-pulsar PTA ndiag/phi/basis against hand-built matrices."""
    # get parameters from PAL2 style noise files
    params = get_noise_from_pal2(datadir + "/B1855+09_noise.txt")
    params2 = get_noise_from_pal2(datadir + "/J1909-3744_noise.txt")
    params.update(params2)

    # setup basic model with constant (noise-file) parameters
    efac = parameter.Constant()
    equad = parameter.Constant()
    ecorr = parameter.Constant()
    log10_A = parameter.Constant()
    gamma = parameter.Constant()

    selection = Selection(selections.by_backend)

    ms = white_signals.MeasurementNoise(efac=efac, log10_t2equad=equad, selection=selection)
    ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection)
    pl = utils.powerlaw(log10_A=log10_A, gamma=gamma)
    rn = gp_signals.FourierBasisGP(pl)

    s = ms + ec + rn

    pta = s(self.psrs[0]) + s(self.psrs[1])

    # set parameters
    pta.set_default_params(params)

    # get parameters: per-pulsar lists of backend-sorted values
    # (sorted(params.keys()) matches the backend ordering used below)
    efacs, equads, ecorrs, log10_A, gamma = [], [], [], [], []
    for pname in [p.name for p in self.psrs]:
        efacs.append([params[key] for key in sorted(params.keys()) if "efac" in key and pname in key])
        equads.append([params[key] for key in sorted(params.keys()) if "equad" in key and pname in key])
        ecorrs.append([params[key] for key in sorted(params.keys()) if "ecorr" in key and pname in key])
        log10_A.append(params["{}_red_noise_log10_A".format(pname)])
        gamma.append(params["{}_red_noise_gamma".format(pname)])

    # correct value: build the expected covariance per pulsar by hand
    tflags = [sorted(list(np.unique(p.backend_flags))) for p in self.psrs]
    cfs, logdets, phis = [], [], []
    for ii, (psr, flags) in enumerate(zip(self.psrs, tflags)):
        # white-noise diagonal: EFAC^2 * (sigma^2 + T2EQUAD^2)
        nvec0 = np.zeros_like(psr.toas)
        for ct, flag in enumerate(flags):
            ind = psr.backend_flags == flag
            nvec0[ind] = efacs[ii][ct] ** 2 * (
                psr.toaerrs[ind] ** 2 + 10 ** (2 * equads[ii][ct]) * np.ones(np.sum(ind))
            )

        # get the basis (quantization matrices per backend)
        bflags = psr.backend_flags
        Umats = []
        for flag in np.unique(bflags):
            mask = bflags == flag
            Umats.append(utils.create_quantization_matrix(psr.toas[mask])[0])
        nepoch = sum(U.shape[1] for U in Umats)
        U = np.zeros((len(psr.toas), nepoch))
        jvec = np.zeros(nepoch)
        netot = 0
        for ct, flag in enumerate(np.unique(bflags)):
            mask = bflags == flag
            nn = Umats[ct].shape[1]
            U[mask, netot : nn + netot] = Umats[ct]
            jvec[netot : nn + netot] = 10 ** (2 * ecorrs[ii][ct])
            netot += nn

        # get covariance matrix and its Cholesky factor/logdet
        cov = np.diag(nvec0) + np.dot(U * jvec[None, :], U.T)
        cf = sl.cho_factor(cov)
        logdet = np.sum(2 * np.log(np.diag(cf[0])))
        cfs.append(cf)
        logdets.append(logdet)

        # expected red-noise PSD on the default 20-mode Fourier basis
        F, f2 = utils.createfourierdesignmatrix_red(psr.toas, nmodes=20)
        phi = utils.powerlaw(f2, log10_A=log10_A[ii], gamma=gamma[ii])
        phis.append(phi)

    # tests: compare PTA products against the hand-built references
    Ns = pta.get_ndiag(params)
    pphis = pta.get_phi(params)
    pphiinvs = pta.get_phiinv(params)
    Ts = pta.get_basis(params)
    zipped = zip(logdets, cfs, phis, self.psrs, Ns, pphis, pphiinvs, Ts)
    for logdet, cf, phi, psr, N, pphi, pphiinv, T in zipped:
        msg = "EFAC/ECORR logdet incorrect."
        assert np.allclose(N.solve(psr.residuals, logdet=True)[1], logdet, rtol=1e-10), msg

        msg = "EFAC/ECORR D1 solve incorrect."
        assert np.allclose(N.solve(psr.residuals), sl.cho_solve(cf, psr.residuals), rtol=1e-10), msg

        msg = "EFAC/ECORR 1D1 solve incorrect."
        assert np.allclose(
            N.solve(psr.residuals, left_array=psr.residuals),
            np.dot(psr.residuals, sl.cho_solve(cf, psr.residuals)),
            rtol=1e-10,
        ), msg

        msg = "EFAC/ECORR 2D1 solve incorrect."
        assert np.allclose(
            N.solve(psr.residuals, left_array=T),
            np.dot(T.T, sl.cho_solve(cf, psr.residuals)),
            rtol=1e-10,
        ), msg

        msg = "EFAC/ECORR 2D2 solve incorrect."
        assert np.allclose(N.solve(T, left_array=T), np.dot(T.T, sl.cho_solve(cf, T)), rtol=1e-10), msg

        # spectrum test
        msg = "Spectrum incorrect for GP Fourier signal."
        assert np.all(pphi == phi), msg

        # inverse spectrum test
        msg = "Spectrum inverse incorrect for GP Fourier signal."
        assert np.all(pphiinv == 1 / phi), msg
def compute_like(self, npsrs=1, inc_corr=False, inc_kernel=False):
    """Compare pta.get_lnlikelihood against a hand-computed marginalized likelihood.

    npsrs:      1 or 2 pulsars.
    inc_corr:   include an HD-correlated common red-noise process.
    inc_kernel: bool or per-pulsar list; include an SE-kernel basis GP.
    """
    # get parameters from PAL2 style noise files
    params = get_noise_from_pal2(datadir + "/B1855+09_noise.txt")
    params2 = get_noise_from_pal2(datadir + "/J1909-3744_noise.txt")
    params.update(params2)

    psrs = self.psrs if npsrs == 2 else [self.psrs[0]]

    if inc_corr:
        params.update({"GW_gamma": 4.33, "GW_log10_A": -15.0})

    # find the maximum time span to set GW frequency sampling
    tmin = [p.toas.min() for p in psrs]
    tmax = [p.toas.max() for p in psrs]
    Tspan = np.max(tmax) - np.min(tmin)

    # setup basic model
    efac = parameter.Constant()
    equad = parameter.Constant()
    ecorr = parameter.Constant()
    log10_A = parameter.Constant()
    gamma = parameter.Constant()

    selection = Selection(selections.by_backend)

    ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
    eq = white_signals.EquadNoise(log10_equad=equad, selection=selection)
    ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection)
    pl = utils.powerlaw(log10_A=log10_A, gamma=gamma)
    rn = gp_signals.FourierBasisGP(pl)
    orf = utils.hd_orf()
    crn = gp_signals.FourierBasisCommonGP(pl, orf, components=20, name="GW", Tspan=Tspan)
    tm = gp_signals.TimingModel()

    # SE-kernel GP on weekly-averaged epochs
    log10_sigma = parameter.Uniform(-10, -5)
    log10_lam = parameter.Uniform(np.log10(86400), np.log10(1500 * 86400))
    basis = create_quant_matrix(dt=7 * 86400)
    prior = se_kernel(log10_sigma=log10_sigma, log10_lam=log10_lam)
    se = gp_signals.BasisGP(prior, basis, name="se")

    # set up kernel stuff: normalize inc_kernel to a per-pulsar list
    if isinstance(inc_kernel, bool):
        inc_kernel = [inc_kernel] * npsrs

    if inc_corr:
        s = ef + eq + ec + rn + crn + tm
    else:
        s = ef + eq + ec + rn + tm

    models = []
    for ik, psr in zip(inc_kernel, psrs):
        snew = s + se if ik else s
        models.append(snew(psr))

    pta = signal_base.PTA(models)

    # set parameters
    pta.set_default_params(params)

    # SE kernel parameters
    log10_sigmas, log10_lams = [-7.0, -6.5], [7.0, 6.5]
    params.update(
        {
            "B1855+09_se_log10_lam": log10_lams[0],
            "B1855+09_se_log10_sigma": log10_sigmas[0],
            "J1909-3744_se_log10_lam": log10_lams[1],
            "J1909-3744_se_log10_sigma": log10_sigmas[1],
        }
    )

    # get parameters (backend-sorted per pulsar, matching loops below)
    efacs, equads, ecorrs, log10_A, gamma = [], [], [], [], []
    lsig, llam = [], []
    for pname in [p.name for p in psrs]:
        efacs.append([params[key] for key in sorted(params.keys()) if "efac" in key and pname in key])
        equads.append([params[key] for key in sorted(params.keys()) if "equad" in key and pname in key])
        ecorrs.append([params[key] for key in sorted(params.keys()) if "ecorr" in key and pname in key])
        log10_A.append(params["{}_red_noise_log10_A".format(pname)])
        gamma.append(params["{}_red_noise_gamma".format(pname)])
        lsig.append(params["{}_se_log10_sigma".format(pname)])
        llam.append(params["{}_se_log10_lam".format(pname)])
    GW_gamma = 4.33
    GW_log10_A = -15.0

    # correct value: hand-build per-pulsar N, T, and phi
    tflags = [sorted(list(np.unique(p.backend_flags))) for p in psrs]
    cfs, logdets, phis, Ts = [], [], [], []
    for ii, (ik, psr, flags) in enumerate(zip(inc_kernel, psrs, tflags)):
        # white-noise diagonal: EFAC^2 * sigma^2 + EQUAD^2
        nvec0 = np.zeros_like(psr.toas)
        for ct, flag in enumerate(flags):
            ind = psr.backend_flags == flag
            nvec0[ind] = efacs[ii][ct] ** 2 * psr.toaerrs[ind] ** 2
            nvec0[ind] += 10 ** (2 * equads[ii][ct]) * np.ones(np.sum(ind))

        # get the basis (ECORR quantization matrices per backend)
        bflags = psr.backend_flags
        Umats = []
        for flag in np.unique(bflags):
            mask = bflags == flag
            Umats.append(utils.create_quantization_matrix(psr.toas[mask])[0])
        nepoch = sum(U.shape[1] for U in Umats)
        U = np.zeros((len(psr.toas), nepoch))
        jvec = np.zeros(nepoch)
        netot = 0
        for ct, flag in enumerate(np.unique(bflags)):
            mask = bflags == flag
            nn = Umats[ct].shape[1]
            U[mask, netot : nn + netot] = Umats[ct]
            jvec[netot : nn + netot] = 10 ** (2 * ecorrs[ii][ct])
            netot += nn

        # get covariance matrix (white noise + ECORR) and factorize
        cov = np.diag(nvec0) + np.dot(U * jvec[None, :], U.T)
        cf = sl.cho_factor(cov)
        logdet = np.sum(2 * np.log(np.diag(cf[0])))
        cfs.append(cf)
        logdets.append(logdet)

        F, f2 = utils.createfourierdesignmatrix_red(psr.toas, nmodes=20, Tspan=Tspan)
        # unit-norm design-matrix columns, as the timing-model signal does
        Mmat = psr.Mmat.copy()
        norm = np.sqrt(np.sum(Mmat ** 2, axis=0))
        Mmat /= norm

        U2, avetoas = create_quant_matrix(psr.toas, dt=7 * 86400)
        if ik:
            T = np.hstack((F, Mmat, U2))
        else:
            T = np.hstack((F, Mmat))
        Ts.append(T)

        # prior: red noise (+GW), huge variance (1e40) for timing parameters
        phi = utils.powerlaw(f2, log10_A=log10_A[ii], gamma=gamma[ii])
        if inc_corr:
            phigw = utils.powerlaw(f2, log10_A=GW_log10_A, gamma=GW_gamma)
        else:
            phigw = np.zeros(40)
        K = se_kernel(avetoas, log10_sigma=log10_sigmas[ii], log10_lam=log10_lams[ii])
        k = np.diag(np.concatenate((phi + phigw, np.ones(Mmat.shape[1]) * 1e40)))
        if ik:
            k = sl.block_diag(k, K)
        phis.append(k)

    # manually compute loglike (Woodbury-style marginalized Gaussian)
    loglike = 0
    TNrs, TNTs = [], []
    for ct, psr in enumerate(psrs):
        TNrs.append(np.dot(Ts[ct].T, sl.cho_solve(cfs[ct], psr.residuals)))
        TNTs.append(np.dot(Ts[ct].T, sl.cho_solve(cfs[ct], Ts[ct])))
        loglike += -0.5 * (np.dot(psr.residuals, sl.cho_solve(cfs[ct], psr.residuals)) + logdets[ct])

    TNr = np.concatenate(TNrs)
    phi = sl.block_diag(*phis)

    if inc_corr:
        # cross-pulsar GW blocks: HD correlation times the GW spectrum
        hd = utils.hd_orf(psrs[0].pos, psrs[1].pos)
        phi[len(phis[0]) : len(phis[0]) + 40, :40] = np.diag(phigw * hd)
        phi[:40, len(phis[0]) : len(phis[0]) + 40] = np.diag(phigw * hd)

    cf = sl.cho_factor(phi)
    phiinv = sl.cho_solve(cf, np.eye(phi.shape[0]))
    logdetphi = np.sum(2 * np.log(np.diag(cf[0])))

    Sigma = sl.block_diag(*TNTs) + phiinv
    cf = sl.cho_factor(Sigma)
    expval = sl.cho_solve(cf, TNr)
    logdetsigma = np.sum(2 * np.log(np.diag(cf[0])))

    loglike -= 0.5 * (logdetphi + logdetsigma)
    loglike += 0.5 * np.dot(TNr, expval)

    # compare against all three phiinv computation methods
    method = ["partition", "sparse", "cliques"]
    for mth in method:
        eloglike = pta.get_lnlikelihood(params, phiinv_method=mth)
        msg = "Incorrect like for npsr={}, phiinv={}".format(npsrs, mth)
        assert np.allclose(eloglike, loglike), msg
def test_single_pulsar(self):
    """Compare single-pulsar ndiag/phi/basis products against hand-built references."""
    # get parameters from PAL2 style noise files
    params = get_noise_from_pal2(datadir + "/B1855+09_noise.txt")

    # setup basic model with constant (noise-file) parameters
    efac = parameter.Constant()
    equad = parameter.Constant()
    ecorr = parameter.Constant()
    log10_A = parameter.Constant()
    gamma = parameter.Constant()

    selection = Selection(selections.by_backend)

    ms = white_signals.MeasurementNoise(efac=efac, log10_t2equad=equad, selection=selection)
    ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection)
    pl = utils.powerlaw(log10_A=log10_A, gamma=gamma)
    rn = gp_signals.FourierBasisGP(pl)

    s = ms + ec + rn
    m = s(self.psrs[0])

    # set parameters
    m.set_default_params(params)

    # get parameters (sorted keys match the backend ordering below)
    efacs = [params[key] for key in sorted(params.keys()) if "efac" in key]
    equads = [params[key] for key in sorted(params.keys()) if "equad" in key]
    ecorrs = [params[key] for key in sorted(params.keys()) if "ecorr" in key]
    log10_A = params["B1855+09_red_noise_log10_A"]
    gamma = params["B1855+09_red_noise_gamma"]

    # correct value: EFAC^2 * (sigma^2 + T2EQUAD^2) per backend
    flags = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
    nvec0 = np.zeros_like(self.psrs[0].toas)
    for ct, flag in enumerate(np.unique(flags)):
        ind = flag == self.psrs[0].backend_flags
        nvec0[ind] = efacs[ct] ** 2 * (
            self.psrs[0].toaerrs[ind] ** 2 + 10 ** (2 * equads[ct]) * np.ones(np.sum(ind))
        )

    # get the basis (quantization matrices per backend)
    bflags = self.psrs[0].backend_flags
    Umats = []
    for flag in np.unique(bflags):
        mask = bflags == flag
        Umats.append(utils.create_quantization_matrix(self.psrs[0].toas[mask])[0])
    nepoch = sum(U.shape[1] for U in Umats)
    U = np.zeros((len(self.psrs[0].toas), nepoch))
    jvec = np.zeros(nepoch)
    netot = 0
    for ct, flag in enumerate(np.unique(bflags)):
        mask = bflags == flag
        nn = Umats[ct].shape[1]
        U[mask, netot : nn + netot] = Umats[ct]
        jvec[netot : nn + netot] = 10 ** (2 * ecorrs[ct])
        netot += nn

    # get covariance matrix and its Cholesky factor/logdet
    cov = np.diag(nvec0) + np.dot(U * jvec[None, :], U.T)
    cf = sl.cho_factor(cov)
    logdet = np.sum(2 * np.log(np.diag(cf[0])))

    # test
    msg = "EFAC/ECORR logdet incorrect."
    N = m.get_ndiag(params)
    assert np.allclose(N.solve(self.psrs[0].residuals, logdet=True)[1], logdet, rtol=1e-10), msg

    msg = "EFAC/ECORR D1 solve incorrect."
    assert np.allclose(
        N.solve(self.psrs[0].residuals), sl.cho_solve(cf, self.psrs[0].residuals), rtol=1e-10
    ), msg

    msg = "EFAC/ECORR 1D1 solve incorrect."
    assert np.allclose(
        N.solve(self.psrs[0].residuals, left_array=self.psrs[0].residuals),
        np.dot(self.psrs[0].residuals, sl.cho_solve(cf, self.psrs[0].residuals)),
        rtol=1e-10,
    ), msg

    msg = "EFAC/ECORR 2D1 solve incorrect."
    T = m.get_basis(params)
    assert np.allclose(
        N.solve(self.psrs[0].residuals, left_array=T),
        np.dot(T.T, sl.cho_solve(cf, self.psrs[0].residuals)),
        rtol=1e-10,
    ), msg

    msg = "EFAC/ECORR 2D2 solve incorrect."
    assert np.allclose(N.solve(T, left_array=T), np.dot(T.T, sl.cho_solve(cf, T)), rtol=1e-10), msg

    F, f2 = utils.createfourierdesignmatrix_red(self.psrs[0].toas, nmodes=20)

    # spectrum test
    phi = utils.powerlaw(f2, log10_A=log10_A, gamma=gamma)
    msg = "Spectrum incorrect for GP Fourier signal."
    assert np.all(m.get_phi(params) == phi), msg

    # inverse spectrum test
    msg = "Spectrum inverse incorrect for GP Fourier signal."
    assert np.all(m.get_phiinv(params) == 1 / phi), msg
def test_red_noise_add_backend(self):
    """Test that red noise with backend addition only returns independent columns."""
    # set up signals: per-backend red noise plus a common red-noise process
    pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
    selection = Selection(selections.by_backend)
    cpl = utils.powerlaw(
        log10_A=parameter.Uniform(-18, -12)("log10_Agw"), gamma=parameter.Uniform(1, 7)("gamma_gw")
    )

    # parameters (one red-noise amplitude/index per backend, plus common)
    log10_As = [-14, -14.4, -15, -14.8]
    gammas = [2.3, 4.4, 1.8, 5.6]
    log10_Ac, gammac = -15.5, 1.33
    params = {
        "B1855+09_red_noise_430_ASP_gamma": gammas[0],
        "B1855+09_red_noise_430_PUPPI_gamma": gammas[1],
        "B1855+09_red_noise_L-wide_ASP_gamma": gammas[2],
        "B1855+09_red_noise_L-wide_PUPPI_gamma": gammas[3],
        "B1855+09_red_noise_430_ASP_log10_A": log10_As[0],
        "B1855+09_red_noise_430_PUPPI_log10_A": log10_As[1],
        "B1855+09_red_noise_L-wide_ASP_log10_A": log10_As[2],
        "B1855+09_red_noise_L-wide_PUPPI_log10_A": log10_As[3],
        "log10_Agw": log10_Ac,
        "gamma_gw": gammac,
    }

    # sweep component counts and time spans, including mismatched Tspans
    Tmax = self.psr.toas.max() - self.psr.toas.min()
    tpars = [
        (30, 20, Tmax, Tmax),
        (20, 30, Tmax, Tmax),
        (30, 30, Tmax, Tmax),
        (30, 20, Tmax, 1.123 * Tmax),
        (20, 30, Tmax, 1.123 * Tmax),
        (30, 30, 1.123 * Tmax, Tmax),
        (30, 20, None, Tmax),
    ]

    for (nf1, nf2, T1, T2) in tpars:
        rn = gp_signals.FourierBasisGP(spectrum=pl, components=nf1, Tspan=T1, selection=selection)
        crn = gp_signals.FourierBasisGP(spectrum=cpl, components=nf2, Tspan=T2)
        s = rn + crn
        rnm = s(self.psr)

        # get the basis: per-backend Fourier blocks, then the common block
        bflags = self.psr.backend_flags
        Fmats, fs, phis = [], [], []
        F2, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nf2, Tspan=T2)
        p2 = utils.powerlaw(f2, log10_Ac, gammac)
        for ct, flag in enumerate(np.unique(bflags)):
            mask = bflags == flag
            F1, f1 = utils.createfourierdesignmatrix_red(self.psr.toas[mask], nf1, Tspan=T1)
            Fmats.append(F1)
            fs.append(f1)
            phis.append(utils.powerlaw(f1, log10_As[ct], gammas[ct]))

        Fmats.append(F2)
        phis.append(p2)

        nf = sum(F.shape[1] for F in Fmats)
        F = np.zeros((len(self.psr.toas), nf))
        phi = np.hstack([p for p in phis])
        nftot = 0
        for ct, flag in enumerate(np.unique(bflags)):
            mask = bflags == flag
            nn = Fmats[ct].shape[1]
            F[mask, nftot : nn + nftot] = Fmats[ct]
            nftot += nn
        # common red-noise columns occupy the trailing 2*nf2 columns
        F[:, -2 * nf2 :] = F2

        msg = "Combined red noise PSD incorrect "
        msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
        assert np.all(rnm.get_phi(params) == phi), msg

        msg = "Combined red noise PSD inverse incorrect "
        msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
        assert np.all(rnm.get_phiinv(params) == 1 / phi), msg

        msg = "Combined red noise Fmat incorrect "
        msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
        assert np.allclose(F, rnm.get_basis(params)), msg
def test_wideband(self):
    """Check WidebandTimingModel DMX priors and delays against direct computation.

    Fix: the "Not all TOAs are covered by DMX" assertion previously dropped
    its prepared ``msg`` (``assert np.all(check == 1)`` with no message);
    the message is now attached.
    """
    ms = white_signals.MeasurementNoise(selection=Selection(selections.by_backend))

    dm = gp_signals.WidebandTimingModel(
        dmefac=parameter.Uniform(0.9, 1.1),
        dmefac_selection=Selection(selections.by_backend),
        dmjump=parameter.Normal(0, 1),
        dmjump_selection=Selection(selections.by_frontend),
    )

    model = ms + dm
    pta = signal_base.PTA([model(self.psr)])

    ps = parameter.sample(pta.params)
    pta.get_lnlikelihood(ps)

    dmtiming = pta.pulsarmodels[0].signals[1]

    # every TOA must be covered by exactly one DMEFAC/DMJUMP mask
    msg = "DMEFAC masks do not cover the data."
    assert np.all(sum(dmtiming._dmefac_masks) == 1), msg

    msg = "DMJUMP masks do not cover the data."
    assert np.all(sum(dmtiming._dmjump_masks) == 1), msg

    # start with zero DMEFAC and DMJUMP
    p0 = {par.name: (1 if "dmefac" in par.name else 0) for par in dmtiming.params}

    pta.get_lnlikelihood(params=p0)

    phi0 = dmtiming.get_phi(params=p0)
    dl0 = dmtiming.get_delay(params=p0)

    # wideband DM measurements and their uncertainties
    dm_flags, dme_flags = np.array(self.psr.flags["pp_dm"], "d"), np.array(self.psr.flags["pp_dme"], "d")

    delays = np.zeros_like(self.psr.toas)
    check = 0
    for index, par in enumerate(self.psr.fitpars):
        if "DMX" not in par:
            # non-DMX timing parameters get the unbounded 1e40 prior variance
            msg = "Problem with unbound timing parameters"
            assert phi0[index] == 1e40, msg
        else:
            dmx = self.psr.dmx[par]
            which = (dmx["DMXR1"] <= (self.psr.stoas / 86400)) & ((self.psr.stoas / 86400) < dmx["DMXR2"])
            check += which

            # inverse-variance weighted DM average over the DMX window
            avgdm = np.sum(dm_flags[which] / dme_flags[which] ** 2) / np.sum(1.0 / dme_flags[which] ** 2)
            vardm = 1.0 / np.sum(1.0 / dme_flags[which] ** 2)

            msg = "Priors do not match"
            assert np.allclose(vardm, phi0[index]), msg

            delays[which] = (avgdm - self.psr.dm - dmx["DMX"]) / (2.41e-4 * self.psr.freqs[which] ** 2)

    msg = "Not all TOAs are covered by DMX"
    # BUGFIX: msg was previously not attached to this assertion
    assert np.all(check == 1), msg

    msg = "Delays do not match"
    assert np.allclose(dl0, delays), msg

    # sample DMEFACs randomly
    p1 = {par.name: (parameter.sample(par)[par.name] if "dmefac" in par.name else 0) for par in dmtiming.params}

    pta.get_lnlikelihood(params=p1)

    phi1 = dmtiming.get_phi(params=p1)
    dl1 = dmtiming.get_delay(params=p1)

    sel = Selection(selections.by_backend)(self.psr)

    msg = "Problem making selection"
    assert np.all(sum(m for m in sel.masks.values()) == 1), msg

    # scale DM uncertainties by the sampled per-backend DMEFACs
    dme_flags_var = dme_flags.copy()
    for key, mask in sel.masks.items():
        dmefac = p1["J1832-0836_" + key + "_dmefac"]
        dme_flags_var[mask] *= dmefac

    for index, par in enumerate(self.psr.fitpars):
        if "DMX" not in par:
            msg = "Problem with unbound timing parameters"
            assert phi1[index] == 1e40, msg
        else:
            dmx = self.psr.dmx[par]
            which = (dmx["DMXR1"] <= (self.psr.stoas / 86400)) & ((self.psr.stoas / 86400) < dmx["DMXR2"])

            avgdm = np.sum(dm_flags[which] / dme_flags_var[which] ** 2) / np.sum(
                1.0 / dme_flags_var[which] ** 2
            )
            vardm = 1.0 / np.sum(1.0 / dme_flags_var[which] ** 2)

            msg = "Priors do not match"
            assert np.allclose(vardm, phi1[index]), msg

            delays[which] = (avgdm - self.psr.dm - dmx["DMX"]) / (2.41e-4 * self.psr.freqs[which] ** 2)

    msg = "Delays do not match"
    assert np.allclose(dl1, delays), msg