# make sure all pulsar have same reference time tt=[] for p in psr: tt.append(np.min(p.toas)) # find reference time tref = np.min(tt) # now scale pulsar time for p in psr: p.toas -= tref # get G matrices for p in psr: p.G = PALutils.createGmatrix(p.dmatrix) # run Fp statistic to determine starting frequency print 'Running initial Fpstat search' fsearch = np.logspace(-9, -7, 200) fpstat = np.zeros(len(fsearch)) for ii in range(len(fsearch)): fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii]) # determine maximum likelihood frequency fmaxlike = fsearch[np.argmax(fpstat)] print 'Maximum likelihood from f-stat search = {0}\n'.format(fmaxlike) # prior ranges
def addInverseCovFromNoiseFile(self, parfile, timfile, noisefile, DMOFF=None, DMXOFF=None, dailyAverage=False):
    """
    Add noise covariance matrix after timing model subtraction.

    Reads maximum-likelihood noise parameters from a text file, builds the
    red + white noise covariance matrix projected orthogonal to the timing
    model, and stores its pseudo-inverse together with the noise parameters
    in the pulsar's HDF5 group.

    @param parfile: path to the tempo2 par file
    @param timfile: path to the tempo2 tim file
    @param noisefile: text file of whitespace-separated "key ... value"
                      lines; Amp, gam, efac and equad are required, while
                      fH, tau, DMAmp and DMgam are optional
    @param DMOFF: if not None, turn off DMMODEL fitting
    @param DMXOFF: if not None, turn off DMX fitting and turn DM fitting on
    @param dailyAverage: if True, use daily-averaged TOAs and design matrix
    """

    # Check whether the two files exist
    if not os.path.isfile(parfile) or not os.path.isfile(timfile):
        raise IOError("Cannot find parfile (%s) or timfile (%s)!" % (parfile, timfile))

    assert self.filename is not None, "ERROR: HDF5 file not set!"

    # 'a' means: read/write if exists, create otherwise
    self.h5file = h5.File(self.filename, 'a')

    # The Data subgroup must already exist
    if "Data" in self.h5file:
        datagroup = self.h5file["Data"]
    else:
        raise IOError("Cannot add noise parameters if Data group does not exist!")

    # Load pulsar data from the JPL Cython tempo2 library
    t2pulsar = t2.tempopulsar(parfile, timfile)

    # turn off DMMODEL fitting
    if DMOFF is not None:
        t2pulsar['DMMODEL'].fit = False

    # turn off DMX fitting and turn plain DM fitting on instead
    if DMXOFF is not None:
        DMXFlag = False
        print('Turning off DMX fitting and turning DM fitting on')
        for par in t2pulsar.pars:
            if 'DMX' in par:
                t2pulsar[par].fit = False
                t2pulsar['DM'].fit = True
                DMXFlag = True
        if not DMXFlag:
            print('NO DMX for pulsar {0}'.format(t2pulsar.name))

    # refit 5 times to make sure we are converged
    t2pulsar.fit(iters=5)

    # The Pulsars subgroup must already exist
    if "Pulsars" in datagroup:
        pulsarsgroup = datagroup["Pulsars"]
    else:
        raise IOError("Cannot add noise parameters if pulsar group does not exist!")

    # Look up the name of the pulsar, and see if it exists
    if t2pulsar.name not in pulsarsgroup:
        raise IOError("%s must already exists in %s to add noise parameters!"
                      % (t2pulsar.name, self.filename))

    pulsarsgroup = pulsarsgroup[t2pulsar.name]

    # first create G matrix from design matrix and toas
    designmatrix = np.double(t2pulsar.designmatrix())
    toas = np.double(t2pulsar.toas()*86400)    # presumably MJD days -> seconds; confirm against tempo2
    errs = np.double(t2pulsar.toaerrs*1e-6)    # presumably microseconds -> seconds

    # if doing daily averaging
    if dailyAverage:

        # get average quantities
        toas, qmatrix, errs, dmatrix, freqs, bands = PALutils.dailyAverage(t2pulsar)

        # construct new daily averaged residuals and designmatrix
        toas *= 86400
        designmatrix = np.dot(qmatrix, dmatrix)

    G = PALutils.createGmatrix(designmatrix)

    # create matrix of time lags
    tm = PALutils.createTimeLags(toas, toas, round=True)

    # optional parameters default to None; the required ones (Amp, gam,
    # efac, equad) will raise a NameError below if missing from the file
    fH = None
    tau = None
    DMAmp = None
    DMgam = None

    # parse the noise file; use a context manager so the handle is always
    # closed (the original opened it into the builtin name 'file' and
    # leaked the handle)
    with open(noisefile, 'r') as nfile:
        for line in nfile:
            key = line.split()[0]

            # get amplitude
            if "Amp" == key:
                Amp = float(line.split()[-1])

            # get spectral index
            elif "gam" == key:
                gam = float(line.split()[-1])

            # get efac
            elif "efac" == key:
                efac = float(line.split()[-1])

            # get equad
            elif "equad" == key:
                equad = float(line.split()[-1])

            # get high frequency cutoff if available
            elif "fH" == key:
                fH = float(line.split()[-1])

            # get correlation time scale if available
            elif "tau" == key:
                tau = float(line.split()[-1])

            # get DM Amplitude if available
            elif "DMAmp" == key:
                DMAmp = float(line.split()[-1])

            # get DM Spectral Index if available
            elif "DMgam" == key:
                DMgam = float(line.split()[-1])

    # construct red and white noise covariance matrices
    red = PALutils.createRedNoiseCovarianceMatrix(tm, Amp, gam, fH=fH)
    white = PALutils.createWhiteNoiseCovarianceMatrix(errs, efac, equad, tau=tau)

    # construct post timing model marginalization covariance matrix
    cov = red + white
    pcov = np.dot(G.T, np.dot(cov, G))

    # finally construct "inverse": G (G^T C G)^-1 G^T
    invCov = np.dot(G, np.dot(np.linalg.inv(pcov), G.T))

    # create dataset for inverse covariance matrix
    pulsarsgroup.create_dataset('invCov', data=invCov)

    # create dataset for G matrix
    pulsarsgroup.create_dataset('Gmatrix', data=G)

    # record noise parameter values
    pulsarsgroup.create_dataset('Amp', data=Amp)
    pulsarsgroup.create_dataset('gam', data=gam)
    pulsarsgroup.create_dataset('efac', data=efac)
    pulsarsgroup.create_dataset('equad', data=equad)
    if fH is not None:
        pulsarsgroup.create_dataset('fH', data=fH)
    if tau is not None:
        pulsarsgroup.create_dataset('tau', data=tau)
    if DMAmp is not None:
        pulsarsgroup.create_dataset('DMAmp', data=DMAmp)
    if DMgam is not None:
        pulsarsgroup.create_dataset('DMgam', data=DMgam)

    # Close the hdf5 file
    self.h5file.close()
def __init__(self, pulsargroup, addNoise=False, addGmatrix=True):
    """
    Fill in pulsar attributes needed for GW analysis from an HDF5
    pulsar group.

    @param pulsargroup: h5py group holding the datasets for one pulsar
    @param addNoise: if True, also read the pre-computed inverse
                     covariance matrix ('invCov') when present
    @param addGmatrix: if True, build the G matrix from the design
                       matrix when 'designmatrix' is present
    """

    # optional attributes default to None so we can detect missing data
    self.dist = None
    self.distErr = None
    self.fH = None

    # loop though keys in pulsargroup and fill in psr attributes
    # that are needed for GW analysis
    for key in pulsargroup:

        # look for TOAs
        if key == "TOAs":
            self.toas = pulsargroup[key].value

        # residuals
        elif key == "residuals":
            self.res = pulsargroup[key].value

        # toa error bars
        elif key == "toaErr":
            self.err = pulsargroup[key].value

        # frequencies in Hz
        elif key == "freqs":
            self.freqs = pulsargroup[key].value

        # design matrix (and derived G matrix)
        elif key == "designmatrix":
            self.dmatrix = pulsargroup[key].value
            self.ntoa, self.nfit = self.dmatrix.shape
            if addGmatrix:
                self.G = PALutils.createGmatrix(self.dmatrix)

        # pulsar name (original comment said "design matrix" — wrong)
        elif key == "pname":
            self.name = pulsargroup[key].value

        # pulsar distance in kpc
        elif key == "dist":
            self.dist = pulsargroup[key].value

        # pulsar distance uncertainty in kpc
        elif key == "distErr":
            self.distErr = pulsargroup[key].value

        # right ascension and declination from the timing-model parameters
        elif key == 'tmp_name':
            par_names = list(pulsargroup[key].value)
            for ct, name in enumerate(par_names):

                # right ascension and phi (azimuthal angle)
                if name == "RAJ":
                    self.ra = pulsargroup["tmp_valpost"].value[ct]
                    self.phi = self.ra

                # declination and theta (polar angle)
                if name == "DECJ":
                    self.dec = pulsargroup["tmp_valpost"].value[ct]
                    self.theta = np.pi/2 - self.dec

        # inverse covariance matrix (only loaded on request)
        elif key == "invCov":
            if addNoise:
                self.invCov = pulsargroup[key].value

        ## noise parameters ##

        # red noise amplitude
        elif key == "Amp":
            self.Amp = pulsargroup[key].value

        # red noise spectral index
        elif key == "gam":
            self.gam = pulsargroup[key].value

        # efac
        elif key == "efac":
            self.efac = pulsargroup[key].value

        # equad
        elif key == "equad":
            self.equad = pulsargroup[key].value

        # high-frequency cutoff
        elif key == "fH":
            self.fH = pulsargroup[key].value

        # NOTE(review): the original chain repeated the "distErr" branch
        # after "fH"; it was unreachable dead code and has been removed.

    # fall back to fiducial values when no distance info is available
    if self.dist is None:
        print('WARNING: No distance info, using d = 1 kpc')
        self.dist = 1.0

    if self.distErr is None:
        print('WARNING: No distance error info, using sigma_d = 0.1 kpc')
        self.distErr = 0.1
def __init__(self, pulsargroup, addNoise=False, addGmatrix=True):
    """
    Fill in pulsar attributes needed for GW analysis from an HDF5
    pulsar group.

    @param pulsargroup: h5py group holding the datasets for one pulsar
    @param addNoise: if True, also read the pre-computed inverse
                     covariance matrix ('invCov') when present
    @param addGmatrix: if True, build the G matrix from the design
                       matrix when 'designmatrix' is present
    """

    # optional attributes default to None so we can detect missing data
    self.dist = None
    self.distErr = None
    self.fH = None

    # loop though keys in pulsargroup and fill in psr attributes
    # that are needed for GW analysis
    for key in pulsargroup:

        # look for TOAs
        if key == "TOAs":
            self.toas = pulsargroup[key].value

        # residuals
        elif key == "residuals":
            self.res = pulsargroup[key].value

        # toa error bars
        elif key == "toaErr":
            self.err = pulsargroup[key].value

        # frequencies in Hz
        elif key == "freqs":
            self.freqs = pulsargroup[key].value

        # design matrix (and derived G matrix)
        elif key == "designmatrix":
            self.dmatrix = pulsargroup[key].value
            self.ntoa, self.nfit = self.dmatrix.shape
            if addGmatrix:
                self.G = PALutils.createGmatrix(self.dmatrix)

        # pulsar name (original comment said "design matrix" — wrong)
        elif key == "pname":
            self.name = pulsargroup[key].value

        # pulsar distance in kpc
        elif key == "dist":
            self.dist = pulsargroup[key].value

        # pulsar distance uncertainty in kpc
        elif key == "distErr":
            self.distErr = pulsargroup[key].value

        # right ascension and declination from the timing-model parameters
        elif key == 'tmp_name':
            par_names = list(pulsargroup[key].value)
            for ct, name in enumerate(par_names):

                # right ascension and phi (azimuthal angle)
                if name == "RAJ":
                    self.ra = pulsargroup["tmp_valpost"].value[ct]
                    self.phi = self.ra

                # declination and theta (polar angle)
                if name == "DECJ":
                    self.dec = pulsargroup["tmp_valpost"].value[ct]
                    self.theta = np.pi / 2 - self.dec

        # inverse covariance matrix (only loaded on request)
        elif key == "invCov":
            if addNoise:
                self.invCov = pulsargroup[key].value

        ## noise parameters ##

        # red noise amplitude
        elif key == "Amp":
            self.Amp = pulsargroup[key].value

        # red noise spectral index
        elif key == "gam":
            self.gam = pulsargroup[key].value

        # efac
        elif key == "efac":
            self.efac = pulsargroup[key].value

        # equad
        elif key == "equad":
            self.equad = pulsargroup[key].value

        # correlated equad
        elif key == "cequad":
            self.cequad = pulsargroup[key].value

        # high-frequency cutoff
        elif key == "fH":
            self.fH = pulsargroup[key].value

        # NOTE(review): the original chain repeated the "distErr" branch
        # after "fH"; it was unreachable dead code and has been removed.

    # fall back to fiducial values when no distance info is available
    if self.dist is None:
        print('WARNING: No distance info, using d = 1 kpc')
        self.dist = 1.0

    if self.distErr is None:
        print('WARNING: No distance error info, using sigma_d = 0.1 kpc')
        self.distErr = 0.1
def addInverseCovFromNoiseFile(self, parfile, timfile, noisefile, DMOFF=None, DMXOFF=None, dailyAverage=False):
    """
    Add noise covariance matrix after timing model subtraction.

    Reads maximum-likelihood noise parameters from a text file, builds the
    red + white noise covariance matrix projected orthogonal to the timing
    model, and stores its pseudo-inverse together with the noise parameters
    in the pulsar's HDF5 group.

    @param parfile: path to the tempo2 par file
    @param timfile: path to the tempo2 tim file
    @param noisefile: text file of whitespace-separated "key ... value"
                      lines; Amp, gam, efac and equad are required, while
                      fH, tau, DMAmp and DMgam are optional
    @param DMOFF: if not None, turn off DMMODEL fitting
    @param DMXOFF: if not None, turn off DMX fitting and turn DM fitting on
    @param dailyAverage: if True, use daily-averaged TOAs and design matrix
    """

    # Check whether the two files exist
    if not os.path.isfile(parfile) or not os.path.isfile(timfile):
        raise IOError("Cannot find parfile (%s) or timfile (%s)!" % (parfile, timfile))

    assert self.filename is not None, "ERROR: HDF5 file not set!"

    # 'a' means: read/write if exists, create otherwise
    self.h5file = h5.File(self.filename, 'a')

    # The Data subgroup must already exist
    if "Data" in self.h5file:
        datagroup = self.h5file["Data"]
    else:
        raise IOError("Cannot add noise parameters if Data group does not exist!")

    # Load pulsar data from the JPL Cython tempo2 library
    t2pulsar = t2.tempopulsar(parfile, timfile)

    # turn off DMMODEL fitting
    if DMOFF is not None:
        t2pulsar['DMMODEL'].fit = False

    # turn off DMX fitting and turn plain DM fitting on instead
    if DMXOFF is not None:
        DMXFlag = False
        print('Turning off DMX fitting and turning DM fitting on')
        for par in t2pulsar.pars:
            if 'DMX' in par:
                t2pulsar[par].fit = False
                t2pulsar['DM'].fit = True
                DMXFlag = True
        if not DMXFlag:
            print('NO DMX for pulsar {0}'.format(t2pulsar.name))

    # refit 5 times to make sure we are converged
    t2pulsar.fit(iters=5)

    # The Pulsars subgroup must already exist
    if "Pulsars" in datagroup:
        pulsarsgroup = datagroup["Pulsars"]
    else:
        raise IOError("Cannot add noise parameters if pulsar group does not exist!")

    # Look up the name of the pulsar, and see if it exists
    if t2pulsar.name not in pulsarsgroup:
        raise IOError("%s must already exists in %s to add noise parameters!"
                      % (t2pulsar.name, self.filename))

    pulsarsgroup = pulsarsgroup[t2pulsar.name]

    # first create G matrix from design matrix and toas
    designmatrix = np.double(t2pulsar.designmatrix())
    toas = np.double(t2pulsar.toas() * 86400)   # presumably MJD days -> seconds; confirm against tempo2
    errs = np.double(t2pulsar.toaerrs * 1e-6)   # presumably microseconds -> seconds

    # if doing daily averaging
    if dailyAverage:

        # get average quantities
        toas, qmatrix, errs, dmatrix, freqs, bands = PALutils.dailyAverage(t2pulsar)

        # construct new daily averaged residuals and designmatrix
        toas *= 86400
        designmatrix = np.dot(qmatrix, dmatrix)

    G = PALutils.createGmatrix(designmatrix)

    # create matrix of time lags
    tm = PALutils.createTimeLags(toas, toas, round=True)

    # optional parameters default to None; the required ones (Amp, gam,
    # efac, equad) will raise a NameError below if missing from the file
    fH = None
    tau = None
    DMAmp = None
    DMgam = None

    # parse the noise file; use a context manager so the handle is always
    # closed (the original opened it into the builtin name 'file' and
    # leaked the handle)
    with open(noisefile, 'r') as nfile:
        for line in nfile:
            key = line.split()[0]

            # get amplitude
            if "Amp" == key:
                Amp = float(line.split()[-1])

            # get spectral index
            elif "gam" == key:
                gam = float(line.split()[-1])

            # get efac
            elif "efac" == key:
                efac = float(line.split()[-1])

            # get equad
            elif "equad" == key:
                equad = float(line.split()[-1])

            # get high frequency cutoff if available
            elif "fH" == key:
                fH = float(line.split()[-1])

            # get correlation time scale if available
            elif "tau" == key:
                tau = float(line.split()[-1])

            # get DM Amplitude if available
            elif "DMAmp" == key:
                DMAmp = float(line.split()[-1])

            # get DM Spectral Index if available
            elif "DMgam" == key:
                DMgam = float(line.split()[-1])

    # construct red and white noise covariance matrices
    red = PALutils.createRedNoiseCovarianceMatrix(tm, Amp, gam, fH=fH)
    white = PALutils.createWhiteNoiseCovarianceMatrix(errs, efac, equad, tau=tau)

    # construct post timing model marginalization covariance matrix
    cov = red + white
    pcov = np.dot(G.T, np.dot(cov, G))

    # finally construct "inverse": G (G^T C G)^-1 G^T
    invCov = np.dot(G, np.dot(np.linalg.inv(pcov), G.T))

    # create dataset for inverse covariance matrix
    pulsarsgroup.create_dataset('invCov', data=invCov)

    # create dataset for G matrix
    pulsarsgroup.create_dataset('Gmatrix', data=G)

    # record noise parameter values
    pulsarsgroup.create_dataset('Amp', data=Amp)
    pulsarsgroup.create_dataset('gam', data=gam)
    pulsarsgroup.create_dataset('efac', data=efac)
    pulsarsgroup.create_dataset('equad', data=equad)
    if fH is not None:
        pulsarsgroup.create_dataset('fH', data=fH)
    if tau is not None:
        pulsarsgroup.create_dataset('tau', data=tau)
    if DMAmp is not None:
        pulsarsgroup.create_dataset('DMAmp', data=DMAmp)
    if DMgam is not None:
        pulsarsgroup.create_dataset('DMgam', data=DMgam)

    # Close the hdf5 file
    self.h5file.close()
# make sure all pulsar have same reference time tt=[] for p in psr: tt.append(np.min(p.toas)) # find reference time tref = np.min(tt) # now scale pulsar time for p in psr: p.toas -= tref # get G matrices for p in psr: p.G = PALutils.createGmatrix(p.dmatrix) # run Fp statistic to determine starting frequency if args.freq is None: print 'Running initial Fpstat search' fsearch = np.logspace(-9, -7, 1000) fpstat = np.zeros(len(fsearch)) for ii in range(len(fsearch)): fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii]) # determine maximum likelihood frequency fmaxlike = fsearch[np.argmax(fpstat)] print 'Maximum likelihood from f-stat search = {0}\n'.format(fmaxlike) # get determinant of covariance matrix for use in likelihood logdetTerm = []
# initialize fourier design matrix if args.nmodes != 0: F, f = PALutils.createfourierdesignmatrix(psr.toas, args.nmodes, freq=True) Tspan = psr.toas.max() - psr.toas.min() ##fsred = np.array([1.599558028614668e-07, 5.116818355403073e-08]) # 1855 #fsred = np.array([9.549925860214369e-08]) # 1909 #fsred = np.array([1/Tspan, 9.772372209558111e-08]) # 1909 # #F = np.zeros((psr.ntoa, 2*len(fsred))) #F[:,0::2] = np.cos(2*np.pi*np.outer(psr.toas, fsred)) #F[:,1::2] = np.sin(2*np.pi*np.outer(psr.toas, fsred)) # get G matrices psr.G = PALutils.createGmatrix(psr.dmatrix) # pre compute diagonalized efac + equad white noise model efac = np.dot(psr.G.T, np.dot(np.diag(psr.err**2), psr.G)) equad = np.dot(psr.G.T, psr.G) L = np.linalg.cholesky(equad) Linv = np.linalg.inv(L) sand = np.dot(Linv, np.dot(efac, Linv.T)) u, s, v = np.linalg.svd(sand) proj = np.dot(u.T, np.dot(Linv, psr.G.T)) # project residuals onto new basis psr.res = np.dot(proj, psr.res) if args.nmodes != 0 and args.single == False: print 'Projecting F matrix'
# import hdf5 file pfile = h5.File(args.h5file, 'r') # define the pulsargroup pulsargroup = pfile['Data']['Pulsars'][args.pname] # fill in pulsar class psr = PALpulsarInit.pulsar(pulsargroup, addGmatrix=True) # initialize fourier design matrix if args.nmodes != 0: F, f = PALutils.createfourierdesignmatrix(psr.toas, args.nmodes, freq=True) # get G matrices psr.G = PALutils.createGmatrix(psr.dmatrix) # pre compute diagonalized efac + equad white noise model efac = np.dot(psr.G.T, np.dot(np.diag(psr.err**2), psr.G)) equad = np.dot(psr.G.T, psr.G) L = np.linalg.cholesky(equad) Linv = np.linalg.inv(L) sand = np.dot(Linv, np.dot(efac, Linv.T)) u,s,v = np.linalg.svd(sand) proj = np.dot(u.T, np.dot(Linv, psr.G.T)) # project residuals onto new basis psr.res = np.dot(proj, psr.res) if args.nmodes != 0: F = np.dot(proj, F)