def ln_prob(xx):
    """Log-posterior for a single-pulsar noise model.

    Unpacks the parameter vector ``xx`` in the fixed order
    [log10(Ared), gam_red, (log10(Adm), gam_dm if args.dmVar),
     EFAC per system, (EQUAD per system, ECORR per 'nano-f' system
     if args.fullN), (glitch epoch/amplitude if args.incGlitch)],
    whitens the residuals, builds the red-noise (and optionally
    DM-variation) power-law Fourier prior, and returns the marginalised
    Gaussian log-likelihood plus any uniform-amplitude prior factors.

    Relies on module-level state: args, systems, psr, t2psr, fqs, nmode,
    utils, jitter, np, sl.  Returns a float.
    """
    Ared = 10.0**xx[0]
    gam_red = xx[1]

    ct = 2
    if args.dmVar:
        Adm = 10.0**xx[ct]
        gam_dm = xx[ct + 1]
        ct = 4

    # Per-backend-system white-noise multipliers.
    EFAC = xx[ct:ct + len(systems)]
    ct += len(systems)

    # FIX: ECORR must always be bound; previously it was only assigned inside
    # the nested 'pta' branch, so `len(ECORR)` below raised NameError whenever
    # args.fullN was true but no NANOGrav ECORR parameters existed.
    ECORR = []
    if args.fullN:
        # FIX: the EQUAD slice previously read
        # xx[ct + len(systems):ct + 2*len(systems)] even though ct had already
        # been advanced past the EFAC block, skipping len(systems) parameters.
        # Parameters are packed contiguously, so the slice starts at ct.
        EQUAD = 10.0**xx[ct:ct + len(systems)]
        ct += len(systems)

        if 'pta' in t2psr.flags():
            if len(psr.sysflagdict['nano-f'].keys()) > 0:
                ECORR = 10.0**xx[ct:ct + len(psr.sysflagdict['nano-f'].keys())]
                ct += len(psr.sysflagdict['nano-f'].keys())

    if args.incGlitch:
        glitch_epoch = xx[ct]
        glitch_lamp = xx[ct + 1]

    loglike1 = 0

    ####################################
    ####################################

    # Scale TOA uncertainties by the per-system EFAC factors.
    scaled_err = (psr.toaerrs).copy()
    for jj, sysname in enumerate(systems):
        scaled_err[systems[sysname]] *= EFAC[jj]

    # Optional per-system EQUAD added in quadrature.
    white_noise = np.zeros(len(scaled_err))
    if args.fullN:
        white_noise = np.ones(len(scaled_err))
        for jj, sysname in enumerate(systems):
            white_noise[systems[sysname]] *= EQUAD[jj]

    new_err = np.sqrt(scaled_err**2.0 + white_noise**2.0)

    ########

    if args.incGlitch:
        model_res = psr.res - utils.glitch_signal(psr, glitch_epoch,
                                                  glitch_lamp)
    else:
        # FIX: this branch previously read `elif not incGlitch:`, referencing
        # an undefined bare name and raising NameError whenever
        # args.incGlitch was False.
        model_res = psr.res

    # compute ( T.T * N^-1 * T )
    # & log determinant of N
    if args.fullN:
        if len(ECORR) > 0:
            # Epoch-averaged jitter (ECORR) blocks: Sherman-Morrison solves
            # via the compiled helpers in `jitter`.
            Jamp = np.ones(len(psr.epflags))
            for jj, nano_sysname in enumerate(
                    psr.sysflagdict['nano-f'].keys()):
                Jamp[np.where(psr.epflags == nano_sysname)] *= ECORR[jj]**2.0

            Nx = jitter.cython_block_shermor_0D(model_res, new_err**2.,
                                                Jamp, psr.Uinds)
            d = np.dot(psr.Te.T, Nx)
            logdet_N, TtNT = \
                jitter.cython_block_shermor_2D(psr.Te, new_err**2.,
                                               Jamp, psr.Uinds)
            det_dummy, dtNdt = \
                jitter.cython_block_shermor_1D(model_res, new_err**2.,
                                               Jamp, psr.Uinds)
        else:
            # Diagonal N: direct inverse-variance weighting.
            d = np.dot(psr.Te.T, model_res / (new_err**2.0))
            N = 1. / (new_err**2.0)
            right = (N * psr.Te.T).T
            TtNT = np.dot(psr.Te.T, right)
            logdet_N = np.sum(np.log(new_err**2.0))
            # triple product in likelihood function
            dtNdt = np.sum(model_res**2.0 / (new_err**2.0))
    else:
        d = np.dot(psr.Te.T, model_res / (new_err**2.0))
        N = 1. / (new_err**2.0)
        right = (N * psr.Te.T).T
        TtNT = np.dot(psr.Te.T, right)
        logdet_N = np.sum(np.log(new_err**2.0))
        # triple product in likelihood function
        dtNdt = np.sum(model_res**2.0 / (new_err**2.0))

    loglike1 += -0.5 * (logdet_N + dtNdt)

    ####################################
    ####################################

    # parameterize intrinsic red noise as power law
    Tspan = (1 / fqs[0]) * 86400.0
    f1yr = 1 / 3.16e7

    # parameterize intrinsic red-noise and DM-variations as power law
    if args.dmVar:
        kappa = np.log10(
            np.append(
                Ared**2/12/np.pi**2 * \
                f1yr**(gam_red-3) * \
                (fqs/86400.0)**(-gam_red)/Tspan,
                Adm**2/12/np.pi**2 * \
                f1yr**(gam_dm-3) * \
                (fqs/86400.0)**(-gam_dm)/Tspan))
    else:
        kappa = np.log10(
            Ared**2/12/np.pi**2 * \
            f1yr**(gam_red-3) * \
            (fqs/86400.0)**(-gam_red)/Tspan)

    # construct elements of sigma array: sine and cosine amplitudes per mode
    if args.dmVar:
        mode_count = 4 * nmode
    else:
        mode_count = 2 * nmode

    diagonal = np.zeros(mode_count)
    diagonal[0::2] = 10**kappa
    diagonal[1::2] = 10**kappa

    # compute Phi inverse
    red_phi = np.diag(1. / diagonal)
    logdet_Phi = np.sum(np.log(diagonal))

    # now fill in real covariance matrix; the Fourier modes sit after the
    # psr.Gc.shape[1] timing-model columns of Te.
    Phi = np.zeros(TtNT.shape)
    for kk in range(0, mode_count):
        Phi[kk + psr.Gc.shape[1], kk + psr.Gc.shape[1]] = red_phi[kk, kk]

    # symmeterize Phi
    Phi = Phi + Phi.T - np.diag(np.diag(Phi))

    # compute sigma
    Sigma = TtNT + Phi

    # cholesky decomp for second term in exponential
    try:
        cf = sl.cho_factor(Sigma)
        expval2 = sl.cho_solve(cf, d)
        logdet_Sigma = np.sum(2 * np.log(np.diag(cf[0])))
    except np.linalg.LinAlgError:
        # SVD fallback when Sigma is not positive definite.
        # (print made 2/3-compatible by parenthesizing.)
        print('Cholesky Decomposition Failed second time!! '
              'Using SVD instead')
        u, s, v = sl.svd(Sigma)
        expval2 = np.dot(v.T, 1 / s * np.dot(u.T, d))
        logdet_Sigma = np.sum(np.log(s))

    logLike = -0.5 * (logdet_Phi + logdet_Sigma) + \
        0.5 * (np.dot(d, expval2)) + loglike1

    # Jacobian terms: sampling in log10-amplitude under a uniform-amplitude
    # prior requires a +log(A ln 10) correction.
    prior_fac = 0.0
    if args.redamp_prior == 'uniform':
        prior_fac += np.log(Ared * np.log(10.0))
    if (args.dmVar == True) and (args.dmamp_prior == 'uniform'):
        prior_fac += np.log(Adm * np.log(10.0))

    return logLike + prior_fac
def ln_prob(xx):
    """Log-posterior for a single-pulsar noise model (corrected version).

    Unpacks the parameter vector ``xx`` in the fixed order
    [log10(Ared), gam_red, (log10(Adm), gam_dm if args.incDM),
     EFAC per system, (EQUAD per system, ECORR per 'nano-f' system
     if args.fullN), (glitch epoch/amplitude if args.incGlitch)],
    whitens the residuals, builds a power-law Fourier prior for red noise
    (and DM variations), and returns the marginalised Gaussian
    log-likelihood plus uniform-amplitude prior Jacobian terms.
    Returns -inf if the Cholesky factorisation of Sigma fails.

    NOTE(review): depends on module-level state (args, systems, psr,
    t2psr, fqs, nmode, utils, jitter, np, sl) — confirm these are defined
    before this is handed to a sampler.
    """
    Ared = 10.0**xx[0]
    gam_red = xx[1]

    ct = 2
    if args.incDM:
        Adm = 10.0**xx[ct]
        gam_dm = xx[ct+1]
        ct = 4

    # Per-backend-system white-noise multipliers.
    EFAC = xx[ct:ct+len(systems)]
    ct += len(systems)

    if args.fullN:
        EQUAD = 10.0**xx[ct:ct+len(systems)]
        ct += len(systems)

        # ECORR defaults to empty so the `len(ECORR)` check below is safe
        # even when there are no NANOGrav epoch-correlated parameters.
        ECORR = []
        if 'pta' in t2psr.flags():
            if 'NANOGrav' in list(set(t2psr.flagvals('pta'))):
                if len(psr.sysflagdict['nano-f'].keys())>0:
                    ECORR = 10.0**xx[ct:ct+len(psr.sysflagdict['nano-f'].keys())]
                    ct += len(psr.sysflagdict['nano-f'].keys())

    if args.incGlitch:
        glitch_epoch = xx[ct]
        glitch_lamp = xx[ct+1]

    loglike1 = 0.

    ####################################
    ####################################

    # Scale TOA uncertainties by the per-system EFAC factors.
    scaled_err = (psr.toaerrs).copy()
    for jj,sysname in enumerate(systems):
        scaled_err[systems[sysname]] *= EFAC[jj]

    ###

    # Optional per-system EQUAD, added in quadrature below.
    white_noise = np.zeros(len(scaled_err))
    if args.fullN:
        white_noise = np.ones(len(scaled_err))
        for jj,sysname in enumerate(systems):
            white_noise[systems[sysname]] *= EQUAD[jj]

    new_err = np.sqrt( scaled_err**2.0 + white_noise**2.0 )

    ########

    # Residuals, optionally with a glitch waveform subtracted.
    if args.incGlitch:
        model_res = psr.res - utils.glitch_signal(psr, glitch_epoch, glitch_lamp)
    elif not args.incGlitch:  # equivalent to a plain `else`
        model_res = psr.res

    # compute ( T.T * N^-1 * T )
    # & log determinant of N
    if args.fullN:
        if len(ECORR)>0:
            # Epoch-correlated jitter blocks handled by the compiled
            # Sherman-Morrison helpers in `jitter`.
            Jamp = np.ones(len(psr.epflags))
            for jj,nano_sysname in enumerate(psr.sysflagdict['nano-f'].keys()):
                Jamp[np.where(psr.epflags==nano_sysname)] *= ECORR[jj]**2.0

            Nx = jitter.cython_block_shermor_0D(model_res, new_err**2., Jamp, psr.Uinds)
            d = np.dot(psr.Te.T, Nx)
            logdet_N, TtNT = \
                jitter.cython_block_shermor_2D(psr.Te, new_err**2., Jamp, psr.Uinds)
            det_dummy, dtNdt = \
                jitter.cython_block_shermor_1D(model_res, new_err**2., Jamp, psr.Uinds)
        else:
            # Diagonal N: direct inverse-variance weighting.
            d = np.dot(psr.Te.T, model_res/( new_err**2.0 ))
            N = 1./( new_err**2.0 )
            right = (N*psr.Te.T).T
            TtNT = np.dot(psr.Te.T, right)
            logdet_N = np.sum(np.log( new_err**2.0 ))
            # triple product in likelihood function
            dtNdt = np.sum(model_res**2.0/( new_err**2.0 ))
    else:
        d = np.dot(psr.Te.T, model_res/( new_err**2.0 ))
        N = 1./( new_err**2.0 )
        right = (N*psr.Te.T).T
        TtNT = np.dot(psr.Te.T, right)
        logdet_N = np.sum(np.log( new_err**2.0 ))
        # triple product in likelihood function
        dtNdt = np.sum(model_res**2.0/( new_err**2.0 ))

    loglike1 += -0.5 * (logdet_N + dtNdt)

    ####################################
    ####################################

    # parameterize intrinsic red noise as power law
    Tspan = (1/fqs[0])*86400.0
    f1yr = 1/3.16e7

    # parameterize intrinsic red-noise and DM-variations as power law
    if args.incDM:
        kappa = np.log10( np.append( Ared**2/12/np.pi**2 * \
                                     f1yr**(gam_red-3) * \
                                     (fqs/86400.0)**(-gam_red)/Tspan,
                                     Adm**2/12/np.pi**2 * \
                                     f1yr**(gam_dm-3) * \
                                     (fqs/86400.0)**(-gam_dm)/Tspan ) )
    else:
        kappa = np.log10( Ared**2/12/np.pi**2 * \
                          f1yr**(gam_red-3) * \
                          (fqs/86400.0)**(-gam_red)/Tspan )

    # construct elements of sigma array: sine+cosine amplitude per mode
    if args.incDM:
        mode_count = 4*nmode
    else:
        mode_count = 2*nmode

    diagonal = np.zeros(mode_count)
    diagonal[0::2] = 10**kappa
    diagonal[1::2] = 10**kappa

    # compute Phi inverse
    red_phi = np.diag(1./diagonal)
    logdet_Phi = np.sum(np.log( diagonal ))

    # now fill in real covariance matrix; the Fourier modes sit after the
    # psr.Gc.shape[1] timing-model columns of Te.
    Phi = np.zeros( TtNT.shape )
    for kk in range(0,mode_count):
        Phi[kk+psr.Gc.shape[1],kk+psr.Gc.shape[1]] = red_phi[kk,kk]

    # symmeterize Phi
    Phi = Phi + Phi.T - np.diag(np.diag(Phi))

    # compute sigma
    Sigma = TtNT + Phi

    # cholesky decomp for second term in exponential
    try:
        cf = sl.cho_factor(Sigma)
        expval2 = sl.cho_solve(cf, d)
        logdet_Sigma = np.sum(2*np.log(np.diag(cf[0])))
    except np.linalg.LinAlgError:
        # Unlike the SVD-fallback variant, a failed factorisation simply
        # rejects the sample by returning -inf.
        #print 'Cholesky Decomposition Failed second time!! Using SVD instead'
        #u,s,v = sl.svd(Sigma)
        #expval2 = np.dot(v.T, 1/s*np.dot(u.T, d))
        #logdet_Sigma = np.sum(np.log(s))
        print 'Cholesky Decomposition Failed second time!! Getting outta here...'
        return -np.inf

    logLike = -0.5 * (logdet_Phi + logdet_Sigma) + \
        0.5 * (np.dot(d, expval2)) + loglike1

    # Jacobian terms: sampling in log10-amplitude under a uniform-amplitude
    # prior requires a +log(A ln 10) correction.
    prior_fac = 0.0
    if args.redPrior == 'uniform':
        prior_fac += np.log(Ared * np.log(10.0))
    if args.incDM and (args.dmPrior == 'uniform'):
        prior_fac += np.log(Adm * np.log(10.0))

    return logLike + prior_fac