Example #1
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]
        gam = cube[2]
        A = 10**cube[3]

        fs = np.zeros(args.ss)
        for ii in range(args.ss):
            fs[ii] = 10**cube[4 + ii]
        rho2 = np.zeros(args.ss)
        for ii in range(args.ss):
            rho2[ii] = cube[ii + 4 + args.ss]

        # check to make sure frequencies are ordered
        ordered = np.all([fs[ii] < fs[ii + 1] for ii in range(args.ss - 1)])

        if ordered:

            F1 = list(
                PALutils.createfourierdesignmatrix(psr.toas, args.nmodes).T)
            tmp, f = PALutils.createfourierdesignmatrix(psr.toas,
                                                        args.nmodes,
                                                        freq=True)
            for ii in range(args.ss):
                F1.append(np.cos(2 * np.pi * fs[ii] * psr.toas))
                F1.append(np.sin(2 * np.pi * fs[ii] * psr.toas))

            F = np.array(F1).T
            F = np.dot(proj, F)

            # compute total time span of data
            Tspan = psr.toas.max() - psr.toas.min()

            # compute power spectrum coefficients (rho) from A and gam
            f1yr = 1 / 3.16e7
            rho = list(
                np.log10(A**2 / 12 / np.pi**2 * f1yr**(gam - 3) * f**(-gam) /
                         Tspan))

            # compute total rho
            for ii in range(args.ss):
                rho.append(rho2[ii])

            loglike = PALLikelihoods.lentatiMarginalizedLike(
                psr, F, s, np.array(rho), efac**2, equad)

        else:

            loglike = -np.inf

        #print efac, rho, loglike

        return loglike
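
The `(cube, ndim, nparams)` signature above is the callback form expected by PyMultiNest-style samplers, with `cube` filled in by the sampler. Purely as an illustrative sketch (the prior ranges, the `myprior` transform, and the output basename below are assumptions, not part of the original script), such a likelihood might be wired up roughly like this:

import pymultinest

# hypothetical prior transform mapping the unit cube to physical ranges
# (ranges here are placeholders for illustration only)
def myprior(cube, ndim, nparams):
    cube[0] = 0.1 + cube[0] * 9.9                    # efac
    cube[1] = -10.0 + cube[1] * 5.0                  # log10(equad)
    cube[2] = cube[2] * 7.0                          # gam
    cube[3] = -17.0 + cube[3] * 7.0                  # log10(A)
    for ii in range(args.ss):
        cube[4 + ii] = -9.0 + cube[4 + ii] * 2.0                        # log10(f_ii)
        cube[4 + args.ss + ii] = -18.0 + cube[4 + args.ss + ii] * 10.0  # log10(rho_ii)

n_params = 4 + 2 * args.ss
pymultinest.run(myloglike, myprior, n_params, resume=False, verbose=True,
                outputfiles_basename='chains/spectrum_')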
Example #2
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]
        gam = cube[2]
        A = 10**cube[3]

        fs = np.zeros(args.ss)
        for ii in range(args.ss):
            fs[ii] = 10**cube[4+ii]
        rho2 = np.zeros(args.ss)
        for ii in range(args.ss):
            rho2[ii] = cube[ii+4+args.ss]
        
        # check to make sure frequencies are ordered
        ordered = np.all([fs[ii] < fs[ii+1] for ii in range(args.ss-1)])

        if ordered:

            F1 = list(PALutils.createfourierdesignmatrix(psr.toas, args.nmodes).T)
            tmp, f = PALutils.createfourierdesignmatrix(psr.toas, args.nmodes, freq=True)
            for ii in range(args.ss):
                F1.append(np.cos(2*np.pi*fs[ii]*psr.toas))
                F1.append(np.sin(2*np.pi*fs[ii]*psr.toas))

            F = np.array(F1).T
            F = np.dot(proj, F)

            # compute total time span of data
            Tspan = psr.toas.max() - psr.toas.min()

            # compute power spectrum coefficients (rho) from A and gam
            f1yr = 1/3.16e7
            rho = list(np.log10(A**2/12/np.pi**2 * f1yr**(gam-3) * f**(-gam)/Tspan))

            # compute total rho
            for ii in range(args.ss):
                rho.append(rho2[ii])

            loglike = PALLikelihoods.lentatiMarginalizedLike(psr, F, s, np.array(rho), efac**2, equad)
            
        else:

            loglike = -np.inf

        #print efac, rho, loglike

        return loglike
Example #3
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]

        fs = np.zeros(args.ss)
        for ii in range(args.ss):
            fs[ii] = 10**cube[2+ii]
        rho = np.zeros(args.nmodes+args.ss)
        for ii in range(args.nmodes+args.ss):
            rho[ii] = cube[ii+2+args.ss]

        F1 = list(PALutils.createfourierdesignmatrix(psr.toas, args.nmodes).T)
        for ii in range(args.ss):
            F1.append(np.cos(2*np.pi*fs[ii]*psr.toas))
            F1.append(np.sin(2*np.pi*fs[ii]*psr.toas))

        F = np.array(F1).T

        F = np.dot(proj, F)
       
        loglike = PALLikelihoods.lentatiMarginalizedLike(psr, F, s, rho, efac, equad)

        #print efac, rho, loglike

        return loglike
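
For reference, the indexing in Example #3 implies a parameter vector of efac, log10(equad), `args.ss` line frequencies, and `args.nmodes + args.ss` spectral coefficients. A short sketch of the implied parameter count (the example numbers in the comment are illustrative):

# number of sampled parameters implied by the cube indexing above
n_params = 2 + args.ss + (args.nmodes + args.ss)
# e.g. args.nmodes = 10 Fourier modes and args.ss = 1 extra line
# gives n_params = 2 + 1 + 11 = 14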
Example #4
if not args.null:
    print "Running initial Fpstat search"
    fsearch = np.logspace(-9, -7, 1000)
    fpstat = np.zeros(len(fsearch))
    for ii in range(len(fsearch)):
        fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii])

    # determine maximum likelihood frequency
    fmaxlike = fsearch[np.argmax(fpstat)]
    print "Maximum likelihood from f-stat search = {0}\n".format(fmaxlike)

# get Tmax
Tmax = np.max([p.toas.max() - p.toas.min() for p in psr])

# initialize fourier design matrix
F = [PALutils.createfourierdesignmatrix(p.toas, args.nmodes, Tspan=Tmax) for p in psr]

f = []
for ct, p in enumerate(psr):
    T = p.toas.max() - p.toas.min()
    f.append(np.linspace(1 / T, args.nmodes / T, args.nmodes))

# pre-compute diagonalized efac + equad white noise model
SS = []
proj = []
print "Pre-Computing white noise covariances"
for ct, p in enumerate(psr):
    efac = np.dot(p.G.T, np.dot(np.diag(p.err ** 2), p.G))
    equad = np.dot(p.G.T, p.G)
    L = np.linalg.cholesky(equad)
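
The loop in Example #4 is cut off after the Cholesky factorization. Judging from the single-pulsar version of the same construction in Example #11, the remaining steps presumably invert the Cholesky factor, diagonalize the efac term with an SVD, and store the per-pulsar singular values and projection; a hedged continuation along those lines:

    # continuation sketch (an assumption), mirroring Example #11
    Linv = np.linalg.inv(L)
    sand = np.dot(Linv, np.dot(efac, Linv.T))
    u, s, v = np.linalg.svd(sand)
    SS.append(s)
    proj.append(np.dot(u.T, np.dot(Linv, p.G.T)))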
Example #5
L = []
for ct, p in enumerate(psr):

    Amp = p.Amp
    gam = p.gam
    efac = p.efac
    equad = p.equad
    try:
        cequad = p.cequad
    except AttributeError:
        cequad = 0
        
    avetoas, U = PALutils.exploderMatrix(p.toas)
    Tspan = p.toas.max()-p.toas.min()
    F, f = PALutils.createfourierdesignmatrix(p.toas, 10, freq=True, Tspan=Tspan)
            
    f1yr = 1/3.16e7
    rho = (Amp**2/12/np.pi**2 * f1yr**(gam-3) * f**(-gam)/Tspan)
    
    tmp = np.zeros(20)
    tmp[0::2] = rho
    tmp[1::2] = rho
    
    phi = np.diag(tmp)
    
    white = PALutils.createWhiteNoiseCovarianceMatrix(p.err, efac**2, equad)
    
    cequad_mat = cequad**2 * np.dot(U,U.T)
    
    red = np.dot(F, np.dot(phi, F.T))
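
The `tmp[0::2]` / `tmp[1::2]` assignments above simply repeat each spectral coefficient for the cosine and sine column of the corresponding frequency, so `phi` ends up with one variance per column of the Fourier design matrix. A tiny standalone check with made-up values:

import numpy as np

rho = np.array([3.0, 2.0, 1.0])   # one coefficient per frequency
tmp = np.zeros(2 * len(rho))
tmp[0::2] = rho                    # cosine columns
tmp[1::2] = rho                    # sine columns
print(tmp)                         # -> [3. 3. 2. 2. 1. 1.]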
Example #6
    for p in psr:
        print 'Pulsar {0} has {1} ns weighted rms'.format(
            p.name,
            p.rms() * 1e9)

npsr = len(psr)

pfile.close()

# get Tmax
Tmax = np.max([p.toas.max() - p.toas.min() for p in psr])

# initialize fourier design matrix
F = [
    PALutils.createfourierdesignmatrix(p.toas, args.nmodes, Tspan=Tmax)
    for p in psr
]

if args.powerlaw:
    tmp, f = PALutils.createfourierdesignmatrix(p.toas,
                                                args.nmodes,
                                                Tspan=Tmax,
                                                freq=True)

# get G matrices
for p in psr:
    p.G = PALutils.createGmatrix(p.dmatrix)

# pre compute diagonalized efac + equad white noise model
Diag = []
Example #7
if not args.null:
    print 'Running initial Fpstat search'
    fsearch = np.logspace(-9, -7, 1000)
    fpstat = np.zeros(len(fsearch))
    for ii in range(len(fsearch)):
        fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii])

    # determine maximum likelihood frequency
    fmaxlike = fsearch[np.argmax(fpstat)]
    print 'Maximum likelihood from f-stat search = {0}\n'.format(fmaxlike)

# get Tmax
Tmax = np.max([p.toas.max() - p.toas.min() for p in psr])

# initialize fourier design matrix
F = [PALutils.createfourierdesignmatrix(p.toas, args.nmodes, Tspan=Tmax) for p in psr]

f = []
for ct, p in enumerate(psr):
    T = p.toas.max() - p.toas.min()
    f.append(np.linspace(1/T, args.nmodes/T, args.nmodes))

# pre-compute diagonalized efac + equad white noise model
SS = []
proj = []
print 'Pre-Computing white noise covariances'
for ct, p in enumerate(psr):
    efac = np.dot(p.G.T, np.dot(np.diag(p.err**2), p.G))
    equad = np.dot(p.G.T, p.G)
    L = np.linalg.cholesky(equad)
Example #8
L = []
for ct, p in enumerate(psr):

    Amp = p.Amp
    gam = p.gam
    efac = p.efac
    equad = p.equad
    try:
        cequad = p.cequad
    except AttributeError:
        cequad = 0

    avetoas, U = PALutils.exploderMatrix(p.toas)
    Tspan = p.toas.max() - p.toas.min()
    F, f = PALutils.createfourierdesignmatrix(p.toas,
                                              10,
                                              freq=True,
                                              Tspan=Tspan)

    f1yr = 1 / 3.16e7
    rho = (Amp**2 / 12 / np.pi**2 * f1yr**(gam - 3) * f**(-gam) / Tspan)

    tmp = np.zeros(20)
    tmp[0::2] = rho
    tmp[1::2] = rho

    phi = np.diag(tmp)

    white = PALutils.createWhiteNoiseCovarianceMatrix(p.err, efac**2, equad)

    cequad_mat = cequad**2 * np.dot(U, U.T)
Example #9
        pass

print 'Reading in HDF5 file'

# import hdf5 file
pfile = h5.File(args.h5file, 'r')

# define the pulsargroup
pulsargroup = pfile['Data']['Pulsars'][args.pname]

# fill in pulsar class
psr = PALpulsarInit.pulsar(pulsargroup, addGmatrix=True)

# initialize fourier design matrix
if args.nmodes != 0:
    F, f = PALutils.createfourierdesignmatrix(psr.toas, args.nmodes, freq=True)

Tspan = psr.toas.max() - psr.toas.min()
##fsred = np.array([1.599558028614668e-07, 5.116818355403073e-08]) # 1855
#fsred = np.array([9.549925860214369e-08]) # 1909
#fsred = np.array([1/Tspan, 9.772372209558111e-08]) # 1909
#
#F = np.zeros((psr.ntoa, 2*len(fsred)))
#F[:,0::2] = np.cos(2*np.pi*np.outer(psr.toas, fsred))
#F[:,1::2] = np.sin(2*np.pi*np.outer(psr.toas, fsred))

# get G matrices
psr.G = PALutils.createGmatrix(psr.dmatrix)

# pre compute diagonalized efac + equad white noise model
efac = np.dot(psr.G.T, np.dot(np.diag(psr.err**2), psr.G))
Example #10
tt = []
for p in psr:
    tt.append(np.min(p.toas))

# find reference time
tref = np.min(tt)

# now scale pulsar time
for p in psr:
    p.toas -= tref

# get Tmax
Tmax = np.max([p.toas.max() - p.toas.min() for p in psr])

# initialize fourier design matrix
F = [PALutils.createfourierdesignmatrix(p.toas, args.nmodes, Tspan=Tmax) for p in psr]

if args.powerlaw:
    tmp, f = PALutils.createfourierdesignmatrix(p.toas, args.nmodes, Tspan=Tmax, freq=True)

# get G matrices
for p in psr:
    p.G = PALutils.createGmatrix(p.dmatrix)

# run Fp statistic to determine starting frequency
print 'Running initial Fpstat search'
fsearch = np.logspace(-9, -7, 200)
fpstat = np.zeros(len(fsearch))
for ii in range(len(fsearch)):
    fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii])
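
The snippet breaks off after filling `fpstat`; in Examples #4 and #7 the same grid search is followed by selecting the maximum-likelihood frequency, so the next step here is presumably:

# presumably (as in Examples #4 and #7): take the grid point that
# maximizes the Fp statistic as the starting frequency
fmaxlike = fsearch[np.argmax(fpstat)]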
Example #11
        pass

print 'Reading in HDF5 file' 

# import hdf5 file
pfile = h5.File(args.h5file, 'r')

# define the pulsargroup
pulsargroup = pfile['Data']['Pulsars'][args.pname]

# fill in pulsar class
psr = PALpulsarInit.pulsar(pulsargroup, addGmatrix=True)

# initialize fourier design matrix
if args.nmodes != 0:
    F, f = PALutils.createfourierdesignmatrix(psr.toas, args.nmodes, freq=True)

# get G matrices
psr.G = PALutils.createGmatrix(psr.dmatrix)

# pre-compute diagonalized efac + equad white noise model
efac = np.dot(psr.G.T, np.dot(np.diag(psr.err**2), psr.G))
equad = np.dot(psr.G.T, psr.G)

# whiten with the Cholesky factor of the equad term, then diagonalize the
# remaining efac term with an SVD
L = np.linalg.cholesky(equad)
Linv = np.linalg.inv(L)
sand = np.dot(Linv, np.dot(efac, Linv.T))
u, s, v = np.linalg.svd(sand)

# projection that renders both white-noise terms diagonal
proj = np.dot(u.T, np.dot(Linv, psr.G.T))

# project residuals onto new basis
psr.res = np.dot(proj, psr.res)
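
The Cholesky/SVD construction above yields a projection that renders both white-noise building blocks diagonal at once: `np.dot(proj, np.dot(np.diag(err**2), proj.T))` equals `np.diag(s)`, while `np.dot(proj, proj.T)` is the identity on the G-projected space. Below is a small self-contained numpy check of that property, using random stand-ins for `psr.err` and `psr.dmatrix` (assumptions for illustration only, not data from the original script):

import numpy as np

np.random.seed(0)

# random stand-ins for the pulsar quantities (illustration only)
ntoa, nfit = 50, 5
err = 0.1 + np.random.rand(ntoa)                     # plays the role of psr.err
M = np.random.randn(ntoa, nfit)                      # plays the role of psr.dmatrix
G = np.linalg.qr(M, mode='complete')[0][:, nfit:]    # plays the role of psr.G

# same construction as in the example above
efac = np.dot(G.T, np.dot(np.diag(err**2), G))
equad = np.dot(G.T, G)
L = np.linalg.cholesky(equad)
Linv = np.linalg.inv(L)
sand = np.dot(Linv, np.dot(efac, Linv.T))
u, s, v = np.linalg.svd(sand)
proj = np.dot(u.T, np.dot(Linv, G.T))

# both white-noise terms are diagonal in the projected basis
assert np.allclose(np.dot(proj, np.dot(np.diag(err**2), proj.T)), np.diag(s))
assert np.allclose(np.dot(proj, proj.T), np.eye(ntoa - nfit))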