Example 1
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]
        gam = cube[2]
        A = 10**cube[3]

        fs = np.zeros(args.ss)
        for ii in range(args.ss):
            fs[ii] = 10**cube[4 + ii]
        rho2 = np.zeros(args.ss)
        for ii in range(args.ss):
            rho2[ii] = cube[ii + 4 + args.ss]

        # check to make sure frequencies are ordered
        ordered = np.all([fs[ii] < fs[ii + 1] for ii in range(args.ss - 1)])

        if ordered:

            F1 = list(
                PALutils.createfourierdesignmatrix(psr.toas, args.nmodes).T)
            tmp, f = PALutils.createfourierdesignmatrix(psr.toas,
                                                        args.nmodes,
                                                        freq=True)
            for ii in range(args.ss):
                F1.append(np.cos(2 * np.pi * fs[ii] * psr.toas))
                F1.append(np.sin(2 * np.pi * fs[ii] * psr.toas))

            F = np.array(F1).T
            F = np.dot(proj, F)

            # compute total time span of data
            Tspan = psr.toas.max() - psr.toas.min()

            # compute rho (power spectrum coefficients) from A and gam
            f1yr = 1 / 3.16e7
            rho = list(
                np.log10(A**2 / 12 / np.pi**2 * f1yr**(gam - 3) * f**(-gam) /
                         Tspan))

            # compute total rho
            for ii in range(args.ss):
                rho.append(rho2[ii])

            loglike = PALLikelihoods.lentatiMarginalizedLike(
                psr, F, s, np.array(rho), efac**2, equad)

        else:

            loglike = -np.inf

        #print efac, rho, loglike

        return loglike
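The mapping from (A, gam) to the log-power coefficients rho above is just a power-law spectral density evaluated at the Fourier bin frequencies and normalized by the time span. A minimal, self-contained sketch of that step (the baseline and mode count below are toy values, not taken from the example):

import numpy as np

# toy setup: 10 Fourier modes over a ~10 yr baseline (values are made up)
Tspan = 10 * 3.16e7                     # seconds
nmodes = 10
f = np.arange(1, nmodes + 1) / Tspan    # Fourier bin frequencies [Hz]

# power-law GWB parameters
A = 1e-15           # strain amplitude
gam = 13.0 / 3.0    # spectral index expected for a SMBHB background

# same expression as in the likelihood above
f1yr = 1 / 3.16e7
rho = np.log10(A**2 / 12 / np.pi**2 * f1yr**(gam - 3) * f**(-gam) / Tspan)

print(rho)          # log10 power in each frequency bin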
Example 2
def optStat(psr, ORF, gam=4.33333):
    """
    Computes the Optimal statistic as defined in Chamberlin, Creighton, Demorest et al (2013)

    @param psr: List of pulsar object instances
    @param ORF: Vector of pairwise overlap reduction values
    @param gam: Power spectral index of GWB (default = 13/3, i.e. SMBHBs)

    @return: Opt: Optimal statistic value (A_gw^2)
    @return: sigma: 1-sigma uncertainty on Optimal statistic
    @return: snr: signal-to-noise ratio of cross correlations

    """

    #TODO: maybe compute ORF in code instead of reading it in. Would be less
    # of a risk but a bit slower...

    k = 0
    npsr = len(psr)
    top = 0
    bot = 0
    for ll in xrange(0, npsr):
        for kk in xrange(ll + 1, npsr):

            # form matrix of toa residuals and compute SigmaIJ
            tm = PALutils.createTimeLags(psr[ll].toas, psr[kk].toas)

            # create cross covariance matrix without overall amplitude A^2
            SIJ = ORF[k] / 2 * PALutils.createRedNoiseCovarianceMatrix(
                tm, 1, gam)

            # construct numerator and denominator of optimal statistic
            bot += np.trace(
                np.dot(psr[ll].invCov,
                       np.dot(SIJ, np.dot(psr[kk].invCov, SIJ.T))))
            top += np.dot(psr[ll].res, np.dot(psr[ll].invCov, np.dot(SIJ, \
                        np.dot(psr[kk].invCov, psr[kk].res))))
            k += 1

    # compute optimal statistic
    Opt = top / bot

    # compute uncertainty
    sigma = 1 / np.sqrt(bot)

    # compute SNR
    snr = top / np.sqrt(bot)

    # return optimal statistic and snr
    return Opt, sigma, snr
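The statistic above is, for each pulsar pair, a ratio of a data term r_I^T C_I^-1 S_IJ C_J^-1 r_J to a trace normalization. A toy sketch of that bookkeeping with random stand-ins for the residuals and covariance objects (no PAL calls, shapes only):

import numpy as np

rng = np.random.default_rng(0)
n1, n2 = 50, 60    # number of TOAs for a toy pulsar pair

# stand-ins: residuals, inverse covariances, and the cross covariance S_IJ
r1, r2 = rng.standard_normal(n1), rng.standard_normal(n2)
C1inv, C2inv = np.eye(n1), np.eye(n2)
SIJ = 0.01 * rng.standard_normal((n1, n2))

# numerator and denominator exactly as accumulated in optStat
top = r1 @ C1inv @ SIJ @ C2inv @ r2
bot = np.trace(C1inv @ SIJ @ C2inv @ SIJ.T)

Opt = top / bot             # per-pair estimator of A_gw^2
sigma = 1 / np.sqrt(bot)    # its 1-sigma uncertainty
print(Opt, sigma)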
Example 4
def loglike(x):

    start = time.time()

    # parameter values
    theta = x[0]
    phi = x[1]
    freq = 10 ** x[2]
    mc = 10 ** x[3]
    dist = 10 ** x[4]
    psi = x[5]
    inc = x[6]
    phase = x[7]

    pdist = x[8:]

    # generate list of waveforms for all pulsars
    s = PALutils.createResidualsFast(
        psr, theta, phi, mc, dist, freq, phase, psi, inc, pdist=pdist, evolve=False, phase_approx=True
    )

    loglike = 0
    for ct, p in enumerate(psr):

        diff = p.res - s[ct]
        loglike += -0.5 * (logdetTerm[ct] + np.dot(diff, np.dot(p.invCov, diff)))

    if np.isnan(loglike):
        print "NaN log-likelihood. Not good..."
        return -np.inf
    else:
        return loglike
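Each pulsar contributes the standard multivariate-Gaussian term -0.5*(logdet + r^T C^-1 r), and logdetTerm can be precomputed once because the covariance is fixed during sampling. A self-contained numerical check of that identity against scipy, on a toy covariance (this assumes logdetTerm stores log det(2*pi*C)):

import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(1)
n = 20
A = rng.standard_normal((n, n))
C = A @ A.T + n * np.eye(n)       # toy positive-definite covariance
diff = rng.standard_normal(n)     # stand-in for p.res - s[ct]

invCov = np.linalg.inv(C)
logdetTerm = np.linalg.slogdet(2 * np.pi * C)[1]   # assumed definition

loglike = -0.5 * (logdetTerm + diff @ invCov @ diff)

# agrees with scipy's zero-mean Gaussian density
print(loglike, multivariate_normal(cov=C).logpdf(diff))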
Example 5
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]

        fs = np.zeros(args.ss)
        for ii in range(args.ss):
            fs[ii] = 10**cube[2+ii]
        rho = np.zeros(args.nmodes+args.ss)
        for ii in range(args.nmodes+args.ss):
            rho[ii] = cube[ii+2+args.ss]

        F1 = list(PALutils.createfourierdesignmatrix(psr.toas, args.nmodes).T)
        for ii in range(args.ss):
            F1.append(np.cos(2*np.pi*fs[ii]*psr.toas))
            F1.append(np.sin(2*np.pi*fs[ii]*psr.toas))

        F = np.array(F1).T

        F = np.dot(proj, F)
       
        loglike = PALLikelihoods.lentatiMarginalizedLike(psr, F, s, rho, efac, equad)

        #print efac, rho, loglike

        return loglike
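The distinctive step in this variant is augmenting the base Fourier design matrix with extra sine/cosine columns at free-floating "single source" frequencies before projecting. A plain-numpy sketch of that construction (toy TOAs; the base matrix is built by hand here in place of PALutils.createfourierdesignmatrix):

import numpy as np

rng = np.random.default_rng(2)
toas = np.sort(rng.uniform(0, 3.16e8, 100))   # toy TOAs over ~10 yr [s]
Tspan = toas.max() - toas.min()

nmodes = 5
base_freqs = np.arange(1, nmodes + 1) / Tspan

# base Fourier design matrix: a sine and cosine column per mode
cols = []
for f in base_freqs:
    cols.append(np.sin(2 * np.pi * f * toas))
    cols.append(np.cos(2 * np.pi * f * toas))

# extra single-source columns at arbitrary frequencies, as in the
# loop over args.ss above (frequencies here are made up)
for f in [3.3e-8, 7.1e-8]:
    cols.append(np.cos(2 * np.pi * f * toas))
    cols.append(np.sin(2 * np.pi * f * toas))

F = np.array(cols).T
print(F.shape)    # (ntoa, 2*nmodes + 2*number of extra frequencies)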
Example 7
def loglike(x):

    efac = np.ones(npsr)
    equad = np.zeros(npsr)
    theta = x[0]
    phi = x[1]
    f = 10**x[2]
    mc = 10**x[3]
    dist = 10**x[4]
    psi = x[5]
    inc = x[6]
    phase = x[7]
    pdist = x[8:(8 + npsr)]

    loglike = 0
    for ct, p in enumerate(psr):

        # make waveform with pulsar term
        s = PALutils.createResiduals(p, theta, phi, mc, dist, f, phase, psi, inc, pdist=pdist[ct], \
                 psrTerm=True)

        # project onto white noise basis
        s = np.dot(projList[ct], s)

        loglike += np.sum(p.res * s / (efac[ct] * Diag[ct] + equad[ct]**2))
        loglike -= 0.5 * np.sum(s**2 / (efac[ct] * Diag[ct] + equad[ct]**2))

    if np.isnan(loglike):
        print 'NaN log-likelihood. Not good...'
        return -np.inf
    else:
        return loglike
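After projecting onto the white-noise basis the covariance is diagonal (efac*Diag + equad**2), and the two sums above are just the signal-dependent part of -0.5*(r-s)^T N^-1 (r-s), with the signal-independent piece dropped. A quick standalone check of that algebra on toy data:

import numpy as np

rng = np.random.default_rng(3)
n = 100
Diag = rng.uniform(0.5, 2.0, n)    # toy diagonalized white-noise variances
efac, equad = 1.1, 0.1
N = efac * Diag + equad**2

res = np.sqrt(N) * rng.standard_normal(n)    # toy residuals
s = 0.3 * np.sin(np.linspace(0, 10, n))      # toy waveform

# signal-dependent terms, as accumulated in loglike above
loglike = np.sum(res * s / N) - 0.5 * np.sum(s**2 / N)

# equals the full Gaussian exponent up to an s-independent constant
full = -0.5 * np.sum((res - s)**2 / N)
const = -0.5 * np.sum(res**2 / N)
print(np.isclose(loglike, full - const))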
Example 10
def upperLimitFunc(h, fstat_ref, freq, nreal):
    """
    Compute the value of the fstat for a range of parameters, with fixed
    amplitude over many realizations.

    @param h: value of the strain amplitude to keep constant
    @param fstat_ref: value of fstat for real data set
    @param freq: GW frequency
    @param nreal: number of realizations

    """
    Tmaxyr = np.array([(p.toas.max() - p.toas.min())/3.16e7 for p in psr]).max()
    count = 0
    for ii in range(nreal):

        # draw parameter values
        gwtheta = np.arccos(np.random.uniform(-1, 1))
        gwphi = np.random.uniform(0, 2*np.pi)
        gwphase = np.random.uniform(0, 2*np.pi)
        gwinc = np.arccos(np.random.uniform(0, 1))
        gwpsi = np.random.uniform(-np.pi/4, np.pi/4)

        # check to make sure source has not coalesced during observation time
        coal = True
        while coal:
            gwmc = 10**np.random.uniform(7, 10)
            tcoal = 2e6 * (gwmc/1e8)**(-5/3) * (freq/1e-8)**(-8/3)
            if tcoal > Tmaxyr:
                coal = False

        # determine distance in order to keep strain fixed
        gwdist = 4 * np.sqrt(2/5) * (gwmc*4.9e-6)**(5/3) * (np.pi*freq)**(2/3) / h

        # convert back to Mpc
        gwdist /= 1.0267e14

        # create residuals 
        for ct,p in enumerate(psr):
            inducedRes = PALutils.createResiduals(p, gwtheta, gwphi, gwmc, gwdist, \
                            freq, gwphase, gwpsi, gwinc, evolve=True)
 
            # replace residuals in pulsar object
            p.res = res[ct] + np.dot(R[ct], inducedRes)

        # compute f-statistic
        fpstat = PALLikelihoods.fpStat(psr, freq)
        
        # check to see if larger than in real data
        if fpstat > fstat_ref:
            count += 1

    # now get detection probability
    detProb = count/nreal

    print freq, h, detProb

    return detProb - 0.95
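Since upperLimitFunc returns detProb - 0.95, the 95% upper limit on the strain is its root in h; presumably the surrounding script hands it to a scalar root finder. A self-contained sketch of that final step, with a made-up smooth detection-probability curve standing in for the Monte Carlo estimate:

import numpy as np
from scipy.optimize import brentq

# toy stand-in for upperLimitFunc: detection probability rising with h
# (h50 and width are illustrative numbers, not fit to anything)
def detprob_minus_95(h, h50=1e-14, width=0.3):
    detProb = 1 / (1 + np.exp(-(np.log10(h) - np.log10(h50)) / width))
    return detProb - 0.95

# the 95% upper limit is the root of detProb(h) - 0.95
hUL = brentq(detprob_minus_95, 1e-16, 1e-12)
print(hUL)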
Example 11
def marginalizedPulsarPhaseLike(psr, theta, phi, phase, inc, psi, freq, h, maximize=False):
    """ 
    Compute the log-likelihood marginalized over pulsar phases

    @param psr: List of pulsar object instances
    @param theta: GW polar angle [radian]
    @param phi: GW azimuthal angle [radian]
    @param phase: Initial GW phase [radian]
    @param inc: GW inclination angle [radian]
    @param psi: GW polarization angle [radian]
    @param freq: GW initial frequency [Hz]
    @param h: GW strain
    @param maximize: Option to maximize over pulsar phases instead of marginalize

    """

    # get number of pulsars
    npsr = len(psr)
       
    # get c and d
    c = np.cos(phase)
    d = np.sin(phase)

    # construct xi = M**5/3/D and omega
    xi = 0.25 * np.sqrt(5/2) * (np.pi*freq)**(-2/3) * h
    omega = np.pi*freq

    lnlike = 0
    for ct, pp in enumerate(psr):

        # compute relevant inner products
        cip = np.dot(np.cos(2*omega*pp.toas), np.dot(pp.invCov, pp.res)) 
        sip = np.dot(np.sin(2*omega*pp.toas), np.dot(pp.invCov, pp.res))
        N = np.dot(np.cos(2*omega*pp.toas), np.dot(pp.invCov, np.cos(2*omega*pp.toas)))

        # compute fplus and fcross
        fplus, fcross, cosMu = PALutils.createAntennaPatternFuncs(pp, theta, phi)

        # mind your p's and q's
        p = (1+np.cos(inc)**2) * (fplus*np.cos(2*psi) + fcross*np.sin(2*psi))
        q = 2*np.cos(inc) * (fplus*np.sin(2*psi) - fcross*np.cos(2*psi))

        # construct X Y and Z
        X = -xi/omega**(1/3) * (p*sip + q*cip - 0.5*xi/omega**(1/3)*N*c*(p**2+q**2))
        Y = -xi/omega**(1/3) * (q*sip - p*cip - 0.5*xi/omega**(1/3)*N*d*(p**2+q**2))
        Z = xi/omega**(1/3) * ((p*c+q*d)*sip - (p*d-q*c)*cip \
                        -0.5*xi/omega**(1/3)*N*(p**2+q**2))

        # add to log-likelihood
        #print X, Y
        if maximize:
            lnlike += Z + np.sqrt(X**2 + Y**2)
        else:
            lnlike += Z + np.log(ss.iv(0, np.sqrt(X**2 + Y**2)))

    return lnlike
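The marginalization over each pulsar phase rests on the identity (1/2pi) * integral over [0, 2pi] of exp(X*cos(phi) + Y*sin(phi)) dphi = I_0(sqrt(X**2 + Y**2)), which is why ss.iv(0, ...) appears above (and why maximizing instead gives the sqrt(X**2 + Y**2) term). A quick numerical verification of the identity:

import numpy as np
from scipy.special import iv
from scipy.integrate import quad

X, Y = 1.3, -0.7    # arbitrary test values

# marginalize exp(X*cos(phi) + Y*sin(phi)) over phi numerically
num, _ = quad(lambda phi: np.exp(X * np.cos(phi) + Y * np.sin(phi)),
              0, 2 * np.pi)
num /= 2 * np.pi

# closed form used in marginalizedPulsarPhaseLike
closed = iv(0, np.sqrt(X**2 + Y**2))

print(num, closed)    # the two agree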
Example 12
def crossPower(psr, gam=13 / 3):
    """

    Compute the cross power as defined in Eq 9 and uncertainty of Eq 10 in 
    Demorest et al (2012).

    @param psr: List of pulsar object instances
    @param gam: Power spectral index of GWB

    @return: vector of cross power for each pulsar pair
    @return: vector of cross power uncertainties for each pulsar pair

    """

    # initialization
    npsr = len(psr)

    # now compute cross power
    rho = []
    sig = []
    xi = []
    for ll in range(npsr):
        for kk in range(ll + 1, npsr):

            # matrix of time lags
            tm = PALutils.createTimeLags(psr[ll].toas, psr[kk].toas)

            # create cross covariance matrix without overall amplitude A^2
            SIJ = PALutils.createRedNoiseCovarianceMatrix(tm, 1, gam)

            # construct numerator and denominator of optimal statistic
            bot = np.trace(
                np.dot(psr[ll].invCov,
                       np.dot(SIJ, np.dot(psr[kk].invCov, SIJ.T))))
            top = np.dot(psr[ll].res, np.dot(psr[ll].invCov, np.dot(SIJ, \
                        np.dot(psr[kk].invCov, psr[kk].res))))

            # cross correlation and uncertainty
            rho.append(top / bot)
            sig.append(1 / np.sqrt(bot))

    return np.array(rho), np.array(sig)
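The per-pair outputs (rho, sig) are typically combined into a single amplitude estimate by an inverse-variance weighted fit of rho against the expected angular correlation (e.g. Hellings-Downs values). A sketch of one such combination, with made-up pair correlations rather than values computed from real pulsar positions:

import numpy as np

rng = np.random.default_rng(4)
npairs = 10

orf = rng.uniform(-0.2, 0.5, npairs)      # stand-in for Hellings-Downs values
A2_true = 4e-30                           # toy injected A^2
sig = rng.uniform(1e-30, 5e-30, npairs)   # per-pair uncertainties
rho = A2_true * orf + sig * rng.standard_normal(npairs)

# weighted least squares for A^2 in the model rho = A^2 * orf
A2_hat = np.sum(rho * orf / sig**2) / np.sum(orf**2 / sig**2)
A2_err = 1 / np.sqrt(np.sum(orf**2 / sig**2))
print(A2_hat, A2_err)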
Example 14
def bigPulsarPhaseJump(x, iter, beta):

    # get old parameters
    q = x.copy()

    # pick pulsar index at random
    ind = np.random.randint(0, npsr, npsr)
    ind = np.unique(ind)

    # get relevant parameters
    freq = 10**x[2]
    pdist = x[8 + ind]
    pdistErr = np.array([psr[ii].distErr for ii in list(ind)])
    phi = x[1]
    theta = x[0]

    # put pulsar distance in correct units
    pdist *= 1.0267e11
    pdistErr *= 1.0267e11

    # get cosMu
    cosMu = np.zeros(len(ind))
    for ii in range(len(ind)):
        tmp1, temp2, cosMu[ii] = PALutils.createAntennaPatternFuncs(
            psr[ind[ii]], theta, phi)

    # construct pulsar phase
    phase_old = 2 * np.pi * freq * pdist * (1 - cosMu)

    # gaussian jump
    phase_jump = np.random.randn(
        np.size(pdist)) * pdistErr * freq * (1 - cosMu)

    # make jump multiple of 2 pi
    phase_jump = np.array([int(phase_jump[ii]) \
                    for ii in range(np.size(pdist))])

    # new phase
    phase_new = phase_old + 2 * np.pi * phase_jump

    # solve for new pulsar distances from phase_new
    L_new = phase_new / (2 * np.pi * freq * (1 - cosMu))

    # convert back to Kpc
    L_new /= 1.0267e11

    q[8 + ind] = L_new

    return q
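The jump above shifts each selected pulsar distance by a whole number of gravitational-wave cycles, so the pulsar-term phase 2*pi*f*L*(1 - cosMu) changes by a multiple of 2*pi and the likelihood is left (nearly) invariant. A standalone check of that bookkeeping for a single toy pulsar:

import numpy as np

rng = np.random.default_rng(5)
freq = 1e-8               # GW frequency [Hz]
cosMu = 0.3
kpc_in_s = 1.0267e11      # kpc expressed as light travel time [s]
L_old = 1.5 * kpc_in_s    # toy 1.5 kpc pulsar distance

phase_old = 2 * np.pi * freq * L_old * (1 - cosMu)

# integer number of cycles to jump, as in bigPulsarPhaseJump
ncycles = int(rng.standard_normal() * 0.1 * kpc_in_s * freq * (1 - cosMu))
phase_new = phase_old + 2 * np.pi * ncycles

# solve back for the distance and confirm the phase moved by 2*pi*n
L_new = phase_new / (2 * np.pi * freq * (1 - cosMu))
dphase = 2 * np.pi * freq * (L_new - L_old) * (1 - cosMu)
print(np.isclose(dphase, 2 * np.pi * ncycles))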
Example 16
def smallPulsarPhaseJump(x, iter, beta):

    # get old parameters
    q = x.copy()

    # jump size
    sigma = np.sqrt(beta) * 0.5

    # pick pulsar index at random
    ind = np.random.randint(0, npsr, npsr)
    ind = np.unique(ind)

    # get relevant parameters
    freq = 10**x[2]
    pdist = x[8 + ind]
    phi = x[1]
    theta = x[0]

    # put pulsar distance in correct units
    pdist *= 1.0267e11

    # get cosMu
    cosMu = np.zeros(len(ind))
    for ii in range(len(ind)):
        tmp1, temp2, cosMu[ii] = PALutils.createAntennaPatternFuncs(
            psr[ind[ii]], theta, phi)

    # construct pulsar phase
    phase_old = 2 * np.pi * freq * pdist * (1 - cosMu)

    # gaussian jump
    phase_new = phase_old + np.random.randn(np.size(pdist)) * sigma

    # solve for new pulsar distances from phase_new
    L_new = phase_new / (2 * np.pi * freq * (1 - cosMu))

    # convert back to Kpc
    L_new /= 1.0267e11

    q[8 + ind] = L_new

    return q
Example 19
def loglike(x):

    tstart = time.time()

    theta = np.arccos(x[0])
    phi = x[1]
    f = 10**x[2]
    h = 10**x[3]
    psi = x[4]
    inc = np.arccos(x[5])
    phase = x[6]

    # get pulsar phase
    pphase = np.zeros(npsr)
    for ii in range(npsr):
        pphase[ii] = x[(ndim - npsr) + ii]

    # pick a distance and mass from the strain. Doesn't matter since non-evolving
    mc = 5e8
    dist = 4 * np.sqrt(
        2 / 5) * (mc * 4.9e-6)**(5 / 3) * (np.pi * f)**(2 / 3) / h
    dist /= 1.0267e14

    loglike = 0
    for ct, p in enumerate(psr):

        # make waveform with no frequency evolution
        s = PALutils.createResiduals(p, theta, phi, mc, dist, f, phase, psi, inc,\
                                     pphase=pphase[ct], evolve=False)

        diff = p.res - s
        loglike += -0.5 * logdetTerm[ct]
        loglike += -0.5 * np.dot(diff, np.dot(p.invCov, diff))

    #print 'Evaluation time = {0} s'.format(time.time() - tstart)

    if np.isnan(loglike):
        print 'NaN log-likelihood. Not good...'
        return -np.inf
    else:
        return loglike
Example 20
tt=[] 
for p in psr:
    tt.append(np.min(p.toas))

# find reference time
tref = np.min(tt)

# now scale pulsar time
for p in psr:
    p.toas -= tref

# create list of reference residuals
res = [p.res for p in psr]

# get list of R matrices
R = [PALutils.createRmatrix(p.dmatrix, p.err) for p in psr]

L = []
for ct, p in enumerate(psr):

    Amp = p.Amp
    gam = p.gam
    efac = p.efac
    equad = p.equad
    try:
        cequad = p.cequad
    except AttributeError:
        cequad = 0
        
    avetoas, U = PALutils.exploderMatrix(p.toas)
    Tspan = p.toas.max()-p.toas.min()
Example 21
    def addInverseCovFromNoiseFile(self, parfile, timfile, noisefile, DMOFF=None, DMXOFF=None, dailyAverage=False):
        """
        
        Add noise covariance matrix after timing model subtraction.

        """

        # Check whether the two files exist
        if not os.path.isfile(parfile) or not os.path.isfile(timfile):
            raise IOError, "Cannot find parfile (%s) or timfile (%s)!" % (parfile, timfile)
        assert(self.filename != None), "ERROR: HDF5 file not set!"

        # 'a' means: read/write if exists, create otherwise
        self.h5file = h5.File(self.filename, 'a')

        # Create the data subgroup if it does not exist
        if "Data" in self.h5file:
            datagroup = self.h5file["Data"]
        else:
            raise IOError, "Cannot add noise parameters if Data group does not exist!"

        # Load pulsar data from the JPL Cython tempo2 library
        t2pulsar = t2.tempopulsar(parfile, timfile)
        
        # turn off DMMODEL fitting
        if DMOFF is not None:
            t2pulsar['DMMODEL'].fit = False

        # turn off DMX fitting
        if DMXOFF is not None:
            DMXFlag = False
            print 'Turning off DMX fitting and turning DM fitting on'
            for par in t2pulsar.pars:
                if 'DMX' in par:
                    t2pulsar[par].fit = False
                    t2pulsar['DM'].fit = True
                    DMXFlag = True
            if DMXFlag== False: 
                print 'NO DMX for pulsar {0}'.format(t2pulsar.name)

        # refit 5 times to make sure we are converged
        t2pulsar.fit(iters=5)

        # Create the pulsar subgroup if it does not exist
        if "Pulsars" in datagroup:
            pulsarsgroup = datagroup["Pulsars"]
        else:
            raise IOError, "Cannot add noise parameters if pulsar group does not exist!"

        # Look up the name of the pulsar, and see if it exists
        if t2pulsar.name in pulsarsgroup:
            pass
        else:
            raise IOError, "%s must already exists in %s to add noise parameters!"\
                    % (t2pulsar.name, self.filename)

        pulsarsgroup = pulsarsgroup[t2pulsar.name]

        # first create G matrix from design matrix and toas
        designmatrix = np.double(t2pulsar.designmatrix())
        toas = np.double(t2pulsar.toas()*86400)
        errs = np.double(t2pulsar.toaerrs*1e-6)

        # if doing daily averaging
        if dailyAverage:

            # get average quantities
            toas, qmatrix, errs, dmatrix, freqs, bands = PALutils.dailyAverage(t2pulsar)

            # construct new daily averaged residuals and designmatrix
            toas *= 86400
            designmatrix = np.dot(qmatrix, dmatrix)
        
        G = PALutils.createGmatrix(designmatrix)

        # create matrix of time lags
        tm = PALutils.createTimeLags(toas, toas, round=True)

        # now read noise file to get model and parameters
        file = open(noisefile,'r')

        fH = None
        tau = None
        DMAmp = None
        DMgam = None
 
        for line in file.readlines():
            
            # default parameters for different models other than pure PL
            key = line.split()[0]

            # get amplitude
            if "Amp" == key:
                Amp = float(line.split()[-1])

            # get spectral index
            elif "gam" == key:
                gam = float(line.split()[-1])
            
            # get efac
            elif "efac" == key:
                efac = float(line.split()[-1])
            
            # get equad
            elif "equad" == key:
                equad = float(line.split()[-1])
            
            # get high frequency cutoff if available
            elif "fH" == key:
                fH = float(line.split()[-1])
            
            # get correlation time scale if available
            elif "tau" == key:
                tau = float(line.split()[-1])

            # get DM Amplitude if available
            elif "DMAmp" == key:
                DMAmp = float(line.split()[-1])

            # get DM Spectral Index if available
            elif "DMgam" == key:
                DMgam = float(line.split()[-1])

        # construct red and white noise covariance matrices
        red = PALutils.createRedNoiseCovarianceMatrix(tm, Amp, gam, fH=fH)
        white = PALutils.createWhiteNoiseCovarianceMatrix(errs, efac, equad, tau=tau)

        # construct post timing model marginalization covariance matrix
        cov = red + white
        pcov = np.dot(G.T, np.dot(cov, G))

        # finally construct "inverse"
        invCov = np.dot(G, np.dot(np.linalg.inv(pcov), G.T))

        # create dataset for inverse covariance matrix
        pulsarsgroup.create_dataset('invCov', data = invCov) 

        # create dataset for G matrix
        pulsarsgroup.create_dataset('Gmatrix', data = G) 

        # record noise parameter values
        pulsarsgroup.create_dataset('Amp', data = Amp)
        pulsarsgroup.create_dataset('gam', data = gam)
        pulsarsgroup.create_dataset('efac', data = efac)
        pulsarsgroup.create_dataset('equad', data = equad)
        if fH is not None:
            pulsarsgroup.create_dataset('fH', data = fH)
        if tau is not None:
            pulsarsgroup.create_dataset('tau', data = tau)
        if DMAmp is not None:
            pulsarsgroup.create_dataset('DMAmp', data = DMAmp)
        if DMgam is not None:
            pulsarsgroup.create_dataset('DMgam', data = DMgam)


        # Close the hdf5 file
        self.h5file.close()
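The "inverse" built at the end is not a full matrix inverse: the timing-model columns are projected out with G (an orthonormal basis for the left null space of the design matrix), and the inversion happens only in that subspace, invCov = G (G^T C G)^-1 G^T. A toy numpy sketch of the same construction, with an SVD standing in for PALutils.createGmatrix:

import numpy as np

rng = np.random.default_rng(6)
ntoa, nfit = 30, 4
M = rng.standard_normal((ntoa, nfit))    # toy timing-model design matrix

# G spans the left null space of M (what createGmatrix is assumed to return)
U, s, Vt = np.linalg.svd(M, full_matrices=True)
G = U[:, nfit:]                          # shape (ntoa, ntoa - nfit)

A = rng.standard_normal((ntoa, ntoa))
C = A @ A.T + ntoa * np.eye(ntoa)        # toy noise covariance

# post-fit covariance and its subspace "inverse", as in the code above
pcov = G.T @ C @ G
invCov = G @ np.linalg.inv(pcov) @ G.T

# invCov annihilates anything in the timing-model column space ...
print(np.allclose(invCov @ M, 0))
# ... and inverts C within the orthogonal complement
print(np.allclose(G.T @ invCov @ C @ G, np.eye(ntoa - nfit)))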
Esempio n. 22
0
    def addInverseCovFromNoiseFile(self,
                                   parfile,
                                   timfile,
                                   noisefile,
                                   DMOFF=None,
                                   DMXOFF=None,
                                   dailyAverage=False):
        """
        
        Add noise covariance matrix after timing model subtraction.

        """

        # Check whether the two files exist
        if not os.path.isfile(parfile) or not os.path.isfile(timfile):
            raise IOError, "Cannot find parfile (%s) or timfile (%s)!" % (
                parfile, timfile)
        assert (self.filename != None), "ERROR: HDF5 file not set!"

        # 'a' means: read/write if exists, create otherwise
        self.h5file = h5.File(self.filename, 'a')

        # Create the data subgroup if it does not exist
        if "Data" in self.h5file:
            datagroup = self.h5file["Data"]
        else:
            raise IOError, "Cannot add noise parameters if Data group does not exist!"

        # Load pulsar data from the JPL Cython tempo2 library
        t2pulsar = t2.tempopulsar(parfile, timfile)

        # turn off DMMODEL fitting
        if DMOFF is not None:
            t2pulsar['DMMODEL'].fit = False

        # turn off DMX fitting
        if DMXOFF is not None:
            DMXFlag = False
            print 'Turning off DMX fitting and turning DM fitting on'
            for par in t2pulsar.pars:
                if 'DMX' in par:
                    t2pulsar[par].fit = False
                    t2pulsar['DM'].fit = True
                    DMXFlag = True
            if DMXFlag == False:
                print 'NO DMX for pulsar {0}'.format(t2pulsar.name)

        # refit 5 times to make sure we are converged
        t2pulsar.fit(iters=5)

        # Create the pulsar subgroup if it does not exist
        if "Pulsars" in datagroup:
            pulsarsgroup = datagroup["Pulsars"]
        else:
            raise IOError, "Cannot add noise parameters if pulsar group does not exist!"

        # Look up the name of the pulsar, and see if it exist
        if t2pulsar.name in pulsarsgroup:
            pass
        else:
            raise IOError, "%s must already exists in %s to add noise parameters!"\
                    % (t2pulsar.name, self.filename)

        pulsarsgroup = pulsarsgroup[t2pulsar.name]

        # first create G matrix from design matrix and toas
        designmatrix = np.double(t2pulsar.designmatrix())
        toas = np.double(t2pulsar.toas() * 86400)
        errs = np.double(t2pulsar.toaerrs * 1e-6)

        # if doing daily averaging
        if dailyAverage:

            # get average quantities
            toas, qmatrix, errs, dmatrix, freqs, bands = PALutils.dailyAverage(
                t2pulsar)

            # construct new daily averaged residuals and designmatrix
            toas *= 86400
            designmatrix = np.dot(qmatrix, dmatrix)

        G = PALutils.createGmatrix(designmatrix)

        # create matrix of time lags
        tm = PALutils.createTimeLags(toas, toas, round=True)

        # now read noise file to get model and parameters
        file = open(noisefile, 'r')

        fH = None
        tau = None
        DMAmp = None
        DMgam = None

        for line in file.readlines():

            # default parameters for different models other than pure PL
            key = line.split()[0]

            # get amplitude
            if "Amp" == key:
                Amp = float(line.split()[-1])

            # get spectral index
            elif "gam" == key:
                gam = float(line.split()[-1])

            # get efac
            elif "efac" == key:
                efac = float(line.split()[-1])

            # get quad
            elif "equad" == key:
                equad = float(line.split()[-1])

            # get high frequency cutoff if available
            elif "fH" == key:
                fH = float(line.split()[-1])

            # get correlation time scale if available
            elif "tau" == key:
                tau = float(line.split()[-1])

            # get DM Amplitude if available
            elif "DMAmp" == key:
                DMAmp = float(line.split()[-1])

            # get DM Spectral Index if available
            elif "DMgam" == key:
                DMgam = float(line.split()[-1])

        # cosstruct red and white noise covariance matrices
        red = PALutils.createRedNoiseCovarianceMatrix(tm, Amp, gam, fH=fH)
        white = PALutils.createWhiteNoiseCovarianceMatrix(errs,
                                                          efac,
                                                          equad,
                                                          tau=tau)

        # construct post timing model marginalization covariance matrix
        cov = red + white
        pcov = np.dot(G.T, np.dot(cov, G))

        # finally construct "inverse"
        invCov = np.dot(G, np.dot(np.linalg.inv(pcov), G.T))

        # create dataset for inverse covariance matrix
        pulsarsgroup.create_dataset('invCov', data=invCov)

        # create dataset for G matrix
        pulsarsgroup.create_dataset('Gmatrix', data=G)

        # record noise parameter values
        pulsarsgroup.create_dataset('Amp', data=Amp)
        pulsarsgroup.create_dataset('gam', data=gam)
        pulsarsgroup.create_dataset('efac', data=efac)
        pulsarsgroup.create_dataset('equad', data=equad)
        if fH is not None:
            pulsarsgroup.create_dataset('fH', data=fH)
        if tau is not None:
            pulsarsgroup.create_dataset('tau', data=tau)
        if DMAmp is not None:
            pulsarsgroup.create_dataset('DMAmp', data=DMAmp)
        if DMgam is not None:
            pulsarsgroup.create_dataset('DMgam', data=DMgam)

        # Close the hdf5 file
        self.h5file.close()
Example 23
    def __init__(self, pulsargroup, addNoise=False, addGmatrix=True):

        # loop through keys in pulsargroup and fill in psr attributes that are needed for GW analysis
        self.dist = None
        self.distErr = None
        self.fH = None

        for key in pulsargroup:

            # look for TOAs
            if key == "TOAs":
                self.toas = pulsargroup[key].value

            # residuals
            elif key == "residuals":
                self.res = pulsargroup[key].value

            # toa error bars
            elif key == "toaErr":
                self.err = pulsargroup[key].value

            # frequencies in Hz
            elif key == "freqs":
                self.freqs = pulsargroup[key].value

            # design matrix
            elif key == "designmatrix":
                self.dmatrix = pulsargroup[key].value
                self.ntoa, self.nfit = self.dmatrix.shape
                if addGmatrix:
                    self.G = PALutils.createGmatrix(self.dmatrix)

            # pulsar name
            elif key == "pname":
                self.name = pulsargroup[key].value

            # pulsar distance in kpc
            elif key == "dist":
                self.dist = pulsargroup[key].value

            # pulsar distance uncertainty in kpc
            elif key == "distErr":
                self.distErr = pulsargroup[key].value

            # right ascension and declination
            elif key == 'tmp_name':
                par_names = list(pulsargroup[key].value)
                for ct, name in enumerate(par_names):

                    # right ascension and phi
                    if name == "RAJ":
                        self.ra = pulsargroup["tmp_valpost"].value[ct]
                        self.phi = self.ra

                    # declination
                    if name == "DECJ":
                        self.dec = pulsargroup["tmp_valpost"].value[ct]
                        self.theta = np.pi / 2 - self.dec

            # inverse covariance matrix
            elif key == "invCov":
                if addNoise:
                    self.invCov = pulsargroup[key].value

            ## noise parameters ##

            elif key == "Amp":
                self.Amp = pulsargroup[key].value

            # red noise spectral index
            elif key == "gam":
                self.gam = pulsargroup[key].value

            # efac
            elif key == "efac":
                self.efac = pulsargroup[key].value

            # equad
            elif key == "equad":
                self.equad = pulsargroup[key].value

            elif key == "cequad":
                self.cequad = pulsargroup[key].value

            # fH
            elif key == "fH":
                self.fH = pulsargroup[key].value

        if self.dist is None:
            print 'WARNING: No distance info, using d = 1 kpc'
            self.dist = 1.0

        if self.distErr is None:
            print 'WARNING: No distance error info, using sigma_d = 0.1 kpc'
            self.distErr = 0.1
Example 24
# make sure all pulsars have the same reference time
tt=[] 
for p in psr:
    tt.append(np.min(p.toas))

# find reference time
tref = np.min(tt)

# now scale pulsar time
for p in psr:
    p.toas -= tref

# get G matrices
for p in psr:
    p.G = PALutils.createGmatrix(p.dmatrix)

# run Fp statistic to determine starting frequency
if args.null == False:
    print 'Running initial Fpstat search'
    fsearch = np.logspace(-9, -7, 1000)
    fpstat = np.zeros(len(fsearch))
    for ii in range(len(fsearch)):
        fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii])

    # determine maximum likelihood frequency
    fmaxlike = fsearch[np.argmax(fpstat)]
    print 'Maximum likelihood from f-stat search = {0}\n'.format(fmaxlike)

# get Tmax
Tmax = np.max([p.toas.max() - p.toas.min() for p in psr])
Example 25
def firstOrderLikelihood(psr, ORF, Agw, gamgw, Ared, gred, efac, equad, \
                        interpolate=False):
    """
    Compute the value of the first-order likelihood as defined in 
    Ellis, Siemens, van Haasteren (2013).

    @param psr: List of pulsar object instances
    @param ORF: Vector of pairwise overlap reduction values
    @param Agw: Amplitude of GWB in standard strain amplitude units
    @param gamgw: Power spectral index of GWB
    @param Ared: Vector of amplitudes of intrinsic red noise in GWB strain units
    @param gred: Vector of power spectral indices of intrinsic red noise
    @param efac: Vector of efacs 
    @param equad: Vector of equads
    @param interpolate: Boolean to perform interpolation only with compressed
                        data. (default = False)

    @return: Log-likelihood value

    """
    npsr = len(psr)
    loglike = 0
    tmp = []

    # start loop to evaluate auto-terms
    for ll in range(npsr):

        r1 = np.dot(psr[ll].G.T, psr[ll].res)

        # create time lags
        tm = PALutils.createTimeLags(psr[ll].toas, psr[ll].toas)

        #TODO: finish option to do interpolation when using compression

        # calculate auto GW covariance matrix
        SC = PALutils.createRedNoiseCovarianceMatrix(tm, Agw, gamgw)

        # calculate auto red noise covariance matrix
        SA = PALutils.createRedNoiseCovarianceMatrix(tm, Ared[ll], gred[ll])

        # create white noise covariance matrix
        #TODO: add ability to use multiple efacs for different backends
        white = PALutils.createWhiteNoiseCovarianceMatrix(
            psr[ll].err, efac[ll], equad[ll])

        # total auto-covariance matrix
        P = SC + SA + white

        # sandwich with G matrices
        Ppost = np.dot(psr[ll].G.T, np.dot(P, psr[ll].G))

        # do cholesky solve
        cf = sl.cho_factor(Ppost)

        # solution vector P^-1 r
        rr = sl.cho_solve(cf, r1)

        # temporarily store P^-1 r
        tmp.append(np.dot(psr[ll].G, rr))

        # add to log-likelihood
        loglike += -0.5 * (np.sum(np.log(2 * np.pi * np.diag(cf[0])**2)) +
                           np.dot(r1, rr))

    # now compute cross terms
    k = 0
    for ll in range(npsr):
        for kk in range(ll + 1, npsr):

            # create time lags
            tm = PALutils.createTimeLags(psr[ll].toas, psr[kk].toas)

            # create cross covariance matrix
            SIJ = PALutils.createRedNoiseCovarianceMatrix(tm, 1, gamgw)

            # carry out matrix-vector operations
            tmp1 = np.dot(SIJ, tmp[kk])

            # add to likelihood
            loglike += ORF[k] / 2 * Agw**2 * np.dot(tmp[ll], tmp1)

            # increment ORF counter
            k += 1

    return loglike
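The auto-term loop gets both P^-1 r and log det P from a single Cholesky factorization: the code's sum of log(2*pi*diag(cf[0])**2) is exactly n*log(2*pi) + log det P. A small standalone check of that bookkeeping with scipy.linalg:

import numpy as np
import scipy.linalg as sl

rng = np.random.default_rng(7)
n = 25
A = rng.standard_normal((n, n))
P = A @ A.T + n * np.eye(n)    # toy post-projection covariance
r = rng.standard_normal(n)     # toy projected residuals

# one factorization yields both pieces of the Gaussian log-density
cf = sl.cho_factor(P)
rr = sl.cho_solve(cf, r)       # P^-1 r
loglike = -0.5 * (np.sum(np.log(2 * np.pi * np.diag(cf[0])**2)) + r @ rr)

# reference value computed the expensive way
ref = -0.5 * (np.linalg.slogdet(2 * np.pi * P)[1] + r @ np.linalg.inv(P) @ r)
print(np.isclose(loglike, ref))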
Example 26
def upperLimitFunc(h):
    """
    Compute the value of the fstat for a range of parameters, with fixed
    amplitude over many realizations.

    @param h: value of the strain amplitude to keep constant
    (fstat_ref, freq, and nreal are taken here from the enclosing scope
    rather than passed as arguments)

    """

    Tmaxyr = np.array([(p.toas.max() - p.toas.min()) / 3.16e7
                       for p in psr]).max()
    count = 0
    for ii in range(nreal):

        # draw parameter values
        gwtheta = np.arccos(np.random.uniform(-1, 1))
        gwphi = np.random.uniform(0, 2 * np.pi)
        gwphase = np.random.uniform(0, 2 * np.pi)
        gwinc = np.arccos(np.random.uniform(-1, 1))
        gwpsi = np.random.uniform(-np.pi / 4, np.pi / 4)

        # check to make sure source has not coalesced during observation time
        gwmc = 10**np.random.uniform(7, 10)
        tcoal = 2e6 * (gwmc / 1e8)**(-5 / 3) * (freq / 1e-8)**(-8 / 3)
        if tcoal < Tmaxyr:
            gwmc = 1e5

        # determine distance in order to keep strain fixed
        gwdist = 4 * np.sqrt(
            2 / 5) * (gwmc * 4.9e-6)**(5 / 3) * (np.pi * freq)**(2 / 3) / h

        # convert back to Mpc
        gwdist /= 1.0267e14

        # create residuals and refit for all pulsars
        for ct, p in enumerate(psr):
            inducedRes = PALutils.createResiduals(p, gwtheta, gwphi, gwmc, gwdist, \
                            freq, gwphase, gwpsi, gwinc)

            # create simulated data set
            noise = np.dot(L[ct], np.random.randn(L[ct].shape[0]))
            pp[ct].stoas[:] -= pp[ct].residuals() / 86400
            pp[ct].stoas[:] += np.longdouble(np.dot(RQ[ct], noise) / 86400)
            pp[ct].stoas[:] += np.longdouble(
                np.dot(RQ[ct], inducedRes) / 86400)

            # refit
            pp[ct].fit(iters=3)

            # replace residuals in pulsar object
            p.res = pp[ct].residuals()

            print p.name, p.rms() * 1e6

        # compute f-statistic
        fpstat = PALLikelihoods.fpStat(psr, freq)

        # check to see if larger than in real data
        if fpstat > fstat_ref:
            count += 1

    # now get detection probability
    detProb = count / nreal

    print h, detProb

    return detProb - 0.95
Example 27
# make sure all pulsar have same reference time
tt = []
for p in psr:
    tt.append(np.min(p.toas))

# find reference time
tref = np.min(tt)

# now scale pulsar time
for p in psr:
    p.toas -= tref

# get G matrices
for p in psr:
    p.G = PALutils.createGmatrix(p.dmatrix)

# run Fp statistic to determine starting frequency
if args.freq is None:
    print "Running initial Fpstat search"
    fsearch = np.logspace(-9, -7, 1000)
    fpstat = np.zeros(len(fsearch))
    for ii in range(len(fsearch)):
        fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii])

    # determine maximum likelihood frequency
    fmaxlike = fsearch[np.argmax(fpstat)]
    print "Maximum likelihood from f-stat search = {0}\n".format(fmaxlike)

# get determinant of covariance matrix for use in likelihood
logdetTerm = []
Example 28
# define the pulsargroup
pulsargroup = pfile['Data']['Pulsars']

# fill in pulsar class
psr = [PALpulsarInit.pulsar(pulsargroup[key],addNoise=True, addGmatrix=True) \
            for key in pulsargroup]

# number of pulsars
npsr = len(psr)

# create list of reference residuals
res = [p.res for p in psr]

# get list of R matrices
R = [PALutils.createRmatrix(p.dmatrix, p.err) for p in psr]

# pre-compute noise covariance matrices

ct = 0
D = []
Dmatrix = []
print 'Computing diagonalized auto-covariance matrices'
for key in pulsargroup:

    # get noise values from file TODO: change this to read directly from pulsar class
    Amp = pulsargroup[key]['Amp'].value
    gam = pulsargroup[key]['gam'].value
    efac = pulsargroup[key]['efac'].value
    equad = pulsargroup[key]['equad'].value
    try:
Example 29
def upperLimitFunc(A, optstat_ref, nreal):
    """
    Compute the value of the Optimal Statistic for different signal realizations
    
    @param A: value of GWB amplitude
    @param optstat_ref: value of optimal statistic with no injection 
    @param nreal: number of realizations

    """
    count = 0
    for ii in range(nreal):
        
        # create residuals
        inducedRes = PALutils.createGWB(psr, A, 4.3333)

        Pinvr = []
        Pinv = []
        for ct, p in enumerate(psr):

            # replace residuals in pulsar object
            p.res = res[ct] + np.dot(R[ct], inducedRes[ct])

            # determine injected amplitude by minimizing Likelihood function
            c = np.dot(Dmatrix[ct], p.res)
            f = lambda x: -PALutils.twoComponentNoiseLike(x, D[ct], c)
            # Brent minimization takes a 3-point bracket, not bounds
            fbounded = minimize_scalar(f, bracket=(0, 1e-14, 3.0e-13), method='Brent')
            Amp = np.abs(fbounded.x)
            #print Amp
            #Amp = A

            # construct P^-1 r
            Pinvr.append(c/(Amp**2 * D[ct] + 1))
            Pinv.append(1/(Amp**2 * D[ct] + 1))

        # construct optimal statistic
        k = 0
        top = 0
        bot = 0
        for ll in range(npsr):
            for kk in range(ll+1, npsr):

                # compute numerator of optimal statisic
                top += ORF[k]/2 * np.dot(Pinvr[ll], np.dot(SIJ[k], Pinvr[kk]))

                # compute trace term
                bot += (ORF[k]/2)**2 * np.trace(np.dot((Pinv[ll]*SIJ[k].T).T, (Pinv[kk]*SIJ[k]).T))
                # increment pair counter
                k += 1

        # get optimal statistic and SNR
        optStat = top/bot
        snr = top/np.sqrt(bot)
        
        # check to see if larger than in real data
        if optStat > optstat_ref:
            count += 1


    # now get detection probability
    detProb = count/nreal

    print A, detProb
    injAmp.append(A)
    injDetProb.append(detProb)

    return detProb - 0.95
Example 32
def HTFSingleInd(psr, F, proj, SS, rho, efac, equad, gwtheta, gwphi, mc, \
                                dist, fgw, phase0, psi, inc, pphase=None, pdist=None, \
                                evolve=True, psrTerm=True, phase_approx=False):
    """
    Lentati marginalized likelihood function only including efac and equad
    and power law coefficients

    @param psr: Pulsar class
    @param F: Fourier design matrix constructed in PALutils
    @param proj: Projection operator from white noise
    @param SS: Diagonalized white noise matrix
    @param rho: Power spectrum coefficients
    @param efac: constant multiplier on error bar covariance matrix term
    @param equad: Additional white noise added in quadrature to efac
    @param gwtheta: Polar angle of GW source in celestial coords [radians]
    @param gwphi: Azimuthal angle of GW source in celestial coords [radians]
    @param mc: Chirp mass of SMBHB [solar masses]
    @param dist: Luminosity distance to SMBHB [Mpc]
    @param fgw: Frequency of GW (twice the orbital frequency) [Hz]
    @param phase0: Initial Phase of GW source [radians]
    @param psi: Polarization of GW source [radians]
    @param inc: Inclination of GW source [radians]
    @param pdist: Pulsar distance to use other than those in psr [kpc]
    @param pphase: Use pulsar phase to determine distance [radian]
    @param psrTerm: Option to include pulsar term [boolean]
    @param evolve: Option to include frequency evolution [boolean]

    @return: LogLike: loglikelihood

    """
    # construct waveform residuals (frequency evolution controlled by evolve flag)
    s = PALutils.createResiduals(psr, gwtheta, gwphi, mc, dist, fgw, phase0, psi, inc,\
                                 pphase=pphase, evolve=evolve, pdist=pdist, \
                                 psrTerm=psrTerm, phase_approx=phase_approx)


    diff = np.dot(proj, (psr.res - s))


    # compute total time span of data
    Tspan = psr.toas.max() - psr.toas.min()

    # compute d
    d = np.dot(F.T, diff/(efac*SS + equad**2))

    # compute Sigma
    N = 1/(efac*SS + equad**2)
    right = (N*F.T).T
    FNF = np.dot(F.T, right)

    arr = np.zeros(2*len(rho))
    ct = 0
    for ii in range(0, 2*len(rho), 2):
        arr[ii] = rho[ct]
        arr[ii+1] = rho[ct]
        ct += 1

    Sigma = FNF + np.diag(1/arr)

    # cholesky decomp for second term in exponential
    cf = sl.cho_factor(Sigma)
    expval2 = sl.cho_solve(cf, d)
    logdet_Sigma = np.sum(2*np.log(np.diag(cf[0])))

    dtNdt = np.sum(diff**2/(efac*SS + equad**2))
    
    logdet_Phi = np.sum(np.log(arr))

    logdet_N = np.sum(np.log(efac*SS + equad**2))


    logLike = -0.5 * (logdet_N + logdet_Phi + logdet_Sigma)\
                    - 0.5 * (dtNdt - np.dot(d, expval2))


    return logLike
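The marginalized likelihood above only ever touches the diagonal N, the projected vector d = F^T N^-1 r, and Sigma = F^T N^-1 F + Phi^-1; that is the Woodbury form of a zero-mean Gaussian with covariance C = N + F Phi F^T. A self-contained numerical check of the equivalence on toy data (both sides omit the n*log(2*pi) constant):

import numpy as np
import scipy.linalg as sl

rng = np.random.default_rng(8)
ntoa, nmodes = 40, 6

F = rng.standard_normal((ntoa, 2 * nmodes))   # toy Fourier design matrix
Ndiag = rng.uniform(0.5, 2.0, ntoa)           # diagonal white noise
phi = rng.uniform(0.1, 1.0, 2 * nmodes)       # per-coefficient prior variances
r = rng.standard_normal(ntoa)                 # toy residuals

# the pieces used in HTFSingleInd
d = F.T @ (r / Ndiag)
Sigma = F.T @ (F / Ndiag[:, None]) + np.diag(1 / phi)
cf = sl.cho_factor(Sigma)
expval2 = sl.cho_solve(cf, d)
logdet_Sigma = 2 * np.sum(np.log(np.diag(cf[0])))
logdet_Phi = np.sum(np.log(phi))
logdet_N = np.sum(np.log(Ndiag))
dtNdt = np.sum(r**2 / Ndiag)

logLike = -0.5 * (logdet_N + logdet_Phi + logdet_Sigma) \
          - 0.5 * (dtNdt - d @ expval2)

# same value via the dense covariance C = N + F Phi F^T
C = np.diag(Ndiag) + F @ np.diag(phi) @ F.T
ref = -0.5 * np.linalg.slogdet(C)[1] - 0.5 * r @ np.linalg.inv(C) @ r
print(np.isclose(logLike, ref))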
Example 34
def covarianceJumpProposal(x, iter, beta):

    # get scale
    scale = 1

    # medium size jump every 1000 steps
    if np.random.rand() < 1 / 1000:
        scale = 5

    # large size jump every 10000 steps
    if np.random.rand() < 1 / 10000:
        scale = 50

    # get parameters in new diagonalized basis
    y = np.dot(U.T, x)

    # make correlated componentwise adaptive jump
    if iter < 100000:
        ind = np.unique(np.random.randint(0, 8, 1))
    else:
        ind = np.unique(np.random.randint(0, 8, ndim))

    neff = len(ind)
    cd = 2.4 * np.sqrt(1 / beta) / np.sqrt(neff) * scale
    #y[ind] = y[ind] + np.random.randn(neff) * cd * np.sqrt(S[ind])
    q = x.copy()
    q[ind] = q[ind] + np.random.randn(neff) * cd * np.sqrt(
        np.diag(cov[ind, ind]))
    #q = np.dot(U, y)

    # need to make sure that we keep the pulsar phase constant, plus small offset

    # get parameters before jump
    omega0 = 2 * np.pi * 10**x[2]
    L0 = x[8:] * 1.0267e11
    phi0 = x[1]
    theta0 = x[0]

    omega1 = 2 * np.pi * 10**q[2]
    L1 = q[8:] * 1.0267e11
    phi1 = q[1]
    theta1 = q[0]

    # get cosMu
    cosMu0 = np.zeros(npsr)
    cosMu1 = np.zeros(npsr)
    for ii in range(npsr):
        tmp1, temp2, cosMu0[ii] = PALutils.createAntennaPatternFuncs(
            psr[ii], theta0, phi0)
        tmp1, temp2, cosMu1[ii] = PALutils.createAntennaPatternFuncs(
            psr[ii], theta1, phi1)

    # construct new pulsar distances to keep the pulsar phases constant
    sigma = np.sqrt(1 / beta) * 0.0 * np.random.randn(npsr)
    phase0 = omega0 * L0 * (1 - cosMu0)
    phase1 = omega1 * L1 * (1 - cosMu1)
    L_new = L1 - (np.mod(phase1, 2 * np.pi) - np.mod(phase0, 2 * np.pi) +
                  sigma) / (omega1 * (1 - cosMu1))

    #phasenew = omega1*L_new*(1-cosMu1)
    #print 'New phase = ', np.mod(phasenew ,2*np.pi)
    #print 'Old phase = ', np.mod(phase0,2*np.pi)
    #print 'New L = ', L_new
    #print 'Old L = ', L0, '\n'

    # convert back to kpc
    L_new /= 1.0267e11

    q[8:] = L_new

    #print x[0]
    #print q[0], '\n'

    return q
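
covarianceJumpProposal only returns a proposed point and leans on module-level U, cov, ndim, npsr, and psr. Below is a minimal sketch, not PAL code, of the Metropolis-Hastings loop such a proposal usually plugs into; loglike, proposal, and x0 are hypothetical stand-ins.

import numpy as np

def mh_sampler(loglike, proposal, x0, niter=1000, beta=1.0):
    # generic Metropolis-Hastings loop; `proposal` has the same
    # (x, iter, beta) signature as covarianceJumpProposal above
    x = np.asarray(x0, dtype=float)
    lp = loglike(x)
    chain = np.zeros((niter, len(x)))
    for it in range(niter):
        q = proposal(x, it, beta)
        lq = loglike(q)
        # accept/reject with the tempered likelihood ratio
        if np.log(np.random.rand()) < beta * (lq - lp):
            x, lp = q, lq
        chain[it] = x
    return chain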
Example n. 35
def HTFSingleInd(
    psr,
    F,
    proj,
    SS,
    rho,
    efac,
    equad,
    gwtheta,
    gwphi,
    mc,
    dist,
    fgw,
    phase0,
    psi,
    inc,
    pphase=None,
    pdist=None,
    evolve=True,
    psrTerm=True,
    phase_approx=False,
):
    """
    Lentati-style marginalized likelihood including efac, equad, power-law
    coefficients, and a single continuous GW source

    @param psr: Pulsar class
    @param F: Fourier design matrix constructed in PALutils
    @param proj: Projection operator from white noise
    @param SS: Diagonalized white noise matrix
    @param rho: Power spectrum coefficients
    @param efac: constant multiplier on error bar covariance matrix term
    @param equad: Additional white noise added in quadrature to efac
    @param gwtheta: Polar angle of GW source in celestial coords [radians]
    @param gwphi: Azimuthal angle of GW source in celestial coords [radians]
    @param mc: Chirp mass of SMBMB [solar masses]
    @param dist: Luminosity distance to SMBMB [Mpc]
    @param fgw: Frequency of GW (twice the orbital frequency) [Hz]
    @param phase0: Initial Phase of GW source [radians]
    @param psi: Polarization of GW source [radians]
    @param inc: Inclination of GW source [radians]
    @param pdist: Pulsar distance to use other than those in psr [kpc]
    @param pphase: Use pulsar phase to determine distance [radian]
    @param psrTerm: Option to include pulsar term [boolean] 
    @param evolve: Option to include frequency evolution [boolean]

    @return: LogLike: loglikelihood

    """
    # make waveform with no frequency evolution
    s = PALutils.createResiduals(
        psr,
        gwtheta,
        gwphi,
        mc,
        dist,
        fgw,
        phase0,
        psi,
        inc,
        pphase=pphase,
        evolve=evolve,
        pdist=pdist,
        psrTerm=psrTerm,
        phase_approx=phase_approx,
    )

    diff = np.dot(proj, (psr.res - s))

    # compute total time span of data
    Tspan = psr.toas.max() - psr.toas.min()

    # compute d
    d = np.dot(F.T, diff / (efac * SS + equad ** 2))

    # compute Sigma
    N = 1 / (efac * SS + equad ** 2)
    right = (N * F.T).T
    FNF = np.dot(F.T, right)

    arr = np.zeros(2 * len(rho))
    ct = 0
    for ii in range(0, 2 * len(rho), 2):
        arr[ii] = rho[ct]
        arr[ii + 1] = rho[ct]
        ct += 1

    Sigma = FNF + np.diag(1 / arr)

    # cholesky decomp for second term in exponential
    cf = sl.cho_factor(Sigma)
    expval2 = sl.cho_solve(cf, d)
    logdet_Sigma = np.sum(2 * np.log(np.diag(cf[0])))

    dtNdt = np.sum(diff ** 2 / (efac * SS + equad ** 2))

    logdet_Phi = np.sum(np.log(arr))

    logdet_N = np.sum(np.log(efac * SS + equad ** 2))

    logLike = -0.5 * (logdet_N + logdet_Phi + logdet_Sigma) - 0.5 * (dtNdt - np.dot(d, expval2))

    return logLike
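
The d/Sigma/log-determinant bookkeeping above is the Woodbury and matrix-determinant-lemma form of a Gaussian likelihood with covariance C = N + F Phi F^T. A self-contained numerical check of that identity on random matrices (an illustration, not PAL data):

import numpy as np
import scipy.linalg as sl

np.random.seed(0)
n, m = 50, 8
Nd = np.random.rand(n) + 0.5              # diagonal of white-noise matrix N
F = np.random.randn(n, m)                 # stand-in Fourier design matrix
phi = np.random.rand(m) + 0.1             # diagonal of Phi
r = np.random.randn(n)                    # stand-in residuals

# direct evaluation with the full covariance C
C = np.diag(Nd) + np.dot(F * phi, F.T)
direct = -0.5 * (np.linalg.slogdet(C)[1] + r.dot(np.linalg.solve(C, r)))

# Woodbury form, as in the likelihood above
d = np.dot(F.T, r / Nd)
Sigma = np.dot(F.T, (F.T / Nd).T) + np.diag(1 / phi)
cf = sl.cho_factor(Sigma)
logdet = (np.sum(np.log(Nd)) + np.sum(np.log(phi)) +
          2 * np.sum(np.log(np.diag(cf[0]))))
wood = -0.5 * (logdet + np.sum(r**2 / Nd) - np.dot(d, sl.cho_solve(cf, d)))

assert np.allclose(direct, wood)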
Example n. 36
# make sure pulsar names are in the correct order
# TODO: is this a very roundabout way to do this?
index = []
for ct, p in enumerate(pp):

    if p.name == psr[ct].name:
        index.append(ct)
    else:
        for ii in range(npsr):
            if pp[ii].name == psr[ct].name:
                index.append(ii)

pp = [pp[ii] for ii in index]

M = [PALutils.createQSDdesignmatrix(p.toas) for p in psr]

RQ = [PALutils.createRmatrix(M[ct], p.err) for ct, p in enumerate(psr)]

# construct noise matrix for new noise realizations
print 'Constructing noise cholesky decompositions'
L = []
for ct, p in enumerate(psr):

    Amp = p.Amp
    gam = p.gam
    efac = p.efac
    equad = p.equad
    cequad = p.cequad

    avetoas, U = PALutils.exploderMatrix(p.toas)
Example n. 37
# make sure all pulsars have the same reference time
tt = []
for p in psr:
    tt.append(np.min(p.toas))

# find reference time
tref = np.min(tt)

# now scale pulsar time
for p in psr:
    p.toas -= tref


# compute pairwise overlap reduction function values
print 'Computing Overlap Reduction Function Values'
ORF = PALutils.computeORF(psr)

# since we have defined our ORF to be normalized to 1,
# the Hellings-Downs coefficients are ORF/2
hdcoeff = ORF/2

# compute optimal statistic
print 'Running Cross correlation Statistic on {0} Pulsars'.format(npsr)
crosspower, crosspowererr = PALLikelihoods.crossPower(psr, args.gam)

# angular separation
xi = []
for ll in range(npsr):
    for kk in range(ll+1, npsr):
        xi.append(PALutils.angularSeparation(psr[ll].theta, psr[ll].phi, \
                                            psr[kk].theta, psr[kk].phi))
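
For reference, the Hellings-Downs curve that these cross-correlation points are compared against; this is the standard closed form, not code from PAL, and with the normalization above it tends to 0.5 as xi -> 0.

import numpy as np

def hellings_downs(xi):
    # expected correlation at angular separation xi [radians];
    # undefined at xi = 0 exactly (x*log(x) -> 0 in the limit)
    x = (1 - np.cos(xi)) / 2.0
    return 1.5 * x * np.log(x) - x / 4.0 + 0.5

# e.g. hd = hellings_downs(np.linspace(0.01, np.pi, 100)) for an overlay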
Example n. 38
for p in psr:
    p.toas -= tref

#import glob
#invmat = glob.glob('/Users/Justin/Work/nanograv/nanograv/data_products/joris/*invCov*')
#
## get list of R matrices
#R = [PALutils.createRmatrix(p.dmatrix, p.err) for p in psr]
#
#for ct,p in enumerate(psr):
#    p.invCov = np.dot(R[ct].T, np.dot(p.invCov, R[ct]))


# compute pairwise overlap reduction function values
print 'Computing Overlap Reduction Function Values'
ORF = PALutils.computeORF(psr)

# compute optimal statistic
print 'Running Optimal Statistic on {0} Pulsars'.format(npsr)
Opt, sigma, snr = PALLikelihoods.optStat(psr, ORF, gam=args.gam)

print 'Results of Search\n'

print '------------------------------------\n'

print 'A_gw^2 = {0}'.format(Opt)
print 'std. dev. = {0}'.format(sigma)
print 'SNR = {0}'.format(snr)

if snr > 3.0:
    print 'SNR of {0} is above threshold!'.format(snr)
Example n. 39
def firstOrderLikelihood(psr, ORF, Agw, gamgw, Ared, gred, efac, equad, \
                        interpolate=False):
    """
    Compute the value of the first-order likelihood as defined in 
    Ellis, Siemens, van Haasteren (2013).

    @param psr: List of pulsar object instances
    @param ORF: Vector of pairwise overlap reduction values
    @param Agw: Amplitude of GWB in standard strain amplitude units
    @param gamgw: Power spectral index of GWB
    @param Ared: Vector of amplitudes of intrinsic red noise in GWB strain units
    @param gred: Vector of power spectral indices of intrinsic red noise
    @param efac: Vector of efacs 
    @param equad: Vector of equads
    @param interpolate: Boolean to perform interpolation only with compressed
                        data. (default = False)

    @return: Log-likelihood value

    """
    npsr = len(psr)
    loglike = 0
    tmp = []

    # start loop to evaluate auto-terms
    for ll in range(npsr):

       r1 = np.dot(psr[ll].G.T, psr[ll].res)

       # create time lags
       tm = PALutils.createTimeLags(psr[ll].toas, psr[ll].toas)

       #TODO: finish option to do interpolation when using compression

       # calculate auto GW covariance matrix
       SC = PALutils.createRedNoiseCovarianceMatrix(tm, Agw, gamgw)

       # calculate auto red noise covariance matrix
       SA = PALutils.createRedNoiseCovarianceMatrix(tm, Ared[ll], gred[ll])

       # create white noise covariance matrix
       #TODO: add ability to use multiple efacs for different backends
       white = PALutils.createWhiteNoiseCovarianceMatrix(psr[ll].err, efac[ll], equad[ll])

       # total auto-covariance matrix
       P = SC + SA + white

       # sandwich with G matrices
       Ppost = np.dot(psr[ll].G.T, np.dot(P, psr[ll].G))

       # do cholesky solve
       cf = sl.cho_factor(Ppost)

       # solution vector P^-1 r
       rr = sl.cho_solve(cf, r1)

       # temporarily store P^-1 r
       tmp.append(np.dot(psr[ll].G, rr))

       # add to log-likelihood
       loglike  += -0.5 * (np.sum(np.log(2*np.pi*np.diag(cf[0])**2)) + np.dot(r1, rr))

 
    # now compute cross terms
    k = 0
    for ll in range(npsr):
        for kk in range(ll+1, npsr):

            # create time lags
            tm = PALutils.createTimeLags(psr[ll].toas, psr[kk].toas)

            # create cross covariance matrix
            SIJ = PALutils.createRedNoiseCovarianceMatrix(tm, 1, gamgw)

            # carry out matrix-vector operations
            tmp1 = np.dot(SIJ, tmp[kk])

            # add to likelihood
            loglike += ORF[k]/2 * Agw**2 * np.dot(tmp[ll], tmp1)
            
            # increment ORF counter
            k += 1

    return loglike
Example n. 40
    for p in psr:
        print 'Pulsar {0} has {1} ns weighted rms'.format(
            p.name,
            p.rms() * 1e9)

npsr = len(psr)

pfile.close()

# get Tmax
Tmax = np.max([p.toas.max() - p.toas.min() for p in psr])

# initialize fourier design matrix
F = [
    PALutils.createfourierdesignmatrix(p.toas, args.nmodes, Tspan=Tmax)
    for p in psr
]

if args.powerlaw:
    tmp, f = PALutils.createfourierdesignmatrix(p.toas,
                                                args.nmodes,
                                                Tspan=Tmax,
                                                freq=True)

# get G matrices
for p in psr:
    p.G = PALutils.createGmatrix(p.dmatrix)

# pre compute diagonalized efac + equad white noise model
Diag = []
Example n. 41
def modelIndependentFullPTASinglSource(psr, proj, s, f, theta, phi, rho, kappa, efac, equad, ORF):
    """
    Model Independent single source testing function

    """
    tstart = time.time()
    
    # get the number of modes, should be the same for all pulsars
    nmode = len(rho)
    npsr = len(psr)

    # get F matrices for all pulsars at given frequency
    F = [np.array([np.sin(2*np.pi*f*p.toas), np.cos(2*np.pi*f*p.toas)]).T for p in psr]

    F = [np.dot(proj[ii], F[ii]) for ii in range(len(proj))]

    loglike1 = 0
    FtNF = []
    for ct,p in enumerate(psr):
    
        # compute d
        if ct == 0:
            d = np.dot(F[ct].T, p.res/(efac[ct]*s[ct] + equad[ct]**2))
        else:
            d = np.append(d, np.dot(F[ct].T, p.res/(efac[ct]*s[ct] + equad[ct]**2)))

        # compute FT N F
        N = 1/(efac[ct]*s[ct] + equad[ct]**2)
        right = (N*F[ct].T).T
        FtNF.append(np.dot(F[ct].T, right))
        
        # log determinant of N
        logdet_N = np.sum(np.log(efac[ct]*s[ct] + equad[ct]**2))

        # triple product in likelihood function
        dtNdt = np.sum(p.res**2/(efac[ct]*s[ct] + equad[ct]**2))

        loglike1 += -0.5 * (logdet_N + dtNdt)

    # construct elements of sigma array
    sigdiag = []
    sigoffdiag = []
    fplus = np.zeros(npsr)
    fcross = np.zeros(npsr)
    for ii in range(npsr):
        fplus[ii], fcross[ii], cosMu = PALutils.createAntennaPatternFuncs(psr[ii], theta, phi)
        tot = np.zeros(2*nmode)
        offdiag = np.zeros(2*nmode)

        # off diagonal terms
        offdiag[0::2] = 10**rho 
        offdiag[1::2] = 10**rho

        # diagonal terms
        tot[0::2] = 10**rho
        tot[1::2] = 10**rho

        # add in individual red noise
        if len(kappa[ii]) > 0:
            tot[0::2][0:len(kappa[ii])] += 10**kappa[ii]
            tot[1::2][0:len(kappa[ii])] += 10**kappa[ii]
        
        # fill in lists of arrays
        sigdiag.append(tot)
        sigoffdiag.append(offdiag)

    tstart2 = time.time()

    # compute Phi inverse from Lindley's code
    smallMatrix = np.zeros((2*nmode, npsr, npsr))
    for ii in range(npsr):
        for jj in range(ii,npsr):

            if ii == jj:
                smallMatrix[:,ii,jj] = ORF[ii,jj] * sigdiag[jj] * (fplus[ii]**2 + fcross[ii]**2)
            else:
                smallMatrix[:,ii,jj] = ORF[ii,jj] * sigoffdiag[jj] * (fplus[ii]*fplus[jj] + fcross[ii]*fcross[jj])
                smallMatrix[:,jj,ii] = smallMatrix[:,ii,jj]


    # invert them
    logdet_Phi = 0
    for ii in range(2*nmode):
        L = sl.cho_factor(smallMatrix[ii,:,:])
        smallMatrix[ii,:,:] = sl.cho_solve(L, np.eye(npsr))
        logdet_Phi += np.sum(2*np.log(np.diag(L[0])))

    # now fill in real covariance matrix
    Phi = np.zeros((2*npsr*nmode, 2*npsr*nmode))
    for ii in range(npsr):
        for jj in range(ii,npsr):
            for kk in range(0,2*nmode):
                Phi[kk+ii*2*nmode,kk+jj*2*nmode] = smallMatrix[kk,ii,jj]
    
    # symmetrize Phi
    Phi = Phi + Phi.T - np.diag(np.diag(Phi))
            
    # compute sigma
    Sigma = sl.block_diag(*FtNF) + Phi

    tmatrix = time.time() - tstart2

    tstart3 = time.time()
            
    # cholesky decomp for second term in exponential
    cf = sl.cho_factor(Sigma)
    expval2 = sl.cho_solve(cf, d)
    logdet_Sigma = np.sum(2*np.log(np.diag(cf[0])))

    tinverse = time.time() - tstart3

    logLike = -0.5 * (logdet_Phi + logdet_Sigma) + 0.5 * (np.dot(d, expval2)) + loglike1

    #print 'Total time: {0}'.format(time.time() - tstart)
    #print 'Matrix construction time: {0}'.format(tmatrix)
    #print 'Inversion time: {0}\n'.format(tinverse)

    return logLike
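
Phi above couples pulsars only within a single frequency mode, which is why it can be inverted mode-by-mode. A self-contained sketch on random symmetric-positive-definite blocks (illustration, not PAL data) showing the per-mode inversion agrees with the full block-diagonal matrix at far lower cost:

import numpy as np
import scipy.linalg as sl

np.random.seed(0)
npsr, nmode = 4, 3
blocks = []
for _ in range(2 * nmode):
    A = np.random.randn(npsr, npsr)
    blocks.append(np.dot(A, A.T) + npsr * np.eye(npsr))  # SPD block

# invert mode-by-mode: O(nmode * npsr^3) instead of O((nmode * npsr)^3)
logdet = 0.0
for B in blocks:
    L = sl.cho_factor(B)
    logdet += 2 * np.sum(np.log(np.diag(L[0])))

full = sl.block_diag(*blocks)
assert np.allclose(logdet, np.linalg.slogdet(full)[1])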
Example n. 42
# make sure all pulsars have the same reference time
tt = []
for p in psr:
    tt.append(np.min(p.toas))

# find reference time
tref = np.min(tt)

# now scale pulsar time
for p in psr:
    p.toas -= tref

# get G matrices
for p in psr:
    p.G = PALutils.createGmatrix(p.dmatrix)

# run Fp statistic to determine starting frequency
if args.freq is None:
    print 'Running initial Fpstat search'
    fsearch = np.logspace(-9, -7, 1000)
    fpstat = np.zeros(len(fsearch))
    for ii in range(len(fsearch)):
        fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii])

    # determine maximum likelihood frequency
    fmaxlike = fsearch[np.argmax(fpstat)]
    print 'Maximum likelihood from f-stat search = {0}\n'.format(fmaxlike)

# get determinant of covariance matrix for use in likelihood
logdetTerm = []
Example n. 43
    def addpulsar(self,
                  parfile,
                  timfile,
                  DMOFF=None,
                  DMXOFF=None,
                  dailyAverage=False):
        """
        Add another pulsar to the HDF5 file, given a tempo2 par and tim file.

        @param parfile: tempo2 par file
        @param timfile: tempo2 tim file
        @param DMOFF: Option to turn off DMMODEL fitting
        @param DMXOFF: Option to turn off DMX fitting
        @param dailyAverage: Option to perform daily averaging to reduce the number
                             of points by constructing daily averaged TOAs that have
                             one residual per day per frequency band. (This has only
                             been tested on NANOGrav data thus far.)

        """

        # Check whether the two files exist
        if not os.path.isfile(parfile) or not os.path.isfile(timfile):
            raise IOError, "Cannot find parfile (%s) or timfile (%s)!" % (
                parfile, timfile)
        assert (self.filename != None), "ERROR: HDF5 file not set!"

        # 'a' means: read/write if exists, create otherwise
        self.h5file = h5.File(self.filename, 'a')

        if "Model" in self.h5file:
            self.h5file.close()
            self.h5file = None
            raise IOError, "model already available in '%s'. Refusing to add data" % (
                self.filename)

        # Create the data subgroup if it does not exist
        if "Data" in self.h5file:
            datagroup = self.h5file["Data"]
        else:
            datagroup = self.h5file.create_group("Data")

        # Load pulsar data from the JPL Cython tempo2 library
        t2pulsar = t2.tempopulsar(parfile, timfile)

        # do multiple fits
        #t2pulsar.fit(iters=10)

        # turn off DMMODEL fitting
        if DMOFF is not None:
            t2pulsar['DMMODEL'].fit = False

        # turn off DMX fitting
        if DMXOFF is not None:
            DMXFlag = False
            print 'Turning off DMX fitting and turning DM fitting on'
            for par in t2pulsar.pars:
                if 'DMX' in par:
                    print par
                    t2pulsar[par].fit = False
                    t2pulsar['DM'].fit = True
                    DMXFlag = True
            if DMXFlag == False:
                print 'NO DMX for pulsar {0}'.format(t2pulsar.name)

        # refit 5 times to make sure we are converged
        t2pulsar.fit(iters=5)

        # Create the pulsar subgroup if it does not exist
        if "Pulsars" in datagroup:
            pulsarsgroup = datagroup["Pulsars"]
        else:
            pulsarsgroup = datagroup.create_group("Pulsars")

        # Look up the name of the pulsar, and see if it exists
        if t2pulsar.name in pulsarsgroup:
            self.h5file.close()
            raise IOError, "%s already exists in %s!" % (t2pulsar.name,
                                                         self.filename)

        pulsarsgroup = pulsarsgroup.create_group(t2pulsar.name)

        # Read the data from the tempo2 structure.
        designmatrix = np.double(t2pulsar.designmatrix())
        residuals = np.double(t2pulsar.residuals())
        toas = np.double(t2pulsar.toas())
        errs = np.double(t2pulsar.toaerrs * 1e-6)
        pname = t2pulsar.name

        try:  # if tim file has frequencies
            freqs = np.double(t2pulsar.freqs)
        except AttributeError:
            freqs = 0

        try:  # if tim file has frequency band flags
            bands = t2pulsar.flags['B']
        except KeyError:
            bands = 0

        # if doing daily averaging
        if dailyAverage:

            # get average quantities
            toas, qmatrix, errs, dmatrix, freqs, bands = PALutils.dailyAverage(
                t2pulsar)

            # construct new daily averaged residuals and designmatrix
            residuals = np.dot(qmatrix, residuals)
            designmatrix = np.dot(qmatrix, dmatrix)

        # Write the TOAs, residuals, and uncertainties.
        spd = 24.0 * 3600  # seconds per day
        pulsarsgroup.create_dataset('TOAs', data=toas *
                                    spd)  # days (MJD) * sec per day
        pulsarsgroup.create_dataset('residuals', data=residuals)  # seconds
        pulsarsgroup.create_dataset('toaErr', data=errs)  # seconds
        pulsarsgroup.create_dataset('freqs', data=freqs * 1e6)  # Hz
        pulsarsgroup.create_dataset('bands', data=bands)  # frequency band flags

        # add tim and par file paths
        pulsarsgroup.create_dataset('parFile', data=parfile)  # string
        pulsarsgroup.create_dataset('timFile', data=timfile)  # string

        # Write the full design matrix
        pulsarsgroup.create_dataset('designmatrix', data=designmatrix)

        # Obtain the timing model parameters
        tmpname = np.array(t2pulsar.pars)
        tmpvalpre = np.double(
            [t2pulsar.prefit[parname].val for parname in t2pulsar.pars])
        tmpvalpost = np.double(
            [t2pulsar[parname].val for parname in t2pulsar.pars])
        tmperrpre = np.double(
            [t2pulsar.prefit[parname].err for parname in t2pulsar.pars])
        tmperrpost = np.double(
            [t2pulsar[parname].err for parname in t2pulsar.pars])

        # Write the timing model parameter (TMP) descriptions
        pulsarsgroup.create_dataset('pname', data=pname)  # pulsar name
        pulsarsgroup.create_dataset('tmp_name', data=tmpname)  # TMP name
        pulsarsgroup.create_dataset('tmp_valpre',
                                    data=tmpvalpre)  # TMP pre-fit value
        pulsarsgroup.create_dataset('tmp_valpost',
                                    data=tmpvalpost)  # TMP post-fit value
        pulsarsgroup.create_dataset('tmp_errpre',
                                    data=tmperrpre)  # TMP pre-fit error
        pulsarsgroup.create_dataset('tmp_errpost',
                                    data=tmperrpost)  # TMP post-fit error

        # Close the hdf5 file
        self.h5file.close()
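
A usage sketch for reading back what addpulsar writes; the file name and pulsar name here are hypothetical.

import h5py as h5

with h5.File('pulsars.h5', 'r') as f:
    grp = f['Data']['Pulsars']['J1713+0747']
    toas = grp['TOAs'][...]          # seconds
    res = grp['residuals'][...]      # seconds
    errs = grp['toaErr'][...]        # seconds
    dmat = grp['designmatrix'][...]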
Example n. 44
# make sure all pulsars have the same reference time
tt = []
for p in psr:
    tt.append(np.min(p.toas))

# find reference time
tref = np.min(tt)

# now scale pulsar time
for p in psr:
    p.toas -= tref

# get G matrices
for p in psr:
    p.G = PALutils.createGmatrix(p.dmatrix)

# run Fp statistic to determine starting frequency
print 'Running initial Fpstat search'
fsearch = np.logspace(-9, -7, 200)
fpstat = np.zeros(len(fsearch))
for ii in range(len(fsearch)):
    fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii])

# determine maximum likelihood frequency
fmaxlike = fsearch[np.argmax(fpstat)]
print 'Maximum likelihood from f-stat search = {0}\n'.format(fmaxlike)

# prior ranges
thmin = phasemin = incmin = psimin = 0
thmax = incmax = np.pi
Example n. 45
# make sure all pulsars have the same reference time
tt = []
for p in psr:
    tt.append(np.min(p.toas))

# find reference time
tref = np.min(tt)

# now scale pulsar time
for p in psr:
    p.toas -= tref

# compute pairwise overlap reduction function values
print 'Computing Overlap Reduction Function Values'
ORF = PALutils.computeORF(psr)

# since we have defined our ORF to be normalized to 1,
# the Hellings-Downs coefficients are ORF/2
hdcoeff = ORF / 2

# compute optimal statistic
print 'Running Cross correlation Statistic on {0} Pulsars'.format(npsr)
crosspower, crosspowererr = PALLikelihoods.crossPower(psr, args.gam)

# angular separation
xi = []
for ll in range(npsr):
    for kk in range(ll + 1, npsr):
        xi.append(PALutils.angularSeparation(psr[ll].theta, psr[ll].phi, \
                                            psr[kk].theta, psr[kk].phi))
Example n. 46
# make sure pulsar names are in the correct order
# TODO: is this a very roundabout way to do this?
index = []
for ct,p in enumerate(pp):
    
    if p.name == psr[ct].name:
        index.append(ct)
    else:
        for ii in range(npsr):
            if pp[ii].name == psr[ct].name:
                index.append(ii)

pp = [pp[ii] for ii in index]

M = [PALutils.createQSDdesignmatrix(p.toas) for p in psr]

RQ = [PALutils.createRmatrix(M[ct], p.err) for ct, p in enumerate(psr)]

# construct noise matrix for new noise realizations
print 'Constructing noise cholesky decompositions'
L = []
for ct, p in enumerate(psr):

    Amp = p.Amp
    gam = p.gam
    efac = p.efac
    equad = p.equad
    cequad = p.cequad
        
    avetoas, U = PALutils.exploderMatrix(p.toas)
Example n. 47
# carry out Fp search
if args.fpFlag:

    print 'Beginning Fp Search with {0} pulsars, with frequency range {1} -- {2}'.format(npsr, f[0], f[-1])
    
    fpstat = np.zeros(args.nfreqs)
    for ii in range(args.nfreqs):

        fpstat[ii] = PALLikelihoods.fpStat(psr, f[ii])


    print 'Done Search. Computing False Alarm Probability'
    
    # single template FAP
    pf = np.array([PALutils.ptSum(npsr, fpstat[ii]) for ii in range(np.alen(f))])

    # get total false alarm probability with trials factor
    pfT = 1 - (1-pf)**np.alen(f)

    # write results to file
    if not os.path.exists(args.outDir):
        os.makedirs(args.outDir)

    # get filename from hdf5 file
    fname = args.outDir + '/' + args.h5file.split('/')[-1].split('.')[0] + '.txt'
    fout = open(fname, 'w')
    print 'Writing results to file {0}'.format(fname)
    for ii in range(np.alen(f)):

        fout.write('%g %g %g\n'%(f[ii], fpstat[ii], pfT[ii]))
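
The trials factor above treats the Nf frequency templates as independent. A quick illustration with made-up numbers of how a rare single-template event becomes likely over many trials:

pf = 1e-4     # single-template false-alarm probability (made up)
Nf = 1000     # number of independent frequency templates (made up)
pfT = 1 - (1 - pf)**Nf
print pfT     # ~0.095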
Example n. 48
def upperLimitFunc(h):
    """
    Compute the value of the fstat for a range of parameters, with fixed
    amplitude over many realizations.

    @param h: value of the strain amplitude to keep constant
    @param fstat_ref: value of fstat for real data set (module-level here)
    @param freq: GW frequency (module-level here)
    @param nreal: number of realizations (module-level here)

    """
    
    Tmaxyr = np.array([(p.toas.max() - p.toas.min())/3.16e7 for p in psr]).max()
    count = 0
    for ii in range(nreal):

        # draw parameter values
        gwtheta = np.arccos(np.random.uniform(-1, 1))
        gwphi = np.random.uniform(0, 2*np.pi)
        gwphase = np.random.uniform(0, 2*np.pi)
        gwinc = np.arccos(np.random.uniform(-1, 1))
        gwpsi = np.random.uniform(-np.pi/4, np.pi/4)

        # check to make sure source has not coalesced during observation time
        gwmc = 10**np.random.uniform(7, 10)
        tcoal = 2e6 * (gwmc/1e8)**(-5.0/3) * (freq/1e-8)**(-8.0/3)
        if tcoal < Tmaxyr:
            gwmc = 1e5

        # determine distance in order to keep strain fixed
        gwdist = 4 * np.sqrt(2.0/5) * (gwmc*4.9e-6)**(5.0/3) * (np.pi*freq)**(2.0/3) / h

        # convert back to Mpc
        gwdist /= 1.0267e14

        # create residuals and refit for all pulsars
        for ct,p in enumerate(psr):
            inducedRes = PALutils.createResiduals(p, gwtheta, gwphi, gwmc, gwdist, \
                            freq, gwphase, gwpsi, gwinc)
 
            # create simulated data set
            noise = np.dot(L[ct], np.random.randn(L[ct].shape[0]))
            pp[ct].stoas[:] -= pp[ct].residuals()/86400
            pp[ct].stoas[:] += np.longdouble(np.dot(RQ[ct], noise)/86400)
            pp[ct].stoas[:] += np.longdouble(np.dot(RQ[ct], inducedRes)/86400)

            # refit
            pp[ct].fit(iters=3)

            # replace residuals in pulsar object
            p.res = pp[ct].residuals()

            print p.name, p.rms()*1e6

        # compute f-statistic
        fpstat = PALLikelihoods.fpStat(psr, freq)

        # check to see if larger than in real data
        if fpstat > fstat_ref:
            count += 1

    # now get detection probability
    detProb = float(count)/nreal

    print h, detProb

    return detProb - 0.95
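
Because upperLimitFunc returns detProb - 0.95, the 95% upper limit is its root. A usage sketch; the bracketing strain values are assumptions that must straddle the root:

from scipy.optimize import brentq

# bracket must give upperLimitFunc(hlo) < 0 < upperLimitFunc(hhi)
h95 = brentq(upperLimitFunc, 1e-15, 1e-12, xtol=1e-16)
print 'h_95 = {0}'.format(h95)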
Example n. 49
    def addpulsar(self, parfile, timfile, DMOFF=None, DMXOFF=None, dailyAverage=False):

        """
        Add another pulsar to the HDF5 file, given a tempo2 par and tim file.

        @param parfile: tempo2 par file
        @param timfile: tempo2 tim file
        @param DMOFF: Option to turn off DMMODEL fitting
        @param DMXOFF: Option to turn off DMX fitting
        @param dailyAverage: Option to perform daily averaging to reduce the number
                             of points by constructing daily averaged TOAs that have
                             one residual per day per frequency band. (This has only
                             been tested on NANOGrav data thus far.)

        """

        # Check whether the two files exist
        if not os.path.isfile(parfile) or not os.path.isfile(timfile):
            raise IOError, "Cannot find parfile (%s) or timfile (%s)!" % (parfile, timfile)
        assert(self.filename != None), "ERROR: HDF5 file not set!"

        # 'a' means: read/write if exists, create otherwise
        self.h5file = h5.File(self.filename, 'a')

        if "Model" in self.h5file:
            self.h5file.close()
            self.h5file = None
            raise IOError, "model already available in '%s'. Refusing to add data" % (self.filename)

        # Create the data subgroup if it does not exist
        if "Data" in self.h5file:
            datagroup = self.h5file["Data"]
        else:
            datagroup = self.h5file.create_group("Data")

        # Load pulsar data from the JPL Cython tempo2 library
        t2pulsar = t2.tempopulsar(parfile, timfile)

        # do multiple fits
        #t2pulsar.fit(iters=10)

        # turn off DMMODEL fitting
        if DMOFF is not None:
            t2pulsar['DMMODEL'].fit = False
        
        # turn off DMX fitting
        if DMXOFF is not None:
            DMXFlag = False
            print 'Turning off DMX fitting and turning DM fitting on'
            for par in t2pulsar.pars:
                if 'DMX' in par:
                    print par
                    t2pulsar[par].fit = False
                    t2pulsar['DM'].fit = True
                    DMXFlag = True
            if DMXFlag == False:
                print 'NO DMX for pulsar {0}'.format(t2pulsar.name)

        # refit 5 times to make sure we are converged
        t2pulsar.fit(iters=5)

        # Create the pulsar subgroup if it does not exist
        if "Pulsars" in datagroup:
            pulsarsgroup = datagroup["Pulsars"]
        else:
            pulsarsgroup = datagroup.create_group("Pulsars")

        # Look up the name of the pulsar, and see if it exists
        if t2pulsar.name in pulsarsgroup:
            self.h5file.close()
            raise IOError, "%s already exists in %s!" % (t2pulsar.name, self.filename)

        pulsarsgroup = pulsarsgroup.create_group(t2pulsar.name)

        # Read the data from the tempo2 structure.
        designmatrix = np.double(t2pulsar.designmatrix())
        residuals = np.double(t2pulsar.residuals())    
        toas = np.double(t2pulsar.toas())
        errs = np.double(t2pulsar.toaerrs*1e-6)
        pname = t2pulsar.name

        try:    # if tim file has frequencies
            freqs = np.double(t2pulsar.freqs)
        except AttributeError: 
            freqs = 0

        try:    # if tim file has frequency band flags
            bands = t2pulsar.flags['B']
        except KeyError:
            bands = 0

        # if doing daily averaging
        if dailyAverage:

            # get average quantities
            toas, qmatrix, errs, dmatrix, freqs, bands = PALutils.dailyAverage(t2pulsar)

            # construct new daily averaged residuals and designmatrix
            residuals = np.dot(qmatrix, residuals)
            designmatrix = np.dot(qmatrix, dmatrix)

        
        # Write the TOAs, residuals, and uncertainties.
        spd = 24.0*3600     # seconds per day
        pulsarsgroup.create_dataset('TOAs', data = toas*spd)           # days (MJD) * sec per day
        pulsarsgroup.create_dataset('residuals', data = residuals)     # seconds
        pulsarsgroup.create_dataset('toaErr', data = errs)             # seconds
        pulsarsgroup.create_dataset('freqs', data = freqs*1e6)             # Hz
        pulsarsgroup.create_dataset('bands', data = bands)             # frequency band flags

        # add tim and par file paths
        pulsarsgroup.create_dataset('parFile', data = parfile)             # string
        pulsarsgroup.create_dataset('timFile', data = timfile)             # string


        # Write the full design matrix
        pulsarsgroup.create_dataset('designmatrix', data = designmatrix)

        # Obtain the timing model parameters
        tmpname = np.array(t2pulsar.pars)
        tmpvalpre = np.double([t2pulsar.prefit[parname].val for parname in t2pulsar.pars])
        tmpvalpost = np.double([t2pulsar[parname].val for parname in t2pulsar.pars])
        tmperrpre = np.double([t2pulsar.prefit[parname].err for parname in t2pulsar.pars])
        tmperrpost = np.double([t2pulsar[parname].err for parname in t2pulsar.pars])


        # Write the timing model parameter (TMP) descriptions
        pulsarsgroup.create_dataset('pname', data=pname)            # pulsar name
        pulsarsgroup.create_dataset('tmp_name', data=tmpname)       # TMP name
        pulsarsgroup.create_dataset('tmp_valpre', data=tmpvalpre)   # TMP pre-fit value
        pulsarsgroup.create_dataset('tmp_valpost', data=tmpvalpost) # TMP post-fit value
        pulsarsgroup.create_dataset('tmp_errpre', data=tmperrpre)   # TMP pre-fit error
        pulsarsgroup.create_dataset('tmp_errpost', data=tmperrpost) # TMP post-fit error

        # Close the hdf5 file
        self.h5file.close()
Example n. 50
# carry out Fp search
if args.fpFlag:

    print 'Beginning Fp Search with {0} pulsars, with frequency range {1} -- {2}'.format(
        npsr, f[0], f[-1])

    fpstat = np.zeros(args.nfreqs)
    for ii in range(args.nfreqs):

        fpstat[ii] = PALLikelihoods.fpStat(psr, f[ii])

    print 'Done Search. Computing False Alarm Probability'

    # single template FAP
    pf = np.array(
        [PALutils.ptSum(npsr, fpstat[ii]) for ii in range(np.alen(f))])

    # get total false alarm probability with trials factor
    pfT = 1 - (1 - pf)**np.alen(f)

    # write results to file
    if not os.path.exists(args.outDir):
        os.makedirs(args.outDir)

    # get filename from hdf5 file
    fname = args.outDir + '/' + args.h5file.split('/')[-1].split(
        '.')[0] + '.txt'
    fout = open(fname, 'w')
    print 'Writing results to file {0}'.format(fname)
    for ii in range(np.alen(f)):

        fout.write('%g %g %g\n' % (f[ii], fpstat[ii], pfT[ii]))
Example n. 51
# make sure all pulsars have the same reference time
tt = []
for p in psr:
    tt.append(np.min(p.toas))

# find reference time
tref = np.min(tt)

# now scale pulsar time
for p in psr:
    p.toas -= tref

# get G matrices
for p in psr:
    p.G = PALutils.createGmatrix(p.dmatrix)

# run Fp statistic to determine starting frequency
if args.null == False:
    print "Running initial Fpstat search"
    fsearch = np.logspace(-9, -7, 1000)
    fpstat = np.zeros(len(fsearch))
    for ii in range(len(fsearch)):
        fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii])

    # determine maximum likelihood frequency
    fmaxlike = fsearch[np.argmax(fpstat)]
    print "Maximum likelihood from f-stat search = {0}\n".format(fmaxlike)

# get Tmax
Tmax = np.max([p.toas.max() - p.toas.min() for p in psr])
Example n. 52
def marginalizedPulsarPhaseLikeNumerical(psr, theta, phi, phase, inc, psi, freq, h,\
                                         maximize=False):
    """ 
    Compute the log-likelihood marginalized over pulsar phases

    @param psr: List of pulsar object instances
    @param theta: GW polar angle [radian]
    @param phi: GW azimuthal angle [radian]
    @param phase: Initial GW phase [radian]
    @param inc: GW inclination angle [radian]
    @param psi: GW polarization angle [radian]
    @param freq: GW initial frequency [Hz]
    @param h: GW strain
    @param maximize: Option to maximize over pulsar phases instead of marginalize

    """

    tstart = time.time()

    # get number of pulsars
    npsr = len(psr)
       
    # construct xi = M**5/3/D and omega
    xi = 0.25 * np.sqrt(5.0/2) * (np.pi*freq)**(-2.0/3) * h
    omega = np.pi*freq
    
    # get a values from Ellis et al 2012
    a1 = xi * ((1+np.cos(inc)**2)*np.cos(phase)*np.cos(2*psi) + \
               2*np.cos(inc)*np.sin(phase)*np.sin(2*psi))
    a2 = -xi * ((1+np.cos(inc)**2)*np.sin(phase)*np.cos(2*psi) - \
                2*np.cos(inc)*np.cos(phase)*np.sin(2*psi))
    a3 = xi * ((1+np.cos(inc)**2)*np.cos(phase)*np.sin(2*psi) - \
               2*np.cos(inc)*np.sin(phase)*np.cos(2*psi))
    a4 = -xi * ((1+np.cos(inc)**2)*np.sin(phase)*np.sin(2*psi) + \
                2*np.cos(inc)*np.cos(phase)*np.cos(2*psi))

    lnlike = 0
    tip = 0
    tint = 0
    tmax = 0
    for ct, pp in enumerate(psr):

        tstartip = time.time()

        # compute relevant inner products
        N1 = np.dot(np.cos(2*omega*pp.toas), np.dot(pp.invCov, pp.res)) 
        N2 = np.dot(np.sin(2*omega*pp.toas), np.dot(pp.invCov, pp.res))
        M11 = np.dot(np.sin(2*omega*pp.toas), np.dot(pp.invCov, np.sin(2*omega*pp.toas)))
        M22 = np.dot(np.cos(2*omega*pp.toas), np.dot(pp.invCov, np.cos(2*omega*pp.toas)))
        M12 = np.dot(np.cos(2*omega*pp.toas), np.dot(pp.invCov, np.sin(2*omega*pp.toas)))

        # compute fplus and fcross
        fplus, fcross, cosMu = PALutils.createAntennaPatternFuncs(pp, theta, phi)

        # mind your p's and q's
        p = fplus*a1 + fcross*a3
        q = fplus*a2 + fcross*a4

        # construct multipliers of pulsar phase terms
        X = p*N1 + q*N2 + p**2*M11 + q**2*M22 + 2*p*q*M12
        Y = p*N1 + q*N2 + 2*p**2*M11 + 2*q**2*M22 + 4*p*q*M12
        Z = p*N2 - q*N1 + 2*(p**2-q**2)*M12 - 2*p*q*(M11-M22)
        W = q**2*M11 + p**2*M22 -2*p*q*M12
        V = p*q*(M11-M22) - (p**2-q**2)*M12
        
        #print X, Y, Z, W, V
        tip += (time.time() - tstartip)

        tstartint = time.time()

        # find the maximum of argument of exponential function
        phip = np.linspace(0, 2*np.pi, 10000)
        arg = X - Y*np.cos(phip) + Z*np.sin(phip) + W*np.sin(phip)**2 + 2*V*np.cos(phip)*np.sin(phip)
        maxarg = np.max(arg)

        if maximize:
            tmax += maxarg

        else:

            # define integrand for numerical integration
            f = lambda phi: np.exp(X - Y*np.cos(phi) + Z*np.sin(phi) + \
                    W*np.sin(phi)**2 + 2*V*np.cos(phi)*np.sin(phi) - maxarg)

            # do numerical integration
            integral = integrate.quad(f, 0, 2*np.pi)[0]
            lnlike += maxarg + np.log(integral)

            tint += (time.time() - tstartint)

    print 'Loglike = {0}'.format(lnlike)
    print 'Total Evaluation Time = {0} s'.format(time.time() - tstart)
    print 'Total inner product evaluation Time = {0} s'.format(tip)
    print 'Total Integration Time = {0} s\n'.format(tint)

    if maximize:
        lnlike = tmax

    return lnlike
Example n. 53
# now scale pulsar time
for p in psr:
    p.toas -= tref

#import glob
#invmat = glob.glob('/Users/Justin/Work/nanograv/nanograv/data_products/joris/*invCov*')
#
## get list of R matrices
#R = [PALutils.createRmatrix(p.dmatrix, p.err) for p in psr]
#
#for ct,p in enumerate(psr):
#    p.invCov = np.dot(R[ct].T, np.dot(p.invCov, R[ct]))

# compute pairwise overlap reduction function values
print 'Computing Overlap Reduction Function Values'
ORF = PALutils.computeORF(psr)

# compute optimal statistic
print 'Running Optimal Statistic on {0} Pulsars'.format(npsr)
Opt, sigma, snr = PALLikelihoods.optStat(psr, ORF, gam=args.gam)

print 'Results of Search\n'

print '------------------------------------\n'

print 'A_gw^2 = {0}'.format(Opt)
print 'std. dev. = {0}'.format(sigma)
print 'SNR = {0}'.format(snr)

if snr > 3.0:
    print 'SNR of {0} is above threshold!'.format(snr)
Example n. 54
        pass

print 'Reading in HDF5 file'

# import hdf5 file
pfile = h5.File(args.h5file, 'r')

# define the pulsargroup
pulsargroup = pfile['Data']['Pulsars'][args.pname]

# fill in pulsar class
psr = PALpulsarInit.pulsar(pulsargroup, addGmatrix=True)

# initialize fourier design matrix
if args.nmodes != 0:
    F, f = PALutils.createfourierdesignmatrix(psr.toas, args.nmodes, freq=True)

Tspan = psr.toas.max() - psr.toas.min()
##fsred = np.array([1.599558028614668e-07, 5.116818355403073e-08]) # 1855
#fsred = np.array([9.549925860214369e-08]) # 1909
#fsred = np.array([1/Tspan, 9.772372209558111e-08]) # 1909
#
#F = np.zeros((psr.ntoa, 2*len(fsred)))
#F[:,0::2] = np.cos(2*np.pi*np.outer(psr.toas, fsred))
#F[:,1::2] = np.sin(2*np.pi*np.outer(psr.toas, fsred))

# get G matrices
psr.G = PALutils.createGmatrix(psr.dmatrix)

# pre compute diagonalized efac + equad white noise model
efac = np.dot(psr.G.T, np.dot(np.diag(psr.err**2), psr.G))
Example n. 55
def marginalizedPulsarPhaseLikeNumerical(psr, theta, phi, phase, inc, psi, freq, h,\
                                         maximize=False):
    """ 
    Compute the log-likelihood marginalized over pulsar phases

    @param psr: List of pulsar object instances
    @param theta: GW polar angle [radian]
    @param phi: GW azimuthal angle [radian]
    @param phase: Initial GW phase [radian]
    @param inc: GW inclination angle [radian]
    @param psi: GW polarization angle [radian]
    @param freq: GW initial frequency [Hz]
    @param h: GW strain
    @param maximize: Option to maximize over pulsar phases instead of marginalize

    """

    tstart = time.time()

    # get number of pulsars
    npsr = len(psr)

    # construct xi = M**5/3/D and omega
    xi = 0.25 * np.sqrt(5.0 / 2) * (np.pi * freq)**(-2.0 / 3) * h
    omega = np.pi * freq

    # get a values from Ellis et al 2012
    a1 = xi * ((1+np.cos(inc)**2)*np.cos(phase)*np.cos(2*psi) + \
               2*np.cos(inc)*np.sin(phase)*np.sin(2*psi))
    a2 = -xi * ((1+np.cos(inc)**2)*np.sin(phase)*np.cos(2*psi) - \
                2*np.cos(inc)*np.cos(phase)*np.sin(2*psi))
    a3 = xi * ((1+np.cos(inc)**2)*np.cos(phase)*np.sin(2*psi) - \
               2*np.cos(inc)*np.sin(phase)*np.cos(2*psi))
    a4 = -xi * ((1+np.cos(inc)**2)*np.sin(phase)*np.sin(2*psi) + \
                2*np.cos(inc)*np.cos(phase)*np.cos(2*psi))

    lnlike = 0
    tip = 0
    tint = 0
    tmax = 0
    for ct, pp in enumerate(psr):

        tstartip = time.time()

        # compute relevant inner products
        N1 = np.dot(np.cos(2 * omega * pp.toas), np.dot(pp.invCov, pp.res))
        N2 = np.dot(np.sin(2 * omega * pp.toas), np.dot(pp.invCov, pp.res))
        M11 = np.dot(np.sin(2 * omega * pp.toas),
                     np.dot(pp.invCov, np.sin(2 * omega * pp.toas)))
        M22 = np.dot(np.cos(2 * omega * pp.toas),
                     np.dot(pp.invCov, np.cos(2 * omega * pp.toas)))
        M12 = np.dot(np.cos(2 * omega * pp.toas),
                     np.dot(pp.invCov, np.sin(2 * omega * pp.toas)))

        # compute fplus and fcross
        fplus, fcross, cosMu = PALutils.createAntennaPatternFuncs(
            pp, theta, phi)

        # mind your p's and q's
        p = fplus * a1 + fcross * a3
        q = fplus * a2 + fcross * a4

        # construct multipliers of pulsar phase terms
        X = p * N1 + q * N2 + p**2 * M11 + q**2 * M22 + 2 * p * q * M12
        Y = p * N1 + q * N2 + 2 * p**2 * M11 + 2 * q**2 * M22 + 4 * p * q * M12
        Z = p * N2 - q * N1 + 2 * (p**2 - q**2) * M12 - 2 * p * q * (M11 - M22)
        W = q**2 * M11 + p**2 * M22 - 2 * p * q * M12
        V = p * q * (M11 - M22) - (p**2 - q**2) * M12

        #print X, Y, Z, W, V
        tip += (time.time() - tstartip)

        tstartint = time.time()

        # find the maximum of argument of exponential function
        phip = np.linspace(0, 2 * np.pi, 10000)
        arg = X - Y * np.cos(phip) + Z * np.sin(phip) + W * np.sin(
            phip)**2 + 2 * V * np.cos(phip) * np.sin(phip)
        maxarg = np.max(arg)

        if maximize:
            tmax += maxarg

        else:

            # define integrand for numerical integration
            f = lambda phi: np.exp(X - Y*np.cos(phi) + Z*np.sin(phi) + \
                    W*np.sin(phi)**2 + 2*V*np.cos(phi)*np.sin(phi) - maxarg)

            # do numerical integration
            integral = integrate.quad(f, 0, 2 * np.pi)[0]
            lnlike += maxarg + np.log(integral)

            tint += (time.time() - tstartint)

    print 'Loglike = {0}'.format(lnlike)
    print 'Total Evaluation Time = {0} s'.format(time.time() - tstart)
    print 'Total inner product evaluation Time = {0} s'.format(tip)
    print 'Total Integration Time = {0} s\n'.format(tint)

    if maximize:
        lnlike = tmax

    return lnlike
Example n. 56
def marginalizedPulsarPhaseLike(psr,
                                theta,
                                phi,
                                phase,
                                inc,
                                psi,
                                freq,
                                h,
                                maximize=False):
    """ 
    Compute the log-likelihood marginalized over pulsar phases

    @param psr: List of pulsar object instances
    @param theta: GW polar angle [radian]
    @param phi: GW azimuthal angle [radian]
    @param phase: Initial GW phase [radian]
    @param inc: GW inclination angle [radian]
    @param psi: GW polarization angle [radian]
    @param freq: GW initial frequency [Hz]
    @param h: GW strain
    @param maximize: Option to maximize over pulsar phases instead of marginalize

    """

    # get number of pulsars
    npsr = len(psr)

    # get c and d
    c = np.cos(phase)
    d = np.sin(phase)

    # construct xi = M**5/3/D and omega
    xi = 0.25 * np.sqrt(5.0 / 2) * (np.pi * freq)**(-2.0 / 3) * h
    omega = np.pi * freq

    lnlike = 0
    for ct, pp in enumerate(psr):

        # compute relevant inner products
        cip = np.dot(np.cos(2 * omega * pp.toas), np.dot(pp.invCov, pp.res))
        sip = np.dot(np.sin(2 * omega * pp.toas), np.dot(pp.invCov, pp.res))
        N = np.dot(np.cos(2 * omega * pp.toas),
                   np.dot(pp.invCov, np.cos(2 * omega * pp.toas)))

        # compute fplus and fcross
        fplus, fcross, cosMu = PALutils.createAntennaPatternFuncs(
            pp, theta, phi)

        # mind your p's and q's
        p = (1 + np.cos(inc)**2) * (fplus * np.cos(2 * psi) +
                                    fcross * np.sin(2 * psi))
        q = 2 * np.cos(inc) * (fplus * np.sin(2 * psi) -
                               fcross * np.cos(2 * psi))

        # construct X Y and Z
        X = -xi / omega**(1.0 / 3) * (p * sip + q * cip -
                                      0.5 * xi / omega**(1.0 / 3) * N * c *
                                      (p**2 + q**2))
        Y = -xi / omega**(1.0 / 3) * (q * sip - p * cip -
                                      0.5 * xi / omega**(1.0 / 3) * N * d *
                                      (p**2 + q**2))
        Z = xi/omega**(1.0/3) * ((p*c+q*d)*sip - (p*d-q*c)*cip \
                        -0.5*xi/omega**(1.0/3)*N*(p**2+q**2))

        # add to log-likelihood
        #print X, Y
        if maximize:
            lnlike += Z + np.sqrt(X**2 + Y**2)
        else:
            lnlike += Z + np.log(ss.iv(0, np.sqrt(X**2 + Y**2)))

    return lnlike
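
The ss.iv(0, ...) term is the analytic phase marginalization. A self-contained numerical check of the underlying identity (the X, Y values are arbitrary):

import numpy as np
import scipy.integrate as integrate
import scipy.special as ss

X, Y = 0.7, -1.3
num = integrate.quad(lambda p: np.exp(X * np.cos(p) + Y * np.sin(p)),
                     0, 2 * np.pi)[0] / (2 * np.pi)
assert np.allclose(num, ss.iv(0, np.hypot(X, Y)))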
Example n. 57
# make sure all pulsars have the same reference time
tt = []
for p in psr:
    tt.append(np.min(p.toas))

# find reference time
tref = np.min(tt)

# now scale pulsar time
for p in psr:
    p.toas -= tref

# get G matrices
for p in psr:
    p.G = PALutils.createGmatrix(p.dmatrix)

# run Fp statistic to determine starting frequency
print 'Running initial Fpstat search'
fsearch = np.logspace(-9, -7, 200)
fpstat = np.zeros(len(fsearch))
for ii in range(len(fsearch)):
    fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii])

# determine maximum likelihood frequency
fmaxlike = fsearch[np.argmax(fpstat)]
print 'Maximum likelihood from f-stat search = {0}\n'.format(fmaxlike)


    
# prior ranges
Example n. 58
def upperLimitFunc(h, fstat_ref, freq, nreal, theta=None, phi=None, detect=False, \
                  dist=None):
    """
    Compute the value of the fstat for a range of parameters, with fixed
    amplitude over many realizations.

    @param h: value of the strain amplitude to keep constant
    @param fstat_ref: value of fstat for real data set
    @param freq: GW frequency
    @param nreal: number of realizations
    @param theta: fixed GW polar angle [radian] (optional)
    @param phi: fixed GW azimuthal angle [radian] (optional)
    @param detect: if True, count realizations with FAP < 1e-4 instead of
                   fpstat > fstat_ref
    @param dist: fixed luminosity distance [Mpc] (optional)

    """
    Tmaxyr = np.array([(p.toas.max() - p.toas.min()) / 3.16e7
                       for p in psr]).max()
    count = 0
    for ii in range(nreal):

        # draw parameter values
        gwtheta = np.arccos(np.random.uniform(-1, 1))
        gwphi = np.random.uniform(0, 2 * np.pi)
        gwphase = np.random.uniform(0, 2 * np.pi)
        gwinc = np.arccos(np.random.uniform(-1, 1))
        #gwpsi = np.random.uniform(-np.pi/4, np.pi/4)
        gwpsi = np.random.uniform(0, np.pi)

        # check to make sure source has not coalesced during observation time
        coal = True
        while coal:
            gwmc = 10**np.random.uniform(7, 10)
            tcoal = 2e6 * (gwmc / 1e8)**(-5.0 / 3) * (freq / 1e-8)**(-8.0 / 3)
            if tcoal > Tmaxyr:
                coal = False

        # determine distance in order to keep strain fixed
        gwdist = 4 * np.sqrt(
            2.0 / 5) * (gwmc * 4.9e-6)**(5.0 / 3) * (np.pi * freq)**(2.0 / 3) / h

        # convert back to Mpc
        gwdist /= 1.0267e14

        # check for fixed sky location
        if theta is not None:
            gwtheta = theta
        if phi is not None:
            gwphi = phi
        if dist is not None:
            gwdist = dist
            gwmc = ((gwdist * 1.0267e14) / 4 / np.sqrt(2.0 / 5) /
                    (np.pi * freq)**(2.0 / 3) * h)**(3.0 / 5) / 4.9e-6

        # create residuals
        for ct, p in enumerate(psr):
            inducedRes = PALutils.createResiduals(p, gwtheta, gwphi, gwmc, gwdist, \
                            freq, gwphase, gwpsi, gwinc, evolve=True)

            # replace residuals in pulsar object
            noise = np.dot(L[ct], np.random.randn(L[ct].shape[0]))
            p.res = np.dot(R[ct], noise + inducedRes)

        # compute f-statistic
        fpstat = PALLikelihoods.fpStat(psr, freq)

        # check to see if larger than in real data
        if detect:
            if PALutils.ptSum(npsr, fpstat) < 1e-4:
                count += 1
        else:
            if fpstat > fstat_ref:
                count += 1

    # now get detection probability
    detProb = float(count) / nreal

    if args.dist:
        print '%e %e %f\n' % (freq, gwmc, detProb)
    else:
        print freq, h, detProb

    return detProb - 0.95