def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]

        fs = np.zeros(args.ss)
        for ii in range(args.ss):
            fs[ii] = 10**cube[2+ii]
        rho = np.zeros(args.nmodes+args.ss)
        for ii in range(args.nmodes+args.ss):
            rho[ii] = cube[ii+2+args.ss]

        F1 = list(PALutils.createfourierdesignmatrix(psr.toas, args.nmodes).T)
        for ii in range(args.ss):
            F1.append(np.cos(2*np.pi*fs[ii]*psr.toas))
            F1.append(np.sin(2*np.pi*fs[ii]*psr.toas))

        F = np.array(F1).T

        F = np.dot(proj, F)
       
        loglike = PALLikelihoods.lentatiMarginalizedLike(psr, F, s, rho, efac, equad)

        #print efac, rho, loglike

        return loglike
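The (cube, ndim, nparams) signature above follows the PyMultiNest callback
convention, so a likelihood like this is normally registered together with a
prior transform. A minimal sketch, assuming a hypothetical myprior that maps
the unit hypercube onto the parameter ranges unpacked above:

    import pymultinest

    n_params = 2 + 2 * args.ss + args.nmodes  # efac, log10(equad), fs, rho

    def myprior(cube, ndim, nparams):
        # hypothetical ranges, edited in place as PyMultiNest expects
        cube[0] = 0.1 + 9.9 * cube[0]       # efac in [0.1, 10]
        cube[1] = -10.0 + 5.0 * cube[1]     # log10(equad) in [-10, -5]
        # ... remaining single-source frequencies and rho coefficients

    pymultinest.run(myloglike, myprior, n_params, resume=False)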
def upperLimitFunc(h, fstat_ref, freq, nreal):
    """
    Compute the value of the fstat for a range of parameters, with fixed
    amplitude over many realizations.

    @param h: value of the strain amplitude to keep constant
    @param fstat_ref: value of fstat for real data set
    @param freq: GW frequency
    @param nreal: number of realizations

    """
    Tmaxyr = np.array([(p.toas.max() - p.toas.min())/3.16e7 for p in psr]).max()
    count = 0
    for ii in range(nreal):

        # draw parameter values
        gwtheta = np.arccos(np.random.uniform(-1, 1))
        gwphi = np.random.uniform(0, 2*np.pi)
        gwphase = np.random.uniform(0, 2*np.pi)
        gwinc = np.arccos(np.random.uniform(0, 1))
        gwpsi = np.random.uniform(-np.pi/4, np.pi/4)

        # check to make sure source has not coalesced during observation time
        coal = True
        while coal:
            gwmc = 10**np.random.uniform(7, 10)
            tcoal = 2e6 * (gwmc/1e8)**(-5/3) * (freq/1e-8)**(-8/3)
            if tcoal > Tmaxyr:
                coal = False

        # determine distance in order to keep strain fixed
        gwdist = 4 * np.sqrt(2/5) * (gwmc*4.9e-6)**(5/3) * (np.pi*freq)**(2/3) / h

        # convert back to Mpc
        gwdist /= 1.0267e14

        # create residuals 
        for ct,p in enumerate(psr):
            inducedRes = PALutils.createResiduals(p, gwtheta, gwphi, gwmc, gwdist, \
                            freq, gwphase, gwpsi, gwinc, evolve=True)
 
            # replace residuals in pulsar object
            p.res = res[ct] + np.dot(R[ct], inducedRes)

        # compute f-statistic
        fpstat = PALLikelihoods.fpStat(psr, freq)
        
        # check to see if larger than in real data
        if fpstat > fstat_ref:
            count += 1

    # now get detection probability
    detProb = count/nreal

    print(freq, h, detProb)

    return detProb - 0.95
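Since upperLimitFunc returns detProb - 0.95, a scalar root finder recovers the
strain at which the detection probability crosses 95%. A sketch of the intended
use, mirroring the brentq calls that appear later in this file:

    from scipy.optimize import brentq

    # bracket and tolerance follow the values used below
    h_up = brentq(upperLimitFunc, 1e-16, 1e-13, xtol=1e-16,
                  args=(fstat_ref, freq, nreal))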
Example #3
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]
        gam = cube[2]
        A = 10**cube[3]

        fs = np.zeros(args.ss)
        for ii in range(args.ss):
            fs[ii] = 10**cube[4 + ii]
        rho2 = np.zeros(args.ss)
        for ii in range(args.ss):
            rho2[ii] = cube[ii + 4 + args.ss]

        # check to make sure frequencies are ordered
        ordered = np.all([fs[ii] < fs[ii + 1] for ii in range(args.ss - 1)])

        if ordered:

            F1 = list(
                PALutils.createfourierdesignmatrix(psr.toas, args.nmodes).T)
            tmp, f = PALutils.createfourierdesignmatrix(psr.toas,
                                                        args.nmodes,
                                                        freq=True)
            for ii in range(args.ss):
                F1.append(np.cos(2 * np.pi * fs[ii] * psr.toas))
                F1.append(np.sin(2 * np.pi * fs[ii] * psr.toas))

            F = np.array(F1).T
            F = np.dot(proj, F)

            # compute rho from A and gam

            # compute total time span of data
            Tspan = psr.toas.max() - psr.toas.min()

            # get power spectrum coefficients
            f1yr = 1 / 3.16e7
            rho = list(
                np.log10(A**2 / 12 / np.pi**2 * f1yr**(gam - 3) * f**(-gam) /
                         Tspan))

            # compute total rho
            for ii in range(args.ss):
                rho.append(rho2[ii])

            loglike = PALLikelihoods.lentatiMarginalizedLike(
                psr, F, s, np.array(rho), efac**2, equad)

        else:

            loglike = -np.inf

        #print efac, rho, loglike

        return loglike
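The rho coefficients computed above are the standard conversion of a power-law
strain spectrum into log10 Fourier-mode power; in the conventions of this code,

\[
\rho_i = \log_{10}\!\left(\frac{A^2}{12\pi^2}\,
         \frac{f_{1\mathrm{yr}}^{\gamma-3}\, f_i^{-\gamma}}{T_{\mathrm{span}}}\right),
\qquad f_{1\mathrm{yr}} = \frac{1}{3.16\times 10^{7}\ \mathrm{s}},
\]

with T_span the total time span of the data in seconds and f_i the Fourier
frequencies returned by createfourierdesignmatrix.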
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]
        A = 10**cube[2] 
        gam = cube[3] 
        
        loglike = PALLikelihoods.lentatiMarginalizedLikePL(psr, F, s, A, f, gam, efac, equad)

        #print efac, rho, loglike

        return loglike
Example #5
        def myloglike(cube, ndim, nparams):

            efac = np.ones(npsr)
            equad = np.zeros(npsr)
            A = 10 ** cube[0]
            gam = cube[1]

            loglike = PALLikelihoods.modelIndependentFullPTAPL(psr, F, Diag, f, A, gam, Ared, gred, efac, equad, ORF)

            # print efac, rho, loglike

            return loglike
    def loglike(x):

        rho = x[0:args.nmodes]
        efac = x[args.nmodes:(args.nmodes+npsr)]
        equad = 10**x[(args.nmodes+npsr):(args.nmodes+2*npsr)]
        Ared = 10**x[(args.nmodes+2*npsr):(args.nmodes+3*npsr)]
        gred = x[(args.nmodes+3*npsr):(args.nmodes+4*npsr)]
        
        loglike = PALLikelihoods.modelIndependentFullPTANoisePL(psr, F, s, f, rho, \
                                                    Ared, gred, efac, equad, ORF)

        return loglike
Example #7
    def loglike(x):

        rho = x[0:args.nmodes]
        efac = x[args.nmodes:(args.nmodes + npsr)]
        equad = 10**x[(args.nmodes + npsr):(args.nmodes + 2 * npsr)]
        Ared = 10**x[(args.nmodes + 2 * npsr):(args.nmodes + 3 * npsr)]
        gred = x[(args.nmodes + 3 * npsr):(args.nmodes + 4 * npsr)]

        loglike = PALLikelihoods.modelIndependentFullPTANoisePL(psr, F, s, f, rho, \
                                                    Ared, gred, efac, equad, ORF)

        return loglike
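This flat-parameter-vector form plugs straight into a generic MCMC sampler. A
minimal sketch assuming emcee (the original script may well have used a
different sampler) and that npsr and args.nmodes are already defined:

    import emcee
    import numpy as np

    ndim = args.nmodes + 4 * npsr   # rho, efac, log10(equad), log10(Ared), gred
    nwalkers = 4 * ndim

    # hypothetical starting ball; in practice draw from the prior ranges
    p0 = np.random.randn(nwalkers, ndim) * 0.1

    sampler = emcee.EnsembleSampler(nwalkers, ndim, loglike)
    sampler.run_mcmc(p0, 1000)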
Example #8
        def myloglike(cube, ndim, nparams):

            efac = np.ones(npsr)
            equad = np.zeros(npsr)
            rho = np.zeros(ndim)
            for ii in range(ndim):
                rho[ii] = cube[ii]

            loglike = PALLikelihoods.modelIndependentFullPTA(psr, F, Diag, rho, kappa, efac, equad, ORF)

            # print efac, rho, loglike

            return loglike
Example #9
        def myloglike(cube, ndim, nparams):

            efac = np.ones(npsr)
            equad = np.zeros(npsr)
            A = 10**cube[0]
            gam = cube[1]

            loglike = PALLikelihoods.modelIndependentFullPTAPL(psr, F, Diag, f, A, gam, \
                                                               Ared, gred, efac, equad, ORF)

            #print efac, rho, loglike

            return loglike
Example #10
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]
        A = 10**cube[2]
        gam = cube[3]

        loglike = PALLikelihoods.lentatiMarginalizedLikePL(
            psr, F, s, A, f, gam, efac, equad)

        #print efac, rho, loglike

        return loglike
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]
        rho = np.zeros(ndim-2)
        for ii in range(ndim-2):
            rho[ii] = cube[ii+2]
       
        loglike = PALLikelihoods.lentatiMarginalizedLike(psr, F, s, rho, efac, equad)

        #print efac, rho, loglike

        return loglike
Example #12
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]
        rho = np.zeros(ndim - 2)
        for ii in range(ndim - 2):
            rho[ii] = cube[ii + 2]

        loglike = PALLikelihoods.lentatiMarginalizedLike(
            psr, F, s, rho, efac, equad)

        #print efac, rho, loglike

        return loglike
Example #13
        def myloglike(cube, ndim, nparams):

            efac = np.ones(npsr)
            equad = np.zeros(npsr)
            rho = np.zeros(ndim)
            for ii in range(ndim):
                rho[ii] = cube[ii]

            loglike = PALLikelihoods.modelIndependentFullPTA(
                psr, F, Diag, rho, kappa, efac, equad, ORF)

            #print efac, rho, loglike

            return loglike
Example #14
    def loglike(x):

        A = x[0] * 1e-14
        gam = x[1]
        efac = x[2:(2 + npsr)]
        equad = 10**x[(2 + npsr):(2 + 2 * npsr)]
        Ared = 10**x[(2 + 2 * npsr):(2 + 3 * npsr)]
        gred = x[(2 + 3 * npsr):(2 + 4 * npsr)]


        loglike = PALLikelihoods.modelIndependentFullPTAPL(psr, F, Diag, f, A, gam, \
                                                           Ared, gred, efac, equad, ORF)

        return loglike
Example #15
    def loglike(x):


        A = x[0]*1e-14
        gam = x[1]
        efac = x[2:(2+npsr)]
        equad = 10**x[(2+npsr):(2+2*npsr)]
        Ared = 10**x[(2+2*npsr):(2+3*npsr)]
        gred = x[(2+3*npsr):(2+4*npsr)]

        
        loglike = PALLikelihoods.modelIndependentFullPTAPL(psr, F, Diag, f, A, gam, \
                                                           Ared, gred, efac, equad, ORF)

        return loglike
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]
        gam = cube[2]
        A = 10**cube[3]

        fs = np.zeros(args.ss)
        for ii in range(args.ss):
            fs[ii] = 10**cube[4+ii]
        rho2 = np.zeros(args.ss)
        for ii in range(args.ss):
            rho2[ii] = cube[ii+4+args.ss]
        
        # check to make sure frequencies are ordered
        ordered = np.all([fs[ii] < fs[ii+1] for ii in range(args.ss-1)])

        if ordered:

            F1 = list(PALutils.createfourierdesignmatrix(psr.toas, args.nmodes).T)
            tmp, f = PALutils.createfourierdesignmatrix(psr.toas, args.nmodes, freq=True)
            for ii in range(args.ss):
                F1.append(np.cos(2*np.pi*fs[ii]*psr.toas))
                F1.append(np.sin(2*np.pi*fs[ii]*psr.toas))

            F = np.array(F1).T
            F = np.dot(proj, F)

            # compute rho from A and gam

            # compute total time span of data
            Tspan = psr.toas.max() - psr.toas.min()

            # get power spectrum coefficients
            f1yr = 1/3.16e7
            rho = list(np.log10(A**2/12/np.pi**2 * f1yr**(gam-3) * f**(-gam)/Tspan))

            # compute total rho
            for ii in range(args.ss):
                rho.append(rho2[ii])

            loglike = PALLikelihoods.lentatiMarginalizedLike(psr, F, s, np.array(rho), efac**2, equad)
            
        else:

            loglike = -np.inf

        #print efac, rho, loglike

        return loglike
Example #17
        def myloglike(cube, ndim, nparams):

            efac = np.zeros(npsr)
            equad = np.zeros(npsr)
            Ared = np.zeros(npsr)
            gred = np.zeros(npsr)
            A = cube[0]
            gam = cube[1]
            for ii in range(npsr):
                efac[ii] = cube[ii + 2]
                equad[ii] = 10 ** cube[ii + 2 + npsr]
                Ared[ii] = 10 ** cube[ii + 2 + 2 * npsr]
                gred[ii] = cube[ii + 2 + 3 * npsr]

            loglike = PALLikelihoods.modelIndependentFullPTAPL(psr, F, Diag, f, A, gam, Ared, gred, efac, equad, ORF)

            # print efac, rho, loglike

            return loglike
def loglike(x):

    theta = x[0]
    phi = x[1]
    f = 10**x[2]
    h = x[3] * 1e-14
    psi = x[4]
    inc = x[5]
    phase = x[6]

    loglike = PALLikelihoods.marginalizedPulsarPhaseLikeNumerical(
        psr, theta, phi, phase, inc, psi, f, h, maximize=False)

    #print loglike

    if np.isnan(loglike):
        print('NaN log-likelihood. Not good...')
        return -np.inf
    else:
        return loglike
Example #19
def loglike(x):


    theta = x[0]
    phi = x[1]
    f = 10**x[2]
    h = x[3]*1e-14
    psi = x[4]
    inc = x[5]
    phase = x[6]
        
    loglike = PALLikelihoods.marginalizedPulsarPhaseLikeNumerical(psr, theta, phi, phase, inc, psi, f, h, maximize=False)

    #print loglike

    if np.isnan(loglike):
        print('NaN log-likelihood. Not good...')
        return -np.inf
    else:
        return loglike
Example #20
        def myloglike(cube, ndim, nparams):

            efac = np.zeros(npsr)
            equad = np.zeros(npsr)
            Ared = np.zeros(npsr)
            gred = np.zeros(npsr)
            A = cube[0]
            gam = cube[1]
            for ii in range(npsr):
                efac[ii] = cube[ii + 2]
                equad[ii] = 10**cube[ii + 2 + npsr]
                Ared[ii] = 10**cube[ii + 2 + 2 * npsr]
                gred[ii] = cube[ii + 2 + 3 * npsr]

            loglike = PALLikelihoods.modelIndependentFullPTAPL(psr, F, Diag, f, A, gam, \
                                                               Ared, gred, efac, equad, ORF)

            #print efac, rho, loglike

            return loglike
Example #21
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]

        fs = np.zeros(args.ss)
        for ii in range(args.ss):
            fs[ii] = 10**cube[2 + ii]
        rho = np.zeros(args.nmodes + args.ss)
        for ii in range(args.nmodes + args.ss):
            rho[ii] = cube[ii + 2 + args.ss]

        # check to make sure frequencies are ordered
        ordered = np.all([fs[ii] < fs[ii + 1] for ii in range(args.ss - 1)])

        if ordered:

            #F1 = list(PALutils.createfourierdesignmatrix(psr.toas, args.nmodes).T)
            if args.nmodes > 0:
                F1 = list(F.T)
            else:
                F1 = []

            for ii in range(args.ss):
                F1.append(np.cos(2 * np.pi * fs[ii] * psr.toas))
                F1.append(np.sin(2 * np.pi * fs[ii] * psr.toas))

            F2 = np.array(F1).T

            F2 = np.dot(proj, F2)

            loglike = PALLikelihoods.lentatiMarginalizedLike(
                psr, F2, s, rho, efac, equad)

        else:

            loglike = -np.inf

        #print efac, rho, loglike

        return loglike
    def myloglike(cube, ndim, nparams):

        efac = cube[0]
        equad = 10**cube[1]

        fs = np.zeros(args.ss)
        for ii in range(args.ss):
            fs[ii] = 10**cube[2+ii]
        rho = np.zeros(args.nmodes+args.ss)
        for ii in range(args.nmodes+args.ss):
            rho[ii] = cube[ii+2+args.ss]

        # check to make sure frequencies are ordered
        ordered = np.all([fs[ii] < fs[ii+1] for ii in range(args.ss-1)])

        if ordered:

            #F1 = list(PALutils.createfourierdesignmatrix(psr.toas, args.nmodes).T)
            if args.nmodes > 0:
                F1 = list(F.T)
            else:
                F1 = []

            for ii in range(args.ss):
                F1.append(np.cos(2*np.pi*fs[ii]*psr.toas))
                F1.append(np.sin(2*np.pi*fs[ii]*psr.toas))

            F2 = np.array(F1).T

            F2 = np.dot(proj, F2)
           
            loglike = PALLikelihoods.lentatiMarginalizedLike(psr, F2, s, rho, efac, equad)

        else:

            loglike = -np.inf

        #print efac, rho, loglike

        return loglike
Example #23
# set up frequency vector
if args.logsample:
    f = np.logspace(np.log10(flow), np.log10(fhigh), args.nfreqs)
else:
    f = np.linspace(flow, fhigh, args.nfreqs)

# carry out Fp search
if args.fpFlag:

    print('Beginning Fp Search with {0} pulsars, with frequency range {1} -- {2}'.format(npsr, f[0], f[-1]))
    
    fpstat = np.zeros(args.nfreqs)
    for ii in range(args.nfreqs):

        fpstat[ii] = PALLikelihoods.fpStat(psr, f[ii])


    print('Done Search. Computing False Alarm Probability')
    
    # single template FAP
    pf = np.array([PALutils.ptSum(npsr, fpstat[ii]) for ii in range(len(f))])

    # get total false alarm probability with trials factor: P_T = 1 - (1 - p)^N
    pfT = 1 - (1-pf)**len(f)

    # write results to file
    if not os.path.exists(args.outDir):
        os.makedirs(args.outDir)

    # get filename from hdf5 file

#############################################################################################


# now compute bound with scalar minimization function using Brent's method
hhigh = 1e-13
hlow = 1e-16
xtol = 1e-16
freq = args.freq
nreal = args.nreal

if freq is not None:

    # get reference f-statistic
    fstat_ref = PALLikelihoods.fpStat(psr, freq)

    # perform upper limit calculation
    inRange = False
    while not inRange:

        try:    # try brentq method
            h_up = brentq(upperLimitFunc, hlow, hhigh, xtol=xtol, \
                  args=(fstat_ref, freq, nreal, args.theta, args.phi, args.detect, args.dist))
            inRange = True
        except ValueError:      # bounds not in range
            if hhigh < 1e-11:   # don't go too high
                hhigh *= 2      # double high strain
            else:
                h_up = hhigh    # if too high, just set to upper bound
                inRange = True
Example #25
tref = np.min(tt)

# now scale pulsar time
for p in psr:
    p.toas -= tref

# compute pairwise overlap reduction function values
print('Computing Overlap Reduction Function Values')
ORF = PALutils.computeORF(psr)

# since we have defined our ORF to be normalized to 1
hdcoeff = ORF / 2

# compute optimal statistic
print('Running Cross correlation Statistic on {0} Pulsars'.format(npsr))
crosspower, crosspowererr = PALLikelihoods.crossPower(psr, args.gam)

# angular separation
xi = []
for ll in range(npsr):
    for kk in range(ll + 1, npsr):
        xi.append(PALutils.angularSeparation(psr[ll].theta, psr[ll].phi, \
                                            psr[kk].theta, psr[kk].phi))

# Perform chi-squared fit to determine the best-fit amplitude of the HD curve
hc_sqr = np.sum(crosspower*hdcoeff / (crosspowererr*crosspowererr)) / \
            np.sum(hdcoeff*hdcoeff / (crosspowererr*crosspowererr))

hc_sqrerr = 1.0 / np.sqrt(
    np.sum(hdcoeff * hdcoeff / (crosspowererr * crosspowererr)))
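The fit above is a one-parameter weighted least-squares fit of the measured
cross-power values to the Hellings-Downs coefficients: with rho_ab the
cross-power for pulsar pair (a, b), sigma_ab its uncertainty, and H_ab the
Hellings-Downs coefficient,

\[
\hat{h}_c^2 = \frac{\sum_{ab} \rho_{ab} H_{ab}/\sigma_{ab}^2}
                   {\sum_{ab} H_{ab}^2/\sigma_{ab}^2},
\qquad
\sigma_{\hat{h}_c^2} = \left(\sum_{ab} \frac{H_{ab}^2}{\sigma_{ab}^2}\right)^{-1/2}.
\]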
Example #26
# set up frequency vector
if args.logsample:
    f = np.logspace(np.log10(flow), np.log10(fhigh), args.nfreqs)
else:
    f = np.linspace(flow, fhigh, args.nfreqs)

# carry out Fp search
if args.fpFlag:

    print('Beginning Fp Search with {0} pulsars, with frequency range {1} -- {2}'.format(
        npsr, f[0], f[-1]))

    fpstat = np.zeros(args.nfreqs)
    for ii in range(args.nfreqs):

        fpstat[ii] = PALLikelihoods.fpStat(psr, f[ii])

    print('Done Search. Computing False Alarm Probability')

    # single template FAP
    pf = np.array(
        [PALutils.ptSum(npsr, fpstat[ii]) for ii in range(len(f))])

    # get total false alarm probability with trials factor: P_T = 1 - (1 - p)^N
    pfT = 1 - (1 - pf)**len(f)

    # write results to file
    if not os.path.exists(args.outDir):
        os.makedirs(args.outDir)

    # get filename from hdf5 file
Example #27
        # check to see if larger than in real data
        if fpstat > fstat_ref:
            count += 1

    # now get detection probability
    detProb = count/nreal

    print(h, detProb)

    return detProb - 0.95


#############################################################################################

# compute reference f-statistic
fstat_ref = PALLikelihoods.fpStat(psr, args.freq)

# now compute bound with scalar minimization function using Brent's method
hhigh = 5e-14
hlow = 1e-15
xtol = 1e-16
freq = args.freq
nreal = args.nreal
h_up = brentq(upperLimitFunc, hlow, hhigh, xtol=xtol)
#fbounded = minimize_scalar(upperLimitFunc, args=(fstat_ref, args.freq, args.nreal), \
#                           bounds=(hlow, hmid, hhigh), method='Brent')


    


#############################################################################################

# now compute bound with scalar minimization function using Brent's method
hhigh = 1e-13
hlow = 1e-16
xtol = 1e-16
freq = args.freq
nreal = args.nreal

if freq is not None:

    # get reference f-statistic
    fstat_ref = PALLikelihoods.fpStat(psr, freq)

    # perform upper limit calculation
    inRange = False
    while not inRange:

        try:  # try brentq method
            h_up = brentq(upperLimitFunc, hlow, hhigh, xtol=xtol, \
                  args=(fstat_ref, freq, nreal, args.theta, args.phi, args.detect, args.dist))
            inRange = True
        except ValueError:  # bounds not in range
            if hhigh < 1e-11:  # don't go too high
                hhigh *= 2  # double high strain
            else:
                h_up = hhigh  # if too high, just set to upper bound
                inRange = True
Example #29
tref = np.min(tt)

# now scale pulsar time
for p in psr:
    p.toas -= tref

# get G matrices
for p in psr:
    p.G = PALutils.createGmatrix(p.dmatrix)

# run Fp statistic to determine starting frequency
print('Running initial Fpstat search')
fsearch = np.logspace(-9, -7, 200)
fpstat = np.zeros(len(fsearch))
for ii in range(len(fsearch)):
    fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii])

# determine maximum likelihood frequency
fmaxlike = fsearch[np.argmax(fpstat)]
print('Maximum likelihood from f-stat search = {0}\n'.format(fmaxlike))


    
# prior ranges
thmin = phasemin = incmin = psimin = 0
thmax = incmax = np.pi
psimax = np.pi/2
phimin = 0
phimax = phasemax = 2*np.pi
ldmin = -4
ldmax = 4
#############################################################################################


# now compute bound with scalar minimization function using Brent's method
Ahigh = 1e-13
Alow = 5e-15
xtol = 1e-16
nreal = args.nreal

# initiate global variables
injAmp = []
injDetProb = []

# get reference optimal-statistic
print('Getting reference Optimal Statistic Value')
optStat_ref = PALLikelihoods.optStat(psr, ORF)[0]

# perform upper limit calculation
inRange = False
while not inRange:

    try:    # try brentq method
        A_up = brentq(upperLimitFunc, Alow, Ahigh, xtol=xtol, \
              args=(optStat_ref, nreal))
        inRange = True
    except ValueError:      # bounds not in range
        if Ahigh < 1e-11:   # don't go too high
            Ahigh *= 2      # double high strain
        else:
            A_up = Ahigh    # if too high, just set to upper bound
            inRange = True
Example #31
#invmat = glob.glob('/Users/Justin/Work/nanograv/nanograv/data_products/joris/*invCov*')
#
## get list of R matrices
#R = [PALutils.createRmatrix(p.dmatrix, p.err) for p in psr]
#
#for ct,p in enumerate(psr):
#    p.invCov = np.dot(R[ct].T, np.dot(p.invCov, R[ct]))


# compute pairwise overlap reduction function values
print('Computing Overlap Reduction Function Values')
ORF = PALutils.computeORF(psr)

# compute optimal statistic
print('Running Optimal Statistic on {0} Pulsars'.format(npsr))
Opt, sigma, snr = PALLikelihoods.optStat(psr, ORF, gam=args.gam)

print('Results of Search\n')

print('------------------------------------\n')

print('A_gw^2 = {0}'.format(Opt))
print('std. dev. = {0}'.format(sigma))
print('SNR = {0}'.format(snr))

if snr > 3.0:
    print('SNR of {0} is above threshold!'.format(snr))
else:
    up = np.sqrt(Opt + np.sqrt(2)*sigma*ss.erfcinv(2*(1-0.95)))
    print('2-sigma upper limit based on variance of estimators is A_gw < {0}'.format(up))
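The bound printed in the else branch is the usual Gaussian-estimator upper
limit: with \hat{A}^2 the optimal-statistic point estimate and sigma its
standard deviation,

\[
A_{\mathrm{gw}}^{95\%} =
\sqrt{\hat{A}^2 + \sqrt{2}\,\sigma\,\mathrm{erfc}^{-1}\!\big(2(1-0.95)\big)}.
\]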
def upperLimitFunc(h, fstat_ref, freq, nreal, theta=None, phi=None, detect=False, \
                  dist=None):
    """
    Compute the value of the fstat for a range of parameters, with fixed
    amplitude over many realizations.

    @param h: value of the strain amplitude to keep constant
    @param fstat_ref: value of fstat for real data set
    @param freq: GW frequency
    @param nreal: number of realizations

    """
    Tmaxyr = np.array([(p.toas.max() - p.toas.min()) / 3.16e7
                       for p in psr]).max()
    count = 0
    for ii in range(nreal):

        # draw parameter values
        gwtheta = np.arccos(np.random.uniform(-1, 1))
        gwphi = np.random.uniform(0, 2 * np.pi)
        gwphase = np.random.uniform(0, 2 * np.pi)
        gwinc = np.arccos(np.random.uniform(-1, 1))
        #gwpsi = np.random.uniform(-np.pi/4, np.pi/4)
        gwpsi = np.random.uniform(0, np.pi)

        # check to make sure source has not coalesced during observation time
        coal = True
        while coal:
            gwmc = 10**np.random.uniform(7, 10)
            tcoal = 2e6 * (gwmc / 1e8)**(-5 / 3) * (freq / 1e-8)**(-8 / 3)
            if tcoal > Tmaxyr:
                coal = False

        # determine distance in order to keep strain fixed
        gwdist = 4 * np.sqrt(
            2 / 5) * (gwmc * 4.9e-6)**(5 / 3) * (np.pi * freq)**(2 / 3) / h

        # convert back to Mpc
        gwdist /= 1.0267e14

        # check for fixed sky location
        if theta is not None:
            gwtheta = theta
        if phi is not None:
            gwphi = phi
        if dist is not None:
            gwdist = dist
            gwmc = ((gwdist * 1.0267e14) / 4 / np.sqrt(2 / 5) /
                    (np.pi * freq)**(2 / 3) * h)**(3 / 5) / 4.9e-6

        # create residuals
        for ct, p in enumerate(psr):
            inducedRes = PALutils.createResiduals(p, gwtheta, gwphi, gwmc, gwdist, \
                            freq, gwphase, gwpsi, gwinc, evolve=True)

            # replace residuals in pulsar object
            noise = np.dot(L[ct], np.random.randn(L[ct].shape[0]))
            p.res = np.dot(R[ct], noise + inducedRes)

        # compute f-statistic
        fpstat = PALLikelihoods.fpStat(psr, freq)

        # check to see if larger than in real data
        if detect:
            if PALutils.ptSum(npsr, fpstat) < 1e-4:
                count += 1
        else:
            if fpstat > fstat_ref:
                count += 1

    # now get detection probability
    detProb = count / nreal

    if args.dist:
        print('%e %e %f\n' % (freq, gwmc, detProb))
    else:
        print(freq, h, detProb)

    return detProb - 0.95
Example #33
#import glob
#invmat = glob.glob('/Users/Justin/Work/nanograv/nanograv/data_products/joris/*invCov*')
#
## get list of R matrices
#R = [PALutils.createRmatrix(p.dmatrix, p.err) for p in psr]
#
#for ct,p in enumerate(psr):
#    p.invCov = np.dot(R[ct].T, np.dot(p.invCov, R[ct]))

# compute pairwise overlap reduction function values
print('Computing Overlap Reduction Function Values')
ORF = PALutils.computeORF(psr)

# compute optimal statistic
print('Running Optimal Statistic on {0} Pulsars'.format(npsr))
Opt, sigma, snr = PALLikelihoods.optStat(psr, ORF, gam=args.gam)

print('Results of Search\n')

print('------------------------------------\n')

print('A_gw^2 = {0}'.format(Opt))
print('std. dev. = {0}'.format(sigma))
print('SNR = {0}'.format(snr))

if snr > 3.0:
    print('SNR of {0} is above threshold!'.format(snr))
else:
    up = np.sqrt(Opt + np.sqrt(2) * sigma * ss.erfcinv(2 * (1 - 0.95)))
    print('2-sigma upper limit based on variance of estimators is A_gw < {0}'.format(up))
Example #34
# now scale pulsar time
for p in psr:
    p.toas -= tref

# get G matrices
for p in psr:
    p.G = PALutils.createGmatrix(p.dmatrix)

# run Fp statistic to determine starting frequency
if args.freq is None:
    print('Running initial Fpstat search')
    fsearch = np.logspace(-9, -7, 1000)
    fpstat = np.zeros(len(fsearch))
    for ii in range(len(fsearch)):
        fpstat[ii] = PALLikelihoods.fpStat(psr, fsearch[ii])

    # determine maximum likelihood frequency
    fmaxlike = fsearch[np.argmax(fpstat)]
    print('Maximum likelihood from f-stat search = {0}\n'.format(fmaxlike))

# get determinant of covariance matrix for use in likelihood
logdetTerm = []
for ct, p in enumerate(psr):

    efac = p.efac
    equad = p.equad
    Amp = p.Amp
    gam = p.gam
    fH = p.fH
# now scale pulsar time
for p in psr:
    p.toas -= tref


# compute pairwise overlap reduction function values
print('Computing Overlap Reduction Function Values')
ORF = PALutils.computeORF(psr)

# since we have defined our ORF to be normalized to 1
hdcoeff = ORF/2

# compute optimal statistic
print('Running Cross correlation Statistic on {0} Pulsars'.format(npsr))
crosspower, crosspowererr = PALLikelihoods.crossPower(psr, args.gam)

# angular separation
xi = []
for ll in range(npsr):
    for kk in range(ll+1, npsr):
        xi.append(PALutils.angularSeparation(psr[ll].theta, psr[ll].phi, \
                                            psr[kk].theta, psr[kk].phi))

# Perform chi-squared fit to determine the best-fit amplitude of the HD curve
hc_sqr = np.sum(crosspower*hdcoeff / (crosspowererr*crosspowererr)) / \
            np.sum(hdcoeff*hdcoeff / (crosspowererr*crosspowererr))

hc_sqrerr = 1.0 / np.sqrt(np.sum(hdcoeff * hdcoeff / (crosspowererr * crosspowererr)))

# get reduced chi-squared value
Example #36
def upperLimitFunc(h):
    """
    Compute the value of the fstat for a range of parameters, with fixed
    amplitude over many realizations.

    @param h: value of the strain amplitude to keep constant
    @param fstat_ref: value of fstat for real data set
    @param freq: GW frequency
    @param nreal: number of realizations

    """
    
    Tmaxyr = np.array([(p.toas.max() - p.toas.min())/3.16e7 for p in psr]).max()
    count = 0
    for ii in range(nreal):

        # draw parameter values
        gwtheta = np.arccos(np.random.uniform(-1, 1))
        gwphi = np.random.uniform(0, 2*np.pi)
        gwphase = np.random.uniform(0, 2*np.pi)
        gwinc = np.arccos(np.random.uniform(-1, 1))
        gwpsi = np.random.uniform(-np.pi/4, np.pi/4)

        # check to make sure source has not coalesced during observation time
        gwmc = 10**np.random.uniform(7, 10)
        tcoal = 2e6 * (gwmc/1e8)**(-5/3) * (freq/1e-8)**(-8/3)
        if tcoal < Tmaxyr:
            gwmc = 1e5

        # determine distance in order to keep strain fixed
        gwdist = 4 * np.sqrt(2/5) * (gwmc*4.9e-6)**(5/3) * (np.pi*freq)**(2/3) / h

        # convert back to Mpc
        gwdist /= 1.0267e14

        # create residuals and refit for all pulsars
        for ct,p in enumerate(psr):
            inducedRes = PALutils.createResiduals(p, gwtheta, gwphi, gwmc, gwdist, \
                            freq, gwphase, gwpsi, gwinc)
 
            # create simulated data set
            noise = np.dot(L[ct], np.random.randn(L[ct].shape[0]))
            pp[ct].stoas[:] -= pp[ct].residuals()/86400
            pp[ct].stoas[:] += np.longdouble(np.dot(RQ[ct], noise)/86400)
            pp[ct].stoas[:] += np.longdouble(np.dot(RQ[ct], inducedRes)/86400)

            # refit
            pp[ct].fit(iters=3)

            # replace residuals in pulsar object
            p.res = pp[ct].residuals()

            print(p.name, p.rms()*1e6)

        # compute f-statistic
        fpstat = PALLikelihoods.fpStat(psr, freq)

        # check to see if larger than in real data
        if fpstat > fstat_ref:
            count += 1

    # now get detection probability
    detProb = count/nreal

    print(h, detProb)

    return detProb - 0.95
Example #37
def upperLimitFunc(h):
    """
    Compute the value of the fstat for a range of parameters, with fixed
    amplitude over many realizations.

    @param h: value of the strain amplitude to keep constant
    @param fstat_ref: value of fstat for real data set
    @param freq: GW frequency
    @param nreal: number of realizations

    """

    Tmaxyr = np.array([(p.toas.max() - p.toas.min()) / 3.16e7
                       for p in psr]).max()
    count = 0
    for ii in range(nreal):

        # draw parameter values
        gwtheta = np.arccos(np.random.uniform(-1, 1))
        gwphi = np.random.uniform(0, 2 * np.pi)
        gwphase = np.random.uniform(0, 2 * np.pi)
        gwinc = np.arccos(np.random.uniform(-1, 1))
        gwpsi = np.random.uniform(-np.pi / 4, np.pi / 4)

        # check to make sure source has not coalesced during observation time
        gwmc = 10**np.random.uniform(7, 10)
        tcoal = 2e6 * (gwmc / 1e8)**(-5 / 3) * (freq / 1e-8)**(-8 / 3)
        if tcoal < Tmaxyr:
            gwmc = 1e5

        # determine distance in order to keep strain fixed
        gwdist = 4 * np.sqrt(
            2 / 5) * (gwmc * 4.9e-6)**(5 / 3) * (np.pi * freq)**(2 / 3) / h

        # convert back to Mpc
        gwdist /= 1.0267e14

        # create residuals and refit for all pulsars
        for ct, p in enumerate(psr):
            inducedRes = PALutils.createResiduals(p, gwtheta, gwphi, gwmc, gwdist, \
                            freq, gwphase, gwpsi, gwinc)

            # create simulated data set
            noise = np.dot(L[ct], np.random.randn(L[ct].shape[0]))
            pp[ct].stoas[:] -= pp[ct].residuals() / 86400
            pp[ct].stoas[:] += np.longdouble(np.dot(RQ[ct], noise) / 86400)
            pp[ct].stoas[:] += np.longdouble(
                np.dot(RQ[ct], inducedRes) / 86400)

            # refit
            pp[ct].fit(iters=3)

            # replace residuals in pulsar object
            p.res = pp[ct].residuals()

            print(p.name, p.rms() * 1e6)

        # compute f-statistic
        fpstat = PALLikelihoods.fpStat(psr, freq)

        # check to see if larger than in real data
        if fpstat > fstat_ref:
            count += 1

    # now get detection probability
    detProb = count / nreal

    print(h, detProb)

    return detProb - 0.95
Example #38
    print(h, detProb)

    return detProb - 0.95


#############################################################################################

hhigh = 1e-13
hlow = 1e-15
xtol = 1e-16
nreal = args.nreal
freq = args.freq

# get reference f-statistic
fstat_ref = PALLikelihoods.fpStat(psr, freq)

# perform upper limit calculation
inRange = False
while not inRange:

    try:  # try brentq method
        h_up = brentq(upperLimitFunc, hlow, hhigh, xtol=xtol)
        inRange = True
    except ValueError:  # bounds not in range
        if hhigh < 1e-11:  # don't go too high
            hhigh *= 2  # double high strain
        else:
            h_up = hhigh  # if too high, just set to upper bound
            inRange = True