Example #1
    def addInverseCovFromNoiseFile(self, parfile, timfile, noisefile, DMOFF=None, DMXOFF=None, dailyAverage=False):
        """
        
        Add noise covariance matrix after timing model subtraction.

        """

        # Check whether the two files exist
        if not os.path.isfile(parfile) or not os.path.isfile(timfile):
            raise IOError("Cannot find parfile (%s) or timfile (%s)!" % (parfile, timfile))
        assert self.filename is not None, "ERROR: HDF5 file not set!"

        # 'a' means: read/write if exists, create otherwise
        self.h5file = h5.File(self.filename, 'a')

        # Get the Data group; it must already exist
        if "Data" in self.h5file:
            datagroup = self.h5file["Data"]
        else:
            raise IOError("Cannot add noise parameters if Data group does not exist!")

        # Load pulsar data from the JPL Cython tempo2 library
        t2pulsar = t2.tempopulsar(parfile, timfile)
        
        # turn off DMMODEL fitting
        if DMOFF is not None:
            t2pulsar['DMMODEL'].fit = False

        # turn off DMX fitting
        if DMXOFF is not None:
            DMXFlag = False
            print('Turning off DMX fitting and turning on DM fitting')
            for par in t2pulsar.pars:
                if 'DMX' in par:
                    t2pulsar[par].fit = False
                    t2pulsar['DM'].fit = True
                    DMXFlag = True
            if not DMXFlag:
                print('NO DMX for pulsar {0}'.format(t2pulsar.name))

        # refit 5 times to help ensure the fit has converged
        t2pulsar.fit(iters=5)

        # Get the Pulsars group; it must already exist
        if "Pulsars" in datagroup:
            pulsarsgroup = datagroup["Pulsars"]
        else:
            raise IOError("Cannot add noise parameters if pulsar group does not exist!")

        # The pulsar itself must already be present in the file
        if t2pulsar.name not in pulsarsgroup:
            raise IOError("%s must already exist in %s to add noise parameters!"
                          % (t2pulsar.name, self.filename))

        pulsarsgroup = pulsarsgroup[t2pulsar.name]

        # first create G matrix from design matrix and toas
        designmatrix = np.double(t2pulsar.designmatrix())
        toas = np.double(t2pulsar.toas()*86400)    # MJD days -> seconds
        errs = np.double(t2pulsar.toaerrs*1e-6)    # microseconds -> seconds

        # if doing daily averaging
        if dailyAverage:

            # get average quantities
            toas, qmatrix, errs, dmatrix, freqs, bands = PALutils.dailyAverage(t2pulsar)

            # construct daily-averaged TOAs (in seconds) and design matrix
            toas *= 86400
            designmatrix = np.dot(qmatrix, dmatrix)
        
        G = PALutils.createGmatrix(designmatrix)
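        # (createGmatrix conventionally builds G from the SVD of the design
        # matrix M: with M = U S V^T, G is the set of columns of U beyond
        # the rank of M, so G^T M = 0 and G projects out the timing model,
        # as in van Haasteren & Levin (2013). The exact PALutils
        # implementation may differ in detail.)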

        # create matrix of time lags
        tm = PALutils.createTimeLags(toas, toas, round=True)

        # now read the noise file to get the model parameters; the file is
        # a set of whitespace-separated "key ... value" lines. Optional
        # parameters (for models other than a pure power law) default to None.
        fH = None
        tau = None
        DMAmp = None
        DMgam = None

        with open(noisefile, 'r') as nfile:
            for line in nfile:

                fields = line.split()
                if not fields:
                    continue
                key = fields[0]

                # get amplitude
                if key == "Amp":
                    Amp = float(fields[-1])

                # get spectral index
                elif key == "gam":
                    gam = float(fields[-1])

                # get efac
                elif key == "efac":
                    efac = float(fields[-1])

                # get equad
                elif key == "equad":
                    equad = float(fields[-1])

                # get high-frequency cutoff if available
                elif key == "fH":
                    fH = float(fields[-1])

                # get correlation time scale if available
                elif key == "tau":
                    tau = float(fields[-1])

                # get DM amplitude if available
                elif key == "DMAmp":
                    DMAmp = float(fields[-1])

                # get DM spectral index if available
                elif key == "DMgam":
                    DMgam = float(fields[-1])

        # construct red and white noise covariance matrices
        red = PALutils.createRedNoiseCovarianceMatrix(tm, Amp, gam, fH=fH)
        white = PALutils.createWhiteNoiseCovarianceMatrix(errs, efac, equad, tau=tau)

        # construct post timing model marginalization covariance matrix
        cov = red + white
        pcov = np.dot(G.T, np.dot(cov, G))

        # finally construct "inverse"
        invCov = np.dot(G, np.dot(np.linalg.inv(pcov), G.T))

        # create dataset for inverse covariance matrix
        pulsarsgroup.create_dataset('invCov', data=invCov)

        # create dataset for G matrix
        pulsarsgroup.create_dataset('Gmatrix', data=G)

        # record noise parameter values
        pulsarsgroup.create_dataset('Amp', data=Amp)
        pulsarsgroup.create_dataset('gam', data=gam)
        pulsarsgroup.create_dataset('efac', data=efac)
        pulsarsgroup.create_dataset('equad', data=equad)
        if fH is not None:
            pulsarsgroup.create_dataset('fH', data=fH)
        if tau is not None:
            pulsarsgroup.create_dataset('tau', data=tau)
        if DMAmp is not None:
            pulsarsgroup.create_dataset('DMAmp', data=DMAmp)
        if DMgam is not None:
            pulsarsgroup.create_dataset('DMgam', data=DMgam)


        # Close the hdf5 file
        self.h5file.close()
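
The parser above reads a plain-text noise file of whitespace-separated
lines, keyed on the first token with the value taken from the last token.
A plausible noise file, inferred from that parser (the numbers are purely
illustrative), would be:

    Amp    1.2e-14
    gam    4.33
    efac   1.1
    equad  5.0e-7
    fH     1.0e-7

A minimal usage sketch follows; the DataFile class name and the file names
are assumptions for illustration, not part of the example above:

    # Hypothetical wrapper class holding self.filename; only the method
    # itself comes from the example above.
    df = DataFile('pulsars.hdf5')
    df.addInverseCovFromNoiseFile('J1713+0747.par', 'J1713+0747.tim',
                                  'J1713+0747_noise.txt')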
Example #2
    print "Maximum likelihood from f-stat search = {0}\n".format(fmaxlike)

# get determinant of covariance matrix for use in likelihood
logdetTerm = []
for ct, p in enumerate(psr):

    efac = p.efac
    equad = p.equad
    Amp = p.Amp
    gam = p.gam
    fH = p.fH

    print(p.name, efac, equad, Amp, gam)

    # get white noise covariance matrix
    white = PALutils.createWhiteNoiseCovarianceMatrix(p.err, efac, equad)

    # get red noise covariance matrix
    tm = PALutils.createTimeLags(p.toas, p.toas, round=True)
    red = PALutils.createRedNoiseCovarianceMatrix(tm, Amp, gam, fH=fH)

    C = np.dot(p.G.T, np.dot(red + white, p.G))
    cf = sl.cho_factor(C)
    logdetTerm.append(np.sum(2 * np.log(np.diag(cf[0]))))  # + p.G.shape[1]*np.log(2*np.pi))

# get null model log-likelihood
nullLike = 0
for ct, p in enumerate(psr):
    nullLike += -0.5 * logdetTerm[ct]
    nullLike += -0.5 * np.dot(p.res, np.dot(p.invCov, p.res))
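
The logdetTerm entries above use the Cholesky identity
log det C = 2 * sum_i log L_ii. A self-contained check of that identity,
independent of the pulsar objects (all names below are local to the
sketch):

    import numpy as np
    import scipy.linalg as sl

    # build a random symmetric positive-definite matrix
    rng = np.random.RandomState(0)
    A = rng.randn(50, 50)
    C = np.dot(A, A.T) + 50.0 * np.eye(50)

    # log-determinant from the Cholesky factor, as in the loop above
    cf = sl.cho_factor(C)
    logdet_chol = np.sum(2.0 * np.log(np.diag(cf[0])))

    # reference value from numpy
    sign, logdet_np = np.linalg.slogdet(C)
    assert sign > 0 and np.allclose(logdet_chol, logdet_np)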
Example #3
def firstOrderLikelihood(psr, ORF, Agw, gamgw, Ared, gred, efac, equad,
                         interpolate=False):
    """
    Compute the value of the first-order likelihood as defined in 
    Ellis, Siemens, van Haasteren (2013).

    @param psr: List of pulsar object instances
    @param ORF: Vector of pairwise overlap reduction values
    @param Agw: Amplitude of GWB in standard strain amplitude units
    @param gamgw: Power spectral index of GWB
    @param Ared: Vector of amplitudes of intrinsic red noise in GWB strain units
    @param gred: Vector of power spectral indices of intrinsic red noise
    @param efac: Vector of efacs 
    @param equad: Vector of equads
    @param interpolate: Boolean to perform interpolation only with compressed
                        data. (default = False)

    @return: Log-likelihood value

    """
    npsr = len(psr)
    loglike = 0
    tmp = []

    # start loop to evaluate auto-terms
    for ll in range(npsr):

        r1 = np.dot(psr[ll].G.T, psr[ll].res)

        # create time lags
        tm = PALutils.createTimeLags(psr[ll].toas, psr[ll].toas)

        # TODO: finish option to do interpolation when using compression

        # calculate auto GW covariance matrix
        SC = PALutils.createRedNoiseCovarianceMatrix(tm, Agw, gamgw)

        # calculate auto red noise covariance matrix
        SA = PALutils.createRedNoiseCovarianceMatrix(tm, Ared[ll], gred[ll])

        # create white noise covariance matrix
        # TODO: add ability to use multiple efacs for different backends
        white = PALutils.createWhiteNoiseCovarianceMatrix(psr[ll].err, efac[ll], equad[ll])

        # total auto-covariance matrix
        P = SC + SA + white

        # sandwich with G matrices
        Ppost = np.dot(psr[ll].G.T, np.dot(P, psr[ll].G))

        # Cholesky factorization of the projected covariance
        cf = sl.cho_factor(Ppost)

        # solution vector Ppost^{-1} r1
        rr = sl.cho_solve(cf, r1)

        # temporarily store G Ppost^{-1} G^T res for the cross terms below
        tmp.append(np.dot(psr[ll].G, rr))

        # add to log-likelihood
        loglike += -0.5 * (np.sum(np.log(2*np.pi*np.diag(cf[0])**2)) + np.dot(r1, rr))

    # now compute cross terms
    k = 0
    for ll in range(npsr):
        for kk in range(ll+1, npsr):

            # create time lags
            tm = PALutils.createTimeLags(psr[ll].toas, psr[kk].toas)

            # create cross covariance matrix
            SIJ = PALutils.createRedNoiseCovarianceMatrix(tm, 1, gamgw)

            # carry out matrix-vector operations
            tmp1 = np.dot(SIJ, tmp[kk])

            # add to likelihood
            loglike += ORF[k]/2 * Agw**2 * np.dot(tmp[ll], tmp1)
            
            # increment ORF counter
            k += 1

    return loglike
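
The ORF vector passed to firstOrderLikelihood must be ordered exactly like
the (ll, kk) double loop above: pairs (0,1), (0,2), ..., (1,2), and so on.
A sketch of building it with the standard Hellings-Downs curve (the
normalization convention is an assumption and may differ from PALutils by
a constant factor; the sky positions are toy values):

    import numpy as np

    def hellingsDowns(xi):
        # standard Hellings-Downs overlap reduction for angular
        # separation xi in radians
        x = (1.0 - np.cos(xi)) / 2.0
        return 1.5 * x * np.log(x) - 0.25 * x + 0.5

    # toy unit vectors toward three pulsars
    phats = [np.array([1.0, 0.0, 0.0]),
             np.array([0.0, 1.0, 0.0]),
             np.array([0.0, 0.0, 1.0])]

    # same pair ordering as the cross-term loop in firstOrderLikelihood
    ORF = []
    for ll in range(len(phats)):
        for kk in range(ll + 1, len(phats)):
            xi = np.arccos(np.dot(phats[ll], phats[kk]))
            ORF.append(hellingsDowns(xi))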
Example #4
        try:
            # dataset[()] reads the stored scalar (the old ".value" API
            # was removed in h5py 3)
            fH = pfile['Data']['Pulsars'][p.name]['fH'][()]
        except KeyError:
            fH = None


        # get values from hdf5 file
        try:
            Amp = pfile['Data']['Pulsars'][p.name]['Amp'][()]
            gam = pfile['Data']['Pulsars'][p.name]['gam'][()]
            efac = pfile['Data']['Pulsars'][p.name]['efac'][()]
            equad = pfile['Data']['Pulsars'][p.name]['equad'][()]
            tm = PALutils.createTimeLags(p.toas, p.toas)

            red = PALutils.createRedNoiseCovarianceMatrix(tm, Amp, gam, fH=fH)
            white = PALutils.createWhiteNoiseCovarianceMatrix(p.err, efac, equad)

            cov = red + white

            # cholesky decomp
            L = np.linalg.cholesky(cov)

            # set random number seed
            if args.seed:
                print('Using fixed random number seed!')
                np.random.seed(seed=args.seed*(ct+1))
            
            # zero mean unit variance
            w = np.random.randn(p.ntoa)

            # get induced residuals
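The excerpt is cut off after its last comment. Given the Cholesky factor L
and the unit-variance draw w above, a plausible completion of that step
(an assumption, not text from the source) is the standard coloring
transform:

            # color the white draw so that res has covariance cov = L L^T
            # (plausible completion; not in the original excerpt)
            res = np.dot(L, w)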