Example #1
def main():
    for pfdfn in args.files:
        print pfdfn
        pfd = prepfold.pfd(pfdfn)
        if args.sefd is not None:
            sefd = args.sefd
        elif args.gain is not None and args.tsys is not None:
            fctr = 0.5*(pfd.hifreq+pfd.lofreq)
            glon, glat = sextant.equatorial_to_galactic(pfd.rastr, pfd.decstr, 
                                                        input='sexigesimal', 
                                                        output='deg')
            print "Galactic Coords: l=%g deg, b=%g deg" % (glon, glat)
            tsky = skytemp.get_skytemp(glon, glat, freq=fctr)[0]
            print "Sky temp at %g MHz: %g K" % (fctr, tsky)
            sefd = (args.tsys+tsky)/args.gain
        print sefd, args.fwhm, args.sep
        if (sefd is not None) and (args.fwhm is not None) and (args.sep is not None):
            factor = estimate_snr.airy_pattern(args.fwhm, args.sep)
            print "Pulsar is off-centre"
            print "Reducing SEFD by factor of %g (SEFD: %g->%g)" % (factor, sefd, sefd/factor)
            sefd /= factor
        if args.model_file is not None:
            obs = ObservationWithModel(pfd, args.model_file, sefd=sefd, verbose=True)
        elif args.gauss_file is not None:
            obs = ObservationWithGauss(pfd, args.gauss_file, sefd=sefd, verbose=True)
        else:
            obs = Observation(pfd, sefd=sefd, verbose=True)
        plt.show()
        print " ".join(obs.notes)
Example #2
def load_pfds(dir=SAMPLE_FILES_DIR):
    SAMPLE_FILES = glob.glob(dir + '*.pfd')
    pfds = []
    for f in SAMPLE_FILES:
        pf = pfd(f)
        pfds.append(pf)
    return pfds
Example #3
def rate_candidate(pfdfile, ratings=all_ratings):
    """
    Calculate all the ratings for a candidate.

    Parameters
    ----------
    pfdfile : string
        The name of a prepfold .pfd file

    Returns
    -------
    status : int
        The exit status.  status = 0 for success, 1 for failure.
    """
    try:
        # Create an instance of the pfd class
        pfd = PF.pfd(pfdfile)
        # Open a file for storing the ratings
        outfile = open("%s.ratings"%pfdfile, "w")
        # Calculate all the ratings
        for rating in ratings:
            rating_names,rating_values = rating(pfd)
            # Write each rating
            for n,v in zip(rating_names,rating_values):
                outfile.write("%-25s  %9.6f\n"%(n,v))
        outfile.close()
        status = 0

    # If anything went wrong, return a non-zero exit status
    except:
        status = 1

    return status
Example #4
def load_pfds(dir=SAMPLE_FILES_DIR):
    SAMPLE_FILES = glob.glob(dir+'*.pfd')
    pfds = []
    for f in SAMPLE_FILES:
        pf = pfd(f)
        pfds.append(pf)
    return pfds
Example #5
def main():
    pfdfn = sys.argv[1]

    pfd = prepfold.pfd(pfdfn)

    cand = candidate.Candidate(pfd.topo_p1, pfd.bary_p1, pfd.bestdm, \
                        psr_utils.ra_to_rad(pfd.rastr)*psr_utils.RADTODEG, \
                        psr_utils.dec_to_rad(pfd.decstr)*psr_utils.RADTODEG, \
                        pfdfn)

    print "Loaded %s" % cand.pfdfn
    print "    Best topo period (s): %f" % cand.topo_period
    print "    Best bary period (s): %f" % cand.bary_period
    print "    Best DM (cm^-3/pc): %f" % cand.dm
    print "    RA (J2000 - deg): %f" % cand.raj_deg
    print "    Dec (J2000 - deg): %f" % cand.decj_deg

    print "-"*10
    pprint.pprint(cand.__dict__)
    print "-"*10

    gauss.add_data(cand)

    print "Added gaussian fit to cand"
    pprint.pprint(cand.__dict__)
    print "-"*10
    fit_and_plot(cand)
Example #6
def rate_candidate(pfdfile, ratings=all_ratings):
    """
    Calculate all the ratings for a candidate.

    Parameters
    ----------
    pfdfile : string
        The name of a prepfold .pfd file

    Returns
    -------
    status : int
        The exit status.  status = 0 for success, 1 for failure.
    """
    try:
        # Create an instance of the pfd class
        pfd = PF.pfd(pfdfile)
        # Open a file for storing the ratings
        outfile = open("%s.ratings" % pfdfile, "w")
        # Calculate all the ratings
        for rating in ratings:
            rating_names, rating_values = rating(pfd)
            # Write each rating
            for n, v in zip(rating_names, rating_values):
                outfile.write("%-25s  %9.6f\n" % (n, v))
        outfile.close()
        status = 0

    # If anything went wrong, return a non-zero exit status
    except:
        status = 1

    return status
Example #7
def extract_file(dir, candidate, f, with_bestprof):
    filename = f["filename"]
    pfd_filename = filename.split(".ps.gz")[0]

    path = f["path"]
    base, junk = filename.split('_DM')

    # Get filename of pfd file ("rfn")
    if c.survey == "PALFA":
        beam = base.split("_")[-1][0]  # PALFA beam number
        rfn = os.path.join(dir, beam, pfd_filename)  # PALFA pfd files are inside a subdirectory in the tarball
    elif c.survey == "DRIFT":
        rfn = os.path.join(dir, pfd_filename)
    else:
        raise ValueError("Unknown survey '%s'" % c.survey)

    if not os.path.exists(rfn):
        # Un-tar the pfd files
        tar_cmd = ["tar", "-C", dir, "--wildcards", "-x"]

        # PALFA tarballs don't have _pfd in the filename
        tgz_path = os.path.join(path, base + ".tgz")
        tar_path = os.path.join(path, base + ".tar")
        tar_gz_path = os.path.join(path, base + ".tar.gz")
        pfd_tgz_path = os.path.join(path, base + "_pfd.tgz")
        pfd_tar_path = os.path.join(path, base + "_pfd.tar")
        pfd_tar_gz_path = os.path.join(path, base + "_pfd.tar.gz")
        if os.path.exists(tar_path):
            extra_args = ["-f", tar_path]
        elif os.path.exists(tgz_path):
            extra_args = ["-z", "-f", tgz_path]
        elif os.path.exists(tar_gz_path):
            extra_args = ["-z", "-f", tar_gz_path]
        elif os.path.exists(pfd_tar_path):
            extra_args = ["-f", pfd_tar_path]
        elif os.path.exists(pfd_tgz_path):
            extra_args = ["-z", "-f", pfd_tgz_path]
        elif os.path.exists(pfd_tar_gz_path):
            extra_args = ["-z", "-f", pfd_tar_gz_path]
        else:
            raise ValueError("Cannot find tar file")

        tar_cmd = tar_cmd + extra_args + ["*.pfd"]
        retcode = subprocess.call(tar_cmd)
        if retcode:
            raise ValueError("tar extraction failed with return code %d" % retcode)

    if with_bestprof:
        if not os.path.exists(rfn + ".bestprof"):
            # Create bestprof file using 'show_pfd'
            dir, pfdfn = os.path.split(rfn)
            retcode = subprocess.call(["show_pfd", rfn, "-noxwin"], stdout=subprocess.PIPE)
            #retcode = subprocess.call(["show_pfd", rfn, "-noxwin"])
            if retcode:
                raise ValueError("show_pfd failed with return code %d; pfd file is %s, directory contents are %s\n" % (retcode, rfn, sorted(os.listdir(dir))))
            shutil.move(pfdfn + ".bestprof", dir)
            shutil.move(pfdfn + ".ps", dir)

    return prepfold.pfd(rfn)
Example #8
def folding_job(job, sjob):

    # Fold the best candidates
    cands_folded = 0
    for cand in job.all_accel_cands:
        print "At cand %s" % str(cand)
        if cands_folded == config.searching.max_cands_to_fold:
            break
        if cand.sigma >= config.searching.to_prepfold_sigma:
            print "...folding"

            # If the candidate was found in a fraction of the data, just fold this fraction
            if cand.npart:
                folding_cmd, outfilenm = get_folding_command(cand, sjob[cand.ipart])
            else:
                folding_cmd, outfilenm = get_folding_command(cand, job)
            job.folding_time += timed_execute(folding_cmd)

            # Now apply the KJ algorithm
            kj_score_cmd = "%s -f %s" % (os.path.join(config.basic.kj_utilsdir, 'bin/autos2.exe'), \
                                         outfilenm + '_ACCEL_Cand_%d.pfd' % cand.candnum)
            job.folding_time += timed_execute(kj_score_cmd)

            cands_folded += 1
    job.num_cands_folded = cands_folded

    # Rate candidates
    timed_execute("rate_pfds.py --redirect-warnings --include-all -x pulse_width *.pfd ")
    sys.stdout.flush()

    # Calculate some candidate attributes from pfds
    attrib_file = open('candidate_attributes.txt', 'w')
    for pfdfn in glob.glob("*.pfd"):
        attribs = {}
        pfd = prepfold.pfd(pfdfn)
        red_chi2 = pfd.bestprof.chi_sqr
        dof = pfd.proflen - 1
        attribs['prepfold_sigma'] = \
                -scipy.stats.norm.ppf(scipy.stats.chi2.sf(red_chi2*dof, dof))
        off_red_chi2 = pfd.estimate_offsignal_redchi2()
        new_red_chi2 = red_chi2 / off_red_chi2
        # Prepfold sigma rescaled to deal with chi-squared suppression,
        # a problem when strong RFI is present
        attribs['rescaled_prepfold_sigma'] = \
                -scipy.stats.norm.ppf(scipy.stats.chi2.sf(new_red_chi2*dof, dof))
        for key in attribs:
            attrib_file.write("%s\t%s\t%.3f\n" % (pfdfn, key, attribs[key]))
    attrib_file.close()

    # Now step through the .ps files and convert them to .png and gzip them
    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        timed_execute("pstoimg -density 150 -flip cw %s" % (psfile))
        timed_execute("gzip -f " + psfile)
Example #9
def get_bp(scan_fname, temp_fname, direction, dr=0.1333):

    pf = prepfold.pfd(scan_fname)
    pf.dedisperse()

    # BEST WAY TO DO THIS??
    nsub = pf.npart
    nchan = 1
    nbin = pf.proflen

    profs = pf.combine_profs(nsub, nchan).squeeze()

    # Calculate position offsets.
    mid_time = (pf.mid_secs[0] + pf.mid_secs[-1]) / 2.0
    times = np.linspace(pf.mid_secs[0], pf.mid_secs[-1], nsub) - mid_time
    offsets_arcmin = times * dr

    # Is this necessary? Uses the template to determine, given some threshold.
    #on_inds, off_inds = on_off(model_fname,nbin,test=0)
    ii, ee = amps_errs(profs, temp_fname)

    # ***  Probably need a cos(dec) correction for offsets_arcmin...!  ***
    # 1=Dec, 0=RA
    if direction == 1:
        yy = offsets_arcmin
        xx = np.zeros(len(yy))
        offs = yy

    elif direction == 0:
        xx = offsets_arcmin
        yy = np.zeros(len(xx))
        offs = xx

    # Fit beam profile, print/plot results
    bp_vals, bp_errs = fit_1d_bp(offs, ii, ee)
    #plt.errorbar(offs,ii,yerr=ee,fmt='o',capsize=3)
    #plt.plot(offs,gaussian_beam(offs,bp_vals[0],bp_vals[1]),'--r')
    print bp_vals, bp_errs
    resids = ii - gaussian_beam(offs, bp_vals[0], bp_vals[1])
    R_std = np.std(resids[ii > 0])

    #print "Offset    Err:"
    #for oset, err, amp in zip(offs,ee,ii): print oset,err, amp

    plt.figure(0)
    plt.errorbar(offs, ii / R_std, yerr=ee / R_std, fmt='o', capsize=3)
    plt.plot(offs, gaussian_beam(offs, bp_vals[0], bp_vals[1]) / R_std, '--r')
    plt.figure(1)
    plt.errorbar(offs, resids, yerr=ee / R_std, fmt='o', capsize=3)
    plt.plot(offs, [0] * len(offs), '--r')
    plt.show()

    return np.array(xx), np.array(yy), np.array(ii), np.array(ee)
Example #10
def read_pfd_file(pfdfn):
    """Return a Candidate object for the pfd given.
        Input:
            pfdfn: PRESTO *.pfd file name.
        Output:
            cand: Candidate object constructed from the given pfd
                file name.
    """
    pfd = prepfold.pfd(pfdfn)
    cand = Candidate(pfd.topo_p1, pfd.bary_p1, pfd.bestdm, \
                    psr_utils.ra_to_rad(pfd.rastr)*psr_utils.RADTODEG, \
                    psr_utils.dec_to_rad(pfd.decstr)*psr_utils.RADTODEG, \
                    pfdfn)
    return cand
Example #11
def main():
    for pfdfn in args.pfdfns:
        pfd = prepfold.pfd(pfdfn)
        lines = []
        if args.headers is not None:
            for header in args.headers:
                lines.append("# " + args.sep.join(header.split(",")).decode("string-escape"))
        for attrs in args.attrs:
            vals = []
            for attr in attrs.split(","):
                if (attr[0] == "[") and (attr[-1] == "]"):
                    vals.append(attr[1:-1])
                else:
                    vals.append("%s" % getattr(pfd, attr))
            lines.append(args.sep.join(vals).decode("string-escape"))
        print "\n".join(lines)
Example #12
def read_pfd_file(pfdfn):
    """Return a Candidate object for the pfd given.

        Input:
            pfdfn: PRESTO *.pfd file name.

        Output:
            cand: Candidate object constructed from the given pfd
                file name.
    """
    pfd = prepfold.pfd(pfdfn)
    cand = Candidate(pfd.topo_p1, pfd.bary_p1, pfd.bestdm, \
                    psr_utils.ra_to_rad(pfd.rastr)*psr_utils.RADTODEG, \
                    psr_utils.dec_to_rad(pfd.decstr)*psr_utils.RADTODEG, \
                    pfdfn)
    return cand
Example #13
def main():
    for pfdfn in args.pfdfns:
        pfd = prepfold.pfd(pfdfn)
        lines = []
        if args.headers is not None:
            for header in args.headers:
                lines.append(
                    '# ' +
                    args.sep.join(header.split(',')).decode('string-escape'))
        for attrs in args.attrs:
            vals = []
            for attr in attrs.split(','):
                if (attr[0] == '[') and (attr[-1] == ']'):
                    vals.append(attr[1:-1])
                else:
                    vals.append("%s" % getattr(pfd, attr))
            lines.append(args.sep.join(vals).decode('string-escape'))
        print "\n".join(lines)
Example #14
def extract_file(dir,candidate):
    DBconn = usual_database()
    f = get_one(MySQLdb.cursors.DictCursor(DBconn),"SELECT * FROM pdm_plot_pointers WHERE pdm_cand_id = %s", candidate["pdm_cand_id"])
    DBconn.close()
    if f is None:
        raise ValueError("Warning: candidate %d does not appear to have file information\n" % candidate['pdm_cand_id'])

    filename = f["filename"]
    pfd_filename = filename.split(".ps.gz")[0]

    path = f["path"]
    base, junk = filename.split('_DM')
    if c.survey=="PALFA":
        beam = base.split("_")[-1][0] # PALFA beam number
        rfn = os.path.join(dir,beam,pfd_filename) # PALFA pfd files are inside subdirectory in tarball
    elif c.survey=="DRIFT":
        rfn = os.path.join(dir,pfd_filename)
    else:
        raise ValueError("Unknown survey '%s'" % c.survey)
    if not os.path.exists(rfn):
        # FIXME: double-check none starts with /
        
        # PALFA tarballs don't have _pfd in the filename
        tgz_path = os.path.join(path,base+".tgz")
        tar_path = os.path.join(path,base+".tar")
        tar_gz_path = os.path.join(path,base+".tar.gz")
        pfd_tgz_path = os.path.join(path,base+"_pfd.tgz")
        pfd_tar_path = os.path.join(path,base+"_pfd.tar")
        pfd_tar_gz_path = os.path.join(path,base+"_pfd.tar.gz")
        if os.path.exists(tar_path):
            subprocess.call(["tar","-C",dir,"--wildcards","-x","*.pfd","-f",tar_path])
        elif os.path.exists(tgz_path):
            subprocess.call(["tar","-C",dir,"--wildcards","-x","*.pfd","-z","-f",tgz_path])
        elif os.path.exists(tar_gz_path):
            subprocess.call(["tar","-C",dir,"--wildcards","-x","*.pfd","-z","-f",tar_gz_path])
        elif os.path.exists(pfd_tar_path):
            subprocess.call(["tar","-C",dir,"--wildcards","-x","*.pfd","-f",pfd_tar_path])
        elif os.path.exists(pfd_tgz_path):
            subprocess.call(["tar","-C",dir,"--wildcards","-x","*.pfd","-z","-f",pfd_tgz_path])
        elif os.path.exists(pfd_tar_gz_path):
            subprocess.call(["tar","-C",dir,"--wildcards","-x","*.pfd","-z","-f",pfd_tar_gz_path])
        else:
            raise ValueError("Cannot find tar file")
    return prepfold.pfd(rfn)
Example #15
def get_bpo(pfd_fname, model_fname, dr=0.666, mode="snr", nsubints=None):

    pf = prepfold.pfd(pfd_fname)
    pf.dedisperse()

    # BEST WAY TO DO THIS??
    nsub = pf.npart
    nchan = 1
    nbin = pf.proflen

    if nsubints and nsub > nsubints:
        if nsub % nsubints:
            newsubints = float(nsub) / (int(nsub) / int(nsubints))
        else:
            newsubints = nsubints
    elif nsubints and nsub < nsubints:
        print "Cannot scrunch to %d subints since original file has %d subints." % (
            nsubints, nsub)
        newsubints = nsub  # fall back to the original number of subints
    else:
        newsubints = nsub

    profs = pf.combine_profs(newsubints, nchan)

    # Calculate position offsets. First method doesn't work well. Why?
    mid_time = (pf.mid_secs[0] + pf.mid_secs[-1]) / 2.0
    times = np.linspace(pf.mid_secs[0], pf.mid_secs[-1], newsubints) - mid_time
    offsets_arcmin = times * dr  #*(nsub/newsubints)  # dr is in arcmin/second

    #pdata = pf.combine_profs(1,1).flatten()
    on_inds, off_inds = on_off(model_fname, nbin, test=0)
    snrs = get_snrs(profs, model_fname, mode)

    # Get BPO ready to return...
    x = bpo(offsets_arcmin, snrs)
    x.fname = pfd_fname
    x.times = times  #_0
    x.drift_rate = dr
    return x
Example #16
def test(pfd_fname,nsubints=None,dr=0.666):

    pf = prepfold.pfd(pfd_fname)
    pf.dedisperse()

    # BEST WAY TO DO THIS?? 
    nsub    = pf.npart
    nchan   = 1
    nbin    = pf.proflen
  
    if nsubints and nsub > nsubints:
        if nsub % nsubints:
            newsubints = float(nsub)/(int(nsub)/int(nsubints))
        else:
            newsubints = nsubints
    elif nsubints and nsub < nsubints:
        print "Cannot scrunch to %d subints since original file has %d subints." % (nsubints, nsub)
        newsubints = nsub  # fall back to the original number of subints
    else:
        newsubints = nsub

    profs = pf.combine_profs(newsubints, nchan)

    mid_time = (pf.mid_secs[0]+pf.mid_secs[-1])/2.0
    times = np.linspace(pf.mid_secs[0],pf.mid_secs[-1],newsubints) - mid_time
    offsets_arcmin = times*dr    # dr is in arcmin/second

    print ""
    print "Time (mid) first subint:    %s" % (pf.mid_secs[0])
    print "Time (mid) last subint:     %s" % (pf.mid_secs[-1])
    print "Time half-way through scan: %s" % (mid_time)
    print ""
    print "Times array (seconds):"
    print times
    print ""
    print "Offsets array (arcmin):"
    print offsets_arcmin
    print ""
Example #17
def read_pfds(fns):
    """
    Read pfd files and return arrays of period, period error and mjds
    
    Input: fns - a list of files that contain a list of pfd files to read
    """
    ps = []
    perrs = []
    mjds = []
    
    for fn in fns:
        file = open(fn, 'r')
        pfdfns = [x.strip() for x in file.readlines() if x.strip() and not x.strip().startswith('#')]
        file.close()
        for pfdfn in pfdfns:
            p = prepfold.pfd(pfdfn)
            b = bestprof.bestprof("%s.bestprof" % pfdfn)
            if options.extendnum and (b.p1err_bary < b.p1_bary):
                # If extension is requested and p-dot is detected (its error is smaller than its value)
                maxt = 300  # np.sqrt((0.25*b.p0_bary)**2 - b.p0err_bary**2)/b.p1err_bary
                tstep = maxt/options.extendnum
                if options.debuglevel:
                    print "Bary Period (s): %g +/- %g" % (b.p0_bary, b.p0err_bary)
                    print "Bary P-dot (s/s): %g +/- %g" % (b.p1_bary, b.p1err_bary)
                    print "Bary Epoch (MJD): %.12f" % p.bepoch
                    print "    Period points (included phantom period measurements)"
                    print "    (T_max = %g (s), T_step = %g (s))" % (maxt, tstep)
                for ii in np.arange(-options.extendnum, options.extendnum+1):
                    ps.append(b.p0_bary+b.p1_bary*ii*tstep)
                    perrs.append(np.sqrt((b.p0err_bary)**2+(ii*tstep*b.p1err_bary)**2)*options.efac)
                    mjds.append(p.bepoch+ii*tstep/psr_utils.SECPERDAY)
                    print "  %.15f  %.10f   %.10f" % (mjds[-1], ps[-1]*1000, perrs[-1]*1000)
                    if options.debuglevel:
                        print "   P (s): %g +/- %g @ MJD=%.12f" % (ps[-1], perrs[-1], mjds[-1])
            else:
                ps.append(b.p0_bary)
                perrs.append(b.p0err_bary*options.efac)
                mjds.append(p.bepoch)
                print "  %.15f  %.10f   %.10f" % (mjds[-1], ps[-1]*1000, perrs[-1]*1000)
    return np.array(ps), np.array(perrs), np.array(mjds)
Example #18
def main():
    for pfdfn in args.files:
        print pfdfn
        pfd = prepfold.pfd(pfdfn)
        if args.sefd is not None:
            sefd = args.sefd
        elif args.gain is not None and args.tsys is not None:
            fctr = 0.5 * (pfd.hifreq + pfd.lofreq)
            glon, glat = sextant.equatorial_to_galactic(pfd.rastr,
                                                        pfd.decstr,
                                                        input='sexigesimal',
                                                        output='deg')
            print "Galactic Coords: l=%g deg, b=%g deg" % (glon, glat)
            tsky = skytemp.get_skytemp(glon, glat, freq=fctr)[0]
            print "Sky temp at %g MHz: %g K" % (fctr, tsky)
            sefd = (args.tsys + tsky) / args.gain
        print sefd, args.fwhm, args.sep
        if (sefd is not None) and (args.fwhm is not None) and (args.sep
                                                               is not None):
            factor = estimate_snr.airy_pattern(args.fwhm, args.sep)
            print "Pulsar is off-centre"
            print "Reducing SEFD by factor of %g (SEFD: %g->%g)" % (
                factor, sefd, sefd / factor)
            sefd /= factor
        if args.model_file is not None:
            obs = ObservationWithModel(pfd,
                                       args.model_file,
                                       sefd=sefd,
                                       verbose=True)
        elif args.gauss_file is not None:
            obs = ObservationWithGauss(pfd,
                                       args.gauss_file,
                                       sefd=sefd,
                                       verbose=True)
        else:
            obs = Observation(pfd, sefd=sefd, verbose=True)
        plt.show()
        print " ".join(obs.notes)
Example #19
from prepfold import pfd
import numpy as np
from pylab import *
import matplotlib.pyplot as plt
from scipy.interpolate import RectBivariateSpline as interp
from scipy import ndimage, array, ogrid, mgrid
import sys, os

if __name__ == '__main__':
    f1 = sys.argv[1]
    f2 = sys.argv[2]
    if not f1.endswith('pfd'):
        print 'file name %s does not end with pfd' % (f1)
        sys.exit(1)

    pfdfile = pfd(f1)
    pfdfile.dedisperse()
    profs = pfdfile.profs
    pshape = profs.shape
    x, y, z = profs.shape
    data = profs.reshape((-1, 1))
    del pfdfile

    mean = np.mean(data)
    var = np.std(data)
    data = (data - mean) / var
    profs = data.reshape(pshape)
    X, Y, Z = ogrid[0:1:x, 0:1:y, 0:1:z]
    coords = array([X, Y, Z])
    coeffs = ndimage.spline_filter(profs)
    X, Y, Z = mgrid[0:1:8j, 0:1:8j, 0:1:8j]
Example #20
def check_candidates(header_id, versionnum, directory, dbname='common-copy'):
    """Check candidates in common DB.

        Inputs:
            header_id: header_id number for this beam, as returned by
                        spHeaderLoader/header.upload_header
            versionnum: A combination of the githash values from 
                        PRESTO and from the pipeline. 
            directory: The directory containing results from the pipeline.
            dbname: Name of database to connect to, or a database
                        connection to use (Default: 'common-copy').
        Output:
            match: Boolean value. True if all candidates and plots match what
                    is in the DB, False otherwise.
    """
    # find *.accelcands file
    candlists = glob.glob(os.path.join(directory, "*.accelcands"))

    if len(candlists) != 1:
        raise PeriodicityCandidateError("Wrong number of candidate lists found (%d)!" % \
                                            len(candlists))

    # Get list of candidates from *.accelcands file
    candlist = accelcands.parse_candlist(candlists[0])
    minsigma = config.searching.to_prepfold_sigma
    foldedcands = [c for c in candlist if c.sigma > minsigma]
    foldedcands = foldedcands[:config.searching.max_cands_to_fold]
    foldedcands.sort(reverse=True)  # Sort by descending sigma

    # Create temporary directory
    tempdir = tempfile.mkdtemp(suffix="_tmp", prefix="PALFA_pfds_")
    tarfns = glob.glob(os.path.join(directory, "*_pfd.tgz"))
    if len(tarfns) != 1:
        raise PeriodicityCandidateError("Wrong number (%d) of *_pfd.tgz " \
                                         "files found in %s" % (len(tarfns), \
                                            directory))

    tar = tarfile.open(tarfns[0])
    try:
        tar.extractall(path=tempdir)
    except IOError:
        if os.path.isdir(tempdir):
            shutil.rmtree(tempdir)
        raise PeriodicityCandidateError("Error while extracting pfd files " \
                                        "from tarball (%s)!" % tarfns[0])
    finally:
        tar.close()
    # Loop over candidates that were folded
    matches = []
    if isinstance(dbname, database.Database):
        db = dbname
    else:
        db = database.Database(dbname)
    for ii, c in enumerate(foldedcands):
        basefn = "%s_ACCEL_Cand_%d" % (c.accelfile.replace("ACCEL_", "Z"), \
                                    c.candnum)
        pfdfn = os.path.join(tempdir, basefn + ".pfd")
        pngfn = os.path.join(directory, basefn + ".pfd.png")

        pfd = prepfold.pfd(pfdfn)

        try:
            cand = PeriodicityCandidate(header_id, ii+1, pfd, c.snr, \
                                    c.cpow, c.ipow, len(c.dmhits), \
                                    c.numharm, versionnum, c.sigma)
        except Exception:
            raise PeriodicityCandidateError("PeriodicityCandidate could not be " \
                                            "created (%s)!" % pfdfn)

        matches.append(cand.compare_with_db(dbname))
        if not matches[-1]:
            break

        # Get candidate's ID number from common DB
        db.execute("SELECT pdm_cand_id " \
                   "FROM pdm_candidates AS c " \
                   "LEFT JOIN versions AS v ON v.version_id = c.version_id " \
                   "WHERE c.header_id=%d AND c.cand_num=%d " \
                        "AND v.version_number='%s'" % \
                        (header_id, cand.cand_num, versionnum))
        # For cand.compare_with_db() to return True there must be a unique
        # entry in the common DB matching this candidate
        r = db.cursor.fetchone()
        cand_id = r[0]

        pfdplot = PeriodicityCandidatePFD(cand_id, pfdfn)
        matches.append(pfdplot.compare_with_db(dbname))
        if not matches[-1]:
            break

        pngplot = PeriodicityCandidatePNG(cand_id, pngfn)
        matches.append(pngplot.compare_with_db(dbname))
        if not matches[-1]:
            break

    if type(dbname) == types.StringType:
        db.close()
    shutil.rmtree(tempdir)
    return all(matches)
Example #21
def get_candidates(versionnum, directory, header_id=None):
    """Upload candidates to common DB.

        Inputs:
            versionnum: A combination of the githash values from 
                        PRESTO and from the pipeline. 
            directory: The directory containing results from the pipeline.
            header_id: header_id number for this beam, as returned by
                        spHeaderLoader/header.upload_header (default=None)

        Output:
            cands: List of candidates.
    """
    # find *.accelcands file    
    candlists = glob.glob(os.path.join(directory, "*.accelcands"))
                                                
    if len(candlists) != 1:
        raise PeriodicityCandidateError("Wrong number of candidate lists found (%d)!" % \
                                            len(candlists))

    # Get list of candidates from *.accelcands file
    candlist = accelcands.parse_candlist(candlists[0])
    # find the search_params.txt file
    paramfn = os.path.join(directory, 'search_params.txt')
    if os.path.exists(paramfn):
        tmp, params = {}, {}
        execfile(paramfn, tmp, params)
    else:
        raise PeriodicityCandidateError("Search parameter file doesn't exist!")
    minsigma = params['to_prepfold_sigma']
    foldedcands = [c for c in candlist \
                    if c.sigma > params['to_prepfold_sigma']]
    foldedcands = foldedcands[:params['max_cands_to_fold']]
    foldedcands.sort(reverse=True) # Sort by descending sigma

        
    # Create temporary directory
    tempdir = tempfile.mkdtemp(suffix="_tmp", prefix="PALFA_pfds_")

    if foldedcands:

        tarfns = glob.glob(os.path.join(directory, "*_pfd.tgz"))
        if len(tarfns) != 1:
            raise PeriodicityCandidateError("Wrong number (%d) of *_pfd.tgz " \
                                             "files found in %s" % (len(tarfns), \
                                                directory))
        
        tar = tarfile.open(tarfns[0])
        try:
            tar.extractall(path=tempdir)
        except IOError:
            if os.path.isdir(tempdir):
                shutil.rmtree(tempdir)
            raise PeriodicityCandidateError("Error while extracting pfd files " \
                                            "from tarball (%s)!" % tarfns[0])
        finally:
            tar.close()

    # Loop over candidates that were folded
    cands = []
    for ii, c in enumerate(foldedcands):
        basefn = "%s_ACCEL_Cand_%d" % (c.accelfile.replace("ACCEL_", "Z"), \
                                    c.candnum)
        pfdfn = os.path.join(tempdir, basefn+".pfd")
        pngfn = os.path.join(directory, basefn+".pfd.png")
        
        pfd = prepfold.pfd(pfdfn)
        
        try:
            cand = PeriodicityCandidate(ii+1, pfd, c.snr, \
                                    c.cpow, c.ipow, len(c.dmhits), \
                                    c.numharm, versionnum, c.sigma, \
                                    header_id=header_id)
        except Exception:
            raise PeriodicityCandidateError("PeriodicityCandidate could not be " \
                                            "created (%s)!" % pfdfn)


        cand.add_dependent(PeriodicityCandidatePFD(pfdfn))
        cand.add_dependent(PeriodicityCandidatePNG(pngfn))
        cands.append(cand)
        
    shutil.rmtree(tempdir)
    return cands
Example #22
def main():
    pfdfn = sys.argv[1]

    pfd = prepfold.pfd(pfdfn)

    cand = candidate.Candidate(pfd.topo_p1, pfd.bary_p1, pfd.bestdm, \
                        psr_utils.ra_to_rad(pfd.rastr)*psr_utils.RADTODEG, \
                        psr_utils.dec_to_rad(pfd.decstr)*psr_utils.RADTODEG, \
                        pfdfn)

    print "Loaded %s" % cand.pfdfn
    print "    Best topo period (s): %f" % cand.topo_period
    print "    Best bary period (s): %f" % cand.bary_period
    print "    Best DM (cm^-3/pc): %f" % cand.dm
    print "    RA (J2000 - deg): %f" % cand.raj_deg
    print "    Dec (J2000 - deg): %f" % cand.decj_deg

    print "-"*10
    pprint.pprint(cand.__dict__)
    print "-"*10

    from rating_classes import freq_vs_phase
    fvph = freq_vs_phase.FreqVsPhaseClass()
    fvph.add_data(cand)

    print "Added freq vs phase data to cand"
    pprint.pprint(cand.__dict__)
    print "-"*10

    pprint.pprint(cand.freq_vs_phase.__dict__)

    print "Check if freq_vs_phase rotates out and back to same array"
    orig = cand.freq_vs_phase.data.copy()
    orig_dm = cand.freq_vs_phase.curr_dm
    cand.freq_vs_phase.dedisperse(0)
    mod = cand.freq_vs_phase.data.copy()
    cand.freq_vs_phase.dedisperse(orig_dm)
    new = cand.freq_vs_phase.data.copy()

    print "Compare orig/mod"
    print (orig == mod).all()
    print "Compare mod/new"
    print (mod == new).all()
    print "Compare orig/new"
    print (orig == new).all()

    print "Compare with best prof"
    # Create best prof file
    print "show_pfd -noxwin %s" % cand.info['pfdfn']
    subprocess.call("show_pfd -noxwin %s" % cand.info['pfdfn'], shell=True, stdout=open(os.devnull, 'w'))
    pfd_bestprof = np.loadtxt(os.path.split(cand.pfd.pfd_filename+".bestprof")[-1], usecols=(1,))

    cand.freq_vs_phase.dedisperse(cand.pfd.bestdm)
    fvph_prof = cand.freq_vs_phase.data.sum(axis=0).squeeze()
    print test_profiles(pfd_bestprof, fvph_prof, 1e-6)
    
    print "Testing dedispersion"
    for dm in np.random.randint(0, 1000, size=10):
        print "DM: %g" % dm,
        cand.freq_vs_phase.dedisperse(dm)
        fvph_prof = cand.freq_vs_phase.get_profile()

        cand.pfd.dedisperse(dm, doppler=1)
        cand.pfd.adjust_period()
        pfd_prof = cand.pfd.time_vs_phase().sum(axis=0).squeeze()
        print test_profiles(fvph_prof, pfd_prof, 1e-6)
        plt.figure(dm)
        plt.plot(fvph_prof, label="FvsPh")
        plt.plot(pfd_prof, label="PFD")
        plt.title("DM: %g" % dm)
        plt.legend(loc='best')
    plt.show()
Example #23
#!/usr/bin/env python
from __future__ import print_function
import sys, prepfold

if len(sys.argv) == 1:
    sys.stderr.write("""usage:  pfd_for_timing.py PFDFILES\n
    This script prints 'true' or 'false' for each .pfd file,
    depending on whether it can be used for timing via get_TOAs.py.\n""")
    sys.exit(0)

for pfdfile in sys.argv[1:]:
    try:
        pfd = prepfold.pfd(pfdfile)
        if pfd.use_for_timing():
            print("%s: true" % pfdfile)
        else:
            print("%s: false" % pfdfile)
    except:
        sys.stderr.write("Error:  Can't check '%s'\n" % pfdfile)
Example #24
            for subs in a.split(','):
                if (subs.find("-") > 0):
                    lo, hi = subs.split("-")
                    kill.extend(range(int(lo), int(hi)+1))
                else:
                    kill.append(int(subs))
        if o in ("-i", "--kints"):
            for ints in a.split(','):
                if (ints.find("-") > 0):
                    lo, hi = ints.split("-")
                    kints.extend(range(int(lo), int(hi)+1))
                else:
                    kints.append(int(ints))

    # Read the prepfold output file and the binary profiles
    fold_pfd = pfd(sys.argv[-1])
    
    # Read key information from the bestprof file
    if fold_pfd.bestprof:
        fold = fold_pfd.bestprof
    else:
        sys.stderr.write("Warning:  Could not open the bestprof file!")
    timestep_sec = fold.T / numtoas
    timestep_day = timestep_sec / SECPERDAY
    fold.epoch = fold.epochi+fold.epochf

    # Over-ride the DM that was used during the fold
    if (DM!=0.0):
        fold_pfd.bestdm = DM
    if (fold_pfd.numchan==1 and DM==0.0 and events):
        fold_pfd.bestdm = 0.0
Example #25
def check_candidates(header_id, versionnum, directory, dbname='common-copy'):
    """Check candidates in common DB.

        Inputs:
            header_id: header_id number for this beam, as returned by
                        spHeaderLoader/header.upload_header
            versionnum: A combination of the githash values from 
                        PRESTO and from the pipeline. 
            directory: The directory containing results from the pipeline.
            dbname: Name of database to connect to, or a database
                        connection to use (Default: 'common-copy').
        Output:
            match: Boolean value. True if all candidates and plots match what
                    is in the DB, False otherwise.
    """
    # find *.accelcands file    
    candlists = glob.glob(os.path.join(directory, "*.accelcands"))
                                                
    if len(candlists) != 1:
        raise PeriodicityCandidateError("Wrong number of candidate lists found (%d)!" % \
                                            len(candlists))

    # Get list of candidates from *.accelcands file
    candlist = accelcands.parse_candlist(candlists[0])
    minsigma = config.searching.to_prepfold_sigma
    foldedcands = [c for c in candlist if c.sigma > minsigma]
    foldedcands = foldedcands[:config.searching.max_cands_to_fold]
    foldedcands.sort(reverse=True) # Sort by descending sigma
    
    # Create temporary directory
    tempdir = tempfile.mkdtemp(suffix="_tmp", prefix="PALFA_pfds_")
    tarfns = glob.glob(os.path.join(directory, "*_pfd.tgz"))
    if len(tarfns) != 1:
        raise PeriodicityCandidateError("Wrong number (%d) of *_pfd.tgz " \
                                         "files found in %s" % (len(tarfns), \
                                            directory))
    
    tar = tarfile.open(tarfns[0])
    try:
        tar.extractall(path=tempdir)
    except IOError:
        if os.path.isdir(tempdir):
            shutil.rmtree(tempdir)
        raise PeriodicityCandidateError("Error while extracting pfd files " \
                                        "from tarball (%s)!" % tarfns[0])
    finally:
        tar.close()
    # Loop over candidates that were folded
    matches = []
    if isinstance(dbname, database.Database):
        db = dbname
    else:
        db = database.Database(dbname)
    for ii, c in enumerate(foldedcands):
        basefn = "%s_ACCEL_Cand_%d" % (c.accelfile.replace("ACCEL_", "Z"), \
                                    c.candnum)
        pfdfn = os.path.join(tempdir, basefn+".pfd")
        pngfn = os.path.join(directory, basefn+".pfd.png")
        
        pfd = prepfold.pfd(pfdfn)
        
        try:
            cand = PeriodicityCandidate(header_id, ii+1, pfd, c.snr, \
                                    c.cpow, c.ipow, len(c.dmhits), \
                                    c.numharm, versionnum, c.sigma)
        except Exception:
            raise PeriodicityCandidateError("PeriodicityCandidate could not be " \
                                            "created (%s)!" % pfdfn)
        
        matches.append(cand.compare_with_db(dbname))
        if not matches[-1]:
            break
        
        # Get candidate's ID number from common DB
        db.execute("SELECT pdm_cand_id " \
                   "FROM pdm_candidates AS c " \
                   "LEFT JOIN versions AS v ON v.version_id = c.version_id " \
                   "WHERE c.header_id=%d AND c.cand_num=%d " \
                        "AND v.version_number='%s'" % \
                        (header_id, cand.cand_num, versionnum))
        # For cand.compare_with_db() to return True there must be a unique
        # entry in the common DB matching this candidate
        r = db.cursor.fetchone()
        cand_id = r[0]
        
        pfdplot = PeriodicityCandidatePFD(cand_id, pfdfn)
        matches.append(pfdplot.compare_with_db(dbname))
        if not matches[-1]:
            break

        pngplot = PeriodicityCandidatePNG(cand_id, pngfn)
        matches.append(pngplot.compare_with_db(dbname))
        if not matches[-1]:
            break
        
    if type(dbname) == types.StringType:
        db.close()
    shutil.rmtree(tempdir)
    return all(matches)
Example #26
    sumprof = Num.zeros(numbins, dtype='d')

    base_T = None
    base_BW = None
    orig_fctr = None
    Tprerfi = 0.0
    Tpostrfi = 0.0
    avg_S = 0.0

    # Step through the profiles and determine the offsets
    for pfdfilenm, killsubs, killints in zip(pfdfilenms, killsubss, killintss):

        print "\n  Processing '%s'..."%pfdfilenm

        # Read the fold data and de-disperse at the requested DM
        current_pfd = pfd(pfdfilenm)
        current_pfd.dedisperse(DM)
        T = current_pfd.T
        Tprerfi += T
        BW = current_pfd.nsub*current_pfd.subdeltafreq
        fctr = current_pfd.lofreq + 0.5*BW

        # If there are subbands to kill, kill em'
        if killsubs is not None:
            print "    killing subbands:  ", killsubs
            current_pfd.kill_subbands(killsubs)
            BW *= (current_pfd.nsub-len(killsubs))/float(current_pfd.nsub)
        # If there are intervals to kill, kill em'
        if killints is not None:
            print "    killing intervals: ", killints
            current_pfd.kill_intervals(killints)
Example #27
def calc_features_from_pfd(pfd_filepath):

    pfd_data = prepfold.pfd(str(pfd_filepath))

    if pfd_filepath.parent.name == 'positive':
        label = 1
    elif pfd_filepath.parent.name == 'negative':
        label = 0
    else:
        label = -1
        # raise RuntimeError('unable to decide the label of pfd file: {}'.format(
        #     str(pfd_filepath)))

    pfd_data.dedisperse()

    #### As done in: prepfold.pfd.plot_sumprofs
    profile = pfd_data.sumprof
    profile = normalise_1d(profile)
    ####

    profile_mean = np.mean(profile)
    profile_std_dev = np.std(profile)
    profile_skewness = scipy.stats.skew(profile)
    profile_excess_kurtosis = scipy.stats.kurtosis(profile)

    profiles_sum_axis0 = pfd_data.profs.sum(0)

    #### As done in: prepfold.pfd.plot_chi2_vs_DM
    loDM = 0
    hiDM = pfd_data.numdms
    N = pfd_data.numdms
    profs = profiles_sum_axis0.copy()  # = pfd_data.profs.sum(0)
    DMs = psr_utils.span(loDM, hiDM, N)
    chis = np.zeros(N, dtype='f')
    subdelays_bins = pfd_data.subdelays_bins.copy()
    for ii, DM in enumerate(DMs):
        subdelays = psr_utils.delay_from_DM(DM, pfd_data.barysubfreqs)
        hifreqdelay = subdelays[-1]
        subdelays = subdelays - hifreqdelay
        delaybins = subdelays * pfd_data.binspersec - subdelays_bins
        new_subdelays_bins = np.floor(delaybins + 0.5)
        for jj in range(pfd_data.nsub):
            profs[jj] = psr_utils.rotate(profs[jj],
                                         int(new_subdelays_bins[jj]))
        subdelays_bins += new_subdelays_bins
        sumprof = profs.sum(0)
        chis[ii] = pfd_data.calc_redchi2(prof=sumprof, avg=pfd_data.avgprof)
    ####

    best_dm = pfd_data.bestdm

    # crop_radius = 100
    # best_dm_index = np.searchsorted(DMs, best_dm)  # Not accurate, but close.
    # bloated_chis = np.insert(chis, N, np.full(crop_radius, chis[-1]))
    # bloated_chis = np.insert(bloated_chis, 0, np.full(crop_radius, chis[0]))
    # cropped_chis = bloated_chis[ best_dm_index : best_dm_index+2*crop_radius ]
    # chis = cropped_chis

    chis_mean = np.mean(chis)
    chis_std_dev = np.std(chis)
    chis_skewness = scipy.stats.skew(chis)
    chis_excess_kurtosis = scipy.stats.kurtosis(chis)

    #### As done in: prepfold.pfd.plot_intervals
    intervals = pfd_data.profs.sum(1)
    intervals = normalise_2d_rowwise(intervals)
    ####

    #### As done in: prepfold.pfd.plot_subbands
    subbands = profiles_sum_axis0.copy()  # = pfd_data.profs.sum(0)
    subbands = normalise_2d_rowwise(subbands)
    ####

    return (label, profile_mean, profile_std_dev, profile_skewness,
            profile_excess_kurtosis, chis_mean, chis_std_dev, chis_skewness,
            chis_excess_kurtosis, best_dm, profile, intervals, subbands, chis)
Example #28
def upload_candidates(header_id, versionnum, directory, verbose=False, \
                        dry_run=False, *args, **kwargs):
    """Upload candidates to common DB.

        Inputs:
            header_id: header_id number for this beam, as returned by
                        spHeaderLoader/header.upload_header
            versionnum: A combination of the githash values from 
                        PRESTO and from the pipeline. 
            directory: The directory containing results from the pipeline.
            verbose: An optional boolean value that determines if information 
                        is printed to stdout.
            dry_run: An optional boolean value. If True no connection to DB
                        will be made and DB command will not be executed.
                        (If verbose is True DB command will be printed 
                        to stdout.)

            *** NOTE: Additional arguments are passed to the uploader function.

        Outputs:
            cand_ids: List of candidate IDs corresponding to these candidates
                        in the common DB. (Or a list of None values if
                        dry_run is True).
    """
    # find *.accelcands file    
    candlists = glob.glob(os.path.join(directory, "*.accelcands"))
                                                
    if len(candlists) != 1:
        raise PeriodicityCandidateError("Wrong number of candidate lists found (%d)!" % \
                                            len(candlists))

    # Get list of candidates from *.accelcands file
    candlist = accelcands.parse_candlist(candlists[0])
    minsigma = config.searching.to_prepfold_sigma
    foldedcands = [c for c in candlist if c.sigma > minsigma]
    foldedcands = foldedcands[:config.searching.max_cands_to_fold]
    foldedcands.sort(reverse=True) # Sort by descending sigma
    
    # Create temporary directory
    tempdir = tempfile.mkdtemp(suffix="_tmp", prefix="PALFA_pfds_")
    tarfns = glob.glob(os.path.join(directory, "*_pfd.tgz"))
    if len(tarfns) != 1:
        raise PeriodicityCandidateError("Wrong number (%d) of *_pfd.tgz " \
                                         "files found in %s" % (len(tarfns), \
                                            directory))
    tar = tarfile.open(tarfns[0])
    tar.extractall(path=tempdir)
    tar.close()
    # Loop over candidates that were folded
    results = []
    for ii, c in enumerate(foldedcands):
        basefn = "%s_ACCEL_Cand_%d" % (c.accelfile.replace("ACCEL_", "Z"), \
                                    c.candnum)
        pfdfn = os.path.join(tempdir, basefn+".pfd")
        pngfn = os.path.join(directory, basefn+".pfd.png")
        
        pfd = prepfold.pfd(pfdfn)
        
        try:
            cand = PeriodicityCandidate(header_id, ii+1, pfd, c.snr, \
                                    c.cpow, c.ipow, len(c.dmhits), \
                                    c.numharm, versionnum, c.sigma)
        except Exception:
            raise PeriodicityCandidateError("PeriodicityCandidate could not be " \
                                            "created (%s)!" % pfdfn)

        if dry_run:
            cand.get_upload_sproc_call()
            if verbose:
                print cand
            results.append(None)
            cand_id = -1
        else:
            cand_id = cand.upload(*args, **kwargs)
        
        pfdplot = PeriodicityCandidatePFD(cand_id, pfdfn)
        pngplot = PeriodicityCandidatePNG(cand_id, pngfn)
        if dry_run:
            pfdplot.get_upload_sproc_call()
            pngplot.get_upload_sproc_call()
            if verbose:
                print pfdplot
                print pngplot
        else:
            pfdplot.upload(*args, **kwargs)
            pngplot.upload(*args, **kwargs)
        
    shutil.rmtree(tempdir)
    return results
Example #29
def get_candidates(versionnum, directory, header_id=None, timestamp_mjd=None, inst_cache=None):
    """Upload candidates to common DB.

        Inputs:
            versionnum: A combination of the githash values from 
                        PRESTO and from the pipeline. 
            directory: The directory containing results from the pipeline.
            header_id: header_id number for this beam, as returned by
                        spHeaderLoader/header.upload_header (default=None)
            timestamp_mjd: mjd timestamp for this observation (default=None).
            inst_cache: ratings2 RatingInstanceIDCache instance.

        Output:
            cands: List of candidates.
            tempdir: Path of the temporary directory where the PFDs were
                     untarred; returned so it can be deleted after a
                     successful PFD upload.
    """
    # find *.accelcands file    
    candlists = glob.glob(os.path.join(directory, "*.accelcands"))
    ffa_candlists = glob.glob(os.path.join(directory, "*.ffacands"))
                                                
    if len(candlists) != 1 or len(ffa_candlists) != 1:
        raise PeriodicityCandidateError("Wrong number of candidate lists found: " \
                                        "accel candlists (%d) and ffa candlists (%d)!" % \
                                            (len(candlists), len(ffa_candlists)))

    # Get list of candidates from *.accelcands file
    candlist = accelcands.parse_candlist(candlists[0])
    ffa_candlist = ffacands.parse_candlist(ffa_candlists[0])
    # find the search_params.txt file
    paramfn = os.path.join(directory, 'search_params.txt')
    if os.path.exists(paramfn):
        tmp, params = {}, {}
        execfile(paramfn, tmp, params)
    else:
        raise PeriodicityCandidateError("Search parameter file doesn't exist!")
    minsigma = params['to_prepfold_sigma']
    foldedcands = [c for c in candlist \
                    if c.sigma > params['to_prepfold_sigma']]
    foldedcands = foldedcands[:params['max_accel_cands_to_fold']]
    foldedcands.sort(reverse=True) # Sort by descending sigma

    ffa_foldedcands = [c for c in ffa_candlist \
                         if c.snr > params['to_prepfold_sigma']] 
    #                if c.snr > params['ffa_snr']]# need something for ffa
    ffa_foldedcands = ffa_foldedcands[:params['max_ffa_cands_to_fold']]
    ffa_foldedcands.sort(reverse=True) # Sort by descending snr
    # Open attribute file
    attrib_fn = os.path.join(directory, 'candidate_attributes.txt')
    attribs = np.loadtxt(attrib_fn,dtype='S')
        
    # Create temporary directory
    N = 6
    prefix = "/localscratch/PALFA_pfds_"
    suffix = "_tmp/"
    String = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(N))
    tempdir = prefix+String+suffix
    os.makedirs(tempdir)
    #tempdir = tempname

    if foldedcands or ffa_foldedcands:

        pfd_tarfns = glob.glob(os.path.join(directory, "*_pfd.tgz"))

        if len(pfd_tarfns) != 1:
            raise PeriodicityCandidateError("Wrong number (%d) of *_pfd.tgz " \
                                             "files found in %s" % (len(pfd_tarfns), \
                                                directory))

        rating_tarfns = glob.glob(os.path.join(directory, "*_pfd_rat.tgz"))
        if len(rating_tarfns) != 1:
            raise PeriodicityCandidateError("Wrong number (%d) of *_pfd_rat.tgz " \
                                             "files found in %s" % (len(rating_tarfns), \
                                                directory))

        mjd = int(timestamp_mjd)
        remote_pfd_base = os.path.join(config.upload.pfd_ftp_dir,str(mjd)) 
        # Note: str.rstrip strips a *set* of characters, not a suffix,
        # so slice off the '_pfd.tgz' suffix explicitly.
        remote_pfd_dir = os.path.join(remote_pfd_base,\
                                      os.path.basename(pfd_tarfns[0])[:-len('_pfd.tgz')])
        pfd_tarball = PFDTarball(pfd_tarfns[0],remote_pfd_base,tempdir)
        pfd_tempdir, pfd_list = pfd_tarball.extract()
        
        # extract ratings tarball 
        tar = tarfile.open(rating_tarfns[0])
        try:
            tar.extractall(path=tempdir)
        except IOError:
            if os.path.isdir(tempdir):
                shutil.rmtree(tempdir)
            raise PeriodicityCandidateError("Error while extracting pfd files " \
                                            "from tarball (%s)!" % rating_tarfns[0])
        finally:
            tar.close()

    # Loop over candidates that were folded
    cands = []
    cands.append(pfd_tarball)
    counter = 0
    for ii, c in enumerate(foldedcands):
        basefn = "%s_ACCEL_Cand_%d" % (c.accelfile.replace("ACCEL_", "Z"), \
                                    c.candnum)
        pfdfn = os.path.join(pfd_tempdir, basefn+".pfd")
        pngfn = os.path.join(directory, basefn+".pfd.png")
        ratfn = os.path.join(tempdir, basefn+".pfd.rat")

        pfd = prepfold.pfd(pfdfn)
        cand_attribs = dict(attribs[attribs[:,0] == basefn+".pfd"][:,1:])
        
        try:
            cand = PeriodicityCandidate(ii+1, pfd, c.snr, \
                                    c.cpow, c.ipow, len(c.dmhits), \
                                    c.numharm, versionnum, c.sigma, \
                                    c.period, c.dm, cand_attribs, c.search_type, \
                                    header_id=header_id)
        except Exception:
            raise PeriodicityCandidateError("PeriodicityCandidate could not be " \
                                            "created (%s)!" % pfdfn)
        pfd_size = dict(pfd_list)[pfdfn]
        cand.add_dependent(PeriodicityCandidatePFD(pfdfn, pfd_size, remote_pfd_dir=remote_pfd_dir))
        cand.add_dependent(PeriodicityCandidatePNG(pngfn))

        ratvals = ratings2.rating_value.read_file(ratfn)
        cand.add_dependent(PeriodicityCandidateRating(ratvals,inst_cache=inst_cache))
        cands.append(cand)
        counter +=1
        
    for ii, c in enumerate(ffa_foldedcands):
        basefn = "%s%.2fms_Cand" % (c.ffafile.replace("_cands.ffa","_ffa_"), c.period*1000)
        pfdfn = os.path.join(pfd_tempdir, basefn+".pfd")
        pngfn = os.path.join(directory, basefn+".pfd.png")
        ratfn = os.path.join(tempdir, basefn+".pfd.rat")

        pfd = prepfold.pfd(pfdfn)
        cand_attribs = dict(attribs[attribs[:,0] == basefn+".pfd"][:,1:])
        
        try:
            cand = PeriodicityCandidate(counter+ii+1, pfd, c.snr, \
                                    c.cpow, c.ipow, len(c.dmhits), \
                                    c.numharm, versionnum, c.sigma, \
                                    c.period, c.dm, cand_attribs, c.search_type, \
                                    header_id=header_id)
        except Exception:
            raise PeriodicityCandidateError("PeriodicityCandidate could not be " \
                                            "created (%s)!" % pfdfn)
        pfd_size = dict(pfd_list)[pfdfn]
        cand.add_dependent(PeriodicityCandidatePFD(pfdfn, pfd_size, remote_pfd_dir=remote_pfd_dir))
        cand.add_dependent(PeriodicityCandidatePNG(pngfn))

        ratvals = ratings2.rating_value.read_file(ratfn)
        cand.add_dependent(PeriodicityCandidateRating(ratvals,inst_cache=inst_cache))
        cands.append(cand)
        
    #shutil.rmtree(tempdir)
    return cands,tempdir
Example #30
import numpy as np
from prepfold import pfd
import matplotlib.pyplot as plt
from scipy.interpolate import RectBivariateSpline as interp
from scipy import ndimage, array, ogrid, mgrid
import sys, os

if __name__ == '__main__':
    f1 = sys.argv[1]
    f2 = sys.argv[2]
    if not f1.endswith('pfd'):
        print 'file name %s does not end with pfd' % (f1)
        sys.exit(1)

    pfdfile = pfd(f1)
    pfdfile.dedisperse()
    profs = pfdfile.profs
    pshape = profs.shape
    x, y, z = profs.shape
    data = profs.reshape((-1,1))
    del pfdfile

    mean = np.mean(data)
    var = np.std(data)
    data = (data-mean)/var
    profs = data.reshape(pshape)
    X, Y, Z = ogrid[0:1:x,0:1:y,0:1:z]
    coords = array([X, Y, Z])
    coeffs = ndimage.spline_filter(profs)
    X, Y, Z = mgrid[0:1:8j,0:1:8j,0:1:8j]
Example #31
def main():
    pfdfn = sys.argv[1]

    pfd = prepfold.pfd(pfdfn)

    cand = candidate.Candidate(pfd.topo_p1, pfd.bary_p1, pfd.bestdm, \
                        psr_utils.ra_to_rad(pfd.rastr)*psr_utils.RADTODEG, \
                        psr_utils.dec_to_rad(pfd.decstr)*psr_utils.RADTODEG, \
                        pfdfn)

    print "Loaded %s" % cand.pfdfn
    print "    Best topo period (s): %f" % cand.topo_period
    print "    Best bary period (s): %f" % cand.bary_period
    print "    Best DM (cm^-3/pc): %f" % cand.dm
    print "    RA (J2000 - deg): %f" % cand.raj_deg
    print "    Dec (J2000 - deg): %f" % cand.decj_deg

    print "-"*10
    pprint.pprint(cand.__dict__)
    print "-"*10

    intstats.add_data(cand)

    print "Added subint stats to cand"
    pprint.pprint(cand.__dict__)
    print "-"*10

    print "Subint stats object:"
    pprint.pprint(cand.subint_stats.__dict__)
    print "-"*10

    print "Sub-int Stats:"
    print "On-fraction (area): %g" % cand.subint_stats.get_on_frac()
    print "On-fraction (peak): %g" % cand.subint_stats.get_peak_on_frac()
    print "Area SNR stddev: %g" % cand.subint_stats.get_snr_stddev()
    print "Peak SNR stddev: %g" % cand.subint_stats.get_peak_snr_stddev()
    print "Avg correlation coefficient: %g" % cand.subint_stats.get_avg_corrcoef()
    
    mgauss = cand.multigaussfit
    tvph = cand.time_vs_phase
    onpulse_region = mgauss.get_onpulse_region(tvph.nbin)
    offpulse_region = np.bitwise_not(onpulse_region)
    m = np.ma.masked_array(onpulse_region, mask=offpulse_region)
    onpulse_ranges = np.ma.notmasked_contiguous(m)
    print onpulse_ranges
    prof = utils.get_scaled_profile(cand.profile, cand.pfd.varprof)
    imax = plt.axes([0.1,0.1,0.5,0.7])
    plt.imshow(scale2d(tvph.data), interpolation='nearest', \
            aspect='auto', origin='lower', cmap=matplotlib.cm.gist_yarg)
    for opr in onpulse_ranges:
        onpulse_bin = (opr.start, opr.stop)
        if onpulse_bin[0] < onpulse_bin[1]:
            plt.axvspan(onpulse_bin[0], onpulse_bin[1]+1, fc='g', alpha=0.2, lw=0)
        else:
            plt.axvspan(onpulse_bin[0], tvph.nbin, fc='g', alpha=0.2, lw=0)
            plt.axvspan(-0.5, onpulse_bin[1]+1, fc='g', alpha=0.2, lw=0)
        

    plt.axes([0.1,0.8,0.5,0.15], sharex=imax)
    plt.plot(prof, 'k-', label='Profile')
    plt.plot(mgauss.make_gaussians(len(prof)), 'r--', label='Fit')
    for ii, opr in enumerate(onpulse_ranges):
        onpulse_bin = (opr.start, opr.stop)
        if ii == 0:
            lbl = 'On-pulse'
        else:
            lbl = '_nolabel_'
        if onpulse_bin[0] < onpulse_bin[1]:
            plt.axvspan(onpulse_bin[0], onpulse_bin[1]+1, fc='g', alpha=0.2, lw=0, label=lbl)
        else:
            plt.axvspan(onpulse_bin[0], tvph.nbin, fc='g', alpha=0.2, lw=0, label=lbl)
            plt.axvspan(-0.5, onpulse_bin[1]+1, fc='g', alpha=0.2, lw=0, label='_nolabel_')
    plt.legend(loc='best', prop=dict(size='xx-small'))
    
    plt.axes([0.6,0.1,0.15,0.7], sharey=imax)
    snrs = cand.subint_stats.snrs
    plt.plot(snrs, np.arange(len(snrs)), 'mo', label='SNR (area)')
    plt.axvline(5.0, c='m', ls='--', lw=2)
    peaksnrs = cand.subint_stats.peak_snrs
    plt.plot(peaksnrs, np.arange(len(peaksnrs)), 'cD', label='SNR (peak)')
    plt.axvline(3.0, c='c', ls='--', lw=2)
    plt.legend(loc='best', prop=dict(size='xx-small'))
   
    plt.axes([0.75,0.1,0.15,0.7], sharey=imax)
    corrcoefs = cand.subint_stats.corr_coefs
    plt.plot(corrcoefs, np.arange(len(corrcoefs)), 'k.')
    imax.set_xlim(-0.5,tvph.nbin-0.5)
    imax.set_ylim(-0.5,tvph.nsubint-0.5)
    plt.show()
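Exemple #32
0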
#!/usr/bin/env python
from __future__ import print_function
import sys, prepfold

if len(sys.argv) == 1:
    sys.stderr.write("""usage:  pfd_for_timing.py PFDFILES\n
    This script returns 'true' or 'false' if a .pfd file can be
    used for timing via get_TOAs.py or not.\n""")
    sys.exit(0)

for pfdfile in sys.argv[1:]:
    try:
        pfd = prepfold.pfd(pfdfile)
        if pfd.use_for_timing():
            print("%s: true"%pfdfile)
        else:
            print("%s: false"%pfdfile)
    except:
        sys.stderr.write("Error:  Can't check '%s'\n"%pfdfile)
def search_job(job):
    """Search the observation defined in the obs_info
        instance 'job'.
    """
    # Use whatever .zaplist is found in the current directory
    zaplist = glob.glob("*.zaplist")[0]
    print "Using %s as zaplist" % zaplist
    if config.searching.use_subbands and config.searching.fold_rawdata:
        # make a directory to keep subbands so they can be used to fold later
        try:
            os.makedirs(os.path.join(job.workdir, 'subbands'))
        except: pass

    # rfifind the data file
    cmd = "rfifind %s -time %.17g -o %s %s" % \
          (config.searching.datatype_flag, config.searching.rfifind_chunk_time, job.basefilenm,
           job.filenmstr)
    job.rfifind_time += timed_execute(cmd, stdout="%s_rfifind.out" % job.basefilenm)
    maskfilenm = job.basefilenm + "_rfifind.mask"
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)
    
    # Iterate over the stages of the overall de-dispersion plan
    dmstrs = []
    for ddplan in job.ddplans:

        # Iterate over the individual passes through the data file
        for passnum in range(ddplan.numpasses):
            subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])

            if config.searching.use_subbands:
                try:
                    os.makedirs(os.path.join(job.tempdir, 'subbands'))
                except: pass
    
                # Create a set of subbands
                cmd = "prepsubband %s -sub -subdm %s -downsamp %d -nsub %d -mask %s " \
                        "-o %s/subbands/%s %s" % \
                        (config.searching.datatype_flag, ddplan.subdmlist[passnum], ddplan.sub_downsamp,
                        ddplan.numsub, maskfilenm, job.tempdir, job.basefilenm,
                        job.filenmstr)
                job.subbanding_time += timed_execute(cmd, stdout="%s.subout" % subbasenm)
            
                # Now de-disperse using the subbands
                cmd = "prepsubband -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d " \
                        "-nsub %d -numout %d -o %s/%s %s/subbands/%s.sub[0-9]*" % \
                        (ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                        ddplan.dmsperpass, ddplan.dd_downsamp, ddplan.numsub,
                        psr_utils.choose_N(job.orig_N/ddplan.downsamp),
                        job.tempdir, job.basefilenm, job.tempdir, subbasenm)
                job.dedispersing_time += timed_execute(cmd, stdout="%s.prepout" % subbasenm)
            
                if config.searching.use_zerodm_sp or config.searching.use_zerodm_accel:
		    cmd = "prepsubband -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d " \
			    "-nsub %d -numout %d -zerodm -o %s/%s_zerodm %s/subbands/%s.sub[0-9]*" % \
			    (ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
			    ddplan.dmsperpass, ddplan.dd_downsamp, ddplan.numsub,
			    psr_utils.choose_N(job.orig_N/ddplan.downsamp),
			    job.tempdir, job.basefilenm, job.tempdir, subbasenm)
		    job.dedispersing_time += timed_execute(cmd, stdout="%s.prepout" % subbasenm)

            else:  # Not using subbands
                cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d " \
                        "-numout %d -nsub %d -o %s/%s %s"%\
                        (maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                        ddplan.dmsperpass, ddplan.dd_downsamp*ddplan.sub_downsamp, 
                        psr_utils.choose_N(job.orig_N/ddplan.downsamp), ddplan.numsub, 
                        job.tempdir, job.basefilenm, job.filenmstr)
                job.dedispersing_time += timed_execute(cmd)
            
            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)
                basenm = os.path.join(job.tempdir, job.basefilenm+"_DM"+dmstr)
                basenm_zerodm = os.path.join(job.tempdir, job.basefilenm+"_zerodm_DM"+dmstr)
                datnm = basenm+".dat"
                datnm_zerodm = basenm_zerodm+".dat"
                fftnm = basenm+".fft"
                infnm = basenm+".inf"

                # Do the single-pulse search
                cmd = "single_pulse_search.py -p -m %f -t %f %s"%\
                      (config.searching.singlepulse_maxwidth, \
                       config.searching.singlepulse_threshold, datnm)
                job.singlepulse_time += timed_execute(cmd)
                try:
                    shutil.move(basenm+".singlepulse", job.workdir)
                except: pass

                if config.searching.use_zerodm_sp:
		    cmd = "single_pulse_search.py -p -m %f -t %f %s"%\
			  (config.searching.singlepulse_maxwidth, \
			   config.searching.singlepulse_threshold, datnm_zerodm)
		    job.singlepulse_time += timed_execute(cmd)
		    try:
			shutil.move(basenm_zerodm+".singlepulse", job.workdir)
		    except: pass

                # FFT, zap, and de-redden
                cmd = "realfft %s"%datnm
                job.FFT_time += timed_execute(cmd)
                cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
                      (zaplist, job.baryv, fftnm)
                job.FFT_time += timed_execute(cmd)
                cmd = "rednoise %s"%fftnm
                job.FFT_time += timed_execute(cmd)
                try:
                    os.rename(basenm+"_red.fft", fftnm)
                except: pass
                
                # Do the low-acceleration search
                cmd = "accelsearch -harmpolish -numharm %d -sigma %f " \
                        "-zmax %d -flo %f %s"%\
                        (config.searching.lo_accel_numharm, \
                         config.searching.lo_accel_sigma, \
                         config.searching.lo_accel_zmax, \
                         config.searching.lo_accel_flo, fftnm)
                job.lo_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand" % config.searching.lo_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm+"_ACCEL_%d.cand" % config.searching.lo_accel_zmax, \
                                    job.workdir)
                    shutil.move(basenm+"_ACCEL_%d" % config.searching.lo_accel_zmax, \
                                    job.workdir)
                except: pass
        
                # Do the high-acceleration search
                cmd = "accelsearch -harmpolish -numharm %d -sigma %f " \
                        "-zmax %d -flo %f %s"%\
                        (config.searching.hi_accel_numharm, \
                         config.searching.hi_accel_sigma, \
                         config.searching.hi_accel_zmax, \
                         config.searching.hi_accel_flo, fftnm)
                job.hi_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand" % config.searching.hi_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm+"_ACCEL_%d.cand" % config.searching.hi_accel_zmax, \
                                    job.workdir)
                    shutil.move(basenm+"_ACCEL_%d" % config.searching.hi_accel_zmax, \
                                    job.workdir)
                except: pass

                # Move the .inf files
                try:
                    shutil.move(infnm, job.workdir)
                except: pass
                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                except: pass
                try:
                    os.remove(fftnm)
                except: pass

            if config.searching.use_subbands:
                if config.searching.fold_rawdata:
                    # Subband files are no longer needed
                    shutil.rmtree(os.path.join(job.tempdir, 'subbands'))
                else:
                    # Move subbands to workdir
                    for sub in glob.glob(os.path.join(job.tempdir, 'subbands', "*")):
                        shutil.move(sub, os.path.join(job.workdir, 'subbands'))

    # Make the single-pulse plots
    basedmb = job.basefilenm+"_DM"
    basedmb_zerodm = job.basefilenm+"_zerodm_DM"
    basedme = ".singlepulse "
    # The following will make plots for DM ranges:
    #    0-110, 100-310, 300-1000+
    dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
               basedmb+"[0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"10[0-9].[0-9][0-9]"+basedme,
               basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"30[0-9].[0-9][0-9]"+basedme,
               basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme]
    dmrangestrs = ["0-110", "100-310", "300-1000+"]
    psname = job.basefilenm+"_singlepulse.ps"
    psname_zerodm = job.basefilenm+"_zerodm_singlepulse.ps"

    if config.searching.use_zerodm_sp:
        dmglobs.extend([basedmb_zerodm+"[0-9].[0-9][0-9]"+basedme +
                        basedmb_zerodm+"[0-9][0-9].[0-9][0-9]"+basedme +
                        basedmb_zerodm+"10[0-9].[0-9][0-9]"+basedme,
                        basedmb_zerodm+"[12][0-9][0-9].[0-9][0-9]"+basedme +
                        basedmb_zerodm+"30[0-9].[0-9][0-9]"+basedme,
                        basedmb_zerodm+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
                        basedmb_zerodm+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme])
        dmrangestrs.extend(["0-110_zerodm", "100-310_zerodm", "300-1000+_zerodm"])

    for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
        dmfiles = []
        for dmg in dmglob.split():
            dmfiles += glob.glob(dmg.strip())
        # Check that there are matching files and they are not all empty
        if dmfiles and sum([os.path.getsize(f) for f in dmfiles]):
            cmd = 'single_pulse_search.py -t %f -g "%s"' % \
                (config.searching.singlepulse_plot_SNR, dmglob)
            job.singlepulse_time += timed_execute(cmd)
            if dmrangestr.endswith("zerodm"):
                os.rename(psname_zerodm,
                        job.basefilenm+"_DMs%s_singlepulse.ps" % dmrangestr)
            else:
                os.rename(psname,
                        job.basefilenm+"_DMs%s_singlepulse.ps" % dmrangestr)

    # Sift through the candidates to choose the best to fold
    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d" % config.searching.lo_accel_zmax))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, config.searching.numhits_to_fold,
                                                    dmstrs, config.searching.low_DM_cutoff)

    hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d" % config.searching.hi_accel_zmax))
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, config.searching.numhits_to_fold,
                                                    dmstrs, config.searching.low_DM_cutoff)

    all_accel_cands = lo_accel_cands + hi_accel_cands
    if len(all_accel_cands):
        all_accel_cands = sifting.remove_harmonics(all_accel_cands)
        # Note:  the candidates will be sorted in _sigma_ order, not _SNR_!
        all_accel_cands.sort(sifting.cmp_sigma)
        print "Sending candlist to stdout before writing to file"
        sifting.write_candlist(all_accel_cands)
        sys.stdout.flush()
        sifting.write_candlist(all_accel_cands, job.basefilenm+".accelcands")
        # Make sifting summary plots
        all_accel_cands.plot_goodbad()
        plt.title("%s Rejected Cands" % job.basefilenm)
        plt.savefig(job.basefilenm+".accelcands.rejects.png")
        all_accel_cands.plot_summary()
        plt.title("%s Periodicity Summary" % job.basefilenm)
        plt.savefig(job.basefilenm+".accelcands.summary.png")
        
        # Write out sifting candidate summary
        all_accel_cands.print_cand_summary(job.basefilenm+".accelcands.summary")
        # Write out sifting comprehensive report of bad candidates
        all_accel_cands.write_cand_report(job.basefilenm+".accelcands.report")
        timed_execute("gzip --best %s" % job.basefilenm+".accelcands.report")

        # Moving of results to resultsdir now happens in clean_up(...)
        # shutil.copy(job.basefilenm+".accelcands", job.outputdir)

    job.sifting_time = time.time() - job.sifting_time

    #####
    # Print some info useful for debugging
    print "Contents of workdir (%s) before folding: " % job.workdir
    for fn in os.listdir(job.workdir):
        print "    %s" % fn
    print "Contents of resultsdir (%s) before folding: " % job.outputdir
    for fn in os.listdir(job.outputdir):
        print "    %s" % fn
    print "Contents of job.tempdir (%s) before folding: " % job.tempdir
    for fn in os.listdir(job.tempdir):
        print "    %s" % fn
    sys.stdout.flush()
    #####

    # Fold the best candidates
    cands_folded = 0
    for cand in all_accel_cands:
        print "At cand %s" % str(cand)
        if cands_folded == config.searching.max_cands_to_fold:
            break
        if cand.sigma >= config.searching.to_prepfold_sigma:
            print "...folding"
            job.folding_time += timed_execute(get_folding_command(cand, job))
            cands_folded += 1
    job.num_cands_folded = cands_folded
    
    # Rate candidates
    timed_execute("rate_pfds.py --redirect-warnings --include-all -x pulse_width *.pfd")
    sys.stdout.flush()

    # Calculate some candidate attributes from pfds
    attrib_file = open('candidate_attributes.txt','w')
    for pfdfn in glob.glob("*.pfd"):
        attribs = {}
        pfd = prepfold.pfd(pfdfn)
        red_chi2 = pfd.bestprof.chi_sqr
        dof = pfd.proflen - 1
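        # Convert the folded profile's reduced chi-squared into an
        # equivalent Gaussian significance: chi2.sf gives the tail
        # probability of red_chi2*dof with dof degrees of freedom, and
        # norm.ppf maps that probability to a (negated) normal quantile.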
        attribs['prepfold_sigma'] = \
                -scipy.stats.norm.ppf(scipy.stats.chi2.sf(red_chi2*dof, dof))
        off_red_chi2 = pfd.estimate_offsignal_redchi2()
        new_red_chi2 = red_chi2 / off_red_chi2
        # prepfold sigma rescaled to deal with chi-squared suppression
        # a problem when strong rfi is present
        attribs['rescaled_prepfold_sigma'] = \
                -scipy.stats.norm.ppf(scipy.stats.chi2.sf(new_red_chi2*dof, dof))
        for key in attribs:
            attrib_file.write("%s\t%s\t%.3f\n" % (pfdfn, key, attribs[key]))
    attrib_file.close()

    # Print some info useful for debugging
    print "Contents of workdir (%s) after folding: " % job.workdir
    for fn in os.listdir(job.workdir):
        print "    %s" % fn
    print "Contents of resultsdir (%s) after folding: " % job.outputdir
    for fn in os.listdir(job.outputdir):
        print "    %s" % fn
    print "Contents of job.tempdir (%s) after folding: " % job.tempdir
    for fn in os.listdir(job.tempdir):
        print "    %s" % fn
    sys.stdout.flush()
    #####
    
    # Now step through the .ps files and convert them to .png and gzip them

    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        # The '[0]' appended to psfile tells convert to process only the 1st page
        timed_execute("convert -quality 90 %s -background white -flatten -rotate 90 +matte %s" % \
                            (psfile+"[0]", psfile[:-3]+".png"))
        timed_execute("gzip "+psfile)
    
    # Print some info useful for debugging
    print "Contents of workdir (%s) after conversion: " % job.workdir
    for fn in os.listdir(job.workdir):
        print "    %s" % fn
    print "Contents of resultsdir (%s) after conversion: " % job.outputdir
    for fn in os.listdir(job.outputdir):
        print "    %s" % fn
    print "Contents of job.tempdir (%s) after conversion: " % job.tempdir
    for fn in os.listdir(job.tempdir):
        print "    %s" % fn
    sys.stdout.flush()
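Exemple #34
0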
def main():
    pfdfn = sys.argv[1]

    pfd = prepfold.pfd(pfdfn)

    cand = candidate.Candidate(pfd.topo_p1, pfd.bary_p1, pfd.bestdm, \
                        psr_utils.ra_to_rad(pfd.rastr)*psr_utils.RADTODEG, \
                        psr_utils.dec_to_rad(pfd.decstr)*psr_utils.RADTODEG, \
                        pfdfn)

    print "Loaded %s" % cand.pfdfn
    print "    Best topo period (s): %f" % cand.topo_period
    print "    Best bary period (s): %f" % cand.bary_period
    print "    Best DM (cm^-3/pc): %f" % cand.dm
    print "    RA (J2000 - deg): %f" % cand.raj_deg
    print "    Dec (J2000 - deg): %f" % cand.decj_deg

    print "-" * 10
    pprint.pprint(cand.__dict__)
    print "-" * 10

    from rating_classes import freq_vs_phase
    fvph = freq_vs_phase.FreqVsPhaseClass()
    fvph.add_data(cand)

    print "Added freq vs phase data to cand"
    pprint.pprint(cand.__dict__)
    print "-" * 10

    pprint.pprint(cand.freq_vs_phase.__dict__)

    print "Check if freq_vs_phase rotates out and back to same array"
    orig = cand.freq_vs_phase.data.copy()
    orig_dm = cand.freq_vs_phase.curr_dm
    cand.freq_vs_phase.dedisperse(0)
    mod = cand.freq_vs_phase.data.copy()
    cand.freq_vs_phase.dedisperse(orig_dm)
    new = cand.freq_vs_phase.data.copy()

    print "Compare orig/mod"
    print(orig == mod).all()
    print "Compare mod/new"
    print(mod == new).all()
    print "Compare orig/new"
    print(orig == new).all()

    print "Compare with best prof"
    # Create best prof file
    print "show_pfd -noxwin %s" % cand.info['pfdfn']
    subprocess.call("show_pfd -noxwin %s" % cand.info['pfdfn'],
                    shell=True,
                    stdout=open(os.devnull))
    pfd_bestprof = np.loadtxt(os.path.split(cand.pfd.pfd_filename +
                                            ".bestprof")[-1],
                              usecols=(1, ))

    cand.freq_vs_phase.dedisperse(cand.pfd.bestdm)
    fvph_prof = cand.freq_vs_phase.data.sum(axis=0).squeeze()
    print test_profiles(pfd_bestprof, fvph_prof, 1e-6)

    print "Testing dedispersion"
    for dm in np.random.randint(0, 1000, size=10):
        print "DM: %g" % dm,
        cand.freq_vs_phase.dedisperse(dm)
        fvph_prof = cand.freq_vs_phase.get_profile()

        cand.pfd.dedisperse(dm, doppler=1)
        cand.pfd.adjust_period()
        pfd_prof = cand.pfd.time_vs_phase().sum(axis=0).squeeze()
        print test_profiles(fvph_prof, pfd_prof, 1e-6)
        plt.figure(dm)
        plt.plot(fvph_prof, label="FvsPh")
        plt.plot(pfd_prof, label="PFD")
        plt.title("DM: %g" % dm)
        plt.legend(loc='best')
    plt.show()
Exemple #35
0
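            # Parse comma-separated index lists with ranges (e.g. "2,5-8,12")
            # into explicit lists of subbands/intervals to kill.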
            for subs in a.split(','):
                if (subs.find("-") > 0):
                    lo, hi = subs.split("-")
                    kill.extend(range(int(lo), int(hi) + 1))
                else:
                    kill.append(int(subs))
        if o in ("-i", "--kints"):
            for ints in a.split(','):
                if (ints.find("-") > 0):
                    lo, hi = ints.split("-")
                    kints.extend(range(int(lo), int(hi) + 1))
                else:
                    kints.append(int(ints))

    # Read the prepfold output file and the binary profiles
    fold_pfd = pfd(sys.argv[-1])

    # Check to make sure we can use this .pfd for timing purposes
    if not fold_pfd.use_for_timing():
        sys.stderr.write(
            "Error: '%s' was made allowing prepfold to search!\n" % \
            sys.argv[-1])
        sys.exit(2)

    # Read key information from the bestprof file
    if fold_pfd.bestprof:
        fold = fold_pfd.bestprof
    else:
        sys.stderr.write(
            "Error:  Can't open '%s.bestprof'!  Regenerate with show_pfd.\n" % \
            sys.argv[-1])
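Exemple #36
0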
def upload_candidates(header_id, versionnum, directory, verbose=False, \
                        dry_run=False, *args, **kwargs):
    """Upload candidates to common DB.

        Inputs:
            header_id: header_id number for this beam, as returned by
                        spHeaderLoader/header.upload_header
            versionnum: A combination of the githash values from 
                        PRESTO and from the pipeline. 
            directory: The directory containing results from the pipeline.
            verbose: An optional boolean value that determines if information 
                        is printed to stdout.
            dry_run: An optional boolean value. If True no connection to DB
                        will be made and DB command will not be executed.
                        (If verbose is True DB command will be printed 
                        to stdout.)

            *** NOTE: Additional arguments are passed to the uploader function.

        Outputs:
            cand_ids: List of candidate IDs corresponding to these candidates
                        in the common DB. (Or a list of None values if
                        dry_run is True).
    """
    # find *.accelcands file
    candlists = glob.glob(os.path.join(directory, "*.accelcands"))

    if len(candlists) != 1:
        raise PeriodicityCandidateError("Wrong number of candidate lists found (%d)!" % \
                                            len(candlists))

    # Get list of candidates from *.accelcands file
    candlist = accelcands.parse_candlist(candlists[0])
    minsigma = config.searching.to_prepfold_sigma
    foldedcands = [c for c in candlist if c.sigma > minsigma]
    foldedcands = foldedcands[:config.searching.max_cands_to_fold]
    foldedcands.sort(reverse=True)  # Sort by descending sigma

    # Create temporary directory
    tempdir = tempfile.mkdtemp(suffix="_tmp", prefix="PALFA_pfds_")
    tarfns = glob.glob(os.path.join(directory, "*_pfd.tgz"))
    if len(tarfns) != 1:
        raise PeriodicityCandidateError("Wrong number (%d) of *_pfd.tgz " \
                                         "files found in %s" % (len(tarfns), \
                                            directory))

    tar = tarfile.open(tarfns[0])
    try:
        tar.extractall(path=tempdir)
    except IOError:
        if os.path.isdir(tempdir):
            shutil.rmtree(tempdir)
        raise PeriodicityCandidateError("Error while extracting pfd files " \
                                        "from tarball (%s)!" % tarfns[0])
    finally:
        tar.close()
    # Loop over candidates that were folded
    results = []
    for ii, c in enumerate(foldedcands):
        basefn = "%s_ACCEL_Cand_%d" % (c.accelfile.replace("ACCEL_", "Z"), \
                                    c.candnum)
        pfdfn = os.path.join(tempdir, basefn + ".pfd")
        pngfn = os.path.join(directory, basefn + ".pfd.png")

        pfd = prepfold.pfd(pfdfn)

        try:
            cand = PeriodicityCandidate(header_id, ii+1, pfd, c.snr, \
                                    c.cpow, c.ipow, len(c.dmhits), \
                                    c.numharm, versionnum, c.sigma)
        except Exception:
            raise PeriodicityCandidateError("PeriodicityCandidate could not be " \
                                            "created (%s)!" % pfdfn)

        if dry_run:
            cand.get_upload_sproc_call()
            if verbose:
                print cand
            results.append(None)
            cand_id = -1
        else:
            cand_id = cand.upload(*args, **kwargs)

        pfdplot = PeriodicityCandidatePFD(cand_id, pfdfn)
        pngplot = PeriodicityCandidatePNG(cand_id, pngfn)
        if dry_run:
            pfdplot.get_upload_sproc_call()
            pngplot.get_upload_sproc_call()
            if verbose:
                print pfdplot
                print pngplot
        else:
            pfdplot.upload(*args, **kwargs)
            pngplot.upload(*args, **kwargs)

    shutil.rmtree(tempdir)
    return results
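A minimal dry-run sketch of calling this uploader; every argument value below is a placeholder, not something taken from the pipeline:

# Hypothetical dry run: prints the DB stored-procedure calls (via
# get_upload_sproc_call) without connecting to the common DB.
cand_ids = upload_candidates(header_id=1234,
                             versionnum="<presto-githash>/<pipeline-githash>",
                             directory="/path/to/results",
                             verbose=True,
                             dry_run=True)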
Exemple #37
0
    sumprof = Num.zeros(numbins, dtype='d')

    base_T = None
    base_BW = None
    orig_fctr = None
    Tprerfi = 0.0
    Tpostrfi = 0.0
    avg_S = 0.0

    # Step through the profiles and determine the offsets
    for pfdfilenm, killsubs, killints in zip(pfdfilenms, killsubss, killintss):

        print("\n  Processing '%s'..." % pfdfilenm)

        # Read the fold data and de-disperse at the requested DM
        current_pfd = pfd(pfdfilenm)
        current_pfd.dedisperse(DM)
        # This corrects for any searching that prepfold did to find the peak
        current_pfd.adjust_period()
        T = current_pfd.T
        Tprerfi += T
        BW = current_pfd.nsub * current_pfd.subdeltafreq
        fctr = current_pfd.lofreq + 0.5 * BW

        # If there are subbands to kill, kill em'
        if killsubs is not None:
            print("    killing subbands:  ", killsubs)
            current_pfd.kill_subbands(killsubs)
            BW *= (current_pfd.nsub - len(killsubs)) / float(current_pfd.nsub)
        # If there are intervals to kill, kill em'
        if killints is not None:
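            # Hypothetical completion -- the example is truncated here.
            # Mirroring the subband branch above, kill the bad intervals
            # and scale the integration time accordingly (the exact
            # bookkeeping in the original is an assumption):
            print("    killing intervals:  ", killints)
            current_pfd.kill_intervals(killints)
            T *= (current_pfd.npart - len(killints)) / float(current_pfd.npart)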
Exemple #38
0
def get_candidates(versionnum, directory, header_id=None):
    """Upload candidates to common DB.

        Inputs:
            versionnum: A combination of the githash values from 
                        PRESTO and from the pipeline. 
            directory: The directory containing results from the pipeline.
            header_id: header_id number for this beam, as returned by
                        spHeaderLoader/header.upload_header (default=None)

        Output:
            cands: List of candidates.
    """
    # find *.accelcands file
    candlists = glob.glob(os.path.join(directory, "*.accelcands"))

    if len(candlists) != 1:
        raise PeriodicityCandidateError("Wrong number of candidate lists found (%d)!" % \
                                            len(candlists))

    # Get list of candidates from *.accelcands file
    candlist = accelcands.parse_candlist(candlists[0])
    # find the search_params.txt file
    paramfn = os.path.join(directory, 'search_params.txt')
    if os.path.exists(paramfn):
        tmp, params = {}, {}
        execfile(paramfn, tmp, params)
    else:
        raise PeriodicityCandidateError("Search parameter file doesn't exist!")
    minsigma = params['to_prepfold_sigma']
    foldedcands = [c for c in candlist \
                    if c.sigma > params['to_prepfold_sigma']]
    foldedcands = foldedcands[:params['max_cands_to_fold']]
    foldedcands.sort(reverse=True)  # Sort by descending sigma

    # Create temporary directory
    tempdir = tempfile.mkdtemp(suffix="_tmp", prefix="PALFA_pfds_")

    if foldedcands:

        tarfns = glob.glob(os.path.join(directory, "*_pfd.tgz"))
        if len(tarfns) != 1:
            raise PeriodicityCandidateError("Wrong number (%d) of *_pfd.tgz " \
                                             "files found in %s" % (len(tarfns), \
                                                directory))

        tar = tarfile.open(tarfns[0])
        try:
            tar.extractall(path=tempdir)
        except IOError:
            if os.path.isdir(tempdir):
                shutil.rmtree(tempdir)
            raise PeriodicityCandidateError("Error while extracting pfd files " \
                                            "from tarball (%s)!" % tarfns[0])
        finally:
            tar.close()

    # Loop over candidates that were folded
    cands = []
    for ii, c in enumerate(foldedcands):
        basefn = "%s_ACCEL_Cand_%d" % (c.accelfile.replace("ACCEL_", "Z"), \
                                    c.candnum)
        pfdfn = os.path.join(tempdir, basefn + ".pfd")
        pngfn = os.path.join(directory, basefn + ".pfd.png")

        pfd = prepfold.pfd(pfdfn)

        try:
            cand = PeriodicityCandidate(ii+1, pfd, c.snr, \
                                    c.cpow, c.ipow, len(c.dmhits), \
                                    c.numharm, versionnum, c.sigma, \
                                    header_id=header_id)
        except Exception:
            raise PeriodicityCandidateError("PeriodicityCandidate could not be " \
                                            "created (%s)!" % pfdfn)

        cand.add_dependent(PeriodicityCandidatePFD(pfdfn))
        cand.add_dependent(PeriodicityCandidatePNG(pngfn))
        cands.append(cand)

    shutil.rmtree(tempdir)
    return cands
Exemple #39
0
    def __init__(self, pfd_file, temp_file, direction, rate, threshold, order):

        pf = pfd(pfd_file)
        self.temp_file = temp_file
        self.rate = rate
        self.direction = direction
        self.thresh = threshold
        self.order = order
        self.length = pf.T

        pf.dedisperse()

        self.nsub = pf.npart
        self.nbin = pf.proflen
        self.phase = np.arange(self.nbin) / float(self.nbin)

        self.template = read_gaussfit(self.temp_file, self.nbin)
        self.on = np.where(self.template > self.thresh)[0]
        self.off = np.where(self.template < self.thresh)[0]

        # Axis 0 = subints, axis 1 = channels, axis 2 = phase bins
        bandpass_off = np.median(pf.profs[:, :, self.off],
                                 axis=(0, 2),
                                 keepdims=True)
        self.subbed_bandpass = bandpass_off.squeeze()
        temp = pf.profs - bandpass_off
        self.raw_profs = np.mean(temp, axis=1).squeeze()
        # Bandpass subtraction here makes raw_profs a bit of a misnomer (and now, = sub_profs)

        # Subtract the median from each profile (slick version!)
        self.sub_profs = self.raw_profs  #- np.median(self.raw_profs[:,self.off],axis=1,keepdims=True)
        self.subbed_profile = np.sum(self.sub_profs, axis=0).squeeze()

        # Note: pf.mid_secs represents the time associated with the center of each subint.
        self.times = pf.mid_secs
        self.offs = self.times * self.rate  # Position offset from start position (pf.rastr/pf.decstr)

        # Scott: pfd header positions correspond to the telescope position on the leading edge of the first subint
        # This may disagree w/ expected starting position since there's a lag (~5 sec?) between the start
        # of a scan and when GUPPI starts writing data.
        self.actual_start_ra = pf.rastr
        self.actual_start_dec = pf.decstr
        pos_str = pf.rastr + ' ' + pf.decstr
        self.actual_start_coord = SkyCoord(pos_str,
                                           frame=ICRS,
                                           unit=(u.hourangle, u.deg),
                                           obstime="J2015.0")

        # Remove DC/scale by STDEV, subtract polynomials, remove DC/scale by STDEV.
        self.cleaned_profs = self.clean_profs(self.sub_profs)
        self.folded_profile = np.sum(self.cleaned_profs, axis=0).squeeze()
        self.amps_errs(self.cleaned_profs)

        xy = self.get_xy()
        self.xx = xy[0]
        self.yy = xy[1]

        self.efac = 1.0
        self.scale_errors()

        #"""
        # Bootstrap: resample subints with replacement and refit the 1D
        # beam profile each trial; the spread of the fitted amplitudes
        # and offsets gives empirical uncertainties (bp_vals/bp_errs below).
        self.avals = []  # Amplitude values
        self.ovals = []  # Offset values
        self.n_bootstrap_trials = 10000

        # For RFI/nulling test, remove N points from consideration:
        #n_removed = 10
        #pass_inds = np.random.randint(0,high=self.nsub,size=self.nsub-n_removed)

        for k in range(self.n_bootstrap_trials):
            boot_inds = np.random.randint(0, high=self.nsub, size=self.nsub)
            bp_vals, bp_errs = fit_1d_bp(self.offs[boot_inds],
                                         self.ii[boot_inds],
                                         self.ee[boot_inds])
            self.avals.append(bp_vals[0])
            self.ovals.append(bp_vals[1])

        self.bp_vals = [np.mean(self.avals), np.mean(self.ovals)]
        self.bp_errs = [np.std(self.avals), np.std(self.ovals)]
        #"""

        #self.bp_vals,self.bp_errs = fit_1d_bp(self.offs,self.ii,self.ee)

        # Use 1D fit results as initial guesses for full, 2D fit.
        self.amp_guess = self.bp_vals[0]
        self.off_guess = self.bp_vals[1]
        #print '...end bootstrap...',datetime.now()

        # USE 1D FITS TO CALCULATE BEST RA/DEC VALUES
        # Direction = 1 (dec); no correction necessary!
        if self.direction:
            xxx = self.actual_start_coord.dec + Angle(self.off_guess, u.arcmin)
            dd = coord_string(xxx, c_type='dms')
            #print "Best Dec = %s" % (dd)
            self.best = dd

        # Direction = 0 (ra); cos(dec) factor necessary!
        else:
            cos_dec_factor = 1.0 / np.cos(self.actual_start_coord.dec)
            xxx = self.actual_start_coord.ra + Angle(
                self.off_guess * cos_dec_factor * u.arcmin)
            aa = coord_string(xxx, c_type='hms')
            #print "Best RA = %s" % (aa)
            self.best = aa
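Exemple #40
0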
def fold_periodicity_candidates(job, accel_cands):
    """ Fold a list of candidates from sifting, rate them, 
        and write candidate attributes to file.
    """
    # Fold the best candidates
    cands_folded = 0
    start = time.time()
    for cand in accel_cands:
        print "At cand %s" % str(cand)
        if cands_folded == config.searching.max_cands_to_fold:
            break
        if cand.sigma >= config.searching.to_prepfold_sigma:
            print "...folding"
            queue.put(get_folding_command(cand, job))
            cands_folded += 1
    queue.join()
    end = time.time()
    job.folding_time += (end - start)
    job.num_cands_folded = cands_folded

    # Set up theano compile dir (UBC_AI rating uses theano)
    theano_compiledir = os.path.join(job.tempdir, 'theano_compile')
    os.mkdir(theano_compiledir)
    os.putenv("THEANO_FLAGS", "compiledir=%s" % theano_compiledir)

    # Rate candidates
    #JGM: Rating was not working properly at the moment;
    #testing if it works now.
    timed_execute("rate_pfds.py --redirect-warnings --include-all *.pfd")
    sys.stdout.flush()

    # Calculate some candidate attributes from pfds
    attrib_file = open('candidate_attributes.txt', 'w')
    for pfdfn in glob.glob("*.pfd"):
        attribs = {}
        pfd = prepfold.pfd(pfdfn)
        red_chi2 = pfd.bestprof.chi_sqr
        dof = pfd.proflen - 1
        attribs['prepfold_sigma'] = \
                -scipy.stats.norm.ppf(scipy.stats.chi2.sf(red_chi2*dof, dof))

        if config.searching.use_fixchi:
            # Remake prepfold plot with rescaled chi-sq
            cmd = "show_pfd -noxwin -fixchi %s" % pfdfn
            timed_execute(cmd)

            # Get prepfold sigma from the rescaled bestprof
            pfd = prepfold.pfd(pfdfn)
            red_chi2 = pfd.bestprof.chi_sqr
            attribs['rescaled_prepfold_sigma'] = \
                    -scipy.stats.norm.ppf(scipy.stats.chi2.sf(red_chi2*dof, dof))
        else:
            # Rescale prepfold sigma by estimating the off-signal
            # reduced chi-sq
            off_red_chi2 = pfd.estimate_offsignal_redchi2()
            new_red_chi2 = red_chi2 / off_red_chi2
            attribs['rescaled_prepfold_sigma'] = \
                    -scipy.stats.norm.ppf(scipy.stats.chi2.sf(new_red_chi2*dof, dof))

        for key in attribs:
            attrib_file.write("%s\t%s\t%.3f\n" % (pfdfn, key, attribs[key]))
    attrib_file.close()
Exemple #41
0
import numpy as np
import sys, prepfold

if len(sys.argv) < 3:
    print("usage:pfd-export <input pfd file> <output csv file>")
    sys.exit(1)

pfd_file = sys.argv[1]
csv_file = sys.argv[2]
print("PFD file: " + pfd_file)
print("CSV file: " + csv_file)

pfd = prepfold.pfd(pfd_file)
print(pfd)
pfd.dedisperse(pfd.bestdm)
time_phase = pfd.time_vs_phase()
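# Normalize the time-vs-phase array: subtract each subintegration's
# minimum, then scale by the global maximum so values land roughly in [0, 1].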
global_max = np.maximum.reduce(np.maximum.reduce(time_phase))
min_parts = np.minimum.reduce(time_phase, 1)
tp = (time_phase - min_parts[:, np.newaxis]) / np.fabs(global_max)
sdm = "{0:.3f}".format(pfd.bestdm)
np.savetxt(csv_file, tp, fmt='%0.6f', delimiter=" ")
print("time phase data exported.")
Exemple #42
0
def get_candidates(versionnum, directory, header_id=None, timestamp_mjd=None, inst_cache=None):
    """Upload candidates to common DB.

        Inputs:
            versionnum: A combination of the githash values from 
                        PRESTO and from the pipeline. 
            directory: The directory containing results from the pipeline.
            header_id: header_id number for this beam, as returned by
                        spHeaderLoader/header.upload_header (default=None)
            timestamp_mjd: mjd timestamp for this observation (default=None).
            inst_cache: ratings2 RatingInstanceIDCache instance.

        Output:
            cands: List of candidates.
            tempdir: Path of temporary directory that PFDs have been untarred,
                     returned so that it can be deleted after successful PFD upload.
    """
    # find *.accelcands file    
    candlists = glob.glob(os.path.join(directory, "*.accelcands"))
                                                
    if len(candlists) != 1:
        raise PeriodicityCandidateError("Wrong number of candidate lists found (%d)!" % \
                                            len(candlists))

    # Get list of candidates from *.accelcands file
    candlist = accelcands.parse_candlist(candlists[0])
    # find the search_params.txt file
    paramfn = os.path.join(directory, 'search_params.txt')
    if os.path.exists(paramfn):
        tmp, params = {}, {}
        execfile(paramfn, tmp, params)
    else:
        raise PeriodicityCandidateError("Search parameter file doesn't exist!")
    minsigma = params['to_prepfold_sigma']
    foldedcands = [c for c in candlist \
                    if c.sigma > params['to_prepfold_sigma']]
    foldedcands = foldedcands[:params['max_cands_to_fold']]
    foldedcands.sort(reverse=True) # Sort by descending sigma

    # Open attribute file
    attrib_fn = os.path.join(directory, 'candidate_attributes.txt')
    attribs = np.loadtxt(attrib_fn,dtype='S')
        
    # Create temporary directory
    tempdir = tempfile.mkdtemp(suffix="_tmp", prefix="pfds_", dir="/sps/hep/glast/data/survey_pulsar/")

    if foldedcands:

        pfd_tarfns = glob.glob(os.path.join(directory, "*_pfd.tgz"))
        if len(pfd_tarfns) != 1:
            raise PeriodicityCandidateError("Wrong number (%d) of *_pfd.tgz " \
                                             "files found in %s" % (len(pfd_tarfns), \
                                                directory))

        bestprof_tarfns = glob.glob(os.path.join(directory, "*_bestprof.tgz"))
        if len(bestprof_tarfns) != 1:
            raise PeriodicityCandidateError("Wrong number (%d) of *_bestprof.tgz " \
                                             "files found in %s" % (len(bestprof_tarfns), \
                                                directory))

        rating_tarfns = glob.glob(os.path.join(directory, "*_pfd_rat.tgz"))
        if len(rating_tarfns) != 1:
            raise PeriodicityCandidateError("Wrong number (%d) of *_pfd_rat.tgz " \
                                             "files found in %s" % (len(rating_tarfns), \
                                                directory))

        for tarfn in [ pfd_tarfns[0], bestprof_tarfns[0], rating_tarfns[0] ]: 
            tar = tarfile.open(tarfn)
            try:
                tar.extractall(path=tempdir)
            except IOError:
                if os.path.isdir(tempdir):
                    shutil.rmtree(tempdir)
                raise PeriodicityCandidateError("Error while extracting pfd files " \
                                                "from tarball (%s)!" % tarfn)
            finally:
                tar.close()

    # Loop over candidates that were folded
    cands = []
    for ii, c in enumerate(foldedcands):
        basefn = "%s_ACCEL_Cand_%d" % (c.accelfile.replace("ACCEL_", "Z"), \
                                    c.candnum)
        pfdfn = os.path.join(tempdir, basefn+".pfd")
        pngfn = os.path.join(directory, basefn+".pfd.png")
        ratfn = os.path.join(tempdir, basefn+".pfd.rat")

        pfd = prepfold.pfd(pfdfn)
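        # Select this candidate's rows from the attributes table and
        # turn the remaining (key, value) columns into a dict.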
        cand_attribs = dict(attribs[attribs[:,0] == basefn+".pfd"][:,1:])
        
        try:
            cand = PeriodicityCandidate(ii+1, pfd, c.snr, \
                                    c.cpow, c.ipow, len(c.dmhits), \
                                    c.numharm, versionnum, c.sigma, \
                                    c.period, c.dm, cand_attribs, header_id=header_id)
        except Exception:
            raise PeriodicityCandidateError("PeriodicityCandidate could not be " \
                                            "created (%s)!" % pfdfn)
        cand.add_dependent(PeriodicityCandidatePFD(pfdfn, timestamp_mjd=timestamp_mjd))
        cand.add_dependent(PeriodicityCandidatePNG(pngfn))

        ratvals = ratings2.rating_value.read_file(ratfn)
        cand.add_dependent(PeriodicityCandidateRating(ratvals,inst_cache=inst_cache))
        cands.append(cand)
        
    #shutil.rmtree(tempdir)
    return cands,tempdir