def sift_periodicity(job, dmstrs):
    # Sift through the candidates to choose the best to fold
    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(
        glob.glob("*ACCEL_%d" % config.searching.lo_accel_zmax), track=True)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(
            lo_accel_cands, config.searching.numhits_to_fold, dmstrs,
            config.searching.low_DM_cutoff)

    hi_accel_cands = sifting.read_candidates(
        glob.glob("*ACCEL_%d" % config.searching.hi_accel_zmax), track=True)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_DM_problems(
            hi_accel_cands, config.searching.numhits_to_fold, dmstrs,
            config.searching.low_DM_cutoff)

    all_accel_cands = lo_accel_cands + hi_accel_cands
    if len(all_accel_cands):
        all_accel_cands = sifting.remove_harmonics(all_accel_cands)
        # Note:  the candidates will be sorted in _sigma_ order, not _SNR_!
        all_accel_cands.sort(sifting.cmp_sigma)
        print "Sending candlist to stdout before writing to file"
        sifting.write_candlist(all_accel_cands)
        sys.stdout.flush()
        sifting.write_candlist(all_accel_cands, job.basefilenm + ".accelcands")
        # Make sifting summary plots
        all_accel_cands.plot_rejects()
        plt.title("%s Rejected Cands" % job.basefilenm)
        plt.savefig(job.basefilenm + ".accelcands.rejects.png")
        all_accel_cands.plot_summary()
        plt.title("%s Periodicity Summary" % job.basefilenm)
        plt.savefig(job.basefilenm + ".accelcands.summary.png")

        # Write out sifting candidate summary
        all_accel_cands.print_cand_summary(job.basefilenm +
                                           ".accelcands.summary")
        # Write out sifting comprehensive report of bad candidates
        all_accel_cands.write_cand_report(job.basefilenm +
                                          ".accelcands.report")
        timed_execute("gzip --best %s" % job.basefilenm + ".accelcands.report")

        # Moving of results to resultsdir now happens in clean_up(...)
        # shutil.copy(job.basefilenm+".accelcands", job.outputdir)

    job.sifting_time = time.time() - job.sifting_time

    return all_accel_cands
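# Note on the sigma sort above: list.sort(sifting.cmp_sigma) relies on
# Python 2 cmp-style comparators, which Python 3 removed.  A minimal sketch
# of the equivalent key-based sort for a Python 3 port (assuming candidates
# expose a .sigma attribute and the intent is highest sigma first):
#
#     all_accel_cands.sort(key=lambda c: c.sigma, reverse=True)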
def accelsift(filenm, zmax, wmax):
    """
    This function is a translation of the PRESTO code ACCEL_sift.py
    so that it can be more easily incorporated into our code.  It 
    sifts through the ACCEL cands, making cuts on various parameters
    and removing duplicates and harmonics.
    """
    # Set a bunch of parameters from our params.py file
    min_num_DMs = params.min_num_DMs
    low_DM_cutoff = params.low_DM_cutoff
    sifting.sigma_threshold = params.sigma_threshold
    sifting.c_pow_threshold = params.c_pow_threshold
    sifting.known_birds_p = params.known_birds_p
    sifting.known_birds_f = params.known_birds_f
    sifting.r_err = params.r_err
    sifting.short_period = params.short_period
    sifting.long_period = params.long_period
    sifting.harm_pow_cutoff = params.harm_pow_cutoff

    # Try to read the .inf files first: _if_ they are present, all of
    # them should be there.  (If accelsearch finds no candidates, we get
    # no ACCEL files.)
    inffiles = glob("*.inf")
    candfiles = glob("*ACCEL_%d_JERK_%d" % (zmax, wmax))

    # Check to see if this is from a short search
    if len(re.findall("_[0-9][0-9][0-9]M_", inffiles[0])):
        dmstrs = [x.split("DM")[-1].split("_")[0] for x in candfiles]
    else:
        dmstrs = [x.split("DM")[-1].split(".inf")[0] for x in inffiles]
    dms = map(float, dmstrs)
    dms.sort()
    dmstrs = ["%.2f" % x for x in dms]

    # Read in candfiles
    cands = sifting.read_candidates(candfiles)
    # Remove candidates that are duplicated in other ACCEL files
    if len(cands):
        cands = sifting.remove_duplicate_candidates(cands)
    # Remove candidates with DM problems
    if len(cands):
        cands = sifting.remove_DM_problems(cands, min_num_DMs, dmstrs,
                                           low_DM_cutoff)

    # Remove candidates that are harmonically related to each other
    # Note:  this includes only a small set of harmonics
    if len(cands):
        cands = sifting.remove_harmonics(cands)

    # Write candidates to STDOUT
    if len(cands):
        cands.sort(sifting.cmp_snr)
        sifting.write_candlist(cands, candfilenm=filenm)

    Ncands = len(cands)
    return Ncands
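# Hypothetical usage of accelsift(), assuming the current directory contains
# the accelsearch outputs (*.inf plus *ACCEL_<zmax>_JERK_<wmax> files) and
# that params.py defines the thresholds referenced above:
#
#     ncands = accelsift("candidates.sifted", zmax=20, wmax=60)
#     print("%d candidates survived sifting" % ncands)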
def sift_process_bon(job):

    #cpu_idx = int(cpu_id)

    # Change to the specified working directory
    os.chdir(job.workdir)

    # Make the single-pulse plot
    cmd = "single_pulse_search.py -t %f %s/*.singlepulse"%(singlepulse_plot_SNR,job.workdir)
    job.singlepulse_time += timed_execute(cmd)

    # Sort dmstrs
    job.dmstrs.sort()

    # Sift through the candidates to choose the best to fold
    
    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(glob.glob("%s/*ACCEL_%d"%(job.workdir,lo_accel_zmax)))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold,
                                                    job.dmstrs, low_DM_cutoff)
#
#    hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%hi_accel_zmax))
#    if len(hi_accel_cands):
#        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
#    if len(hi_accel_cands):
#        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, numhits_to_fold,
#                                                    dmstrs, low_DM_cutoff)

    job.all_accel_cands = lo_accel_cands #+ hi_accel_cands
    if len(job.all_accel_cands):
        job.all_accel_cands = sifting.remove_harmonics(job.all_accel_cands)
        # Note:  the candidates will be sorted in _sigma_ order, not _SNR_!
        job.all_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(job.all_accel_cands, job.basefilenm+".accelcands")
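    # Note: os.system() does not raise when the copy command fails, so the
    # try/except below only guards the command-string construction itself.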

    try:
        cmd = "cp %s/*.accelcands "%job.workdir +job.outputdir
        os.system(cmd)
    except: pass

    job.sifting_time = time.time() - job.sifting_time
Example 4
def prestocand_to_hdf(cand_path, hdf_path, prelim_reject=False, track=True):
    # This function requires the PRESTO sifting module to run.

    # Timing starts
    start = datetime.datetime.now()
    # Do the job
    candlist = sifting.read_candidates(glob.glob(cand_path), prelim_reject, track)
    # Timing ends
    end = datetime.datetime.now()
    time_consumed = end - start

    df = pd.DataFrame([[x.path, x.filename, x.candnum, x.sigma, x.numharm, x.ipow_det, x.cpow, \
                        x.r, x.f, x.z, x.T, x.p, x.DMstr, x.DM, x.harm_pows, x.harm_amps, x.snr, \
                        x.hits, x.note] for x in candlist], \
                    columns = ['path', 'filename', 'candnum', 'sigma', 'numharm', 'ipow_det', \
                            'cpow', 'r', 'f', 'z', 'T', 'p', 'DMstr', 'DM', 'harm_pows', 'harm_amps', \
                            'snr', 'hits', 'note'])
    # ImportError if no PyTables
    df.to_hdf(hdf_path, key='df', mode='w')
    
    return time_consumed  # TODO: return conv_status tuple (total_cand_num, file_num, cand_num, etc.)
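# A minimal round-trip sketch (hypothetical paths; pandas.to_hdf/read_hdf
# need the PyTables package installed):
#
#     elapsed = prestocand_to_hdf("*_ACCEL_50", "cands.h5")
#     df = pd.read_hdf("cands.h5", key="df")
#     print(df[["filename", "candnum", "sigma", "DM"]].head())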
Example 5
# Try to read the .inf files first: _if_ they are present, all of
# them should be there.  (If accelsearch finds no candidates, we get
# no ACCEL files.)

# Check to see if this is from a short search
if len(re.findall("_[0-9][0-9][0-9]M_", inffiles[0])):
    dmstrs = [x.split("DM")[-1].split("_")[0] for x in candfiles]
else:
    dmstrs = [x.split("DM")[-1].split(".inf")[0] for x in inffiles]
dms = map(float, dmstrs)
dms.sort()
dmstrs = ["%.2f" % x for x in dms]

# Read in all the candidates
cands = sifting.read_candidates(candfiles)

# Remove candidates that are duplicated in other ACCEL files
if len(cands):
    cands = sifting.remove_duplicate_candidates(cands)

# Remove candidates with DM problems
if len(cands):
    cands = sifting.remove_DM_problems(cands, min_num_DMs, dmstrs,
                                       low_DM_cutoff)

# Remove candidates that are harmonically related to each other
# Note:  this includes only a small set of harmonics
if len(cands) > 1:
    cands = sifting.remove_harmonics(cands)
print("Number of candidates remaining {}".format(len(cands)))
Example 6
def main(fits_filenm, workdir, ddplans):
   
    # Change to the specified working directory
    os.chdir(workdir)

    # Get information on the observation and the job
    job = obs_info(fits_filenm)
    if job.raw_T < low_T_to_search:
        print "The observation is too short (%.2f s) to search."%job.raw_T
        sys.exit()
    job.total_time = time.time()
    if job.dt == 163.84:
        ddplans = ddplans[str(job.nchans)+"slow"]
    else:
        ddplans = ddplans[str(job.nchans)+"fast"]
    
    # Use whatever .zaplist is found in the current directory
    default_zaplist = glob.glob("*.zaplist")[0]

    # Make sure the output directory (and parent directories) exist
    try:
        os.makedirs(job.outputdir)
        os.chmod(job.outputdir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH)
    except: pass

    # Make sure the tmp directory (in a tmpfs mount) exists
    tmpdir = os.path.join(base_tmp_dir, job.basefilenm)
    try:
        os.makedirs(tmpdir)
    except: pass

    print "\nBeginning GBNCC search of '%s'"%job.fits_filenm
    print "UTC time is:  %s"%(time.asctime(time.gmtime()))

    rfifindout=job.basefilenm+"_rfifind.out"
    rfifindmask=job.basefilenm+"_rfifind.mask"

    if not os.path.exists(rfifindout) or not os.path.exists(rfifindmask):
        # rfifind the filterbank file
        cmd = "rfifind -time %.17g -o %s %s > %s_rfifind.out"%\
              (rfifind_chunk_time, job.basefilenm,
               job.fits_filenm, job.basefilenm)
        job.rfifind_time += timed_execute(cmd)
    maskfilenm = job.basefilenm + "_rfifind.mask"
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)
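    # A hypothetical guard answering the question above (the 30% threshold
    # is an assumption, not a project-tested value):
    #
    #     if job.masked_fraction > 0.30:
    #         print "Too much data masked (%.1f%%); skipping search." % \
    #               (100.0 * job.masked_fraction)
    #         sys.exit()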
    
    # Iterate over the stages of the overall de-dispersion plan
    dmstrs = []
    
    for ddplan in ddplans:
        # Make a downsampled filterbank file
        if ddplan.downsamp > 1:
            cmd = "psrfits_subband -dstime %d -nsub %d -o %s_DS%d %s"%\
                  (ddplan.downsamp, job.nchans, job.dsbasefilenm, ddplan.downsamp, job.dsbasefilenm )
            job.downsample_time += timed_execute(cmd)
            fits_filenm = job.dsbasefilenm + "_DS%d%s"%\
                          (ddplan.downsamp,job.fits_filenm[job.fits_filenm.rfind("_"):])
        else:
            fits_filenm = job.fits_filenm

        # Iterate over the individual passes through the .fil file
        for passnum in range(ddplan.numpasses):
            subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])

            # Now de-disperse 
            cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -nsub %d -numdms %d -numout %d -o %s/%s %s"%\
                  (maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep,
                   ddplan.dmstep, ddplan.numsub,
                   ddplan.dmsperpass, job.N/ddplan.downsamp,
                   tmpdir, job.basefilenm, fits_filenm)
            job.dedispersing_time += timed_execute(cmd)
            
            # Do the single-pulse search
            cmd = "single_pulse_search.py -p -m %f -t %f %s/*.dat"%\
                (singlepulse_maxwidth, singlepulse_threshold, tmpdir)
            job.singlepulse_time += timed_execute(cmd)
            spfiles = glob.glob("%s/*.singlepulse"%tmpdir)
            for spfile in spfiles:
                try:
                    shutil.move(spfile, workdir)
                except: pass

            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)
                basenm = os.path.join(tmpdir, job.basefilenm+"_DM"+dmstr)
                datnm = basenm+".dat"
                fftnm = basenm+".fft"
                infnm = basenm+".inf"

                # FFT, zap, and de-redden
                cmd = "realfft %s"%datnm
                job.FFT_time += timed_execute(cmd)
                cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
                      (default_zaplist, job.baryv, fftnm)
                job.FFT_time += timed_execute(cmd)
                cmd = "rednoise %s"%fftnm
                job.FFT_time += timed_execute(cmd)
                try:
                    os.rename(basenm+"_red.fft", fftnm)
                except: pass
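                # (realfft produces <basenm>.fft from the .dat file, zapbirds
                # edits that .fft in place, and rednoise writes <basenm>_red.fft,
                # which the rename above swaps back over the original .fft.)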
                
                # Do the low-acceleration search
                cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, fftnm)
                job.lo_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand"%lo_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm+"_ACCEL_%d.cand"%lo_accel_zmax, workdir)
                    shutil.move(basenm+"_ACCEL_%d"%lo_accel_zmax, workdir)
                except: pass
        
                # Do the high-acceleration search
                cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
                job.hi_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand"%hi_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm+"_ACCEL_%d.cand"%hi_accel_zmax, workdir)
                    shutil.move(basenm+"_ACCEL_%d"%hi_accel_zmax, workdir)
                except: pass

                # Move the .inf files
                try:
                    shutil.move(infnm, workdir)
                except: pass
                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                except: pass
                try:
                    os.remove(fftnm)
                except: pass

    # Make the single-pulse plots
    basedmb = job.basefilenm+"_DM"
    basedme = ".singlepulse "
    # The following will make plots for DM ranges:
    #    0-30, 20-110, 100-310, 300-1000+
    dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
               basedmb+"[012][0-9].[0-9][0-9]"+basedme,
               basedmb+"[2-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"10[0-9].[0-9][0-9]"+basedme,
               basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"30[0-9].[0-9][0-9]"+basedme,
               basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme]
    dmrangestrs = ["0-30", "20-110", "100-310", "300-1000+"]
    psname = job.basefilenm+"_singlepulse.ps"
    for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
        cmd = 'single_pulse_search.py -t %f -g "%s"' % \
              (singlepulse_plot_SNR, dmglob)
        job.singlepulse_time += timed_execute(cmd)
        try:
            os.rename(psname,
                      job.basefilenm+"_DMs%s_singlepulse.ps"%dmrangestr)
        except: pass

    # Sift through the candidates to choose the best to fold
    
    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%lo_accel_zmax))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold,
                                                    dmstrs, low_DM_cutoff)
        
    hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%hi_accel_zmax))
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, numhits_to_fold,
                                                    dmstrs, low_DM_cutoff)

    if len(lo_accel_cands) and len(hi_accel_cands):
        lo_accel_cands, hi_accel_cands = remove_crosslist_duplicate_candidates(lo_accel_cands, hi_accel_cands)

    if len(lo_accel_cands):
        lo_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(lo_accel_cands,
                               job.basefilenm+".accelcands_Z%d"%lo_accel_zmax)
    if len(hi_accel_cands):
        hi_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(hi_accel_cands,
                               job.basefilenm+".accelcands_Z%d"%hi_accel_zmax)

    try:
        cmd = "mv *.accelcands* "+job.outputdir
        os.system(cmd)
    except: pass
    job.sifting_time = time.time() - job.sifting_time

    # Fold the best candidates

    cands_folded = 0
    for cand in lo_accel_cands:
        if cands_folded == max_lo_cands_to_fold:
            break
        elif cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(get_folding_command(cand, job, ddplans, maskfilenm))
            cands_folded += 1
    cands_folded = 0
    for cand in hi_accel_cands:
        if cands_folded == max_hi_cands_to_fold:
            break
        elif cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(get_folding_command(cand, job, ddplans, maskfilenm))
            cands_folded += 1
    # Remove the bestprof files
    bpfiles = glob.glob("*.pfd.bestprof")
    for bpfile in bpfiles:
        os.remove(bpfile)

    # Now step through the .ps files and convert them to .png and gzip them

    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        if "singlepulse" in psfile:
            os.system("pstoimg -density 200 -antialias -crop a "+psfile)
            try:
                os.remove(epsfile)
            except: pass
        else:
            os.system("pstoimg -density 200 -antialias -flip cw "+psfile)
        os.system("gzip "+psfile)
    
    # Tar up the results files 

    tar_suffixes = ["_ACCEL_%d.tgz"%lo_accel_zmax,
                    "_ACCEL_%d.tgz"%hi_accel_zmax,
                    "_ACCEL_%d.cand.tgz"%lo_accel_zmax,
                    "_ACCEL_%d.cand.tgz"%hi_accel_zmax,
                    "_singlepulse.tgz",
                    "_inf.tgz",
                    "_pfd.tgz"]
    tar_globs = ["*_ACCEL_%d"%lo_accel_zmax,
                 "*_ACCEL_%d"%hi_accel_zmax,
                 "*_ACCEL_%d.cand"%lo_accel_zmax,
                 "*_ACCEL_%d.cand"%hi_accel_zmax,
                 "*.singlepulse",
                 "*_DM[0-9]*.inf",
                 "*.pfd"]
    for (tar_suffix, tar_glob) in zip(tar_suffixes, tar_globs):
        tf = tarfile.open(job.basefilenm+tar_suffix, "w:gz")
        for infile in glob.glob(tar_glob):
            tf.add(infile)
            os.remove(infile)
        tf.close()
            
    # Remove all the downsampled .fits files

    fitsfiles = glob.glob("*_DS?*.fits") + glob.glob("*_DS??*.fits")
    for fitsfile in fitsfiles:
        os.remove(fitsfile)

    # Remove the tmp directory (in a tmpfs mount)
    try:
        os.rmdir(tmpdir)
    except: pass

    # And finish up

    job.total_time = time.time() - job.total_time
    print "\nFinished"
    print "UTC time is:  %s"%(time.asctime(time.gmtime()))

    # Write the job report

    job.write_report(job.basefilenm+".report")
    job.write_report(os.path.join(job.outputdir, job.basefilenm+".report"))

    # Move all the important stuff to the output directory
    cmd = "mv *rfifind.[bimors]* *.tgz *.ps.gz *.png *.report "+\
          job.outputdir
    os.system(cmd)
Example 7

        accelsearch_time += temp
        totime += temp

    rfp.write("Realfft total time : %.2f\n" % (realfft_time))
    rfp.write("Rednoise total time : %.2f\n" % (readnoise_time))
    rfp.write("Accelsearch total time : %.2f\n" % (accelsearch_time))

    # Following will sort out the candidates

    numhits_to_fold = 2
    low_DM_cutoff = 1.0
    lo_accel_zmax = zmax

    # read_candidates() gathers collective information about the candidates
    # found in all of the ACCEL files.
    lo_accel_cands = sifting.read_candidates(glob.glob(outdir + "*ACCEL_0"))
    # Remove duplicate candidates (same period, lower significance).
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands,
                                                    numhits_to_fold, dmstrs,
                                                    low_DM_cutoff)
    if len(lo_accel_cands):
        lo_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(lo_accel_cands,
                               os.path.join(outdir, "candidate_list_from_script"))

    fp = open(os.path.join(outdir, "candidate_list_from_script"), "r")
    candfile = fp.read().split("\n")
    fp.close()
Example 8
          try:
              accel_cands_found_for[z_max].append(dm)
          except KeyError, e:
              accel_cands_found_for[z_max] = [dm]
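          # The try/except above is the pre-defaultdict grouping idiom; a
          # sketch of the same bookkeeping with the standard library:
          #
          #     accel_cands_found_for = collections.defaultdict(list)
          #     accel_cands_found_for[z_max].append(dm)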
 
  # Print the settings used for the PRESTO sifting.py module
  sifting.print_sift_globals()
  # For each value of z_max, sift the candidates that were found and put
  # them all in one big list:
  unsifted_accelcands = []
  sifted_accelcands = []
  for z_max in accel_cands_found_for.keys():
      # Read all the acceleration candidates:
      files = glob.glob(os.path.join(cand_dir, 
          basename + '_DM*.*_ACCEL_%d' % z_max))
      accel_cands = sifting.read_candidates(files)
      unsifted_accelcands.extend(accel_cands)
      # The sifting module also needs all the trial DM values 
      dm_strs = ['%.2f' % dm for dm in accel_cands_found_for[z_max]]
      
      # Sift all of the output candidates using the PRESTO sifting module.
      print 'Number of candidates %d' % len(accel_cands)        
      if accel_cands:
          accel_cands = sifting.remove_duplicate_candidates(accel_cands)
      print 'Number of candidates %d' % len(accel_cands)        
      if accel_cands:
          accel_cands = sifting.remove_DM_problems(accel_cands, 2, dm_strs, minimum_dm_cutoff)
      print 'Number of candidates %d' % len(accel_cands)        
      if accel_cands:
          # The PRESTO sifting.py module can crash if there are not enough
Example 9
def search_job(job):
    """Search the observation defined in the obs_info
        instance 'job'.
    """
    # Use whatever .zaplist is found in the current directory
    zaplist = glob.glob("*.zaplist")[0]
    print "Using %s as zaplist" % zaplist
    if config.searching.use_subbands and config.searching.fold_rawdata:
        # make a directory to keep subbands so they can be used to fold later
        try:
            os.makedirs(os.path.join(job.workdir, 'subbands'))
        except: pass

    # rfifind the data file
    cmd = "rfifind %s -time %.17g -o %s %s" % \
          (config.searching.datatype_flag, config.searching.rfifind_chunk_time, job.basefilenm,
           job.filenmstr)
    job.rfifind_time += timed_execute(cmd, stdout="%s_rfifind.out" % job.basefilenm)
    maskfilenm = job.basefilenm + "_rfifind.mask"
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)
    
    # Iterate over the stages of the overall de-dispersion plan
    dmstrs = []
    for ddplan in job.ddplans:

        # Iterate over the individual passes through the data file
        for passnum in range(ddplan.numpasses):
            subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])

            if config.searching.use_subbands:
                try:
                    os.makedirs(os.path.join(job.tempdir, 'subbands'))
                except: pass
    
                # Create a set of subbands
                cmd = "prepsubband %s -sub -subdm %s -downsamp %d -nsub %d -mask %s " \
                        "-o %s/subbands/%s %s" % \
                        (config.searching.datatype_flag, ddplan.subdmlist[passnum], ddplan.sub_downsamp,
                        ddplan.numsub, maskfilenm, job.tempdir, job.basefilenm,
                        job.filenmstr)
                job.subbanding_time += timed_execute(cmd, stdout="%s.subout" % subbasenm)
            
                # Now de-disperse using the subbands
                cmd = "prepsubband -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d " \
                        "-nsub %d -numout %d -o %s/%s %s/subbands/%s.sub[0-9]*" % \
                        (ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                        ddplan.dmsperpass, ddplan.dd_downsamp, ddplan.numsub,
                        psr_utils.choose_N(job.orig_N/ddplan.downsamp),
                        job.tempdir, job.basefilenm, job.tempdir, subbasenm)
                job.dedispersing_time += timed_execute(cmd, stdout="%s.prepout" % subbasenm)
            
            else:  # Not using subbands
                cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d " \
                        "-numout %d -o %s/%s %s"%\
                        (maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                        ddplan.dmsperpass, ddplan.dd_downsamp*ddplan.sub_downsamp, 
                        psr_utils.choose_N(job.orig_N/ddplan.downsamp),
                        job.tempdir, job.basefilenm, job.filenmstr)
                job.dedispersing_time += timed_execute(cmd)
            
            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)
                basenm = os.path.join(job.tempdir, job.basefilenm+"_DM"+dmstr)
                datnm = basenm+".dat"
                fftnm = basenm+".fft"
                infnm = basenm+".inf"

                # Do the single-pulse search
                cmd = "single_pulse_search.py -p -m %f -t %f %s"%\
                      (config.searching.singlepulse_maxwidth, \
                       config.searching.singlepulse_threshold, datnm)
                job.singlepulse_time += timed_execute(cmd)
                try:
                    shutil.move(basenm+".singlepulse", job.workdir)
                except: pass

                # FFT, zap, and de-redden
                cmd = "realfft %s"%datnm
                job.FFT_time += timed_execute(cmd)
                cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
                      (zaplist, job.baryv, fftnm)
                job.FFT_time += timed_execute(cmd)
                cmd = "rednoise %s"%fftnm
                job.FFT_time += timed_execute(cmd)
                try:
                    os.rename(basenm+"_red.fft", fftnm)
                except: pass
                
                # Do the low-acceleration search
                cmd = "accelsearch -harmpolish -numharm %d -sigma %f " \
                        "-zmax %d -flo %f %s"%\
                        (config.searching.lo_accel_numharm, \
                         config.searching.lo_accel_sigma, \
                         config.searching.lo_accel_zmax, \
                         config.searching.lo_accel_flo, fftnm)
                job.lo_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand" % config.searching.lo_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm+"_ACCEL_%d.cand" % config.searching.lo_accel_zmax, \
                                    job.workdir)
                    shutil.move(basenm+"_ACCEL_%d" % config.searching.lo_accel_zmax, \
                                    job.workdir)
                except: pass
        
                # Do the high-acceleration search
                cmd = "accelsearch -harmpolish -numharm %d -sigma %f " \
                        "-zmax %d -flo %f %s"%\
                        (config.searching.hi_accel_numharm, \
                         config.searching.hi_accel_sigma, \
                         config.searching.hi_accel_zmax, \
                         config.searching.hi_accel_flo, fftnm)
                job.hi_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand" % config.searching.hi_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm+"_ACCEL_%d.cand" % config.searching.hi_accel_zmax, \
                                    job.workdir)
                    shutil.move(basenm+"_ACCEL_%d" % config.searching.hi_accel_zmax, \
                                    job.workdir)
                except: pass

                # Move the .inf files
                try:
                    shutil.move(infnm, job.workdir)
                except: pass
                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                except: pass
                try:
                    os.remove(fftnm)
                except: pass

            if config.searching.use_subbands:
                if config.searching.fold_rawdata:
                    # Subband files are no longer needed
                    shutil.rmtree(os.path.join(job.tempdir, 'subbands'))
                else:
                    # Move subbands to workdir
                    for sub in glob.glob(os.path.join(job.tempdir, 'subbands', "*")):
                        shutil.move(sub, os.path.join(job.workdir, 'subbands'))

    # Make the single-pulse plots
    basedmb = job.basefilenm+"_DM"
    basedme = ".singlepulse "
    # The following will make plots for DM ranges:
    #    0-110, 100-310, 300-1000+
    dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
               basedmb+"[0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"10[0-9].[0-9][0-9]"+basedme,
               basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"30[0-9].[0-9][0-9]"+basedme,
               basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme]
    dmrangestrs = ["0-110", "100-310", "300-1000+"]
    psname = job.basefilenm+"_singlepulse.ps"
    for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
        dmfiles = []
        for dmg in dmglob.split():
            dmfiles += glob.glob(dmg.strip())
        # Check that there are matching files and they are not all empty
        if dmfiles and sum([os.path.getsize(f) for f in dmfiles]):
            cmd = 'single_pulse_search.py -t %f -g "%s"' % \
                (config.searching.singlepulse_plot_SNR, dmglob)
            job.singlepulse_time += timed_execute(cmd)
            os.rename(psname,
                        job.basefilenm+"_DMs%s_singlepulse.ps" % dmrangestr)

    # Sift through the candidates to choose the best to fold
    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d" % config.searching.lo_accel_zmax))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, config.searching.numhits_to_fold,
                                                    dmstrs, config.searching.low_DM_cutoff)

    hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d" % config.searching.hi_accel_zmax))
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, config.searching.numhits_to_fold,
                                                    dmstrs, config.searching.low_DM_cutoff)

    all_accel_cands = lo_accel_cands + hi_accel_cands
    if len(all_accel_cands):
        all_accel_cands = sifting.remove_harmonics(all_accel_cands)
        # Note:  the candidates will be sorted in _sigma_ order, not _SNR_!
        all_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(all_accel_cands, job.basefilenm+".accelcands")
        # Moving of results to resultsdir now happens in clean_up(...)
        # shutil.copy(job.basefilenm+".accelcands", job.outputdir)

    job.sifting_time = time.time() - job.sifting_time

    # Fold the best candidates
    cands_folded = 0
    for cand in all_accel_cands:
        if cands_folded == config.searching.max_cands_to_fold:
            break
        if cand.sigma >= config.searching.to_prepfold_sigma:
            job.folding_time += timed_execute(get_folding_command(cand, job))
            cands_folded += 1
    job.num_cands_folded = cands_folded

    # Now step through the .ps files and convert them to .png and gzip them

    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        # The '[0]' appended to the end of psfile is to convert only the 1st page
        timed_execute("convert -quality 90 %s -background white -flatten -rotate 90 +matte %s" % \
                            (psfile+"[0]", psfile[:-3]+".png"))
        timed_execute("gzip "+psfile)
Example 10
# Try to read the .inf files first: _if_ they are present, all of
# them should be there.  (If accelsearch finds no candidates, we get
# no ACCEL files.)
inffiles = glob.glob(globinf)
candfiles = glob.glob(globaccel)
# Check to see if this is from a short search
if len(re.findall("_[0-9][0-9][0-9]M_", inffiles[0])):
    dmstrs = [x.split("DM")[-1].split("_")[0] for x in candfiles]
else:
    dmstrs = [x.split("DM")[-1].split(".inf")[0] for x in inffiles]
dms = map(float, dmstrs)
dms.sort()
dmstrs = ["%.2f"%x for x in dms]

# Read in all the candidates
cands = sifting.read_candidates(candfiles)

# Remove candidates that are duplicated in other ACCEL files
if len(cands):
    cands = sifting.remove_duplicate_candidates(cands)

# Remove candidates with DM problems
if len(cands):
    cands = sifting.remove_DM_problems(cands, min_num_DMs, dmstrs, low_DM_cutoff)

# Remove candidates that are harmonically related to each other
# Note:  this includes only a small set of harmonics
if len(cands):
    cands = sifting.remove_harmonics(cands)

# Write candidates to STDOUT
Example 11
def main(fil_filenm, workdir):

    # Change to the specified working directory
    os.chdir(workdir)

    # Get information on the observation and the job
    job = obs_info(fil_filenm)
    if job.T < low_T_to_search:
        print "The observation is too short (%.2f s) to search." % job.T
        sys.exit()
    job.total_time = time.time()

    # Use whatever .zaplist is found in the current directory
    default_zaplist = glob.glob("*.zaplist")[0]

    # Make sure the output directory (and parent directories) exist
    try:
        os.makedirs(job.outputdir)
    except:
        pass

    # Create a directory to hold all the subbands
    if use_subbands:
        try:
            os.makedirs("subbands")
        except:
            pass

    print "\nBeginning PALFA search of '%s'" % job.fil_filenm
    print "UTC time is:  %s" % (time.asctime(time.gmtime()))

    # rfifind the filterbank file
    cmd = "rfifind -time %.17g -o %s %s > %s_rfifind.out"%\
          (rfifind_chunk_time, job.basefilenm,
           job.fil_filenm, job.basefilenm)
    job.rfifind_time += timed_execute(cmd)
    maskfilenm = job.basefilenm + "_rfifind.mask"
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)

    # Iterate over the stages of the overall de-dispersion plan
    dmstrs = []
    for ddplan in ddplans:

        # Make a downsampled filterbank file if we are not using subbands
        if not use_subbands:
            if ddplan.downsamp > 1:
                cmd = "downsample_filterbank.py %d %s" % (ddplan.downsamp,
                                                          job.fil_filenm)
                job.downsample_time += timed_execute(cmd)
                fil_filenm = job.fil_filenm[:job.fil_filenm.find(".fil")] + \
                             "_DS%d.fil"%ddplan.downsamp
            else:
                fil_filenm = job.fil_filenm

        # Iterate over the individual passes through the .fil file
        for passnum in range(ddplan.numpasses):
            subbasenm = "%s_DM%s" % (job.basefilenm, ddplan.subdmlist[passnum])

            if use_subbands:
                # Create a set of subbands
                cmd = "prepsubband -sub -subdm %s -downsamp %d -nsub %d -mask %s -o subbands/%s %s > %s.subout"%\
                      (ddplan.subdmlist[passnum], ddplan.sub_downsamp,
                       ddplan.numsub, maskfilenm, job.basefilenm,
                       job.fil_filenm, subbasenm)
                job.subbanding_time += timed_execute(cmd)

                # Now de-disperse using the subbands
                cmd = "prepsubband -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d -numout %d -o %s subbands/%s.sub[0-9]* > %s.prepout"%\
                      (ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                       ddplan.dmsperpass, ddplan.dd_downsamp, job.N/ddplan.downsamp,
                       job.basefilenm, subbasenm, subbasenm)
                job.dedispersing_time += timed_execute(cmd)

            else:  # Not using subbands
                cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -numdms %d -numout %d -o %s %s"%\
                      (maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                       ddplan.dmsperpass, job.N/ddplan.downsamp,
                       job.basefilenm, fil_filenm)
                job.dedispersing_time += timed_execute(cmd)

            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)
                basenm = job.basefilenm + "_DM" + dmstr
                datnm = basenm + ".dat"
                fftnm = basenm + ".fft"
                infnm = basenm + ".inf"

                # Do the single-pulse search
                cmd = "single_pulse_search.py -p -m %f -t %f %s"%\
                      (singlepulse_maxwidth, singlepulse_threshold, datnm)
                job.singlepulse_time += timed_execute(cmd)

                # FFT, zap, and de-redden
                cmd = "realfft %s" % datnm
                job.FFT_time += timed_execute(cmd)
                cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
                      (default_zaplist, job.baryv, fftnm)
                job.FFT_time += timed_execute(cmd)
                cmd = "rednoise %s" % fftnm
                job.FFT_time += timed_execute(cmd)
                try:
                    os.rename(basenm + "_red.fft", fftnm)
                except:
                    pass

                # Do the low-acceleration search
                cmd = "accelsearch -locpow -harmpolish -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, fftnm)
                job.lo_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm + "_ACCEL_%d.txtcand" % lo_accel_zmax)
                except:
                    pass

                # Do the high-acceleration search
                cmd = "accelsearch -locpow -harmpolish -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
                job.hi_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm + "_ACCEL_%d.txtcand" % hi_accel_zmax)
                except:
                    pass

                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                    os.remove(fftnm)
                except:
                    pass

    # Make the single-pulse plots
    basedmb = job.basefilenm + "_DM"
    basedme = ".singlepulse "
    # The following will make plots for DM ranges:
    #    0-110, 100-310, 300-1000+
    dmglobs = [
        basedmb + "[0-9].[0-9][0-9]" + basedme + basedmb +
        "[0-9][0-9].[0-9][0-9]" + basedme + basedmb + "10[0-9].[0-9][0-9]" +
        basedme, basedmb + "[12][0-9][0-9].[0-9][0-9]" + basedme + basedmb +
        "30[0-9].[0-9][0-9]" + basedme,
        basedmb + "[3-9][0-9][0-9].[0-9][0-9]" + basedme + basedmb +
        "1[0-9][0-9][0-9].[0-9][0-9]" + basedme
    ]
    dmrangestrs = ["0-110", "100-310", "300-1000+"]
    psname = job.basefilenm + "_singlepulse.ps"
    for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
        cmd = 'single_pulse_search.py -t %f -g "%s"' % \
              (singlepulse_plot_SNR, dmglob)
        job.singlepulse_time += timed_execute(cmd)
        try:
            os.rename(psname,
                      job.basefilenm + "_DMs%s_singlepulse.ps" % dmrangestr)
        except:
            pass

    # Sift through the candidates to choose the best to fold

    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(
        glob.glob("*ACCEL_%d" % lo_accel_zmax))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands,
                                                    numhits_to_fold, dmstrs,
                                                    low_DM_cutoff)

    hi_accel_cands = sifting.read_candidates(
        glob.glob("*ACCEL_%d" % hi_accel_zmax))
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands,
                                                    numhits_to_fold, dmstrs,
                                                    low_DM_cutoff)

    all_accel_cands = lo_accel_cands + hi_accel_cands
    if len(all_accel_cands):
        all_accel_cands = sifting.remove_harmonics(all_accel_cands)
        # Note:  the candidates will be sorted in _sigma_ order, not _SNR_!
        all_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(all_accel_cands, job.basefilenm + ".accelcands")

    try:
        cmd = "cp *.accelcands " + job.outputdir
        os.system(cmd)
    except:
        pass

    job.sifting_time = time.time() - job.sifting_time

    # Fold the best candidates

    cands_folded = 0
    for cand in all_accel_cands:
        if cands_folded == max_cands_to_fold:
            break
        if cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(
                get_folding_command(cand, job, ddplans))
            cands_folded += 1

    # Now step through the .ps files and convert them to .png and gzip them

    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        if "singlepulse" in psfile:
            # For some reason the singlepulse files don't transform nicely...
            epsfile = psfile.replace(".ps", ".eps")
            os.system("eps2eps " + psfile + " " + epsfile)
            os.system("pstoimg -density 100 -crop a " + epsfile)
            try:
                os.remove(epsfile)
            except:
                pass
        else:
            os.system("pstoimg -density 100 -flip cw " + psfile)
        os.system("gzip " + psfile)

    # NOTE:  need to add database commands

    # Tar up the results files

    tar_suffixes = [
        "_ACCEL_%d.tgz" % lo_accel_zmax,
        "_ACCEL_%d.tgz" % hi_accel_zmax,
        "_ACCEL_%d.cand.tgz" % lo_accel_zmax,
        "_ACCEL_%d.cand.tgz" % hi_accel_zmax, "_singlepulse.tgz", "_inf.tgz",
        "_pfd.tgz", "_bestprof.tgz"
    ]
    tar_globs = [
        "*_ACCEL_%d" % lo_accel_zmax,
        "*_ACCEL_%d" % hi_accel_zmax,
        "*_ACCEL_%d.cand" % lo_accel_zmax,
        "*_ACCEL_%d.cand" % hi_accel_zmax, "*.singlepulse", "*_DM[0-9]*.inf",
        "*.pfd", "*.pfd.bestprof"
    ]
    for (tar_suffix, tar_glob) in zip(tar_suffixes, tar_globs):
        tf = tarfile.open(job.basefilenm + tar_suffix, "w:gz")
        for infile in glob.glob(tar_glob):
            tf.add(infile)
            os.remove(infile)
        tf.close()

    # And finish up

    job.total_time = time.time() - job.total_time
    print "\nFinished"
    print "UTC time is:  %s" % (time.asctime(time.gmtime()))

    # Write the job report

    job.write_report(job.basefilenm + ".report")
    job.write_report(os.path.join(job.outputdir, job.basefilenm + ".report"))

    # Copy all the important stuff to the output directory
    try:
        cmd = "cp *rfifind.[bimors]* *.ps.gz *.tgz *.png " + job.outputdir
        os.system(cmd)
    except:
        pass
Example 12

def main(filename, workdir):

    # Change to the specified working directory
    os.chdir(workdir)

    # Get information on the observation and the job
    job = obs_info(filename)
    if job.T < low_T_to_search:
        print "The observation is too short (%.2f s) to search."%job.T
        sys.exit()
    job.total_time = time.time()
    
    # Use whatever .zaplist is found in the current directory
    default_zaplist = glob.glob("*.zaplist")[0]

    # Create a directory to hold all the subbands
    if use_subbands:
        try:
            os.makedirs("subbands")
        except: pass
    
    print "\nBeginning NUPPI search of '%s'"%job.filename
    print "UTC time is:  %s"%(time.asctime(time.gmtime()))

    # rfifind the psrfits file
    cmd = "rfifind -zapchan %s -psrfits -timesig %.2f -freqsig %.2f -time %.17g -o %s %s > %s_rfifind.out"%\
          (bad_chans, rfifind_timesig, rfifind_freqsig, rfifind_chunk_time, job.basefilename,
           job.filename, job.basefilename)
    job.rfifind_time += timed_execute(cmd)
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)
    
    # Iterate over the stages of the overall de-dispersion plan
    dmstrs = []
    for ddplan in ddplans:

        # Iterate over the individual passes through the .fil file
        for passnum in range(ddplan.numpasses):
            subbasenm = "%s_DM%s"%(job.basefilename, ddplan.subdmlist[passnum])

            if use_subbands:
                # Create a set of subbands
                cmd = "prepsubband -psrfits -sub -subdm %s -downsamp %d -nsub %d -mask %s -o subbands/%s %s > %s.subout"%\
                      (ddplan.subdmlist[passnum], ddplan.sub_downsamp,
                       ddplan.numsub, job.maskfilenm, job.basefilename,
                       job.filename, subbasenm)
                job.subbanding_time += timed_execute(cmd)
            
                # Now de-disperse using the subbands
                cmd = "prepsubband -psrfits -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d -numout %d -o %s subbands/%s.sub[0-9]* > %s.prepout"%\
                      (ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                       ddplan.dmsperpass, ddplan.dd_downsamp, job.N/ddplan.downsamp,
                       job.basefilename, subbasenm, subbasenm)
                job.dedispersing_time += timed_execute(cmd)

            else:  # Not using subbands
                cmd = "prepsubband -psrfits -downsamp %d -mask %s -lodm %.2f -dmstep %.2f -numdms %d -numout %d -o %s %s"%\
                      (ddplan.downsamp, job.maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                       ddplan.dmsperpass, job.N/ddplan.downsamp,
                       job.basefilename, filename)
                job.dedispersing_time += timed_execute(cmd)
            
            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)
                basenm = job.basefilename+"_DM"+dmstr
                datnm = basenm+".dat"
                infnm = basenm+".inf"

                # Do the single-pulse search
                cmd = "single_pulse_search.py -p -m %f -t %f %s"%\
                      (singlepulse_maxwidth, singlepulse_threshold, datnm)
                #if float(dmstr) < MAX_DM_2_ZAP_SP:
                #    cmd += " -z"
                if use_fast_SP:
                    cmd += " -f"
                job.singlepulse_time += timed_execute(cmd)

                
                # Do the low-acceleration search
                cmd = "accelsearch -harmpolish -zaplist %s -baryv %.6g -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (default_zaplist, job.baryv, lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, datnm)
                job.lo_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand"%lo_accel_zmax)
                except: pass
        
                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                except: pass

    # Make the single-pulse plots
    basedmb = job.basefilename+"_DM"
    basedme = ".singlepulse "
    # The following will make plots for DM ranges:
    #    0-120, 90-340, 280-1000+
    dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
               basedmb+"[0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"10[0-9].[0-9][0-9]"+basedme,
               basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"30[0-9].[0-9][0-9]"+basedme,
               basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme]
    dmrangestrs = ["0-120", "90-340", "280-1000+"]
    psname = job.basefilename+"_singlepulse.ps"
    for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
        cmd = 'single_pulse_search.py -t %f -g "%s"' % \
              (singlepulse_plot_SNR, dmglob)
        job.singlepulse_time += timed_execute(cmd)
        try:
            os.rename(psname,
                      job.basefilename+"_DMs%s_singlepulse.ps"%dmrangestr)
        except: pass

    # Sift through the candidates to choose the best to fold
    
    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%lo_accel_zmax))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold,
                                                    dmstrs, low_DM_cutoff)


    all_accel_cands = lo_accel_cands 
    if len(all_accel_cands):
        all_accel_cands = sifting.remove_harmonics(all_accel_cands)
        # Note:  the candidates will be sorted in _sigma_ order, not _SNR_!
        all_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(all_accel_cands, job.basefilename+".accelcands")

    job.sifting_time = time.time() - job.sifting_time

    # Fold the best candidates

    cands_folded = 0
    for cand in all_accel_cands:
        if cands_folded == max_cands_to_fold:
            break
        if cand.sigma > to_prepfold_sigma:
            print get_folding_command(cand, job, ddplans)
            job.folding_time += timed_execute(get_folding_command(cand, job, ddplans))
            cands_folded += 1

    # Now step through the .ps files and convert them to .png and gzip them

    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        if "singlepulse" in psfile:
            # For some reason the singlepulse files don't transform nicely...
            epsfile = psfile.replace(".ps", ".eps")
            os.system("eps2eps "+psfile+" "+epsfile)
            os.system("pstoimg -density 100 -crop a "+epsfile)
            try:
                os.remove(epsfile)
            except: pass
        else:
            os.system("pstoimg -density 100 -flip cw "+psfile)
        os.system("gzip "+psfile)
    
    # And finish up

    job.total_time = time.time() - job.total_time
    print "\nFinished"
    print "UTC time is:  %s"%(time.asctime(time.gmtime()))

    # Write the job report

    job.write_report(job.basefilename+".report")
Example 13
def main_process_bon(bon_filenm_long, cpu_id, box):

    # 
    cpu_idx = int(cpu_id)

    c = bon_filenm_long.rindex("/")
    bon_filenm = bon_filenm_long[c+1:]
    print bon_filenm

    workdir="/data/BON/work/%s"%bon_filenm.rstrip(".fbk")

    # Make sure the temp directory exist
    try:
        os.makedirs(workdir)
    except: pass

    # Change to the specified working directory
    os.chdir(workdir)

    # Copy data
    cmd = "scp -p root@clairvaux:/data?/%s_*.fbk ."%(bon_filenm_long.rstrip(".fbk"))
    print cmd
    os.system(cmd)
   
    # Convert BON file to sigproc format - need .i file
    print "Join BON .fbk files\n"
    cmd = "taskset -c %d joinfbk %s_*.fbk"%(cpu_idx,bon_filenm.rstrip(".fbk"))
    print cmd
    os.system(cmd)

    fil_filenm = bon_filenm
    #fil_filenm = fil_filenm.rstrip(".fb")+".sig"

    # Get information on the observation and the job
    job = obs_info(fil_filenm, box)
    if job.T < low_T_to_search:
        print "The observation is too short (%.2f s) to search."%job.T
        sys.exit()

    ddplans = []
    if (job.dt<0.000048 and job.nchans==512):  # BON 32us and 0.25 Mhz or 512 channels over 128MHz band
        # The values here are:       lodm dmstep dms/call #calls #subbands downsamp
	ddplans.append(dedisp_plan(   0.0,   0.2,     210,     1,       512,       1))
        ddplans.append(dedisp_plan(  42.0,   0.2,     220,     1,       512,       1))
	ddplans.append(dedisp_plan(  86.0,   0.2,     221,     1,       512,       1))
	ddplans.append(dedisp_plan( 130.2,   0.3,     189,     1,       512,       1))
	ddplans.append(dedisp_plan( 186.9,   0.3,     190,     1,       512,       1))
	ddplans.append(dedisp_plan( 243.9,   0.5,     189,     1,       512,       2))
	ddplans.append(dedisp_plan( 338.4,   0.5,     189,     1,       512,       2))
	ddplans.append(dedisp_plan( 432.9,   1.0,     211,     1,       512,       4))
	ddplans.append(dedisp_plan( 643.9,   1.0,     210,     1,       512,       8))
	ddplans.append(dedisp_plan( 853.9,   3.0,     130,     1,       512,       8))

    elif (job.dt<0.000048 and job.nchans==256):  # BON 32us and 0.25 Mhz or 256 channels over 64MHz band
        # The values here are:       lodm dmstep dms/call #calls #subbands downsamp
	ddplans.append(dedisp_plan(   0.0,   0.2,     210,     1,       256,       1))
        ddplans.append(dedisp_plan(  42.0,   0.2,     220,     1,       256,       1))
	ddplans.append(dedisp_plan(  86.0,   0.2,     221,     1,       256,       1))
	ddplans.append(dedisp_plan( 130.2,   0.3,     189,     1,       256,       1))
	ddplans.append(dedisp_plan( 186.9,   0.3,     190,     1,       256,       1))
	ddplans.append(dedisp_plan( 243.9,   0.5,     189,     1,       256,       2))
	ddplans.append(dedisp_plan( 338.4,   0.5,     189,     1,       256,       2))
	ddplans.append(dedisp_plan( 432.9,   1.0,     211,     1,       256,       4))
	ddplans.append(dedisp_plan( 643.9,   1.0,     210,     1,       256,       8))
	ddplans.append(dedisp_plan( 853.9,   3.0,     130,     1,       256,       8))

    else: # faster option : for 64us and 1MHz or 128 channels
        # The values here are:       lodm dmstep dms/call #calls #subbands downsamp
        ddplans.append(dedisp_plan(   0.0,   0.3,     212,     1,       128,       1))
        ddplans.append(dedisp_plan(  63.7,   0.5,      75,     1,       128,       1))
        ddplans.append(dedisp_plan( 101.2,   1.0,      89,     1,       128,       2))
        ddplans.append(dedisp_plan( 190.2,   3.0,      81,     1,       128,       4))
        ddplans.append(dedisp_plan( 433.2,   5.0,     155,     1,       128,       8))
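    # Sanity check on the plans above: each plan's DM coverage ends where the
    # next begins, e.g. 0.0 + 0.2*210 = 42.0, 42.0 + 0.2*220 = 86.0, and
    # 432.9 + 1.0*211 = 643.9.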
    
    job.total_time = time.time()
    
    # Use whatever .zaplist is found in the current directory
    #default_zaplist = glob.glob("*.zaplist")[0]
    default_zaplist = "/data/BON/BON.zaplist"

    # Make sure the output directory (and parent directories) exist
    try:
        os.makedirs(job.outputdir)
    except: pass

    # Create a directory to hold all the subbands
    if use_subbands:
        try:
            os.makedirs("subbands")
        except: pass
    
    print "\nBeginning BON search of '%s'"%job.fil_filenm
    print "UTC time is:  %s"%(time.asctime(time.gmtime()))

    # rfifind the filterbank file
    cmd = "taskset -c %d rfifind -filterbank -time %.17g -o %s %s > %s_rfifind.out"%\
          (cpu_idx, rfifind_chunk_time, job.basefilenm,
           job.fil_filenm, job.basefilenm)
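    # For the high-resolution, low-frequency BON configurations, re-issue
    # the command with a few channels zapped (assumption: channels 106-108
    # carry persistent RFI in that band).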
    	   
    if (job.dt<0.000048 and job.nchans>=256 and job.fch1<1500.0):
        cmd = "taskset -c %d rfifind -filterbank -zapchan 106,107,108 -time %.17g -o %s %s > %s_rfifind.out"%\
              (cpu_idx, rfifind_chunk_time, job.basefilenm,
               job.fil_filenm, job.basefilenm)
    job.rfifind_time += timed_execute(cmd)
    maskfilenm = job.basefilenm + "_rfifind.mask"
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)
    
    # Iterate over the stages of the overall de-dispersion plan
    dmstrs = []
    for ddplan in ddplans:

        # Iterate over the individual passes through the .fil file
        for passnum in range(ddplan.numpasses):
            subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])

            if use_subbands:
                # Create a set of subbands
                cmd = "taskset -c %d prepsubband -filterbank -sub -subdm %s -downsamp %d -nsub %d -mask %s -o subbands/%s %s > %s.subout"%\
                      (cpu_idx, ddplan.subdmlist[passnum], ddplan.sub_downsamp,
                       ddplan.numsub, maskfilenm, job.basefilenm,
                       job.fil_filenm, subbasenm)
                job.subbanding_time += timed_execute(cmd)
            
                # Now de-disperse using the subbands
                cmd = "taskset -c %d prepsubband -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d -numout %d -o %s subbands/%s.sub[0-9]* > %s.prepout"%\
                      (cpu_idx, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                       ddplan.dmsperpass, ddplan.dd_downsamp, job.N/ddplan.downsamp,
                       job.basefilenm, subbasenm, subbasenm)
                job.dedispersing_time += timed_execute(cmd)

            else:  # Not using subbands
                cmd = "taskset -c %d prepsubband -filterbank -mask %s -lodm %.2f -dmstep %.2f -numdms %d -numout %d -o %s %s -downsamp %d -nsub %d > /dev/null"%\
                      (cpu_idx, maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                       ddplan.dmsperpass, job.N/ddplan.downsamp,
                       job.basefilenm, fil_filenm, ddplan.downsamp, ddplan.numsub)
                job.dedispersing_time += timed_execute(cmd)
            
            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)
                basenm = job.basefilenm+"_DM"+dmstr
                datnm = basenm+".dat"
                fftnm = basenm+".fft"
                infnm = basenm+".inf"

                # Do the single-pulse search
                cmd = "taskset -c %d single_pulse_search.py -p -t %f %s > /dev/null"%\
                      (cpu_idx, singlepulse_threshold, datnm)
                #cmd = "taskset -c %d single_pulse_search.py -p -m %f -t %f %s > /dev/null"%\
                #      (cpu_idx, singlepulse_maxwidth, singlepulse_threshold, datnm)
                job.singlepulse_time += timed_execute(cmd)
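                # -t sets the S/N threshold; -p presumably suppresses the
                # per-file plot, since a combined plot is made from all the
                # .singlepulse files after the DM loop.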
                #try:
                #    shutil.copy(basenm+".singlepulse", job.outputdir)
                #except: pass

                job.nb_sp += count_sp(basenm+".singlepulse")

                # FFT, zap, and de-redden
                #cmd = "realfft %s"%datnm
                #job.FFT_time += timed_execute(cmd)
                #cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
                #      (default_zaplist, job.baryv, fftnm)
                #job.FFT_time += timed_execute(cmd)
                #cmd = "rednoise %s"%fftnm
                #job.FFT_time += timed_execute(cmd)
                #try:
                #    os.rename(basenm+"_red.fft", fftnm)
                #except: pass
                
                # Do the low-acceleration search
	        cmd = "taskset -c %d accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s -zaplist %s -baryv %.6g > /dev/null"%\
                      (cpu_idx, lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, datnm,default_zaplist, job.baryv)
                job.lo_accelsearch_time += timed_execute(cmd)
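                # zmax is the maximum Fourier-domain drift (in bins) that
                # accelsearch allows a signal; the bulky .txtcand file it
                # also writes is removed below since the pipeline never
                # reads it.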
                
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand"%lo_accel_zmax)
                except: pass
                
                #try:  # This prevents errors if there are no cand files to copy
                #    shutil.copy(basenm+"_ACCEL_%d.cand"%lo_accel_zmax, job.outputdir)
                #    shutil.copy(basenm+"_ACCEL_%d"%lo_accel_zmax, job.outputdir)
                #except: pass
        
                # Do the high-acceleration search
                #cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                #      (hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
                #job.hi_accelsearch_time += timed_execute(cmd)
                #try:
                #    os.remove(basenm+"_ACCEL_%d.txtcand"%hi_accel_zmax)
                #except: pass
                #try:  # This prevents errors if there are no cand files to copy
                #    shutil.copy(basenm+"_ACCEL_%d.cand"%hi_accel_zmax, job.outputdir)
                #    shutil.copy(basenm+"_ACCEL_%d"%hi_accel_zmax, job.outputdir)
                #except: pass

                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                    os.remove(fftnm)
                except: pass

    # Make the single-pulse plot
    cmd = "taskset -c %d single_pulse_search.py -t %f %s/*.singlepulse"%(cpu_idx, singlepulse_plot_SNR,workdir)
    job.singlepulse_time += timed_execute(cmd)

    # Sift through the candidates to choose the best to fold
    
    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(glob.glob("%s/*ACCEL_%d"%(workdir,lo_accel_zmax)))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold,
                                                    dmstrs, low_DM_cutoff)
#
#    hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%hi_accel_zmax))
#    if len(hi_accel_cands):
#        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
#    if len(hi_accel_cands):
#        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, numhits_to_fold,
#                                                    dmstrs, low_DM_cutoff)

    all_accel_cands = lo_accel_cands #+ hi_accel_cands
    if len(all_accel_cands):
        all_accel_cands = sifting.remove_harmonics(all_accel_cands)
        # Note:  the candidates will be sorted in _sigma_ order, not _SNR_!
        all_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(all_accel_cands, job.basefilenm+".accelcands")

    try:
        cmd = "cp %s/*.accelcands "%workdir +job.outputdir
        os.system(cmd)
    except: pass

    job.sifting_time = time.time() - job.sifting_time

    # Fold the best candidates

    cands_folded = 0
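    # all_accel_cands is already in decreasing-sigma order (sorted above),
    # so this folds the most significant candidates first: at most
    # max_cands_to_fold of them, and only those above to_prepfold_sigma.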
    for cand in all_accel_cands:
        if cands_folded == max_cands_to_fold:
            break
        if cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute("taskset -c %d "%cpu_idx + get_folding_command(cand, job, ddplans, maskfilenm))
            cands_folded += 1

    # Now step through the .ps files and convert them to .png and gzip them

    psfiles = glob.glob("%s/*.ps"%(workdir))
    for psfile in psfiles:
        if "singlepulse" in psfile:
            # For some reason the singlepulse files don't transform nicely...
            #epsfile = psfile.replace(".ps", ".eps")
            #os.system("eps2eps "+psfile+" "+epsfile)
            os.system("taskset -c %d pstoimg -density 300 -flip cw -scale 0.9 -geometry 640x480 "%cpu_idx +psfile)
            #try:
            #    os.remove(epsfile)
            #except: pass
        else:
            os.system("taskset -c %d pstoimg -density 100 -flip cw -scale 0.9 -geometry 640x480 "%cpu_idx +psfile)
        #os.system("gzip "+psfile)
    
    # Compress Single-pulse data
    cmd = "tar cfj %s_sp.tar.bz2 *.singlepulse *_singlepulse.ps *.inf "%job.basefilenm
    os.system(cmd)

    # Compress Cands data
    cmd = "tar cfj %s_accel.tar.bz2 *ACCEL_0"%job.basefilenm
    os.system(cmd)
    cmd = "tar cfj %s_accel_bin.tar.bz2 *ACCEL_0.cand"%job.basefilenm
    os.system(cmd)

    # Compress RFI data
    cmd = "tar cfj %s_rfifind.tar.bz2 %s_rfifind*"%(job.basefilenm,job.basefilenm)
    os.system(cmd)

    # Compress Candidates PostScript 
    cmd = "tar cfj %s_pfd_ps.tar.bz2 *pfd.ps"%job.basefilenm
    os.system(cmd)

    # NOTE:  need to add database commands

    # And finish up

    job.total_time = time.time() - job.total_time
    print "\nFinished"
    print "UTC time is:  %s"%(time.asctime(time.gmtime()))

    # Write the job report

    job.write_report(job.basefilenm+".report")
    job.write_report(os.path.join(job.outputdir, job.basefilenm+".report"))

    # Copy all the important stuff to the output directory
    try:
        cmd = "cp *.png *.bz2 "+job.outputdir # Not enough space to save .pfd file
        #cmd = "cp *rfifind.[bimors]* *.pfd *.ps.gz *.png "+job.outputdir
        os.system(cmd)
    except: pass
    
    # Send results to clairvaux
    cmd = "scp -rp %s root@clairvaux:/data6/results/%s/."%(job.outputdir,box)
    print cmd
    os.system(cmd)
    
    # Remove all temporary stuff
    cmd = "cd; rm -rf %s"%workdir
    #os.system(cmd)
    
    # Return nb of cands found
    return len(all_accel_cands)
Example 14
def main(fits_filenm, workdir, jobid, zaplist, ddplans):
    # Change to the specified working directory
    os.chdir(workdir)
    # Set up theano compile dir (UBC_AI rating uses theano)
    theano_compiledir = os.path.join(workdir, "theano_compile")
    os.mkdir(theano_compiledir)
    os.putenv("THEANO_FLAGS", "compiledir=%s"%theano_compiledir)

    # Get information on the observation and the job
    job = obs_info(fits_filenm)
    if job.raw_T < low_T_to_search:
        print "The observation is too short (%.2f s) to search."%job.raw_T
        sys.exit(1)
    job.total_time = time.time()
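    # job.dt is presumably the sample time in microseconds here: 163.84 us
    # data gets the "slow" de-dispersion plans, faster data the "fast" ones.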
    if job.dt == 163.84:
        ddplans = ddplans[str(job.nchans)+"slow"]
    else:
        ddplans = ddplans[str(job.nchans)+"fast"]
    
    # Use the specified zaplist if provided.  Otherwise use whatever is in
    # this directory
    if zaplist is None:
        zaplist = glob.glob("*.zaplist")[0]
    
    # Create a checkpoint file
    if jobid is not None:
        checkpoint = os.path.join(checkpointdir,
                                  job.basefilenm+"."+jobid+".checkpoint")
    else:
        checkpoint = os.path.join(checkpointdir,
                                  job.basefilenm+".checkpoint")
    
    # Make sure the output directory (and parent directories) exist
    try:
        os.makedirs(job.outputdir)
        os.chmod(job.outputdir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH)
    except: pass

    # Make sure the tmp directory (in a tmpfs mount) exists
    if jobid is not None: tmpdir = os.path.join(basetmpdir, job.basefilenm,
                                                jobid, "tmp")
    else: tmpdir = os.path.join(basetmpdir, job.basefilenm, "tmp")
    try:
        os.makedirs(tmpdir)
    except: pass

    print "\nBeginning GBNCC search of '%s'"%job.fits_filenm
    print "UTC time is:  %s"%(time.asctime(time.gmtime()))

    rfifindout=job.basefilenm+"_rfifind.out"
    rfifindmask=job.basefilenm+"_rfifind.mask"

    if not os.path.exists(rfifindout) or not os.path.exists(rfifindmask):
  
        # rfifind the filterbank file
        cmd = "rfifind -zapchan 2456:3277 -time %.17g -o %s %s > %s_rfifind.out"%\
              (rfifind_chunk_time, job.basefilenm,
               job.fits_filenm, job.basefilenm)
        job.rfifind_time += timed_execute(cmd)
    maskfilenm = job.basefilenm + "_rfifind.mask"
    ### COPYING HERE TO AID IN DEBUGGING ###
    subprocess.call("cp *rfifind.[bimors]* %s"%job.outputdir, shell=True)
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)
    
    # Iterate over the stages of the overall de-dispersion plan
    try:
        with open(checkpoint, "r") as f:
            nodenm  = f.readline().strip()
            queueid = f.readline().strip()
            prevddplan,prevpassnum = map(int, f.readline().strip().split())
    
    except IOError:
        nodenm      = "localhost"
        queueid     = "None"
        prevddplan  = 0
        prevpassnum = 0
        with open(checkpoint, "w") as f: 
            f.write("%s\n"%nodenm)
            f.write("%s\n"%queueid)
            f.write("%d %d\n"%(prevddplan,prevpassnum))
    
    for ddplan in ddplans[prevddplan:]:

        # Make a downsampled filterbank file
        if ddplan.downsamp > 1:
            cmd = "psrfits_subband -dstime %d -nsub %d -o %s_DS%d %s 2>> psrfits_subband.err"%(ddplan.downsamp, job.nchans, job.dsbasefilenm, ddplan.downsamp, job.dsbasefilenm )
            job.downsample_time += timed_execute(cmd)
            fits_filenm = job.dsbasefilenm + "_DS%d%s"%\
                          (ddplan.downsamp,job.fits_filenm[job.fits_filenm.rfind("_"):])
        else:
            fits_filenm = job.fits_filenm
        # Iterate over the individual passes through the .fil file
        for passnum in range(prevpassnum, ddplan.numpasses):
            subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])

            # Now de-disperse 
            cmd = "prepsubband -ignorechan 2456:3277 -mask %s -lodm %.2f -dmstep %.2f -nsub %d -numdms %d -numout %d -o %s/%s %s"%\
                  (maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep, ddplan.numsub,
                   ddplan.dmsperpass, job.N/ddplan.downsamp,
                   tmpdir, job.basefilenm, fits_filenm)
            job.dedispersing_time += timed_execute(cmd)
            
            # Do the single-pulse search
            cmd = "single_pulse_search.py -p -m %f -t %f %s/*.dat"%\
                (singlepulse_maxwidth, singlepulse_threshold, tmpdir)
            job.singlepulse_time += timed_execute(cmd)
            spfiles = glob.glob("%s/*.singlepulse"%tmpdir)
            for spfile in spfiles:
                try:
                    shutil.move(spfile, workdir)
                except: pass

            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                basenm = os.path.join(tmpdir, job.basefilenm+"_DM"+dmstr)
                datnm = basenm+".dat"
                fftnm = basenm+".fft"
                infnm = basenm+".inf"

                # FFT, zap, and de-redden
                cmd = "realfft %s"%datnm
                job.FFT_time += timed_execute(cmd)
                cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
                      (zaplist, job.baryv, fftnm)
                job.FFT_time += timed_execute(cmd)
                cmd = "rednoise %s"%fftnm
                job.FFT_time += timed_execute(cmd)
                try:
                    os.rename(basenm+"_red.fft", fftnm)
                except: pass
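                # realfft makes the .fft; zapbirds blanks the known birdie
                # frequencies from the zaplist (with -baryv supplying the
                # observation's barycentric velocity); rednoise whitens the
                # low-frequency spectrum, and its _red.fft output replaces
                # the original.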
                
                # Do the low-acceleration search
                cmd = "accelsearch -inmem -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, fftnm)
                job.lo_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand"%lo_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm+"_ACCEL_%d.cand"%lo_accel_zmax, workdir)
                    shutil.move(basenm+"_ACCEL_%d"%lo_accel_zmax, workdir)
                except: pass
        
                # Do the high-acceleration search
                cmd = "accelsearch -inmem -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
                job.hi_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand"%hi_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm+"_ACCEL_%d.cand"%hi_accel_zmax, workdir)
                    shutil.move(basenm+"_ACCEL_%d"%hi_accel_zmax, workdir)
                except: pass

                # Do the FFA search
                if (not float(dmstr)%0.5) and (float(dmstr) <= 1500.0):
                    cmd = "ffa.py %s"%datnm
                    job.ffa_time += timed_execute(cmd)
                    try:  # This prevents errors if there are no cand files
                        shutil.move(basenm+"_cands.ffa",workdir)
                    except:
                        pass
                else:
                    print "Skipping FFA search for DM=%s pc/cc"%dmstr

                # Move the .inf files
                try:
                    shutil.move(infnm, workdir)
                except: pass
                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                except: pass
                try:
                    os.remove(fftnm)
                except: pass

            prevpassnum += 1
            with open(checkpoint, "w") as f:
                f.write("%s\n"%nodenm)
                f.write("%s\n"%queueid)
                f.write("%d %d\n"%(prevddplan,prevpassnum))
        
        prevddplan += 1
        prevpassnum = 0
        with open(checkpoint, "w") as f:
            f.write("%s\n"%nodenm)
            f.write("%s\n"%queueid)
            f.write("%d %d\n"%(prevddplan,prevpassnum))
    
    # Make the single-pulse plots
    basedmb = job.basefilenm+"_DM"
    basedme = ".singlepulse "
    # The following will make plots for DM ranges:
    #    0-30, 20-110, 100-310, 300-1000+
    ### MAKE SURE THAT single_pulse_search.py ALWAYS OUTPUTS A .singlepulse
    ### FILE ###
    dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
               basedmb+"[012][0-9].[0-9][0-9]"+basedme,
               basedmb+"[2-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"10[0-9].[0-9][0-9]"+basedme,
               basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"30[0-9].[0-9][0-9]"+basedme,
               basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"10[0-9][0-9].[0-9][0-9]"+basedme,
               basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"20[0-9][0-9].[0-9][0-9]"+basedme,
               basedmb+"2[0-9][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"3[0-9][0-9][0-9].[0-9][0-9]"+basedme,
               ]
    dmrangestrs = ["0-30", "20-110", "100-310", "300-1100", "1000-2100", 
                   "2000-3000+"]
    psname = job.basefilenm+"_singlepulse.ps"
    for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
        cmd = 'single_pulse_search.py -t %f -g "%s"' % \
              (singlepulse_plot_SNR, dmglob)
        job.singlepulse_time += timed_execute(cmd)
        try:
            os.rename(psname,
                      job.basefilenm+"_DMs%s_singlepulse.ps"%dmrangestr)
        except: pass
    
    # Chen Karako-Argaman's single pulse rating algorithm and Chitrang Patel's single pulse waterfaller code
    mem = int(subprocess.check_output("du -chm *singlepulse | tail -n 1 | cut -d't' -f 1", stderr=subprocess.STDOUT, shell=True))
    if mem < 600: 
        path = os.path.dirname(os.path.abspath(__file__))
        path = path[:-3] + 'lib/python/singlepulse/'  
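        # path[:-3] strips a trailing "bin" from the script's directory, so
        # this assumes the script lives in <prefix>/bin with the rating code
        # under <prefix>/lib/python/singlepulse/.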
        cmd = 'python ' + path + 'rrattrap.py --inffile %s*rfifind.inf --use-configfile --use-DMplan --vary-group-size %s*.singlepulse'%(job.basefilenm, job.basefilenm)
        job.singlepulse_time += timed_execute(cmd) 
        GBNCC_wrapper_make_spd.GBNCC_wrapper('groups.txt', maskfilenm, job.fits_filenm, workdir)
    else:
        spoutfile = open('groups.txt', 'w')
        spoutfile.write('# Beam skipped because of high RFI.\n')
        spoutfile.close()

    # Sift through the candidates to choose the best to fold
    
    job.sifting_time = time.time()
    
    # Make the dmstrs list here since it may not be properly filled in the main
    # loop if we pick up from a checkpoint
    dmstrs = []
    for ddplan in ddplans:
        for passnum in range(ddplan.numpasses):
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)

    lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%lo_accel_zmax))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold,
                                                    dmstrs, low_DM_cutoff)
        
    hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%hi_accel_zmax))
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, numhits_to_fold,
                                                    dmstrs, low_DM_cutoff)

    if len(lo_accel_cands) and len(hi_accel_cands):
        lo_accel_cands, hi_accel_cands = remove_crosslist_duplicate_candidates(lo_accel_cands, hi_accel_cands)

    if len(lo_accel_cands):
        lo_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(lo_accel_cands,
                               job.basefilenm+".accelcands_Z%d"%lo_accel_zmax)
    if len(hi_accel_cands):
        hi_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(hi_accel_cands,
                               job.basefilenm+".accelcands_Z%d"%hi_accel_zmax)

    job.sifting_time = time.time() - job.sifting_time

    # FFA sifting
    job.ffa_sifting_time = time.time()
    ffa_cands = sift_ffa(job,zaplist)
    job.ffa_sifting_time = time.time() - job.ffa_sifting_time

    # Fold the best candidates

    cands_folded = 0
    for cand in lo_accel_cands.cands:
        if cands_folded == max_lo_cands_to_fold:
            break
        elif cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(get_folding_command(cand, job, ddplans, maskfilenm))
            cands_folded += 1
    cands_folded = 0
    for cand in hi_accel_cands.cands:
        if cands_folded == max_hi_cands_to_fold:
            break
        elif cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(get_folding_command(cand, job, ddplans, maskfilenm))
            cands_folded += 1

    cands_folded = 0
    for cand in ffa_cands.cands:
        if cands_folded == max_ffa_cands_to_fold:
            break
        elif cand.snr > to_prepfold_ffa_snr:
            job.folding_time += timed_execute(get_ffa_folding_command(cand,job,ddplans,maskfilenm))
            cands_folded += 1
            
    # Rate the candidates
    pfdfiles = glob.glob("*.pfd")
    for pfdfile in pfdfiles:
        status = ratings.rate_candidate(pfdfile)
        if status == 1:
            sys.stdout.write("\nWarining: Ratings failed for %s\n"%pfdfile)
            sys.stdout.flush()
    # Rate the single pulse candidates
    cmd = 'rate_spds.py --redirect-warnings --include-all *.spd'
    subprocess.call(cmd, shell=True)

    # Now step through the .ps files and convert them to .png and gzip them
    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        if "singlepulse" in psfile:
            pngfile = psfile.replace(".ps", ".png")
            subprocess.call(["convert", psfile, pngfile])
        elif "grouped" in psfile:
            pngfile = psfile.replace(".ps", ".png")
            subprocess.call(["convert", psfile, pngfile])    
        elif "spd" in psfile:
            pngfile = psfile.replace(".ps", ".png")
            subprocess.call(["convert", psfile, pngfile])           
        else:
            pngfile = psfile.replace(".ps", ".png")
            subprocess.call(["convert", "-rotate", "90", psfile, pngfile])
        os.remove(psfile)
    
    # Tar up the results files 
    tar_suffixes = ["_ACCEL_%d.tgz"%lo_accel_zmax,
                    "_ACCEL_%d.tgz"%hi_accel_zmax,
                    "_ACCEL_%d.cand.tgz"%lo_accel_zmax,
                    "_ACCEL_%d.cand.tgz"%hi_accel_zmax,
                    "_singlepulse.tgz",
                    "_inf.tgz",
                    "_pfd.tgz",
                    "_bestprof.tgz",
                    "_spd.tgz"]
    tar_globs = ["*_ACCEL_%d"%lo_accel_zmax,
                 "*_ACCEL_%d"%hi_accel_zmax,
                 "*_ACCEL_%d.cand"%lo_accel_zmax,
                 "*_ACCEL_%d.cand"%hi_accel_zmax,
                 "*.singlepulse",
                 "*_DM[0-9]*.inf",
                 "*.pfd",
                 "*.bestprof",
                 "*.spd"]
    for (tar_suffix, tar_glob) in zip(tar_suffixes, tar_globs):
        tf = tarfile.open(job.basefilenm+tar_suffix, "w:gz")
        for infile in glob.glob(tar_glob):
            tf.add(infile)
            os.remove(infile)
        tf.close()
            
    # Remove all the downsampled .fil files
    filfiles = glob.glob("*_DS?.fil") + glob.glob("*_DS??.fil")
    for filfile in filfiles:
        os.remove(filfile)

    #Remove all subbanded fits files 
    subfiles = glob.glob("*subband*.fits")
    for subfile in subfiles:
        os.remove(subfile)

    # And finish up
    job.total_time = time.time() - job.total_time
    print "\nFinished"
    print "UTC time is:  %s"%(time.asctime(time.gmtime()))

    # Write the job report
    job.write_report(job.basefilenm+".report")
    job.write_report(os.path.join(job.outputdir, job.basefilenm+".report"))
    
    # Write the diagnostics report
    diagnostics.write_diagnostics(job.basefilenm)

    # Move all the important stuff to the output directory
    subprocess.call("mv *rfifind.[bimors]*  *.accelcands* *.ffacands *.tgz *.png *.ratings *.diagnostics groups.txt *spd.rat *.report *.summary %s"%job.outputdir, shell=True)
    
    # Make a file indicating that this beam needs to be viewed
    open("%s/tobeviewed"%job.outputdir, "w").close()
                    
    # Remove the checkpointing file
    try:
        os.remove(checkpoint)
    except: pass
    
    # Remove the tmp directory (in a tmpfs mount)
    try:
        shutil.rmtree(tmpdir)
    except: pass
Example 15
#!/usr/bin/env python
import sifting, sys

if len(sys.argv) < 2:
    sys.stderr.write(
        "\nusage:  quick_prune_cands.py ACCEL_file_name [sigma]\n\n")
    sys.exit()

if len(sys.argv) == 3:
    sifting.sigma_threshold = float(sys.argv[2])

cands = sifting.read_candidates([sys.argv[1]], track=True)
cands.print_cand_summary()
cands.to_file()
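# Example invocation (hypothetical file name; the optional second argument
# overrides sifting.sigma_threshold before the candidates are read):
#   python quick_prune_cands.py Beam0123_DM45.00_ACCEL_0 10.0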
Example 16
def main(fil_filenm, workdir):

    # Change to the specified working directory
    os.chdir(workdir)

    # Get information on the observation and the job
    job = obs_info(fil_filenm)
    if job.T < low_T_to_search:
        print "The observation is too short (%.2f s) to search."%job.T
        sys.exit()
    job.total_time = time.time()
    
    # Use whatever .zaplist is found in the current directory
    default_zaplist = glob.glob("*.zaplist")[0]

    # Make sure the output directory (and parent directories) exist
    try:
        os.makedirs(job.outputdir)
    except: pass

    # Create a directory to hold all the subbands
    if use_subbands:
        try:
            os.makedirs("subbands")
        except: pass
    
    print "\nBeginning PALFA search of '%s'"%job.fil_filenm
    print "UTC time is:  %s"%(time.asctime(time.gmtime()))

    # rfifind the filterbank file
    cmd = "rfifind -time %.17g -o %s %s > %s_rfifind.out"%\
          (rfifind_chunk_time, job.basefilenm,
           job.fil_filenm, job.basefilenm)
    job.rfifind_time += timed_execute(cmd)
    maskfilenm = job.basefilenm + "_rfifind.mask"
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)
    
    # Iterate over the stages of the overall de-dispersion plan
    dmstrs = []
    for ddplan in ddplans:

        # Make a downsampled filterbank file if we are not using subbands
        if not use_subbands:
            if ddplan.downsamp > 1:
                cmd = "downsample_filterbank.py %d %s"%(ddplan.downsamp, job.fil_filenm)
                job.downsample_time += timed_execute(cmd)
                fil_filenm = job.fil_filenm[:job.fil_filenm.find(".fil")] + \
                             "_DS%d.fil"%ddplan.downsamp
            else:
                fil_filenm = job.fil_filenm

        # Iterate over the individual passes through the .fil file
        for passnum in range(ddplan.numpasses):
            subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])

            if use_subbands:
                # Create a set of subbands
                cmd = "prepsubband -sub -subdm %s -downsamp %d -nsub %d -mask %s -o subbands/%s %s > %s.subout"%\
                      (ddplan.subdmlist[passnum], ddplan.sub_downsamp,
                       ddplan.numsub, maskfilenm, job.basefilenm,
                       job.fil_filenm, subbasenm)
                job.subbanding_time += timed_execute(cmd)
            
                # Now de-disperse using the subbands
                cmd = "prepsubband -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d -numout %d -o %s subbands/%s.sub[0-9]* > %s.prepout"%\
                      (ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                       ddplan.dmsperpass, ddplan.dd_downsamp, job.N/ddplan.downsamp,
                       job.basefilenm, subbasenm, subbasenm)
                job.dedispersing_time += timed_execute(cmd)

            else:  # Not using subbands
                cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -numdms %d -numout %d -o %s %s"%\
                      (maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                       ddplan.dmsperpass, job.N/ddplan.downsamp,
                       job.basefilenm, fil_filenm)
                job.dedispersing_time += timed_execute(cmd)
            
            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)
                basenm = job.basefilenm+"_DM"+dmstr
                datnm = basenm+".dat"
                fftnm = basenm+".fft"
                infnm = basenm+".inf"

                # Do the single-pulse search
                cmd = "single_pulse_search.py -p -m %f -t %f %s"%\
                      (singlepulse_maxwidth, singlepulse_threshold, datnm)
                job.singlepulse_time += timed_execute(cmd)
                try:
                    shutil.copy(basenm+".singlepulse", job.outputdir)
                except: pass

                # FFT, zap, and de-redden
                cmd = "realfft %s"%datnm
                job.FFT_time += timed_execute(cmd)
                cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
                      (default_zaplist, job.baryv, fftnm)
                job.FFT_time += timed_execute(cmd)
                cmd = "rednoise %s"%fftnm
                job.FFT_time += timed_execute(cmd)
                try:
                    os.rename(basenm+"_red.fft", fftnm)
                except: pass
                
                # Do the low-acceleration search
                cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, fftnm)
                job.lo_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand"%lo_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.copy(basenm+"_ACCEL_%d.cand"%lo_accel_zmax, job.outputdir)
                    shutil.copy(basenm+"_ACCEL_%d"%lo_accel_zmax, job.outputdir)
                except: pass
        
                # Do the high-acceleration search
                cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
                job.hi_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand"%hi_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.copy(basenm+"_ACCEL_%d.cand"%hi_accel_zmax, job.outputdir)
                    shutil.copy(basenm+"_ACCEL_%d"%hi_accel_zmax, job.outputdir)
                except: pass

                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                    os.remove(fftnm)
                except: pass

    # Make the single-pulse plot
    cmd = "single_pulse_search.py -t %f *.singlepulse"%singlepulse_plot_SNR
    job.singlepulse_time += timed_execute(cmd)

    # Sift through the candidates to choose the best to fold
    
    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%lo_accel_zmax))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold,
                                                    dmstrs, low_DM_cutoff)

    hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%hi_accel_zmax))
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, numhits_to_fold,
                                                    dmstrs, low_DM_cutoff)

    all_accel_cands = lo_accel_cands + hi_accel_cands
    if len(all_accel_cands):
        all_accel_cands = sifting.remove_harmonics(all_accel_cands)
        # Note:  the candidates will be sorted in _sigma_ order, not _SNR_!
        all_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(all_accel_cands, job.basefilenm+".accelcands")

    try:
        cmd = "cp *.accelcands "+job.outputdir
        os.system(cmd)
    except: pass

    job.sifting_time = time.time() - job.sifting_time

    # Fold the best candidates

    cands_folded = 0
    for cand in all_accel_cands:
        if cands_folded == max_cands_to_fold:
            break
        if cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(get_folding_command(cand, job, ddplans))
            cands_folded += 1

    # Now step through the .ps files and convert them to .png and gzip them

    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        if "singlepulse" in psfile:
            # For some reason the singlepulse files don't transform nicely...
            epsfile = psfile.replace(".ps", ".eps")
            os.system("eps2eps "+psfile+" "+epsfile)
            os.system("pstoimg -density 100 -crop a "+epsfile)
            try:
                os.remove(epsfile)
            except: pass
        else:
            os.system("pstoimg -density 100 -flip cw "+psfile)
        os.system("gzip "+psfile)
    
    # NOTE:  need to add database commands

    # And finish up

    job.total_time = time.time() - job.total_time
    print "\nFinished"
    print "UTC time is:  %s"%(time.asctime(time.gmtime()))

    # Write the job report

    job.write_report(job.basefilenm+".report")
    job.write_report(os.path.join(job.outputdir, job.basefilenm+".report"))

    # Copy all the important stuff to the output directory
    try:
        cmd = "cp *rfifind.[bimors]* *.pfd *.ps.gz *.png "+job.outputdir
        os.system(cmd)
    except: pass
Example 17
def main(fil_filenm, workdir, ddplans):

    # Change to the specified working directory
    os.chdir(workdir)

    # Get information on the observation and the job
    job = obs_info(fil_filenm)
    if job.raw_T < low_T_to_search:
        print "The observation is too short (%.2f s) to search." % job.raw_T
        sys.exit()
    job.total_time = time.time()
    ddplans = ddplans[job.nchans]

    # Use whatever .zaplist is found in the current directory
    default_zaplist = glob.glob("*.zaplist")[0]

    # Make sure the output directory (and parent directories) exist
    try:
        os.makedirs(job.outputdir)
        os.chmod(job.outputdir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH)
    except:
        pass

    # Make sure the tmp directory (in a tmpfs mount) exists
    tmpdir = os.path.join(base_tmp_dir, job.basefilenm)
    try:
        os.makedirs(tmpdir)
    except:
        pass

    print "\nBeginning GBT350 driftscan search of '%s'" % job.fil_filenm
    print "UTC time is:  %s" % (time.asctime(time.gmtime()))

    # rfifind the filterbank file
    cmd = "rfifind -time %.17g -o %s %s > %s_rfifind.out" % (
        rfifind_chunk_time,
        job.basefilenm,
        job.fil_filenm,
        job.basefilenm,
    )
    job.rfifind_time += timed_execute(cmd)
    maskfilenm = job.basefilenm + "_rfifind.mask"
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)

    # Iterate over the stages of the overall de-dispersion plan
    dmstrs = []
    for ddplan in ddplans:

        # Make a downsampled filterbank file
        if ddplan.downsamp > 1:
            cmd = "downsample_filterbank.py %d %s" % (ddplan.downsamp, job.fil_filenm)
            job.downsample_time += timed_execute(cmd)
            fil_filenm = job.fil_filenm[: job.fil_filenm.find(".fil")] + "_DS%d.fil" % ddplan.downsamp
        else:
            fil_filenm = job.fil_filenm

        # Iterate over the individual passes through the .fil file
        for passnum in range(ddplan.numpasses):
            subbasenm = "%s_DM%s" % (job.basefilenm, ddplan.subdmlist[passnum])

            # Now de-disperse
            cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -numdms %d -numout %d -o %s/%s %s" % (
                maskfilenm,
                ddplan.lodm + passnum * ddplan.sub_dmstep,
                ddplan.dmstep,
                ddplan.dmsperpass,
                job.N / ddplan.downsamp,
                tmpdir,
                job.basefilenm,
                fil_filenm,
            )
            job.dedispersing_time += timed_execute(cmd)

            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)
                basenm = os.path.join(tmpdir, job.basefilenm + "_DM" + dmstr)
                datnm = basenm + ".dat"
                fftnm = basenm + ".fft"
                infnm = basenm + ".inf"

                # Do the single-pulse search
                cmd = "single_pulse_search.py -p -m %f -t %f %s" % (singlepulse_maxwidth, singlepulse_threshold, datnm)
                job.singlepulse_time += timed_execute(cmd)
                try:
                    shutil.move(basenm + ".singlepulse", workdir)
                except:
                    pass

                # FFT, zap, and de-redden
                cmd = "realfft %s" % datnm
                job.FFT_time += timed_execute(cmd)
                cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s" % (default_zaplist, job.baryv, fftnm)
                job.FFT_time += timed_execute(cmd)
                cmd = "rednoise %s" % fftnm
                job.FFT_time += timed_execute(cmd)
                try:
                    os.rename(basenm + "_red.fft", fftnm)
                except:
                    pass

                # Do the low-acceleration search
                cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s" % (
                    lo_accel_numharm,
                    lo_accel_sigma,
                    lo_accel_zmax,
                    lo_accel_flo,
                    fftnm,
                )
                job.lo_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm + "_ACCEL_%d.txtcand" % lo_accel_zmax)
                except:
                    pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm + "_ACCEL_%d.cand" % lo_accel_zmax, workdir)
                    shutil.move(basenm + "_ACCEL_%d" % lo_accel_zmax, workdir)
                except:
                    pass

                # Do the high-acceleration search
                cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s" % (
                    hi_accel_numharm,
                    hi_accel_sigma,
                    hi_accel_zmax,
                    hi_accel_flo,
                    fftnm,
                )
                job.hi_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm + "_ACCEL_%d.txtcand" % hi_accel_zmax)
                except:
                    pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm + "_ACCEL_%d.cand" % hi_accel_zmax, workdir)
                    shutil.move(basenm + "_ACCEL_%d" % hi_accel_zmax, workdir)
                except:
                    pass

                # Move the .inf files
                try:
                    shutil.move(infnm, workdir)
                except:
                    pass
                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                except:
                    pass
                try:
                    os.remove(fftnm)
                except:
                    pass

    # Make the single-pulse plots
    basedmb = job.basefilenm + "_DM"
    basedme = ".singlepulse "
    # The following will make plots for DM ranges:
    #    0-30, 20-110, 100-310, 300-1000+
    dmglobs = [
        basedmb + "[0-9].[0-9][0-9]" + basedme + basedmb + "[012][0-9].[0-9][0-9]" + basedme,
        basedmb + "[2-9][0-9].[0-9][0-9]" + basedme + basedmb + "10[0-9].[0-9][0-9]" + basedme,
        basedmb + "[12][0-9][0-9].[0-9][0-9]" + basedme + basedmb + "30[0-9].[0-9][0-9]" + basedme,
        basedmb + "[3-9][0-9][0-9].[0-9][0-9]" + basedme + basedmb + "1[0-9][0-9][0-9].[0-9][0-9]" + basedme,
    ]
    dmrangestrs = ["0-30", "20-110", "100-310", "300-1000+"]
    psname = job.basefilenm + "_singlepulse.ps"
    for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
        cmd = 'single_pulse_search.py -t %f -g "%s"' % (singlepulse_plot_SNR, dmglob)
        job.singlepulse_time += timed_execute(cmd)
        try:
            os.rename(psname, job.basefilenm + "_DMs%s_singlepulse.ps" % dmrangestr)
        except:
            pass

    # Sift through the candidates to choose the best to fold

    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d" % lo_accel_zmax))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold, dmstrs, low_DM_cutoff)
    if len(lo_accel_cands):
        lo_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(lo_accel_cands, job.basefilenm + ".accelcands_Z%d" % lo_accel_zmax)

    hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d" % hi_accel_zmax))
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, numhits_to_fold, dmstrs, low_DM_cutoff)
    if len(hi_accel_cands):
        hi_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(hi_accel_cands, job.basefilenm + ".accelcands_Z%d" % hi_accel_zmax)

    try:
        cmd = "mv *.accelcands* " + job.outputdir
        os.system(cmd)
    except:
        pass
    job.sifting_time = time.time() - job.sifting_time

    # Fold the best candidates

    cands_folded = 0
    for cand in lo_accel_cands:
        if cands_folded == max_lo_cands_to_fold:
            break
        elif cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(get_folding_command(cand, job, ddplans))
            cands_folded += 1
    cands_folded = 0
    for cand in hi_accel_cands:
        if cands_folded == max_hi_cands_to_fold:
            break
        elif cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(get_folding_command(cand, job, ddplans))
            cands_folded += 1
    # Remove the bestprof files
    bpfiles = glob.glob("*.pfd.bestprof")
    for bpfile in bpfiles:
        os.remove(bpfile)

    # Now step through the .ps files and convert them to .png and gzip them

    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        if "singlepulse" in psfile:
            # For some reason the singlepulse files don't transform nicely...
            epsfile = psfile.replace(".ps", ".eps")
            os.system("eps2eps " + psfile + " " + epsfile)
            os.system("pstoimg -density 100 -crop a " + epsfile)
            try:
                os.remove(epsfile)
            except:
                pass
        else:
            os.system("pstoimg -density 100 -flip cw " + psfile)
        os.system("gzip " + psfile)

    # Tar up the results files

    tar_suffixes = [
        "_ACCEL_%d.tgz" % lo_accel_zmax,
        "_ACCEL_%d.tgz" % hi_accel_zmax,
        "_ACCEL_%d.cand.tgz" % lo_accel_zmax,
        "_ACCEL_%d.cand.tgz" % hi_accel_zmax,
        "_singlepulse.tgz",
        "_inf.tgz",
        "_pfd.tgz",
    ]
    tar_globs = [
        "*_ACCEL_%d" % lo_accel_zmax,
        "*_ACCEL_%d" % hi_accel_zmax,
        "*_ACCEL_%d.cand" % lo_accel_zmax,
        "*_ACCEL_%d.cand" % hi_accel_zmax,
        "*.singlepulse",
        "*_DM[0-9]*.inf",
        "*.pfd",
    ]
    for (tar_suffix, tar_glob) in zip(tar_suffixes, tar_globs):
        tf = tarfile.open(job.basefilenm + tar_suffix, "w:gz")
        for infile in glob.glob(tar_glob):
            tf.add(infile)
            os.remove(infile)
        tf.close()

    # Remove all the downsampled .fil files

    filfiles = glob.glob("*_DS?.fil") + glob.glob("*_DS??.fil")
    for filfile in filfiles:
        os.remove(filfile)

    # Remove the tmp directory (in a tmpfs mount)
    try:
        os.rmdir(tmpdir)
    except:
        pass

    # And finish up

    job.total_time = time.time() - job.total_time
    print "\nFinished"
    print "UTC time is:  %s" % (time.asctime(time.gmtime()))

    # Write the job report

    job.write_report(job.basefilenm + ".report")
    job.write_report(os.path.join(job.outputdir, job.basefilenm + ".report"))

    # Move all the important stuff to the output directory
    cmd = "mv *rfifind.[bimors]* *.tgz *.ps.gz *.png *.report " + job.outputdir
    os.system(cmd)
Example 18
def ACCEL_sift(zmax):

  globaccel = "*ACCEL_%d" % zmax
  globinf = "*DM*.inf"
  # In how many DMs must a candidate be detected to be considered "good"
  min_num_DMs = 2
  # Lowest DM to consider as a "real" pulsar
  low_DM_cutoff = 2.0
  # Ignore candidates with a sigma (from incoherent power summation) less than this
  sifting.sigma_threshold = 8.0
  # Ignore candidates with a coherent power less than this
  sifting.c_pow_threshold = 100.0
  
  # If the birds file works well, the following shouldn't
  # be needed at all...  If they are, add tuples with the bad
  # values and their errors.
  #                (ms, err)
  sifting.known_birds_p = []
  #                (Hz, err)
  sifting.known_birds_f = []
  
  # The following are all defined in the sifting module.
  # But if we want to override them, uncomment and do it here.
  # You shouldn't need to adjust them for most searches, though.

  # How close a candidate has to be to another candidate to
  # consider it the same candidate (in Fourier bins)
  sifting.r_err = 1.1
  # Shortest period candidates to consider (s)
  sifting.short_period = 0.0005
  # Longest period candidates to consider (s)
  sifting.long_period = 15.0
  # Ignore any candidates where at least one harmonic does not exceed this power
  sifting.harm_pow_cutoff = 8.0
  
  #--------------------------------------------------------------

  # Try to read the .inf files first, as _if_ they are present, all of
  # them should be there.  (if no candidates are found by accelsearch
  # we get no ACCEL files...

  inffiles = glob.glob(globinf)
  candfiles = glob.glob(globaccel)
  # Check to see if this is from a short search
  if len(re.findall("_[0-9][0-9][0-9]M_" , inffiles[0])):
    dmstrs = [x.split("DM")[-1].split("_")[0] for x in candfiles]
  else:
    dmstrs = [x.split("DM")[-1].split(".inf")[0] for x in inffiles]
  dms = map(float, dmstrs)
  dms.sort()
  dmstrs = ["%.2f"%x for x in dms]

  # Read in all the candidates
  cands = sifting.read_candidates(candfiles)
  
  # Remove candidates that are duplicated in other ACCEL files
  if len(cands):
    cands = sifting.remove_duplicate_candidates(cands)
 
  # Remove candidates with DM problems
  if len(cands):
    cands = sifting.remove_DM_problems(cands, min_num_DMs, dmstrs, low_DM_cutoff)
  
  # Remove candidates that are harmonically related to each other
  # Note:  this includes only a small set of harmonics
  if len(cands):
    cands = sifting.remove_harmonics(cands)

  # Write candidates to STDOUT
  if len(cands):
    cands.sort(sifting.cmp_sigma)
    #for cand in cands[:1]:
       #print cand.filename, cand.candnum, cand.p, cand.DMstr
    #sifting.write_candlist(cands)
  return cands
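# Usage sketch (assumes the working directory holds one beam's *DM*.inf and
# *ACCEL_* files; attributes as used in the commented-out loop above):
#   cands = ACCEL_sift(0)
#   for cand in cands[:10]:
#       print cand.filename, cand.candnum, cand.p, cand.DMstr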
Example 19
def search_job(job):
    """Search the observation defined in the obs_info
        instance 'job'.
    """
    # Use whatever .zaplist is found in the current directory
    zaplist = glob.glob("*.zaplist")[0]
    print "Using %s as zaplist" % zaplist
    if config.searching.use_subbands and config.searching.fold_rawdata:
        # make a directory to keep subbands so they can be used to fold later
        try:
            os.makedirs(os.path.join(job.workdir, 'subbands'))
        except: pass

    # rfifind the data file
    cmd = "rfifind %s -time %.17g -o %s %s" % \
          (config.searching.datatype_flag, config.searching.rfifind_chunk_time, job.basefilenm,
           job.filenmstr)
    job.rfifind_time += timed_execute(cmd, stdout="%s_rfifind.out" % job.basefilenm)
    maskfilenm = job.basefilenm + "_rfifind.mask"
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)
    
    # Iterate over the stages of the overall de-dispersion plan
    dmstrs = []
    for ddplan in job.ddplans:

        # Make a downsampled filterbank file if we are not using subbands
        if not config.searching.use_subbands:
            if ddplan.downsamp > 1:
                cmd = "downsample_psrfits.py %d %s"%(ddplan.downsamp, job.filenmstr)
                job.downsample_time += timed_execute(cmd)
                dsfiles = []
                for f in job.filenames:
                    fbase = f.rstrip(".fits")
                    dsfiles.append(fbase+"_DS%d.fits"%ddplan.downsamp)
                filenmstr = ' '.join(dsfiles)
            else:
                filenmstr = job.filenmstr 

        # Iterate over the individual passes through the data file
        for passnum in range(ddplan.numpasses):
            subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])

            if config.searching.use_subbands:
                try:
                    os.makedirs(os.path.join(job.tempdir, 'subbands'))
                except: pass
    
                # Create a set of subbands
                cmd = "prepsubband %s -sub -subdm %s -downsamp %d -nsub %d -mask %s " \
                        "-o %s/subbands/%s %s" % \
                        (config.searching.datatype_flag, ddplan.subdmlist[passnum], ddplan.sub_downsamp,
                        ddplan.numsub, maskfilenm, job.tempdir, job.basefilenm,
                        job.filenmstr)
                job.subbanding_time += timed_execute(cmd, stdout="%s.subout" % subbasenm)
            
                # Now de-disperse using the subbands
                cmd = "prepsubband -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d " \
                        "-numout %d -o %s/%s %s/subbands/%s.sub[0-9]*" % \
                        (ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                        ddplan.dmsperpass, ddplan.dd_downsamp, 
                        psr_utils.choose_N(job.orig_N/ddplan.downsamp),
                        job.tempdir, job.basefilenm, job.tempdir, subbasenm)
                job.dedispersing_time += timed_execute(cmd, stdout="%s.prepout" % subbasenm)
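                # psr_utils.choose_N picks a nearby FFT-friendly (highly
                # factorable) length for the de-dispersed time series.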
            
            else:  # Not using subbands
                cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -numdms %d " \
                        "-numout %d -o %s/%s %s"%\
                        (maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                        ddplan.dmsperpass, psr_utils.choose_N(job.orig_N/ddplan.downsamp),
                        job.tempdir, job.basefilenm, filenmstr)
                job.dedispersing_time += timed_execute(cmd)
            
            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)
                basenm = os.path.join(job.tempdir, job.basefilenm+"_DM"+dmstr)
                datnm = basenm+".dat"
                fftnm = basenm+".fft"
                infnm = basenm+".inf"

                # Do the single-pulse search
                cmd = "single_pulse_search.py -p -m %f -t %f %s"%\
                      (config.searching.singlepulse_maxwidth, \
                       config.searching.singlepulse_threshold, datnm)
                job.singlepulse_time += timed_execute(cmd)
                try:
                    shutil.move(basenm+".singlepulse", job.workdir)
                except: pass

                # FFT, zap, and de-redden
                cmd = "realfft %s"%datnm
                job.FFT_time += timed_execute(cmd)
                cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
                      (zaplist, job.baryv, fftnm)
                job.FFT_time += timed_execute(cmd)
                cmd = "rednoise %s"%fftnm
                job.FFT_time += timed_execute(cmd)
                try:
                    os.rename(basenm+"_red.fft", fftnm)
                except: pass
                
                # Do the low-acceleration search
                cmd = "accelsearch -harmpolish -numharm %d -sigma %f " \
                        "-zmax %d -flo %f %s"%\
                        (config.searching.lo_accel_numharm, \
                         config.searching.lo_accel_sigma, \
                         config.searching.lo_accel_zmax, \
                         config.searching.lo_accel_flo, fftnm)
                job.lo_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand" % config.searching.lo_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm+"_ACCEL_%d.cand" % config.searching.lo_accel_zmax, \
                                    job.workdir)
                    shutil.move(basenm+"_ACCEL_%d" % config.searching.lo_accel_zmax, \
                                    job.workdir)
                except: pass
        
                # Do the high-acceleration search
                cmd = "accelsearch -harmpolish -numharm %d -sigma %f " \
                        "-zmax %d -flo %f %s"%\
                        (config.searching.hi_accel_numharm, \
                         config.searching.hi_accel_sigma, \
                         config.searching.hi_accel_zmax, \
                         config.searching.hi_accel_flo, fftnm)
                job.hi_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand" % config.searching.hi_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm+"_ACCEL_%d.cand" % config.searching.hi_accel_zmax, \
                                    job.workdir)
                    shutil.move(basenm+"_ACCEL_%d" % config.searching.hi_accel_zmax, \
                                    job.workdir)
                except: pass

                # Move the .inf files
                try:
                    shutil.move(infnm, job.workdir)
                except: pass
                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                except: pass
                try:
                    os.remove(fftnm)
                except: pass

            if config.searching.use_subbands:
                if config.searching.fold_rawdata:
                    # Subband files are no longer needed
                    shutil.rmtree(os.path.join(job.tempdir, 'subbands'))
                else:
                    # Move subbands to workdir
                    for sub in glob.glob(os.path.join(job.tempdir, 'subbands', "*")):
                        shutil.move(sub, os.path.join(job.workdir, 'subbands'))

    # Make the single-pulse plots
    basedmb = job.basefilenm+"_DM"
    basedme = ".singlepulse "
    # The following will make plots for DM ranges:
    #    0-110, 100-310, 300-1000+
    dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
               basedmb+"[0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"10[0-9].[0-9][0-9]"+basedme,
               basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"30[0-9].[0-9][0-9]"+basedme,
               basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme]
    dmrangestrs = ["0-110", "100-310", "300-1000+"]
    psname = job.basefilenm+"_singlepulse.ps"
    for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
        dmfiles = []
        for dmg in dmglob.split():
            dmfiles += glob.glob(dmg.strip())
        # Check that there are matching files and they are not all empty
        if dmfiles and sum([os.path.getsize(f) for f in dmfiles]):
            cmd = 'single_pulse_search.py -t %f -g "%s"' % \
                (config.searching.singlepulse_plot_SNR, dmglob)
            job.singlepulse_time += timed_execute(cmd)
            os.rename(psname,
                        job.basefilenm+"_DMs%s_singlepulse.ps" % dmrangestr)

    # Sift through the candidates to choose the best to fold
    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d" % config.searching.lo_accel_zmax))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, config.searching.numhits_to_fold,
                                                    dmstrs, config.searching.low_DM_cutoff)

    hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d" % config.searching.hi_accel_zmax))
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, config.searching.numhits_to_fold,
                                                    dmstrs, config.searching.low_DM_cutoff)

    all_accel_cands = lo_accel_cands + hi_accel_cands
    if len(all_accel_cands):
        all_accel_cands = sifting.remove_harmonics(all_accel_cands)
        # Note:  the candidates will be sorted in _sigma_ order, not _SNR_!
        all_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(all_accel_cands, job.basefilenm+".accelcands")
        # Moving of results to resultsdir now happens in clean_up(...)
        # shutil.copy(job.basefilenm+".accelcands", job.outputdir)

    job.sifting_time = time.time() - job.sifting_time

    # Fold the best candidates
    cands_folded = 0
    for cand in all_accel_cands:
        if cands_folded == config.searching.max_cands_to_fold:
            break
        if cand.sigma >= config.searching.to_prepfold_sigma:
            job.folding_time += timed_execute(get_folding_command(cand, job))
            cands_folded += 1
    job.num_cands_folded = cands_folded

    # Now step through the .ps files and convert them to .png and gzip them

    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        # The '[0]' appended to the end of psfile is to convert only the 1st page
        timed_execute("convert -quality 90 %s -background white -flatten -rotate 90 +matte %s" % \
                            (psfile+"[0]", psfile[:-3]+".png"))
        timed_execute("gzip "+psfile)
Example 20
def main(fits_filenm, workdir, jobid, zaplist, ddplans):
    # Change to the specified working directory
    os.chdir(workdir)
    # Set up theano compile dir (UBC_AI rating uses theano)
    theano_compiledir = os.path.join(workdir, "theano_compile")
    if not os.path.isdir(theano_compiledir):
        os.mkdir(theano_compiledir)
    os.putenv("THEANO_FLAGS", "compiledir=%s" % theano_compiledir)

    # Get information on the observation and the job
    job = obs_info(fits_filenm)
    if job.raw_T < low_T_to_search:
        print "The observation is too short (%.2f s) to search." % job.raw_T
        sys.exit(1)
    job.total_time = time.time()
    if job.dt == 163.84:
        ddplans = ddplans[str(job.nchans) + "slow"]
    else:
        ddplans = ddplans[str(job.nchans) + "fast"]

    # Use the specified zaplist if provided.  Otherwise use whatever is in
    # this directory
    if zaplist is None:
        zaplist = glob.glob("*.zaplist")[0]

    # Create a checkpoint file
    if jobid is not None:
        checkpoint = os.path.join(checkpointdir,
                                  job.basefilenm + "." + jobid + ".checkpoint")
    else:
        checkpoint = os.path.join(checkpointdir,
                                  job.basefilenm + ".checkpoint")

    # Make sure the output directory (and parent directories) exist
    try:
        os.makedirs(job.outputdir)
        os.chmod(job.outputdir,
                 stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH)
    except:
        pass

    # Make sure the tmp directory (in a tmpfs mount) exists
    if jobid is not None:
        tmpdir = os.path.join(basetmpdir, job.basefilenm, jobid, "tmp")
    else:
        tmpdir = os.path.join(basetmpdir, job.basefilenm, "tmp")
    try:
        os.makedirs(tmpdir)
    except:
        pass

    print "\nBeginning GBNCC search of '%s'" % job.fits_filenm
    print "UTC time is:  %s" % (time.asctime(time.gmtime()))

    rfifindout = job.basefilenm + "_rfifind.out"
    rfifindmask = job.basefilenm + "_rfifind.mask"

    if not os.path.exists(rfifindout) or not os.path.exists(rfifindmask):

        # rfifind the filterbank file
        cmd = "rfifind -zapchan 2456:3277 -time %.17g -o %s %s > %s_rfifind.out"%\
              (rfifind_chunk_time, job.basefilenm,
               job.fits_filenm, job.basefilenm)
        job.rfifind_time += timed_execute(cmd)
    maskfilenm = job.basefilenm + "_rfifind.mask"
    ### COPYING HERE TO AID IN DEBUGGING ###
    subprocess.call("cp *rfifind.[bimors]* %s" % job.outputdir, shell=True)
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)

    # Iterate over the stages of the overall de-dispersion plan
    try:
        with open(checkpoint, "r") as f:
            nodenm = f.readline().strip()
            queueid = f.readline().strip()
            prevddplan, prevpassnum = map(int, f.readline().strip().split())

    except IOError:
        nodenm = "localhost"
        queueid = "None"
        prevddplan = 0
        prevpassnum = 0
        with open(checkpoint, "w") as f:
            f.write("%s\n" % nodenm)
            f.write("%s\n" % queueid)
            f.write("%d %d\n" % (prevddplan, prevpassnum))

    for ddplan in ddplans[prevddplan:]:

        # Make a downsampled filterbank file
        if ddplan.downsamp > 1:
            cmd = "psrfits_subband -dstime %d -nsub %d -o %s_DS%d %s 2>> psrfits_subband.err" % (
                ddplan.downsamp, job.nchans, job.dsbasefilenm, ddplan.downsamp,
                job.dsbasefilenm)
            job.downsample_time += timed_execute(cmd)
            fits_filenm = job.dsbasefilenm + "_DS%d%s"%\
                          (ddplan.downsamp,job.fits_filenm[job.fits_filenm.rfind("_"):])
        else:
            fits_filenm = job.fits_filenm
        # Iterate over the individual passes through the .fil file
        for passnum in range(prevpassnum, ddplan.numpasses):
            subbasenm = "%s_DM%s" % (job.basefilenm, ddplan.subdmlist[passnum])

            # Now de-disperse
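            # The -ignorechan range below matches the channels zapped with
            # rfifind -zapchan above (2456:3277).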
            cmd = "prepsubband -ignorechan 2456:3277 -mask %s -lodm %.2f -dmstep %.2f -nsub %d -numdms %d -numout %d -o %s/%s %s"%\
                  (maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep, ddplan.numsub,
                   ddplan.dmsperpass, job.N/ddplan.downsamp,
                   tmpdir, job.basefilenm, fits_filenm)
            job.dedispersing_time += timed_execute(cmd)

            # Do the single-pulse search
            cmd = "single_pulse_search.py -p -m %f -t %f %s/*.dat"%\
                (singlepulse_maxwidth, singlepulse_threshold, tmpdir)
            job.singlepulse_time += timed_execute(cmd)
            spfiles = glob.glob("%s/*.singlepulse" % tmpdir)
            for spfile in spfiles:
                try:
                    shutil.move(spfile, workdir)
                except:
                    pass

            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                basenm = os.path.join(tmpdir, job.basefilenm + "_DM" + dmstr)
                datnm = basenm + ".dat"
                fftnm = basenm + ".fft"
                infnm = basenm + ".inf"

                # FFT, zap, and de-redden
                cmd = "realfft %s" % datnm
                job.FFT_time += timed_execute(cmd)
                cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
                      (zaplist, job.baryv, fftnm)
                job.FFT_time += timed_execute(cmd)
                cmd = "rednoise %s" % fftnm
                job.FFT_time += timed_execute(cmd)
                try:
                    os.rename(basenm + "_red.fft", fftnm)
                except:
                    pass

                # Do the low-acceleration search
                cmd = "accelsearch -inmem -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, fftnm)
                job.lo_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm + "_ACCEL_%d.txtcand" % lo_accel_zmax)
                except:
                    pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm + "_ACCEL_%d.cand" % lo_accel_zmax,
                                workdir)
                    shutil.move(basenm + "_ACCEL_%d" % lo_accel_zmax, workdir)
                except:
                    pass

                # Do the high-acceleration search
                cmd = "accelsearch -inmem -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
                job.hi_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm + "_ACCEL_%d.txtcand" % hi_accel_zmax)
                except:
                    pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm + "_ACCEL_%d.cand" % hi_accel_zmax,
                                workdir)
                    shutil.move(basenm + "_ACCEL_%d" % hi_accel_zmax, workdir)
                except:
                    pass

                # Do the FFA search
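                # The FFA is run only on DM trials that are multiples of
                # 0.5 pc/cc, and only out to DM = 1500 pc/cc.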
                if (not float(dmstr) % 0.5) and (float(dmstr) <= 1500.0):
                    cmd = "ffa.py %s" % datnm
                    job.ffa_time += timed_execute(cmd)
                    try:  # This prevents errors if there are no cand files
                        shutil.move(basenm + "_cands.ffa", workdir)
                    except:
                        pass
                else:
                    print "Skipping FFA search for DM=%s pc/cc" % dmstr

                # Move the .inf files
                try:
                    shutil.move(infnm, workdir)
                except:
                    pass
                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                except:
                    pass
                try:
                    os.remove(fftnm)
                except:
                    pass

            prevpassnum += 1
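            # Update the checkpoint after every completed pass so that a
            # restarted job resumes where this one left off.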
            with open(checkpoint, "w") as f:
                f.write("%s\n" % nodenm)
                f.write("%s\n" % queueid)
                f.write("%d %d\n" % (prevddplan, prevpassnum))

        prevddplan += 1
        prevpassnum = 0
        with open(checkpoint, "w") as f:
            f.write("%s\n" % nodenm)
            f.write("%s\n" % queueid)
            f.write("%d %d\n" % (prevddplan, prevpassnum))

    # Make the single-pulse plots
    basedmb = job.basefilenm + "_DM"
    basedme = ".singlepulse "
    # The following will make plots for DM ranges:
    #    0-30, 20-110, 100-310, 300-1100, 1000-2100, 2000-3000+
    ### MAKE SURE THAT single_pulse_search.py ALWAYS OUTPUTS A .singlepulse
    ### FILE ###
    dmglobs = [
        basedmb + "[0-9].[0-9][0-9]" + basedme + basedmb +
        "[012][0-9].[0-9][0-9]" + basedme,
        basedmb + "[2-9][0-9].[0-9][0-9]" + basedme + basedmb +
        "10[0-9].[0-9][0-9]" + basedme,
        basedmb + "[12][0-9][0-9].[0-9][0-9]" + basedme + basedmb +
        "30[0-9].[0-9][0-9]" + basedme,
        basedmb + "[3-9][0-9][0-9].[0-9][0-9]" + basedme + basedmb +
        "10[0-9][0-9].[0-9][0-9]" + basedme,
        basedmb + "1[0-9][0-9][0-9].[0-9][0-9]" + basedme + basedmb +
        "20[0-9][0-9].[0-9][0-9]" + basedme,
        basedmb + "2[0-9][0-9][0-9].[0-9][0-9]" + basedme + basedmb +
        "3[0-9][0-9][0-9].[0-9][0-9]" + basedme,
    ]
    dmrangestrs = [
        "0-30", "20-110", "100-310", "300-1100", "1000-2100", "2000-3000+"
    ]
    psname = job.basefilenm + "_singlepulse.ps"
    for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
        cmd = 'single_pulse_search.py -t %f -g "%s"' % \
              (singlepulse_plot_SNR, dmglob)
        job.singlepulse_time += timed_execute(cmd)
        try:
            os.rename(psname,
                      job.basefilenm + "_DMs%s_singlepulse.ps" % dmrangestr)
        except:
            pass

    # Chen Karako-Argaman's single pulse rating algorithm and Chitrang Patel's single pulse waterfaller code
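    # Total size (in MB) of all *singlepulse files; a very large volume of
    # single-pulse hits is treated as a symptom of strong RFI, in which case
    # the beam is skipped (see the else branch below).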
    mem = int(
        subprocess.check_output(
            "du -chm *singlepulse | tail -n 1 | cut -d't' -f 1",
            stderr=subprocess.STDOUT,
            shell=True))
    if mem < 600:
        path = os.path.dirname(os.path.abspath(__file__))
        path = path[:-3] + 'lib/python/singlepulse/'
        cmd = 'python ' + path + 'rrattrap.py --inffile %s*rfifind.inf --use-configfile --use-DMplan --vary-group-size %s*.singlepulse' % (
            job.basefilenm, job.basefilenm)
        job.singlepulse_time += timed_execute(cmd)
        GBNCC_wrapper_make_spd.GBNCC_wrapper('groups.txt', maskfilenm,
                                             job.fits_filenm, workdir)
    else:
        spoutfile = open('groups.txt', 'w')
        spoutfile.write('# Beam skipped because of high RFI.\n')
        spoutfile.close()

    # Sift through the candidates to choose the best to fold

    job.sifting_time = time.time()

    # Make the dmstrs list here since it may not be properly filled in the main
    # loop if we pick up from a checkpoint
    dmstrs = []
    for ddplan in ddplans:
        for passnum in range(ddplan.numpasses):
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)

    lo_accel_cands = sifting.read_candidates(
        glob.glob("*ACCEL_%d" % lo_accel_zmax))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands,
                                                    numhits_to_fold, dmstrs,
                                                    low_DM_cutoff)

    hi_accel_cands = sifting.read_candidates(
        glob.glob("*ACCEL_%d" % hi_accel_zmax))
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands,
                                                    numhits_to_fold, dmstrs,
                                                    low_DM_cutoff)

    if len(lo_accel_cands) and len(hi_accel_cands):
        lo_accel_cands, hi_accel_cands = remove_crosslist_duplicate_candidates(
            lo_accel_cands, hi_accel_cands)

    if len(lo_accel_cands):
        lo_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(
            lo_accel_cands, job.basefilenm + ".accelcands_Z%d" % lo_accel_zmax)
    if len(hi_accel_cands):
        hi_accel_cands.sort(sifting.cmp_sigma)
        sifting.write_candlist(
            hi_accel_cands, job.basefilenm + ".accelcands_Z%d" % hi_accel_zmax)

    job.sifting_time = time.time() - job.sifting_time

    # FFA sifting
    job.ffa_sifting_time = time.time()
    ffa_cands = sift_ffa(job, zaplist)
    job.ffa_sifting_time = time.time() - job.ffa_sifting_time

    # Fold the best candidates

    cands_folded = 0
    for cand in lo_accel_cands.cands:
        if cands_folded == max_lo_cands_to_fold:
            break
        elif cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(
                get_folding_command(cand, job, ddplans, maskfilenm))
            cands_folded += 1
    cands_folded = 0
    for cand in hi_accel_cands.cands:
        if cands_folded == max_hi_cands_to_fold:
            break
        elif cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(
                get_folding_command(cand, job, ddplans, maskfilenm))
            cands_folded += 1

    cands_folded = 0
    for cand in ffa_cands.cands:
        if cands_folded == max_ffa_cands_to_fold:
            break
        elif cand.snr > to_prepfold_ffa_snr:
            job.folding_time += timed_execute(
                get_ffa_folding_command(cand, job, ddplans, maskfilenm))
            cands_folded += 1

    # Rate the candidates
    pfdfiles = glob.glob("*.pfd")
    for pfdfile in pfdfiles:
        status = ratings.rate_candidate(pfdfile)
        if status == 1:
            sys.stdout.write("\nWarining: Ratings failed for %s\n" % pfdfile)
            sys.stdout.flush()
    # Rate the single pulse candidates
    cmd = 'rate_spds.py --redirect-warnings --include-all *.spd'
    subprocess.call(cmd, shell=True)

    # Now step through the .ps files and convert them to .png and gzip them
    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        if "singlepulse" in psfile:
            pngfile = psfile.replace(".ps", ".png")
            subprocess.call(["convert", psfile, pngfile])
        elif "grouped" in psfile:
            pngfile = psfile.replace(".ps", ".png")
            subprocess.call(["convert", psfile, pngfile])
        elif "spd" in psfile:
            pngfile = psfile.replace(".ps", ".png")
            subprocess.call(["convert", psfile, pngfile])
        else:
            pngfile = psfile.replace(".ps", ".png")
            subprocess.call(["convert", "-rotate", "90", psfile, pngfile])
        os.remove(psfile)

    # Tar up the results files
    tar_suffixes = [
        "_ACCEL_%d.tgz" % lo_accel_zmax,
        "_ACCEL_%d.tgz" % hi_accel_zmax,
        "_ACCEL_%d.cand.tgz" % lo_accel_zmax,
        "_ACCEL_%d.cand.tgz" % hi_accel_zmax, "_singlepulse.tgz", "_inf.tgz",
        "_pfd.tgz", "_bestprof.tgz", "_spd.tgz"
    ]
    tar_globs = [
        "*_ACCEL_%d" % lo_accel_zmax,
        "*_ACCEL_%d" % hi_accel_zmax,
        "*_ACCEL_%d.cand" % lo_accel_zmax,
        "*_ACCEL_%d.cand" % hi_accel_zmax, "*.singlepulse", "*_DM[0-9]*.inf",
        "*.pfd", "*.bestprof", "*.spd"
    ]
    for (tar_suffix, tar_glob) in zip(tar_suffixes, tar_globs):
        tf = tarfile.open(job.basefilenm + tar_suffix, "w:gz")
        for infile in glob.glob(tar_glob):
            tf.add(infile)
            os.remove(infile)
        tf.close()

    # Remove all the downsampled .fil files
    filfiles = glob.glob("*_DS?.fil") + glob.glob("*_DS??.fil")
    for filfile in filfiles:
        os.remove(filfile)

    # Remove all subbanded FITS files
    subfiles = glob.glob("*subband*.fits")
    for subfile in subfiles:
        os.remove(subfile)

    # And finish up
    job.total_time = time.time() - job.total_time
    print "\nFinished"
    print "UTC time is:  %s" % (time.asctime(time.gmtime()))

    # Write the job report
    job.write_report(job.basefilenm + ".report")
    job.write_report(os.path.join(job.outputdir, job.basefilenm + ".report"))

    # Write the diagnostics report
    diagnostics.write_diagnostics(job.basefilenm)

    # Move all the important stuff to the output directory
    subprocess.call(
        "mv *rfifind.[bimors]*  *.accelcands* *.ffacands *.tgz *.png *.ratings *.diagnostics groups.txt *spd.rat *.report *.summary %s"
        % job.outputdir,
        shell=True)

    # Make a file indicating that this beam needs to be viewed
    open("%s/tobeviewed" % job.outputdir, "w").close()

    # Remove the checkpointing file
    try:
        os.remove(checkpoint)
    except:
        pass

    # Remove the tmp directory (in a tmpfs mount)
    try:
        shutil.rmtree(tmpdir)
    except:
        pass
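
A hypothetical driver for main() above. Every value here is illustrative, and build_ddplans() is a stand-in for whatever constructs the dictionary of de-dispersion plans, keyed by "<nchans>slow" / "<nchans>fast", that main() indexes:

if __name__ == "__main__":
    # Illustrative values only; the real pipeline presumably receives these
    # from its job queue or command line.
    ddplans = build_ddplans()  # hypothetical helper building the
                               # {"<nchans>slow"/"<nchans>fast": [...]} dict
    main(fits_filenm="guppi_56000_GBNCC0001_0001.fits",
         workdir="/scratch/GBNCC0001",
         jobid="12345",
         zaplist=None,  # fall back to the first *.zaplist in workdir
         ddplans=ddplans)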
Example 21
def ACCEL_sift(zmax):
    '''
    The following code comes from PRESTO's ACCEL_sift.py
    '''

    globaccel = "*ACCEL_%d" % zmax
    globinf = "*DM*.inf"
    # In how many DMs must a candidate be detected to be considered "good"
    min_num_DMs = 2
    # Lowest DM to consider as a "real" pulsar
    low_DM_cutoff = 2.0
    # Ignore candidates with a sigma (from incoherent power summation) less than this
    sifting.sigma_threshold = 4.0
    # Ignore candidates with a coherent power less than this
    sifting.c_pow_threshold = 100.0

    # If the birds file works well, the following shouldn't
    # be needed at all...  If they are, add tuples with the bad
    # values and their errors.
    #                (ms, err)
    sifting.known_birds_p = []
    #                (Hz, err)
    sifting.known_birds_f = []

    # The following are all defined in the sifting module.
    # But if we want to override them, uncomment and do it here.
    # You shouldn't need to adjust them for most searches, though.

    # How close a candidate has to be to another candidate to
    # consider it the same candidate (in Fourier bins)
    sifting.r_err = 1.1
    # Shortest period candidates to consider (s)
    sifting.short_period = 0.0005
    # Longest period candidates to consider (s)
    sifting.long_period = 15.0
    # Ignore any candidates where at least one harmonic does exceed this power
    sifting.harm_pow_cutoff = 8.0

    # --------------------------------------------------------------

    # Try to read the .inf files first, as _if_ they are present, all of
    # them should be there.  (if no candidates are found by accelsearch
    # we get no ACCEL files...
    inffiles = glob.glob(globinf)
    candfiles = glob.glob(globaccel)
    # Check to see if this is from a short search
    if len(re.findall("_[0-9][0-9][0-9]M_", inffiles[0])):
        dmstrs = [x.split("DM")[-1].split("_")[0] for x in candfiles]
    else:
        dmstrs = [x.split("DM")[-1].split(".inf")[0] for x in inffiles]
    dms = map(float, dmstrs)
    dms.sort()
    dmstrs = ["%.2f" % x for x in dms]

    # Read in all the candidates
    cands = sifting.read_candidates(candfiles)

    # Remove candidates that are duplicated in other ACCEL files
    if len(cands):
        cands = sifting.remove_duplicate_candidates(cands)

    # Remove candidates with DM problems
    if len(cands):
        cands = sifting.remove_DM_problems(cands, min_num_DMs, dmstrs,
                                           low_DM_cutoff)

    # Remove candidates that are harmonically related to each other
    # Note:  this includes only a small set of harmonics
    if len(cands):
        cands = sifting.remove_harmonics(cands)

    # Write candidates to STDOUT
    if len(cands):
        cands.sort(sifting.cmp_sigma)
        # for cand in cands[:1]:
        # print cand.filename, cand.candnum, cand.p, cand.DMstr
        # sifting.write_candlist(cands)
    return cands
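
A minimal example of using ACCEL_sift(), run from the directory holding the *_ACCEL_* and *.inf files; the attribute names match the commented-out lines above:

cands = ACCEL_sift(0)  # zmax = 0, i.e. a no-acceleration search
for cand in cands[:10]:  # the ten most significant survivors
    print cand.filename, cand.candnum, cand.p, cand.DMstr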
Example 22
#!/usr/bin/env python
import sifting, sys

if len(sys.argv) < 2:
    sys.stderr.write("\nusage:  quick_prune_cands.py ACCEL_file_name [sigma]\n\n")
    sys.exit()

if len(sys.argv)==3:
    sifting.sigma_threshold = float(sys.argv[2])

cands = sifting.read_candidates([sys.argv[1]], track=True)
cands.print_cand_summary()
cands.to_file()
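
A typical invocation, with an illustrative ACCEL file name and the optional sigma cutoff:

    python quick_prune_cands.py guppi_56000_GBNCC0001_DM50.00_ACCEL_0 6.0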