Example 1
# Imports assumed by this fragment; PRESTO's sifting module is
# packaged as presto.sifting in recent releases (older installs
# use a bare "import sifting").
import re
from operator import attrgetter

from presto import sifting

# Check to see if this is from a short search
if re.search("_[0-9][0-9][0-9]M_", inffiles[0]):
    dmstrs = [x.split("DM")[-1].split("_")[0] for x in candfiles]
else:
    dmstrs = [x.split("DM")[-1].split(".inf")[0] for x in inffiles]
dms = list(map(float, dmstrs))
dms.sort()
dmstrs = ["%.2f" % x for x in dms]

# Read in all the candidates
cands = sifting.read_candidates(candfiles)

# Remove candidates that are duplicated in other ACCEL files
if len(cands):
    cands = sifting.remove_duplicate_candidates(cands)

# Remove candidates with DM problems
if len(cands):
    cands = sifting.remove_DM_problems(cands, min_num_DMs, dmstrs,
                                       low_DM_cutoff)

# Remove candidates that are harmonically related to each other
# Note:  this includes only a small set of harmonics
if len(cands):
    cands = sifting.remove_harmonics(cands)

# Write candidates to STDOUT
if len(cands):
    cands.sort(key=attrgetter('sigma'), reverse=True)
    sifting.write_candlist(cands)
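This sifting fragment assumes that candfiles, inffiles, and the two thresholds already exist. A minimal, hypothetical setup (the glob patterns, the zmax of 0, and the threshold values are illustrative assumptions, not from the original script) could be:

import glob

# Hypothetical setup for the fragment above: collect the accelsearch
# candidate files and the matching PRESTO .inf files.
candfiles = sorted(glob.glob("*_ACCEL_0"))      # assumes zmax = 0
inffiles = sorted(glob.glob("*_DM[0-9]*.inf"))

# Illustrative sifting thresholds (values are assumptions)
min_num_DMs = 2      # candidate must appear at >= this many trial DMs
low_DM_cutoff = 2.0  # ignore candidates below this DM (pc cm**-3)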
Example 2
# Imports assumed by this script; the helpers (obs_info, timed_execute,
# find_masked_fraction, get_folding_command, ...) and the tuning
# parameters (low_T_to_search, rfifind_chunk_time, the accelsearch
# settings, etc.) are module-level names defined elsewhere in the
# original pipeline.
import glob
import os
import shutil
import stat
import sys
import tarfile
import time
from operator import attrgetter

from presto import sifting


def main(fits_filenm, workdir, ddplans):

    # Change to the specified working directory
    os.chdir(workdir)

    # Get information on the observation and the job
    job = obs_info(fits_filenm)
    if job.raw_T < low_T_to_search:
        print("The observation is too short (%.2f s) to search."%job.raw_T)
        sys.exit()
    job.total_time = time.time()
    if job.dt == 163.84:
        ddplans = ddplans[str(job.nchans)+"slow"]
    else:
        ddplans = ddplans[str(job.nchans)+"fast"]
    
    # Use whatever .zaplist is found in the current directory
    default_zaplist = glob.glob("*.zaplist")[0]

    # Make sure the output directory (and parent directories) exist
    try:
        os.makedirs(job.outputdir)
        os.chmod(job.outputdir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH)
    except: pass

    # Make sure the tmp directory (in a tmpfs mount) exists
    tmpdir = os.path.join(base_tmp_dir, job.basefilenm)
    try:
        os.makedirs(tmpdir)
    except: pass

    print("\nBeginning GBNCC search of '%s'"%job.fits_filenm)
    print("UTC time is:  %s"%(time.asctime(time.gmtime())))

    rfifindout=job.basefilenm+"_rfifind.out"
    rfifindmask=job.basefilenm+"_rfifind.mask"

    if not os.path.exists(rfifindout) or not os.path.exists(rfifindmask):
        # rfifind the filterbank file
        cmd = "rfifind -time %.17g -o %s %s > %s_rfifind.out"%\
              (rfifind_chunk_time, job.basefilenm,
               job.fits_filenm, job.basefilenm)
        job.rfifind_time += timed_execute(cmd)
    maskfilenm = job.basefilenm + "_rfifind.mask"
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)
    
    # Iterate over the stages of the overall de-dispersion plan
    dmstrs = []
    
    for ddplan in ddplans:
        # Make a downsampled filterbank file
        if ddplan.downsamp > 1:
            cmd = "psrfits_subband -dstime %d -nsub %d -o %s_DS%d %s"%\
                  (ddplan.downsamp, job.nchans, job.dsbasefilenm, ddplan.downsamp, job.dsbasefilenm )
            job.downsample_time += timed_execute(cmd)
            fits_filenm = job.dsbasefilenm + "_DS%d%s"%\
                          (ddplan.downsamp,job.fits_filenm[job.fits_filenm.rfind("_"):])
        else:
            fits_filenm = job.fits_filenm

        # Iterate over the individual passes through the .fil file
        for passnum in range(ddplan.numpasses):
            subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])

            # Now de-disperse 
            cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -nsub %d -numdms %d -numout %d -o %s/%s %s"%\
                  (maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep,
                   ddplan.dmstep, ddplan.numsub,
                   ddplan.dmsperpass, job.N/ddplan.downsamp,
                   tmpdir, job.basefilenm, fits_filenm)
            job.dedispersing_time += timed_execute(cmd)
            
            # Do the single-pulse search
            cmd = "single_pulse_search.py -p -m %f -t %f %s/*.dat"%\
                (singlepulse_maxwidth, singlepulse_threshold, tmpdir)
            job.singlepulse_time += timed_execute(cmd)
            spfiles = glob.glob("%s/*.singlepulse"%tmpdir)
            for spfile in spfiles:
                try:
                    shutil.move(spfile, workdir)
                except: pass

            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)
                basenm = os.path.join(tmpdir, job.basefilenm+"_DM"+dmstr)
                datnm = basenm+".dat"
                fftnm = basenm+".fft"
                infnm = basenm+".inf"

                # FFT, zap, and de-redden
                cmd = "realfft %s"%datnm
                job.FFT_time += timed_execute(cmd)
                cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
                      (default_zaplist, job.baryv, fftnm)
                job.FFT_time += timed_execute(cmd)
                cmd = "rednoise %s"%fftnm
                job.FFT_time += timed_execute(cmd)
                try:
                    os.rename(basenm+"_red.fft", fftnm)
                except: pass
                
                # Do the low-acceleration search
                cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, fftnm)
                job.lo_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand"%lo_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm+"_ACCEL_%d.cand"%lo_accel_zmax, workdir)
                    shutil.move(basenm+"_ACCEL_%d"%lo_accel_zmax, workdir)
                except: pass
        
                # Do the high-acceleration search
                cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
                job.hi_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm+"_ACCEL_%d.txtcand"%hi_accel_zmax)
                except: pass
                try:  # This prevents errors if there are no cand files to copy
                    shutil.move(basenm+"_ACCEL_%d.cand"%hi_accel_zmax, workdir)
                    shutil.move(basenm+"_ACCEL_%d"%hi_accel_zmax, workdir)
                except: pass

                # Move the .inf files
                try:
                    shutil.move(infnm, workdir)
                except: pass
                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                except: pass
                try:
                    os.remove(fftnm)
                except: pass

    # Make the single-pulse plots
    basedmb = job.basefilenm+"_DM"
    basedme = ".singlepulse "
    # The following will make plots for DM ranges:
    #    0-30, 20-110, 100-310, 300-1000+
    dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
               basedmb+"[012][0-9].[0-9][0-9]"+basedme,
               basedmb+"[2-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"10[0-9].[0-9][0-9]"+basedme,
               basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"30[0-9].[0-9][0-9]"+basedme,
               basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme]
    dmrangestrs = ["0-30", "20-110", "100-310", "300-1000+"]
    psname = job.basefilenm+"_singlepulse.ps"
    for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
        cmd = 'single_pulse_search.py -t %f -g "%s"' % \
              (singlepulse_plot_SNR, dmglob)
        job.singlepulse_time += timed_execute(cmd)
        try:
            os.rename(psname,
                      job.basefilenm+"_DMs%s_singlepulse.ps"%dmrangestr)
        except: pass

    # Sift through the candidates to choose the best to fold
    
    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%lo_accel_zmax))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold,
                                                    dmstrs, low_DM_cutoff)
        
    hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%hi_accel_zmax))
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, numhits_to_fold,
                                                    dmstrs, low_DM_cutoff)

    if len(lo_accel_cands) and len(hi_accel_cands):
        lo_accel_cands, hi_accel_cands = remove_crosslist_duplicate_candidates(lo_accel_cands, hi_accel_cands)

    if len(lo_accel_cands):
        lo_accel_cands.sort(key=attrgetter('sigma'), reverse=True)
        sifting.write_candlist(lo_accel_cands,
                               job.basefilenm+".accelcands_Z%d"%lo_accel_zmax)
    if len(hi_accel_cands):
        hi_accel_cands.sort(key=attrgetter('sigma'), reverse=True)
        sifting.write_candlist(hi_accel_cands,
                               job.basefilenm+".accelcands_Z%d"%hi_accel_zmax)

    try:
        cmd = "mv *.accelcands* "+job.outputdir
        os.system(cmd)
    except: pass
    job.sifting_time = time.time() - job.sifting_time

    # Fold the best candidates

    cands_folded = 0
    for cand in lo_accel_cands:
        if cands_folded == max_lo_cands_to_fold:
            break
        elif cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(get_folding_command(cand, job, ddplans, maskfilenm))
            cands_folded += 1
    cands_folded = 0
    for cand in hi_accel_cands:
        if cands_folded == max_hi_cands_to_fold:
            break
        elif cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(get_folding_command(cand, job, ddplans, maskfilenm))
            cands_folded += 1
    # Remove the bestprof files
    bpfiles = glob.glob("*.pfd.bestprof")
    for bpfile in bpfiles:
        os.remove(bpfile)

    # Now step through the .ps files and convert them to .png and gzip them

    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        if "singlepulse" in psfile:
            os.system("pstoimg -density 200 -antialias -crop a "+psfile)
            try:
                os.remove(psfile)
            except: pass
        else:
            os.system("pstoimg -density 200 -antialias -flip cw "+psfile)
            os.system("gzip "+psfile)
    
    # Tar up the results files 

    tar_suffixes = ["_ACCEL_%d.tgz"%lo_accel_zmax,
                    "_ACCEL_%d.tgz"%hi_accel_zmax,
                    "_ACCEL_%d.cand.tgz"%lo_accel_zmax,
                    "_ACCEL_%d.cand.tgz"%hi_accel_zmax,
                    "_singlepulse.tgz",
                    "_inf.tgz",
                    "_pfd.tgz"]
    tar_globs = ["*_ACCEL_%d"%lo_accel_zmax,
                 "*_ACCEL_%d"%hi_accel_zmax,
                 "*_ACCEL_%d.cand"%lo_accel_zmax,
                 "*_ACCEL_%d.cand"%hi_accel_zmax,
                 "*.singlepulse",
                 "*_DM[0-9]*.inf",
                 "*.pfd"]
    for (tar_suffix, tar_glob) in zip(tar_suffixes, tar_globs):
        tf = tarfile.open(job.basefilenm+tar_suffix, "w:gz")
        for infile in glob.glob(tar_glob):
            tf.add(infile)
            os.remove(infile)
        tf.close()
            
    # Remove all the downsampled .fits files

    # "_DS?*" already matches one or more characters after "DS", so a
    # single glob suffices; using a set avoids removing a file twice
    for fitsfile in set(glob.glob("*_DS?*.fits")):
        os.remove(fitsfile)

    # Remove the tmp directory (in a tmpfs mount)
    try:
        os.rmdir(tmpdir)
    except: pass

    # And finish up

    job.total_time = time.time() - job.total_time
    print("\nFinished")
    print("UTC time is:  %s"%(time.asctime(time.gmtime())))

    # Write the job report

    job.write_report(job.basefilenm+".report")
    job.write_report(os.path.join(job.outputdir, job.basefilenm+".report"))

    # Move all the important stuff to the output directory
    cmd = "mv *rfifind.[bimors]* *.tgz *.ps.gz *.png *.report "+\
          job.outputdir
    os.system(cmd)
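Both pipeline examples charge each external command to a per-stage timer through a timed_execute helper that is not shown. A minimal sketch consistent with how it is called above (run a shell command, return the elapsed wall-clock seconds) might be:

import os
import time

def timed_execute(cmd):
    # Print the command, run it through the shell, and return how long
    # it took; the callers above accumulate these durations into the
    # per-stage timers on the job object.
    print("'%s'" % cmd)
    start = time.time()
    os.system(cmd)
    return time.time() - start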
Example 3

# Read in all the candidates
cands = sifting.read_candidates(candfiles)

# Remove candidates that are duplicated in other ACCEL files
if len(cands):
    cands = sifting.remove_duplicate_candidates(cands)

# Remove candidates with DM problems
if len(cands) and args.min_num_DMs > 1:
    cands = sifting.remove_DM_problems(cands, args.min_num_DMs, dmstrs,
                                       args.low_DM_cutoff)

# Remove candidates that are harmonically related to each other
# Note:  this includes only a small set of harmonics
if len(cands) > 1:
    cands = sifting.remove_harmonics(cands)
print("Number of candidates remaining {}".format(len(cands)))
# Write candidates to STDOUT
if args.file_name:
    cands_file_name = 'cands_{}.txt'.format(args.file_name)
elif args.dir == "./":
    cands_file_name = 'cands.txt'
else:
    cands_file_name = 'cand_files/cands_' + args.dir.replace("/", "_") + '.txt'

if len(cands):
    cands.sort(key=attrgetter('sigma'), reverse=True)
    sifting.write_candlist(cands, candfilenm=cands_file_name)
    print(cands_file_name)
else:
    print("No candidates left, created no file")
Example 4
# Imports assumed by this script; as in the GBNCC example, the helpers
# (obs_info, timed_execute, get_folding_command, ...) and the tuning
# parameters (ddplans, use_subbands, the accelsearch settings, etc.)
# are module-level names defined elsewhere in the original pipeline.
import glob
import os
import sys
import tarfile
import time
from operator import attrgetter

from presto import sifting


def main(fil_filenm, workdir):

    # Change to the specified working directory
    os.chdir(workdir)

    # Get information on the observation and the job
    job = obs_info(fil_filenm)
    if job.T < low_T_to_search:
        print("The observation is too short (%.2f s) to search." % job.T)
        sys.exit()
    job.total_time = time.time()

    # Use whatever .zaplist is found in the current directory
    default_zaplist = glob.glob("*.zaplist")[0]

    # Make sure the output directory (and parent directories) exist
    try:
        os.makedirs(job.outputdir)
    except:
        pass

    # Create a directory to hold all the subbands
    if use_subbands:
        try:
            os.makedirs("subbands")
        except:
            pass

    print("\nBeginning PALFA search of '%s'" % job.fil_filenm)
    print("UTC time is:  %s" % (time.asctime(time.gmtime())))

    # rfifind the filterbank file
    cmd = "rfifind -time %.17g -o %s %s > %s_rfifind.out"%\
          (rfifind_chunk_time, job.basefilenm,
           job.fil_filenm, job.basefilenm)
    job.rfifind_time += timed_execute(cmd)
    maskfilenm = job.basefilenm + "_rfifind.mask"
    # Find the fraction that was suggested to be masked
    # Note:  Should we stop processing if the fraction is
    #        above some large value?  Maybe 30%?
    job.masked_fraction = find_masked_fraction(job)

    # Iterate over the stages of the overall de-dispersion plan
    dmstrs = []
    for ddplan in ddplans:

        # Make a downsampled filterbank file if we are not using subbands
        if not use_subbands:
            if ddplan.downsamp > 1:
                cmd = "downsample_filterbank.py %d %s" % (ddplan.downsamp,
                                                          job.fil_filenm)
                job.downsample_time += timed_execute(cmd)
                fil_filenm = job.fil_filenm[:job.fil_filenm.find(".fil")] + \
                             "_DS%d.fil"%ddplan.downsamp
            else:
                fil_filenm = job.fil_filenm

        # Iterate over the individual passes through the .fil file
        for passnum in range(ddplan.numpasses):
            subbasenm = "%s_DM%s" % (job.basefilenm, ddplan.subdmlist[passnum])

            if use_subbands:
                # Create a set of subbands
                cmd = "prepsubband -sub -subdm %s -downsamp %d -nsub %d -mask %s -o subbands/%s %s > %s.subout"%\
                      (ddplan.subdmlist[passnum], ddplan.sub_downsamp,
                       ddplan.numsub, maskfilenm, job.basefilenm,
                       job.fil_filenm, subbasenm)
                job.subbanding_time += timed_execute(cmd)

                # Now de-disperse using the subbands
                cmd = "prepsubband -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d -numout %d -o %s subbands/%s.sub[0-9]* > %s.prepout"%\
                      (ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                       ddplan.dmsperpass, ddplan.dd_downsamp, job.N/ddplan.downsamp,
                       job.basefilenm, subbasenm, subbasenm)
                job.dedispersing_time += timed_execute(cmd)

            else:  # Not using subbands
                cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -numdms %d -numout %d -o %s %s"%\
                      (maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
                       ddplan.dmsperpass, job.N/ddplan.downsamp,
                       job.basefilenm, fil_filenm)
                job.dedispersing_time += timed_execute(cmd)

            # Iterate over all the new DMs
            for dmstr in ddplan.dmlist[passnum]:
                dmstrs.append(dmstr)
                basenm = job.basefilenm + "_DM" + dmstr
                datnm = basenm + ".dat"
                fftnm = basenm + ".fft"
                infnm = basenm + ".inf"

                # Do the single-pulse search
                cmd = "single_pulse_search.py -p -m %f -t %f %s"%\
                      (singlepulse_maxwidth, singlepulse_threshold, datnm)
                job.singlepulse_time += timed_execute(cmd)

                # FFT, zap, and de-redden
                cmd = "realfft %s" % datnm
                job.FFT_time += timed_execute(cmd)
                cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
                      (default_zaplist, job.baryv, fftnm)
                job.FFT_time += timed_execute(cmd)
                cmd = "rednoise %s" % fftnm
                job.FFT_time += timed_execute(cmd)
                try:
                    os.rename(basenm + "_red.fft", fftnm)
                except:
                    pass

                # Do the low-acceleration search
                cmd = "accelsearch -locpow -harmpolish -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, fftnm)
                job.lo_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm + "_ACCEL_%d.txtcand" % lo_accel_zmax)
                except:
                    pass

                # Do the high-acceleration search
                cmd = "accelsearch -locpow -harmpolish -numharm %d -sigma %f -zmax %d -flo %f %s"%\
                      (hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
                job.hi_accelsearch_time += timed_execute(cmd)
                try:
                    os.remove(basenm + "_ACCEL_%d.txtcand" % hi_accel_zmax)
                except:
                    pass

                # Remove the .dat and .fft files
                try:
                    os.remove(datnm)
                    os.remove(fftnm)
                except:
                    pass

    # Make the single-pulse plots
    basedmb = job.basefilenm + "_DM"
    basedme = ".singlepulse "
    # The following will make plots for DM ranges:
    #    0-110, 100-310, 300-1000+
    dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
               basedmb+"[0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"10[0-9].[0-9][0-9]"+basedme,
               basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"30[0-9].[0-9][0-9]"+basedme,
               basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
               basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme]
    dmrangestrs = ["0-110", "100-310", "300-1000+"]
    psname = job.basefilenm + "_singlepulse.ps"
    for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
        cmd = 'single_pulse_search.py -t %f -g "%s"' % \
              (singlepulse_plot_SNR, dmglob)
        job.singlepulse_time += timed_execute(cmd)
        try:
            os.rename(psname,
                      job.basefilenm + "_DMs%s_singlepulse.ps" % dmrangestr)
        except:
            pass

    # Sift through the candidates to choose the best to fold

    job.sifting_time = time.time()

    lo_accel_cands = sifting.read_candidates(
        glob.glob("*ACCEL_%d" % lo_accel_zmax))
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
    if len(lo_accel_cands):
        lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands,
                                                    numhits_to_fold, dmstrs,
                                                    low_DM_cutoff)

    hi_accel_cands = sifting.read_candidates(
        glob.glob("*ACCEL_%d" % hi_accel_zmax))
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
    if len(hi_accel_cands):
        hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands,
                                                    numhits_to_fold, dmstrs,
                                                    low_DM_cutoff)

    all_accel_cands = lo_accel_cands + hi_accel_cands
    if len(all_accel_cands):
        all_accel_cands = sifting.remove_harmonics(all_accel_cands)
        # Note:  the candidates will be sorted in _sigma_ order, not _SNR_!
        all_accel_cands.sort(key=attrgetter('sigma'), reverse=True)
        sifting.write_candlist(all_accel_cands, job.basefilenm + ".accelcands")

    try:
        cmd = "cp *.accelcands " + job.outputdir
        os.system(cmd)
    except:
        pass

    job.sifting_time = time.time() - job.sifting_time

    # Fold the best candidates

    cands_folded = 0
    for cand in all_accel_cands:
        if cands_folded == max_cands_to_fold:
            break
        if cand.sigma > to_prepfold_sigma:
            job.folding_time += timed_execute(
                get_folding_command(cand, job, ddplans))
            cands_folded += 1

    # Now step through the .ps files and convert them to .png and gzip them

    psfiles = glob.glob("*.ps")
    for psfile in psfiles:
        if "singlepulse" in psfile:
            # For some reason the singlepulse files don't transform nicely...
            epsfile = psfile.replace(".ps", ".eps")
            os.system("eps2eps " + psfile + " " + epsfile)
            os.system("pstoimg -density 100 -crop a " + epsfile)
            try:
                os.remove(epsfile)
            except:
                pass
        else:
            os.system("pstoimg -density 100 -flip cw " + psfile)
        os.system("gzip " + psfile)

    # NOTE:  need to add database commands

    # Tar up the results files

    tar_suffixes = ["_ACCEL_%d.tgz"%lo_accel_zmax,
                    "_ACCEL_%d.tgz"%hi_accel_zmax,
                    "_ACCEL_%d.cand.tgz"%lo_accel_zmax,
                    "_ACCEL_%d.cand.tgz"%hi_accel_zmax,
                    "_singlepulse.tgz",
                    "_inf.tgz",
                    "_pfd.tgz",
                    "_bestprof.tgz"]
    tar_globs = ["*_ACCEL_%d"%lo_accel_zmax,
                 "*_ACCEL_%d"%hi_accel_zmax,
                 "*_ACCEL_%d.cand"%lo_accel_zmax,
                 "*_ACCEL_%d.cand"%hi_accel_zmax,
                 "*.singlepulse",
                 "*_DM[0-9]*.inf",
                 "*.pfd",
                 "*.pfd.bestprof"]
    for (tar_suffix, tar_glob) in zip(tar_suffixes, tar_globs):
        tf = tarfile.open(job.basefilenm + tar_suffix, "w:gz")
        for infile in glob.glob(tar_glob):
            tf.add(infile)
            os.remove(infile)
        tf.close()

    # And finish up

    job.total_time = time.time() - job.total_time
    print("\nFinished")
    print("UTC time is:  %s" % (time.asctime(time.gmtime())))

    # Write the job report

    job.write_report(job.basefilenm + ".report")
    job.write_report(os.path.join(job.outputdir, job.basefilenm + ".report"))

    # Copy all the important stuff to the output directory
    try:
        cmd = "cp *rfifind.[bimors]* *.ps.gz *.tgz *.png " + job.outputdir
        os.system(cmd)
    except:
        pass
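
As with the GBNCC script, main here is meant to be invoked with a filterbank file and a working directory; a minimal, hypothetical entry point (argument order assumed from the main signature) would be:

if __name__ == "__main__":
    # Hypothetical driver: first argument is the filterbank file,
    # second (optional) is the working directory.
    fil_filenm = sys.argv[1]
    workdir = sys.argv[2] if len(sys.argv) > 2 else os.getcwd()
    main(fil_filenm, workdir)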