Example #1
def write_listing(subdirs, subints, outfn):
    """Write a text file containing a listing of subints
        that should be combined.

        Inputs:
            subdirs: List of sub-band directories containing 
                sub-ints to combine
            subints: List of subint files to be combined.
                (NOTE: These are the file name only (i.e. no path)
                    Each file listed should appear in each of the
                    subdirs.)
            outfn: The name of the file to write the listing to.

        Outputs:
            None
    """
    # Ensure paths are absolute
    subdirs = [os.path.abspath(path) for path in subdirs]

    if os.path.exists(outfn):
        raise errors.InputError("A file already exists with the requested " \
                        "output file name (%s)!" % outfn)
    with open(outfn, 'w') as outfile:
        outfile.write("# Listing of sub-int files to combine\n"
                      "# Each file name listed below should appear "
                      "in each of the following directories.\n"
                      "# Each directory contains data from a different "
                      "frequency sub-band.\n")
        outfile.write("===== Frequency sub-band directories =====\n")
        for subdir in sorted(subdirs):
            outfile.write(subdir + "\n")
        outfile.write("========== Sub-integration files =========\n")
        for subint in sorted(subints):
            outfile.write(subint + "\n")
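
A minimal usage sketch for write_listing follows. The module name (combine) and the directory and file names are hypothetical placeholders; the only real requirement is that the output file does not already exist.

# Hypothetical usage of write_listing (module name and paths are placeholders).
import combine

subdirs = ["/data/obs1/subband0", "/data/obs1/subband1"]  # sub-band directories
subints = ["pulse_0001.ar", "pulse_0002.ar"]              # file names only, no paths
combine.write_listing(subdirs, subints, "group_listing.txt")
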
Example #2
def make_template(outdir, psrname, stage, rcvr, max_span=1, min_snr=0):
    if not os.path.isdir(outdir):
        raise errors.InputError("Output directory (%s) doesn't exist!" %
                                outdir)
    filerows = list_files.get_files([psrname], stage, rcvr)
    print "Found %d matching files" % len(filerows)
    fns = get_files_to_combine(filerows, max_span, min_snr)
    if not fns:
        raise errors.TemplateGenerationError("No files for type=%s, "
                                             "psr=%s, rcvr=%s" %
                                             (stage, psrname, rcvr))
    print "Combining %d files" % len(fns)
    cmbfn = combine_files(fns)

    runpaas = True
    outbasenm = None  # Set only once a template has been successfully kept
    tmpdir = tempfile.mkdtemp(suffix="cg_paas", dir=config.tmp_directory)
    while runpaas:
        try:
            print "Running paas"
            utils.execute(['paas', '-D', '-i', cmbfn], dir=tmpdir)
        except Exception:
            if raw_input("Failure! Give up? (y/n): ").lower()[0] == 'y':
                runpaas = False
        else:
            if raw_input("Success! Keep template? (y/n): ").lower()[0] == 'y':
                runpaas = False
                outbasenm = os.path.join(outdir,
                                         "%s_%s_%s" % (psrname, rcvr, stage))
                tmpbasenm = os.path.join(tmpdir, 'paas')
                shutil.copy(tmpbasenm + '.m', outbasenm + '.m')
                shutil.copy(tmpbasenm + '.std', outbasenm + '.std')
                shutil.copy(cmbfn, outbasenm + ".add")
    # Clean up paas files
    try:
        shutil.rmtree(tmpdir)
    except OSError:
        pass
    try:
        os.remove(cmbfn)
    except OSError:
        pass
    if outbasenm is None:
        raise errors.TemplateGenerationError("Template generation was "
                                             "abandoned by the user.")
    return outbasenm + '.std'
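
A hedged sketch of calling make_template directly. The output directory, pulsar name, stage, and receiver values below are assumptions; the call raises errors.TemplateGenerationError if no suitable files are found, and otherwise returns the path of the generated '.std' template.

# Hypothetical invocation of make_template (all argument values are placeholders).
stdfn = make_template("/data/templates/J0218+4232",  # output directory must already exist
                      psrname="J0218+4232",
                      stage="cleaned",
                      rcvr="P200-3",
                      max_span=1,
                      min_snr=8)
print "Template written to: %s" % stdfn
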
Example #3
def main():
    print ""
    print "        combine.py"
    print "     Patrick  Lazarus"
    print ""

    if len(args.subdirs):
        print "Number of input sub-band directories: %d" % len(args.subdirs)
    elif args.group_file is None:
        raise errors.InputError(
            "No sub-band directories to combine and no group file provided!")

    if args.group_file is not None:
        usedirs, subints = read_listing(args.group_file)
        groups = [subints]
    else:
        # Group directories
        usedirs, groups = group_subband_dirs(args.subdirs,
                                             maxspan=args.combine_maxspan,
                                             maxgap=args.combine_maxgap,
                                             filetype=args.filetype)

    # Work in a temporary directory
    tmpdir = tempfile.mkdtemp(suffix="_combine", dir=config.tmp_directory)
    # Combine files
    outfns = []
    for subints in groups:
        if not args.no_combine:
            preppeddirs = prepare_subints(usedirs,
                                          subints,
                                          baseoutdir=os.path.join(
                                              tmpdir, 'data'),
                                          trimpcnt=6.25)
            outfn = combine_subints(preppeddirs, subints, outdir=os.getcwd())
            outfns.append(outfn)
        if args.write_listing:
            write_listing(usedirs, subints, "list.txt")
    shutil.rmtree(tmpdir)
    if outfns:
        print "Created %d combined files" % len(outfns)
        for outfn in outfns:
            print "    %s" % outfn
Example #4
def main():
    print ""
    print "        calibrate.py"
    print "     Patrick  Lazarus"
    print ""
    
    if len(args.files):
        print "Number of input files: %d" % len(args.files)
    else:
        raise errors.InputError("No files to calibrate!")

    if args.caldb is None:
        # Prepare to fetch caldb info from the pipeline database
        db = database.Database()
    else:
        caldb = args.caldb

    for fn in args.files:
        if args.caldb is None: 
            arf = utils.ArchiveFile(fn)
            caldb = update_caldb(db, arf['name'], force=True)
        calfn = calibrate(fn, caldb)
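
The per-file logic above reduces to: resolve a calibrator database (either the path given on the command line or one refreshed from the pipeline database), then calibrate. A sketch for a single file, assuming the signatures used above and a placeholder archive name:

# Hypothetical stand-alone calibration of one archive (file name is a placeholder).
fn = "J0218+4232_55000.ar"
db = database.Database()
arf = utils.ArchiveFile(fn)
caldb = update_caldb(db, arf['name'], force=True)  # refresh caldb for this pulsar
calfn = calibrate(fn, caldb)
print "Calibrated file: %s" % calfn
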
Example #5
def main():
    print ""
    print "        correct.py"
    print "     Patrick  Lazarus"
    print ""

    if len(args.files):
        print "Number of input files: %d" % len(args.files)
    else:
        raise errors.InputError("No files to correct!")

    if args.obslog_line is not None:
        obsinfo = parse_obslog_line(args.obslog_line)
    else:
        obsinfo = None

    for fn in args.files:
        corrfn, corrstr, note = correct_header(fn,
                                               obsinfo=obsinfo,
                                               outfn=args.outfn,
                                               backend=args.backend_name)
        print "    Output corrected file: %s" % corrfn
        print "        Notes: %s" % note
Example #6
def main():
    psrname = utils.get_prefname(args.psrname)

    if args.nchan == 1:
        ext = '.FTp'
        scrunchargs = ['-F']
    elif args.nchan > 1:
        ext = '.Tp.F%d' % args.nchan
        scrunchargs = ['--setnchn', '%d' % args.nchan]
    else:
        raise ValueError("Cannot scrunch using negative number of "
                         "channels (nchan=%d)" % args.nchan)

    #psrdirs = dict([(utils.get_prefname(os.path.basename(dd)),
    #                 os.path.basename(dd))
    #                for dd in glob.glob(os.path.join(PARFILE_DIR, '*'))
    #                if os.path.isdir(dd)])

    #if psrname in psrdirs:
    #    legacydir = os.path.join('/homes/plazarus/research/epta-legacy/',
    #                             psrdirs[psrname])
    #else:
    #    legacydir = None

    # Copy EPTA legacy TOAs
    #if legacydir and not os.path.exists("epta-legacy"):
    #    os.mkdir("epta-legacy")
    #    shutil.copytree(os.path.join(legacydir, "tims"), "epta-legacy/tims")
    #    shutil.copy(os.path.join(legacydir,
    #                             "%s_t2noise.model" % psrdirs[psrname]),
    #                "epta-legacy")

    # Find parfile
    if args.parfile is not None:
        if not os.path.exists(args.parfile):
            raise errors.InputError("Parfile specified (%s) doesn't exist!" %
                                    args.parfile)
        inparfn = args.parfile
    else:
        # Create parfile
        #inparfn = os.path.join('/homes/plazarus/research/epta-legacy/',
        #                       psrdirs[psrname], "%s.par" % psrdirs[psrname])
        inparfn = reduce_data.PARFILES[psrname]
    #intimfn = os.path.join('/homes/plazarus/research/epta-legacy/',
    #                       psrdirs[psrname], "%s_all.tim" % psrdirs[psrname])

    outparfn = "%s.T2.par" % psrname
    with open(inparfn, 'r') as inff, open(outparfn, 'w') as outff:
        for line in inff:
            # Don't copy over JUMPs or EFACs to 'outff'
            if not line.startswith("JUMP") and 'EFAC' not in line:
                outff.write(line)
        outff.write("\n".join(EXTRA_PARFILE_LINES))

    template_dir = os.path.join(BASE_TEMPLATE_DIR, psrname)
    for stage in STAGES:
        if stage == "current":
            continue
        for rcvr in RCVRS:
            template_name = "%s_%s_%s.std" % (psrname, rcvr, stage)
            # First, check whether the template exists
            if not os.path.isfile(os.path.join(template_dir, template_name)):
                # Make template
                utils.print_info("No template (%s) found!" % template_name, 1)
                try:
                    os.makedirs(template_dir)
                except OSError:
                    # The directory probably exists already
                    pass
                try:
                    print psrname, stage, rcvr
                    stdfn = make_template.make_template(
                        template_dir, psrname, stage, rcvr)
                    utils.print_info("Made template: %s" % stdfn, 1)
                except errors.TemplateGenerationError:
                    pass

    timfns = []
    for stage in STAGES:
        # List files to reduce
        rows = list_files.get_files([psrname], stage)
        print len(rows)
        fns = {}
        # Initialize list of file names for each receiver
        for rcvr in RCVRS:
            fns[rcvr] = []
        for row in rows:
            if row['stage'] not in ('cleaned', 'calibrated'):
                continue
            fn = os.path.join(row['filepath'], row['filename'])
            fns[row['rcvr']].append(fn)
        stagetimfn = "%s_%s.tim" % (psrname, stage)
        print "Opening %s" % stagetimfn
        stagetimff = open(stagetimfn, 'w')
        # Create file listings and generate TOAs
        for rcvr in RCVRS:
            print rcvr, len(fns[rcvr])
            if not fns[rcvr]:
                # No files
                continue
            # Check for existing scrunched files
            toscrunch = []
            scrunchedfns = []
            scrunchdir = os.path.join("scrunched", rcvr)
            for fn in fns[rcvr]:
                scrunchfn = os.path.join(scrunchdir,
                                         os.path.basename(fn) + ext)
                scrunchedfns.append(scrunchfn)
                if not os.path.exists(scrunchfn):
                    toscrunch.append(fn)
            # Scrunch files
            try:
                os.makedirs(scrunchdir)
            except OSError:
                # The directory probably exists already
                pass
            print "Working on %s %s" % (rcvr, stage)
            for fn in utils.show_progress(toscrunch, width=50):
                # Create a copy of the file with the 'eff_psrix' site
                cmd = ['psredit', '-c', 'site=eff_psrix', '-O', scrunchdir, fn]
                cmd.extend(['-e', fn.split('.')[-1] + ext])
                utils.execute(cmd)
                arfn = os.path.join(scrunchdir, os.path.basename(fn + ext))
                parfn = utils.get_norm_parfile(arfn)
                # Re-install ephemeris
                cmd = ['pam', '-Tp', '-E', parfn, '-m', arfn] + scrunchargs
                utils.execute(cmd)

            toas = []
            mjds = []
            for row in rows:
                if row['rcvr'] != rcvr:
                    continue
                if row['stage'] not in ('cleaned', 'calibrated'):
                    continue
                template_name = "%s_%s_%s.std" % (psrname, rcvr, row['stage'])
                template = os.path.join(template_dir, template_name)
                # Generate TOAs
                fn = os.path.join(scrunchdir, row['filename']) + ext
                print fn
                stdout, stderr = utils.execute([
                    "pat", "-T", "-A", "FDM", "-f", "tempo2", "-C",
                    "rcvr chan", "-d", "-s", template, fn
                ])
                # Parse TOAs
                toalines = stdout.split('\n')
                for line in toalines:
                    toainfo = readers.tempo2_reader(line)
                    if toainfo is not None:
                        # Formatter expects 'file' field to be called 'rawfile'
                        toainfo['rawfile'] = toainfo['file']
                        toainfo['telescope_code'] = toainfo['telescope']
                        toainfo['type'] = stage
                        toainfo['rcvr'] = rcvr
                        toainfo['file_id'] = row['file_id']
                        toainfo['obs_id'] = row['obs_id']
                        toainfo['shortstage'] = row['stage'][:2].upper()
                        if row['stage'] == 'cleaned':
                            toainfo['grp'] = "%s_clean" % rcvr
                        else:
                            toainfo['grp'] = "%s_cal" % rcvr
                        toainfo['chan'] = toainfo['extras']['chan']
                        toas.append(toainfo)
                        mjds.append(toainfo['imjd'])
            # Sort TOAs
            utils.sort_by_keys(toas, ['fmjd', 'imjd'])

            # Format timfile
            sysflag = 'EFF.AS.%(rcvr)s.%(shortstage)s'
            timlines = formatters.tempo2_formatter(toas,
                                                   flags=[
                                                       ('rcvr', '%(rcvr)s'),
                                                       ('type', '%(type)s'),
                                                       ('grp', '%(grp)s'),
                                                       ('sys', sysflag),
                                                       ('obsid', '%(obs_id)d'),
                                                       ('fileid',
                                                        '%(file_id)d'),
                                                       ('chan', '%(chan)s')
                                                   ])

            mjds.sort()
            #offsetmjds = sorted(TIME_OFFSETS.keys())
            #inds = np.searchsorted(mjds, offsetmjds)+1
            # Insert extra lines from back of list
            #for ind, key in reversed(zip(inds, offsetmjds)):
            #    timlines[ind:ind] = ["\n"+TIME_OFFSETS[key]+"\n"]

            # Write out timfile
            timfn = "%s_%s_%s.tim" % (psrname, rcvr, stage)
            with open(timfn, 'w') as ff:
                for line in timlines:
                    ff.write(line + "\n")
            utils.print_info("Wrote out timfile: %s" % timfn)
            timfns.append(timfn)
            stagetimff.write("INCLUDE %s\n" % timfn)
        stagetimff.close()

    #outtimfn = os.path.join("epta-legacy", os.path.basename(intimfn))
    #with open(intimfn, 'r') as inff, open(outtimfn, 'w') as outff:
    #    for line in inff:
    #        outff.write(line)
    #    for rcvr in RCVRS:
    #        timfn = "%s_%s_cleaned.tim" % (psrname, rcvr)
    #        if os.path.exists(timfn):
    #            outff.write("INCLUDE ../%s\n" % timfn)

    # Count TOAs
    #toas = load_toa.parse_timfile(outtimfn, determine_obssystem=False)
    systems = {}
    #for toa in toas:
    #    if toa['is_bad']:
    #        continue
    #    if not 'sys' in toa['extras']:
    #        print toa
    #    else:
    #        nn = systems.get(toa['extras']['sys'], 0)
    #        systems[toa['extras']['sys']] = nn+1

    outparfn = "%s.T2.par" % psrname
    #outparfn2 = os.path.join("epta-legacy", os.path.basename(inparfn))
    with open(inparfn, 'r') as inff, open(outparfn, 'w') as outff:  #, \
        #open(outparfn2, 'w') as outff2:
        for line in inff:
            # Don't copy over JUMPs or EFACs to 'outff'
            # Copy JUMPs and EFACs to 'outff2' and fit
            #if line.startswith("JUMP"):
            #    if "-sys" in line:
            #        obssys = line.split()[2]
            #        if systems.get(obssys, 0):
            #            # Observing system has TOAs
            #            # Replace all system jumps by 0 and set the fit flag
            #            outff2.write(" ".join(line.split()[:3])+" 0 1\n")
            #    else:
            #        outff2.write(line)
            #elif line.startswith("T2EFAC"):
            #    outff2.write(line)
            #elif line.startswith("NITS"):
            #    pass
            #else:
            outff.write(line)
            # Remove fit-flags for 'outff2'
            #outff2.write(" ".join(line.split()[:2])+'\n')
        outff.write("\n".join(EXTRA_PARFILE_LINES))
        #outff2.write("\n".join(["JUMP -sys EFF.AS.%s.CL 0 1" % rcvr for rcvr in RCVRS]))
        #outff2.write("\nNITS 3\n")

    # Create a master timfile
    master_timfn = "%s_all.tim" % psrname
    with open(master_timfn, 'w') as ff:
        for timfn in timfns:
            ff.write("INCLUDE %s\n" % timfn)
    utils.print_info("Wrote out master timfile: %s" % master_timfn)
Example #7
def group_subband_dirs(subdirs, maxspan=None, maxgap=None, \
            tossfrac=None, filetype='subint'):
    """Based on file names group sub-ints from different
        sub-bands. Each subband is assumed to be in a separate
        directory.

        Inputs:
            subdirs: List of sub-band directories
            maxspan: Maximum span, in seconds, between first and 
                last sub-int in a combined file.
            maxgap: Maximum gap, in seconds, permitted before 
                starting a new output file.
            tossfrac: Fraction of sub-ints required for a 
                sub-band to be combined. If a sub-band has
                fewer than tossfrac*N_subint sub-ints it
                will be excluded.
            filetype: Type of files being grouped. Can be 'subint',
                or 'single'. (Default: 'subint')

        Outputs:
            usedirs: List of directories to use when combining.
                (NOTE: This may be different from the input
                    'subdirs' because some directories may have
                    too few subints to be worth combining. This
                    depends on the input value of 'tossfrac'.)
            groups: List of groups of files to be combined.
                (NOTE: These are the file name only (i.e. no path)
                    Each file listed appears in each of 'usedirs'.)
    """
    if maxspan is None:
        maxspan = config.cfg.combine_maxspan
    if maxgap is None:
        maxgap = config.cfg.combine_maxgap
    if tossfrac is None:
        tossfrac = 1 - config.cfg.missing_subint_tolerance

    if filetype not in FILETYPE_SPECIFICS:
        raise errors.InputError("File type (%s) is not recognized. " \
                                "Possible values are: '%s'" % \
                            (filetype, "', '".join(FILETYPE_SPECIFICS.keys())))
    else:
        globpat, get_start = FILETYPE_SPECIFICS[filetype]

    # Ensure paths are absolute
    subdirs = [os.path.abspath(path) for path in subdirs]
    utils.print_debug("Grouping subints from %d sub-band directories" % \
                        len(subdirs), 'combine')

    nindirs = len(subdirs)
    nsubbands = len(subdirs)
    nperdir = collections.Counter()
    noccurs = collections.Counter()
    nintotal = 0
    for subdir in subdirs:
        fns = glob.glob(os.path.join(subdir, globpat))
        nn = len(fns)
        utils.print_debug("Found %d sub-int files in %s" % \
                            (nn, subdir), 'combine')
        nintotal += nn
        nperdir[subdir] = nn
        noccurs.update([os.path.basename(fn) for fn in fns])
    nsubints = len(noccurs)

    # Remove sub-bands that have too few subints
    thresh = tossfrac * nsubints
    for ii in xrange(len(subdirs) - 1, -1, -1):
        subdir = subdirs[ii]
        if nperdir[subdir] < thresh:
            utils.print_info("Ignoring sub-ints from %s. " \
                    "It has too few sub-ints (%d < %d; tossfrac: %f)" % \
                    (subdir, nperdir[subdir], thresh, tossfrac), 2)
            subdirs.pop(ii)
            del nperdir[subdir]

            fns = glob.glob(os.path.join(subdir, globpat))
            noccurs.subtract([os.path.basename(fn) for fn in fns])
            nsubbands -= 1

    # Remove subints that are no longer included in any subbands
    to_del = []
    for fn in noccurs:
        if not noccurs[fn]:
            to_del.append(fn)
    for fn in to_del:
        del noccurs[fn]

    # Now combine subints
    lastsubint = datetime.datetime.min
    filestart = datetime.datetime.min
    groups = []
    if nsubbands:
        for subint in sorted(noccurs):
            if noccurs[subint] < nsubbands:
                utils.print_info("Ignoring sub-int (%s). It doesn't apear in all " \
                                "subbands (only %d of %d)" % \
                                (subint, noccurs[subint], nsubbands), 2)
                continue
            start = get_start(os.path.join(subdirs[0], subint))
            if ((start - filestart).total_seconds() > maxspan) or \
                        ((start - lastsubint).total_seconds() > maxgap):
                filestart = start
                utils.print_debug("Starting a new file at %s" % \
                        filestart, 'combine')
                # Start a new file
                groups.append([])
            groups[-1].append(subint)
            lastsubint = start
    nused = sum([len(grp) for grp in groups])
    utils.print_info("Grouped %d files from %d directories into %d groups.\n" \
                     "(Threw out %d directories and %d files)" % \
                     (nintotal, nindirs, len(groups), nindirs-len(subdirs), \
                        nintotal-nused), 2)
    return subdirs, groups
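
A minimal usage sketch for group_subband_dirs. The directory names are placeholders and the keyword values are illustrative (maxspan and maxgap are in seconds, per the docstring); when the keywords are omitted, the configuration defaults are used instead.

# Hypothetical grouping of sub-band directories (paths and values are placeholders).
usedirs, groups = group_subband_dirs(["/data/obs1/band0", "/data/obs1/band1"],
                                     maxspan=3600,   # seconds
                                     maxgap=300,     # seconds
                                     tossfrac=0.9,   # require 90% of sub-ints per band
                                     filetype='subint')
for ii, grp in enumerate(groups):
    print "Group %d: %d sub-int files" % (ii, len(grp))
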