# --- CLI tail: remaining argument definitions, parsing, and dispatch ---
parser.add_argument('-l', required=False, default='.', type=str,
                    help='Destination directory for output logs.')
parser.add_argument('--archive-new', required=False, default=False,
                    action='store_true',
                    help='Store the copies of the images pulled via this '
                         'process to the database as HTTPArchiveCopies.')
args = parser.parse_args()

# read the data and pull the images
import zuds
import numpy as np

zuds.init_db(ref=True)

# one integer image ID per line in the input file
ids = np.genfromtxt(args.idfile, encoding='ascii', dtype=int).tolist()

# BUG FIX: the original applied bitwise NOT (~) to the boolean flags.
# ~True == -2 and ~False == -1 — both truthy — so every negated flag was
# effectively always True regardless of the command line. Logical `not`
# is the intended negation.
# NOTE(review): `ipac` is driven by --no-http and `http` by --no-local,
# which looks crossed — preserved as-is pending confirmation against
# retrieve_images' parameter semantics.
retrieve_images(ids,
                job_script_destination=args.j,
                frame_destination=args.f,
                log_destination=args.l,
                archive_new=args.archive_new,
                ipac=not args.no_http,
                http=not args.no_local,
                tape=not args.no_tape,
                n_jobs=args.n_jobs,
                preserve_dirs=not args.no_preserve_dirs)
# Script: copy this rank's share of science images into the public scratch
# web tree (organized by field / CCD / quadrant / filter) ahead of running
# single-epoch subtractions.
import os
import sys
import zuds
import dosub
import shutil
import traceback

if __name__ == '__main__':
    # NOTE(review): not referenced in the visible portion of this script;
    # presumably consumed further down — confirm against the full file.
    send_alerts = True
    zuds.init_db()

    infile = sys.argv[1]   # file listing the science images assigned to this run
    refvers = sys.argv[2]  # reference version tag — usage is beyond this excerpt

    # pipeline classes (consumed later in the script, past this excerpt)
    subclass = zuds.SingleEpochSubtraction
    sciclass = zuds.ScienceImage

    # get the work
    imgs = zuds.get_my_share_of_work(infile)

    # accumulators filled by the loop body (which continues past this excerpt)
    subs = []
    dirs = []
    all_detections = []
    for inpt in imgs:
        # look up the DB record by basename so we can key the destination
        # path on its field / ccdid / qid / filter attributes
        s = zuds.ScienceImage.get_by_basename(os.path.basename(inpt))
        fn = f'/global/cfs/cdirs/m937/www/data/scratch/{s.field:06d}/' \
             f'c{s.ccdid:02d}/q{s.qid}/{zuds.fid_map[s.fid]}/{s.basename}'
        shutil.copy(inpt, fn)
# Script: distributed forced photometry for ZUDS subtractions.
import zuds
# long statement timeout — photometry queries can run for a while
zuds.init_db(timeout=60000)

import pandas as pd
import sys
import os
import time
from astropy.io import fits
from astropy.wcs import WCS
from sqlalchemy.dialects.postgresql import array
from functools import wraps
import errno
import signal
import sqlalchemy as sa

# BUG FIX: the original called zuds.init_db() a second time here with no
# arguments, re-initializing the connection and discarding the
# timeout=60000 configured above. The redundant call has been removed.

__author__ = 'Danny Goldstein <*****@*****.**>'
__whatami__ = 'Do the photometry for ZUDS.'

infile = sys.argv[1]   # file listing all the subs to do photometry on
outfile = sys.argv[2]  # file listing all the photometry to load into the DB

# get the work
imgs = zuds.get_my_share_of_work(infile)

# newest epochs first: sort (descending) on the date token embedded in the
# 'ztf_<date>_...' basename of each work item's first element
imgs = sorted(imgs,
              key=lambda s: s[0].split('ztf_')[1].split('_')[0],
              reverse=True)


def write_csv(output):
    # NOTE: this definition continues beyond the visible excerpt.
    df = pd.DataFrame(output)