def mkpsf(expnum, ccd, version, dry_run=False, prefix=""):
    """Run the OSSOS jmpmakepsf script on one exposure/ccd and archive results."""
    # Make sure the VOSpace destination directory exists before doing work.
    dest_dir = os.path.dirname(
        storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version,
                             ext='fits'))
    if not dry_run:
        storage.mkdir(dest_dir)

    # Pull the image down from VOSpace, then run the external PSF builder.
    logging.info("Getting file from VOSpace")
    local_image = storage.get_image(expnum, ccd, version=version, prefix=prefix)
    logging.info("Running mkpsf on %s %d" % (expnum, ccd))
    logging.info(util.exec_prog(['jmpmakepsf.csh', './', local_image,
                                 'yes', 'yes']))
    if dry_run:
        return

    # Push each product jmpmakepsf left beside the image back into VOSpace.
    stem = os.path.splitext(local_image)[0]
    products = ('mopheader', 'psf.fits', 'zeropoint.used',
                'apcor', 'fwhm', 'phot')
    for product in products:
        target = storage.dbimages_uri(expnum, ccd, prefix=prefix,
                                      version=version, ext=product)
        storage.copy(stem + "." + product, target)
    return
def mkpsf(expnum, ccd):
    """Run the OSSOS makepsf script for one exposure/ccd and archive results.

    :param expnum: exposure number to process
    :param ccd: CCD number within the exposure
    """
    # get image from the vospace storage area
    filename = storage.get_image(expnum, ccd, version='p')
    logging.info("Running mkpsf on %s %d" % (expnum, ccd))
    # launch the makepsf script
    util.exec_prog(['jmpmakepsf.csh', './', filename, 'no'])
    # place the results into VOSpace
    basename = os.path.splitext(filename)[0]
    # confirm destination directory exists.
    destdir = os.path.dirname(
        storage.dbimages_uri(expnum, ccd, version='p', ext='fits'))
    # FIX: corrected typo in the log message ("direcoties" -> "directories").
    logging.info("Checking that destination directories exist")
    storage.mkdir(destdir)
    for ext in ('mopheader', 'psf.fits', 'zeropoint.used',
                'apcor', 'fwhm', 'phot'):
        dest = storage.dbimages_uri(expnum, ccd, version='p', ext=ext)
        source = basename + "." + ext
        storage.copy(source, dest)
    return
def mkpsf(expnum, ccd, fversion):
    """Run the OSSOS makepsf script and copy its products to VOSpace."""
    # Fetch the image locally, then run the external PSF script on it.
    local_name = storage.get_image(expnum, ccd, version=fversion)
    logging.info("Running mkpsf on %s %d" % (expnum, ccd))
    util.exec_prog(['jmpmakepsf.csh', './', local_name, 'no'])

    stem = os.path.splitext(local_name)[0]

    # Ensure the destination directory exists before any copies happen.
    dest_dir = os.path.dirname(
        storage.dbimages_uri(expnum, ccd, version=fversion, ext='fits'))
    logging.info("Checking that destination directories exist")
    storage.mkdir(dest_dir)

    # Remove any stale copy of each product, then upload the fresh one.
    for product in ('mopheader', 'psf.fits', 'zeropoint.used',
                    'apcor', 'fwhm', 'phot'):
        dest = storage.dbimages_uri(expnum, ccd, version=fversion, ext=product)
        source = stem + "." + product
        logging.info("Copying %s -> %s" % (source, dest))
        storage.remove(dest)
        storage.copy(source, dest)
    return
def mk_mopheader(expnum, ccd, version, dry_run=False, prefix=""):
    """Run the OSSOS mopheader script (stepZjmp) and store the .mopheader.

    :param expnum: exposure number
    :param ccd: CCD number within the exposure
    :param version: processing version tag (e.g. 'p')
    :param dry_run: when True, skip all writes to VOSpace
    :param prefix: optional filename prefix used by storage URIs
    """
    ## confirm destination directory exists.
    destdir = os.path.dirname(
        storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version,
                             ext='fits'))
    if not dry_run:
        storage.mkdir(destdir)

    ## get image from the vospace storage area
    filename = storage.get_image(expnum, ccd, version=version, prefix=prefix)
    logging.info("Running mopheader on %s %d" % (expnum, ccd))

    ## launch the mopheader script
    # FIX: str.strip('.fits') strips any of the characters {., f, i, t, s}
    # from BOTH ends of the name (mangling basenames that start or end with
    # those letters); os.path.splitext removes exactly the extension.
    expname = os.path.splitext(os.path.basename(filename))[0]
    logging.info(util.exec_prog(['stepZjmp', '-f', expname]))
    mopheader_filename = expname + ".mopheader"
    # mopheader_filename = mopheader.main(filename)
    if dry_run:
        return

    destination = storage.dbimages_uri(expnum, ccd, prefix=prefix,
                                       version=version, ext='mopheader')
    source = mopheader_filename
    storage.copy(source, destination)
    return
def build_source_reading(self, expnum, ccd, X, Y):
    """
    Given the location of a source in the image, create a source reading.
    """
    # First look for the full-exposure image at the top level of dbimages.
    image_uri = storage.dbimages_uri(expnum=expnum,
                                     ccd=None,
                                     version='p',
                                     ext='.fits',
                                     subdir=None)
    logger.debug('Trying to access {}'.format(image_uri))
    if not storage.exists(image_uri, force=False):
        # Fall back to the per-ccd subdirectory layout.
        logger.warning('Image not in dbimages? Trying subdir.')
        image_uri = storage.dbimages_uri(expnum=expnum, ccd=ccd, version='p')
        if not storage.exists(image_uri, force=False):
            logger.warning("Image doesn't exist in ccd subdir. %s" % image_uri)
            return None

    slice_rows = config.read("CUTOUTS.SINGLETS.SLICE_ROWS")
    slice_cols = config.read("CUTOUTS.SINGLETS.SLICE_COLS")

    # -9999 is the sentinel value for an unresolved x/y position.
    if X == -9999 or Y == -9999:
        logger.warning("Skipping {} as x/y not resolved.".format(image_uri))
        return None

    # The requested centre must land on (or within half a slice of) the chip.
    on_chip = (-slice_cols / 2. < X < 2048 + slice_cols / 2. and
               -slice_rows / 2. < Y < 4600 + slice_rows / 2.0)
    if not on_chip:
        logger.warning("Central location ({},{}) off image cutout.".format(X, Y))
        return None

    mopheader_uri = storage.dbimages_uri(expnum=expnum,
                                         ccd=ccd,
                                         version='p',
                                         ext='.mopheader')
    if not storage.exists(mopheader_uri, force=False):
        # ELEVATE! we need to know to go off and reprocess/include this image.
        logger.critical('Image exists but processing incomplete. Mopheader missing. {}'.format(image_uri))
        return None

    mopheader = get_mopheader(expnum, ccd)

    # Build astrom.Observation
    observation = astrom.Observation(expnum=str(expnum),
                                     ftype='p',
                                     ccdnum=str(ccd),
                                     fk="")
    observation.rawname = os.path.splitext(
        os.path.basename(image_uri))[0] + str(ccd).zfill(2)
    observation.header = mopheader
    return observation
def from_source_reference(expnum, ccd, X, Y):
    """
    Given the location of a source in the image, create a source reading.
    """
    # Prefer the whole-exposure image at the top of the dbimages tree.
    image_uri = storage.dbimages_uri(expnum=expnum,
                                     ccd=None,
                                     version='p',
                                     ext='.fits',
                                     subdir=None)
    logger.debug('Trying to access {}'.format(image_uri))
    if not storage.exists(image_uri, force=False):
        # Otherwise try the per-ccd subdirectory layout.
        logger.warning('Image not in dbimages? Trying subdir.')
        image_uri = storage.dbimages_uri(expnum=expnum, ccd=ccd, version='p')
        if not storage.exists(image_uri, force=False):
            logger.warning("Image doesn't exist in ccd subdir. %s" % image_uri)
            return None

    # -9999 flags an unresolved x/y position.
    if X == -9999 or Y == -9999:
        logger.warning("Skipping {} as x/y not resolved.".format(image_uri))
        return None

    mopheader_uri = storage.dbimages_uri(expnum=expnum,
                                         ccd=ccd,
                                         version='p',
                                         ext='.mopheader')
    if not storage.exists(mopheader_uri, force=False):
        # ELEVATE! we need to know to go off and reprocess/include this image.
        logger.critical('Image exists but processing incomplete. Mopheader missing. {}'.format(image_uri))
        return None

    mopheader = storage.get_mopheader(expnum, ccd)

    # Build astrom.Observation
    observation = Observation(expnum=str(expnum),
                              ftype='p',
                              ccdnum=str(ccd),
                              fk="")
    observation.rawname = os.path.splitext(
        os.path.basename(image_uri))[0] + str(ccd).zfill(2)
    observation.header = mopheader
    return observation
def from_source_reference(expnum, ccd, x, y):
    """
    Given the location of a source in the image, create a source reading.
    """
    # Look for the full-exposure image first; fall back to the ccd subdir.
    image_uri = storage.dbimages_uri(expnum=expnum,
                                     ccd=None,
                                     version='p',
                                     ext='.fits',
                                     subdir=None)
    logger.debug('Trying to access {}'.format(image_uri))
    if not storage.exists(image_uri, force=False):
        logger.warning('Image not in dbimages? Trying subdir.')
        image_uri = storage.dbimages_uri(expnum=expnum, ccd=ccd, version='p')
        if not storage.exists(image_uri, force=False):
            logger.warning("Image doesn't exist in ccd subdir. %s" % image_uri)
            return None

    # -9999 flags an unresolved x/y position.
    if x == -9999 or y == -9999:
        logger.warning(
            "Skipping {} as x/y not resolved.".format(image_uri))
        return None

    mopheader_uri = storage.dbimages_uri(expnum=expnum,
                                         ccd=ccd,
                                         version='p',
                                         ext='.mopheader')
    if not storage.exists(mopheader_uri, force=False):
        # ELEVATE! we need to know to go off and reprocess/include this image.
        logger.critical(
            'Image exists but processing incomplete. Mopheader missing. {}'
            .format(image_uri))
        return None

    # Build astrom.Observation
    observation = Observation(expnum=str(expnum),
                              ftype='p',
                              ccdnum=str(ccd),
                              fk="")
    observation.rawname = os.path.splitext(
        os.path.basename(image_uri))[0] + str(ccd).zfill(2)
    return observation
def main(expnum, ccd):
    """Build an <expnum>p<ccd>.obj.fits MEF catalog and upload it to VOSpace.

    Combines the 'jmp' and 'matt' object catalogs (cut to the DATASEC region)
    with the astrometric header into one FITS file, then copies it to the
    dbimages area with retries.

    :param expnum: exposure number
    :param ccd: CCD number within the exposure
    """
    header = storage.get_astheader(expnum, ccd)
    datasec = storage.datasec_to_list(header.get('DATASEC',
                                                 '[80:2080,30,4160]'))
    # FIX: bare "except:" also swallows SystemExit/KeyboardInterrupt; catch
    # narrower exception types throughout.
    try:
        fwhm = "{:5.2f}".format(storage.get_fwhm(expnum, ccd))
    except Exception:
        fwhm = 'unknown'
    for keyword in del_keyword_list:
        try:
            del header[keyword]
        except KeyError:
            pass
    header['FWHM'] = (fwhm, 'FWHM in pixels')
    header['EXTNAME'] = 'header'
    primary_hdu = fits.PrimaryHDU(header=header)
    hdu_list = fits.HDUList([primary_hdu, ])
    for ext in ['jmp', 'matt']:
        extension = 'obj.' + ext
        name = "{}p{:02d}.{}".format(expnum, ccd, extension)
        # Remove any stale local copies before retrieving.
        try:
            os.unlink(name)
            os.unlink(name + ".fits")
        except OSError:
            pass
        logging.info("Retrieving {}".format(name))
        obj_file = mop_file.Parser(expnum, ccd, extension)
        obj_file.parse()
        # Keep only sources whose X/Y fall inside the DATASEC region.
        t = numpy.all([datasec[0] < obj_file.data['X'],
                       obj_file.data['X'] < datasec[1],
                       datasec[2] < obj_file.data['Y'],
                       obj_file.data['Y'] < datasec[3]], axis=0)
        logging.info("Source remaining after datasec cut: {} of {}".format(
            len(obj_file.data[t]['X']), len(t)))
        table_hdu = fits.table_to_hdu(obj_file.data[t])
        table_hdu.header['CATALOG'] = name
        table_hdu.header['EXTNAME'] = ext
        hdu_list.append(table_hdu)
        del table_hdu
        del obj_file
        os.unlink(name)

    name = "{}p{:02d}.{}".format(expnum, ccd, 'obj.fits')
    if os.access(name, os.F_OK):
        os.unlink(name)
    hdu_list.writeto(name)
    uri = storage.dbimages_uri(expnum, ccd, 'p', ext=".obj.fits")
    logging.info(name + " -> " + uri)
    # Retry the VOSpace copy; give up after the counter passes 10.
    count = 0
    while True:
        print("Copy attempt {}".format(count))
        try:
            storage.copy(name, uri)
            os.unlink(name)
            break
        except Exception as ex:
            if count > 10:
                raise ex
            count += 1
def compute_trans(expnums, ccd, version, prefix=None, default="WCS"):
    """
    Pull the astrometric header for each image, compute an x/y transform
    and compare to trans.jmp; this one overrides trans.jmp if they are
    very different.

    @param expnums: list of exposure numbers (first is the reference frame)
    @param ccd: ccd to operate on
    @param version: processing version tag
    @param prefix: optional filename prefix
    @param default: which transform wins on a mismatch ("WCS" rewrites trans.jmp)
    @return: summary string, or None if the WCS computation failed
    """
    wcs_dict = {}
    for expnum in expnums:
        try:
            # TODO This assumes that the image is already N/E flipped.
            # If compute_trans is called after the image is retrieved from
            # archive then we get the disk version.
            filename = storage.get_image(expnum, ccd, version, prefix=prefix)
            this_wcs = wcs.WCS(fits.open(filename)[0].header)
        except Exception as err:
            logging.warning("WCS Trans compute failed. {}".format(str(err)))
            return
        wcs_dict[expnum] = this_wcs
    # Reference sky position: the centre of the first exposure.
    x0 = wcs_dict[expnums[0]].header['NAXIS1'] / 2.0
    y0 = wcs_dict[expnums[0]].header['NAXIS2'] / 2.0
    (ra0, dec0) = wcs_dict[expnums[0]].xy2sky(x0, y0)
    result = ""
    for expnum in expnums:
        filename = storage.get_file(expnum, ccd, version, ext='.trans.jmp',
                                    prefix=prefix)
        # FIX: the file() builtin does not exist on Python 3; use open()
        # in a context manager so the handle is also closed promptly.
        with open(filename, 'r') as trans_fobj:
            jmp_trans = trans_fobj.readline().split()
        (x, y) = wcs_dict[expnum].sky2xy(ra0, dec0)
        x1 = float(
            jmp_trans[0]) + float(jmp_trans[1]) * x + float(jmp_trans[2]) * y
        y1 = float(
            jmp_trans[3]) + float(jmp_trans[4]) * x + float(jmp_trans[5]) * y
        dr = math.sqrt((x1 - x0)**2 + (y1 - y0)**2)
        if dr > 0.5:
            result += "WARNING: WCS-JMP transforms mis-matched {} reverting to using {}.\n".format(
                expnum, default)
            if default == "WCS":
                # Overwrite the local trans.jmp with the WCS-derived offsets.
                uri = storage.dbimages_uri(expnum, ccd, version,
                                           ext='.trans.jmp', prefix=prefix)
                filename = os.path.basename(uri)
                with open(filename, 'w') as trans:
                    trans.write("{:5.2f} 1. 0. {:5.2f} 0. 1.\n".format(
                        x0 - x, y0 - y))
        else:
            result += "WCS-JMP transforms match {}\n".format(expnum)
    return result
def mkpsf(expnum, ccd, version, dry_run=False, prefix=""):
    """Run the OSSOS jmpmakepsf script. """
    # Create the VOSpace destination directory up front (skipped on dry runs).
    target_dir = os.path.dirname(
        storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version,
                             ext='fits'))
    if not dry_run:
        storage.mkdir(target_dir)

    # Fetch the image locally and run the external PSF builder on it.
    image_name = storage.get_image(expnum, ccd, version=version, prefix=prefix)
    logging.info("Running mkpsf on %s %d" % (expnum, ccd))
    logging.info(util.exec_prog(['jmpmakepsf.csh', './', image_name, 'no']))
    if dry_run:
        return

    # Upload every product jmpmakepsf left beside the image.
    stem = os.path.splitext(image_name)[0]
    for product in ('mopheader', 'psf.fits', 'zeropoint.used',
                    'apcor', 'fwhm', 'phot'):
        destination = storage.dbimages_uri(expnum, ccd, prefix=prefix,
                                           version=version, ext=product)
        storage.copy(stem + "." + product, destination)
    return
def compute_trans(expnums, ccd, version, prefix=None, default="WCS"):
    """
    Pull the astrometric header for each image, compute an x/y transform
    and compare to trans.jmp; this one overrides trans.jmp if they are
    very different.

    @param expnums: list of exposure numbers (first is the reference frame)
    @param ccd: ccd to operate on
    @param version: processing version tag
    @param prefix: optional filename prefix
    @param default: which transform wins on a mismatch ("WCS" rewrites trans.jmp)
    @return: summary string, or None if the WCS computation failed
    """
    wcs_dict = {}
    for expnum in expnums:
        try:
            # TODO This assumes that the image is already N/E flipped.
            # If compute_trans is called after the image is retrieved from
            # archive then we get the disk version.
            filename = storage.get_image(expnum, ccd, version, prefix=prefix)
            this_wcs = wcs.WCS(fits.open(filename)[0].header)
        except Exception as err:
            logging.warning("WCS Trans compute failed. {}".format(str(err)))
            return
        wcs_dict[expnum] = this_wcs
    # Reference sky position: the centre of the first exposure.
    x0 = wcs_dict[expnums[0]].header['NAXIS1'] / 2.0
    y0 = wcs_dict[expnums[0]].header['NAXIS2'] / 2.0
    (ra0, dec0) = wcs_dict[expnums[0]].xy2sky(x0, y0)
    result = ""
    for expnum in expnums:
        filename = storage.get_file(expnum, ccd, version, ext='.trans.jmp',
                                    prefix=prefix)
        # FIX: the file() builtin does not exist on Python 3; use open()
        # in a context manager so the handle is also closed promptly.
        with open(filename, 'r') as trans_fobj:
            jmp_trans = trans_fobj.readline().split()
        (x, y) = wcs_dict[expnum].sky2xy(ra0, dec0)
        x1 = float(jmp_trans[0]) + float(jmp_trans[1]) * x + float(jmp_trans[2]) * y
        y1 = float(jmp_trans[3]) + float(jmp_trans[4]) * x + float(jmp_trans[5]) * y
        dr = math.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)
        if dr > 0.5:
            result += "WARNING: WCS-JMP transforms mis-matched {} reverting to using {}.\n".format(expnum, default)
            if default == "WCS":
                # Overwrite the local trans.jmp with the WCS-derived offsets.
                uri = storage.dbimages_uri(expnum, ccd, version,
                                           ext='.trans.jmp', prefix=prefix)
                filename = os.path.basename(uri)
                with open(filename, 'w') as trans:
                    trans.write("{:5.2f} 1. 0. {:5.2f} 0. 1.\n".format(x0 - x, y0 - y))
        else:
            result += "WCS-JMP transforms match {}\n".format(expnum)
    return result
def main(commands=None, ccds=None, launch=False, triplets_dir=TRIPLETS_NODE,
         delete=False):
    """Scan triplet lists and report/launch/flag pipeline steps per ccd.

    FIX: list/range default arguments are mutable objects shared between
    calls; replaced with None sentinels.  Also converted Python-2 print
    statements to print() calls, matching the rest of the codebase.

    :param commands: pipeline steps to check (defaults to the full chain)
    :param ccds: ccd numbers to check (defaults to 0..35)
    :param launch: when True, submit unfinished steps for processing
    :param triplets_dir: node holding the triplet lists
    :param delete: when True, mark steps successful if their outputs exist
    """
    if commands is None:
        commands = ['update_header', 'mkpsf', 'step1',
                    'step2', 'step3', 'combine']
    if ccds is None:
        ccds = range(0, 36)
    # The lead exposure runs the whole chain; the others stop after step1.
    cmd_order = {'LEAD': ['update_header', 'mkpsf', 'step1',
                          'step2', 'step3', 'combine'],
                 'OTHER': ['update_header', 'mkpsf', 'step1']}
    existance = {'mkpsf': ['psf.fits', ],
                 'step1': ['obj.jmp', 'obj.matt'],
                 'step2': ['unid.jmp', 'unid.matt'],
                 'step3': ['cands.jmp', 'cands.matt']}
    for line in get_triplets(triplets_dir):
        for command in commands:
            exp_type = 'LEAD'
            for expnum in line[0:3]:
                tags = storage.get_tags(expnum)
                if command not in cmd_order[exp_type]:
                    continue
                for ccd in ccds:
                    tag = storage.tag_uri(storage.get_process_tag(command, ccd))
                    print(line[3], command, expnum, ccd, tags.get(tag, None))
                    if tags.get(tag, None) != storage.SUCCESS:
                        if launch:
                            submit(command, expnum, ccd)
                        if delete:
                            # Mark success if all expected outputs exist.
                            success = True
                            for ext in existance[command]:
                                uri = storage.dbimages_uri(expnum, ccd,
                                                           version='p',
                                                           ext=ext)
                                if not storage.exists(uri):
                                    success = False
                            print(expnum, ccd, success)
                            # for next_command in cmd_order[exp_type][commands.index(command)+1:]:
                            #     storage.set_status(expnum, ccd, next_command, command+" FAILED")
                            if success:
                                storage.set_status(expnum, ccd, command,
                                                   version='p',
                                                   status='success')
                exp_type = 'OTHER'
def step2(expnums, ccd, version, prefix=None):
    '''run the actual step2 on the given exp/ccd combo'''
    jmp_args = ['step2jmp']
    matt_args = ['step2matt_jmp']
    # Collect the catalog basenames for each exposure; the matt variant
    # takes a numbered -f<N> flag before each filename.
    for idx, expnum in enumerate(expnums, start=1):
        jmp_name = storage.get_image(expnum, ccd=ccd, version=version,
                                     ext='obj.jmp', prefix=prefix)
        # trim the trailing ".obj.jmp" to get the basename
        jmp_args.append(jmp_name[0:-8])
        matt_args.append('-f%d' % (idx))
        matt_name = storage.get_image(expnum, ccd=ccd, version=version,
                                      ext='obj.matt', prefix=prefix)
        # trim the trailing ".obj.matt"
        matt_args.append(matt_name[0:-9])
    util.exec_prog(jmp_args)
    util.exec_prog(matt_args)
    # Upload the unmatched catalogs and the transform file for each exposure.
    for expnum in expnums:
        for ext in ['unid.jmp', 'unid.matt', 'trans.jmp']:
            uri = storage.dbimages_uri(expnum, ccd=ccd, version=version,
                                       ext=ext, prefix=prefix)
            storage.copy(os.path.basename(uri), uri)
    return
def step2(expnums, ccd, version, prefix=None):
    '''run the actual step2 on the given exp/ccd combo'''
    jmp_cmd = ['step2jmp']
    matt_cmd = ['step2matt_jmp']
    counter = 0
    for expnum in expnums:
        # Strip ".obj.jmp" / ".obj.matt" suffixes to get catalog basenames.
        jmp_cmd.append(
            storage.get_image(expnum, ccd=ccd, version=version,
                              ext='obj.jmp', prefix=prefix)[0:-8])
        counter += 1
        matt_cmd.extend([
            '-f%d' % (counter),
            storage.get_image(expnum, ccd=ccd, version=version,
                              ext='obj.matt', prefix=prefix)[0:-9]])
    util.exec_prog(jmp_cmd)
    util.exec_prog(matt_cmd)
    # Push the matching products back into VOSpace.
    for expnum in expnums:
        for product in ('unid.jmp', 'unid.matt', 'trans.jmp'):
            uri = storage.dbimages_uri(expnum, ccd=ccd, version=version,
                                       ext=product, prefix=prefix)
            local_name = os.path.basename(uri)
            storage.copy(local_name, uri)
    return
def get_image_uri(self):
    """Return the VOSpace dbimages URI of the FITS image for this reading."""
    uri = storage.dbimages_uri(self.expnum,
                               ccd=self.ccdnum,
                               version=self.ftype,
                               prefix=self.fk,
                               ext='.fits')
    return uri
def run(expnum, ccd, version='p', prefix='', dry_run=False, force=False):
    """Build the obj.fits catalog MEF for one exposure/ccd and upload it.

    Records task status on completion (success message or the error text).

    :param expnum: exposure number
    :param ccd: CCD number
    :param version: processing version tag (default 'p')
    :param prefix: optional filename prefix
    :param dry_run: when True, do not record status in VOSpace
    :param force: rerun even if already marked successful
    """
    message = 'success'
    if storage.get_status(task, prefix, expnum, version=version, ccd=ccd) and not force:
        logging.info("{} completed successfully for {} {} {} {}".format(task, prefix, expnum, version, ccd))
        return
    with storage.LoggingManager(task=task, prefix=prefix, expnum=expnum,
                                ccd=ccd, version=version, dry_run=dry_run):
        try:
            if not storage.get_status(dependency, prefix, expnum, "p", ccd=ccd):
                raise IOError("{} not yet run for {}".format(dependency, expnum))
            header = storage.get_astheader(expnum, ccd)
            datasec = storage.datasec_to_list(header.get('DATASEC', '[80:2080,30,4160]'))
            # FIX: bare "except:" also swallows SystemExit/KeyboardInterrupt;
            # catch narrower exception types throughout.
            try:
                fwhm = "{:5.2f}".format(storage.get_fwhm(expnum, ccd))
            except Exception:
                fwhm = 'unknown'
            for keyword in del_keyword_list:
                try:
                    del header[keyword]
                except KeyError:
                    pass
            header['FWHM'] = (fwhm, 'FWHM in pixels')
            header['EXTNAME'] = 'header'
            primary_hdu = fits.PrimaryHDU(header=header)
            hdu_list = fits.HDUList([primary_hdu, ])
            for ext in ['jmp', 'matt']:
                extension = 'obj.' + ext
                name = "{}p{:02d}.{}".format(expnum, ccd, extension)
                # Remove any stale local copies before retrieving.
                try:
                    os.unlink(name)
                    os.unlink(name + ".fits")
                except OSError:
                    pass
                logging.info("Retrieving {}".format(name))
                obj_file = mop_file.Parser(expnum, ccd, extension)
                obj_file.parse()
                # Keep only sources inside the DATASEC region.
                t = numpy.all([datasec[0] < obj_file.data['X'],
                               obj_file.data['X'] < datasec[1],
                               datasec[2] < obj_file.data['Y'],
                               obj_file.data['Y'] < datasec[3]], axis=0)
                logging.info("Source remaining after datasec cut: {} of {}".format(
                    len(obj_file.data[t]['X']), len(t)))
                table_hdu = fits.table_to_hdu(obj_file.data[t])
                table_hdu.header['CATALOG'] = name
                table_hdu.header['EXTNAME'] = ext
                hdu_list.append(table_hdu)
                del table_hdu
                del obj_file
                os.unlink(name)
            name = "{}p{:02d}.{}".format(expnum, ccd, 'obj.fits')
            if os.access(name, os.F_OK):
                os.unlink(name)
            hdu_list.writeto(name)
            uri = storage.dbimages_uri(expnum, ccd, 'p', ext=".obj.fits")
            logging.info(name + " -> " + uri)
            # Retry the VOSpace copy; the open() guards that the file exists.
            count = 0
            with open(name):
                while True:
                    count += 1
                    logging.info("Copy attempt {}".format(count))
                    try:
                        storage.copy(name, uri)
                        os.unlink(name)
                        break
                    except Exception as ex:
                        if count > 10:
                            raise ex
            logging.info(message)
        except Exception as e:
            message = str(e)
            logging.error(message)
        if not dry_run:
            storage.set_status(task, prefix, expnum, version=version, ccd=ccd,
                               status=message)
def run(expnums, ccd, version, prefix=None, dry_run=False, default="WCS",
        force=False):
    """run the actual step2 on the given exp/ccd combo"""
    # Command lines for the three external matching programs.
    jmp_trans = ['step2ajmp']
    jmp_args = ['step2bjmp']
    matt_args = ['step2matt_jmp']
    # Skip work already recorded as successful, unless forced.
    if storage.get_status(task, prefix, expnums[0], version, ccd) and not force:
        logging.info("{} completed successfully for {}{}{}{:02d}".format(
            task, prefix, expnums[0], version, ccd))
        return
    with storage.LoggingManager(task, prefix, expnums[0], ccd, version, dry_run):
        try:
            # Every exposure must have completed the dependency task first.
            for expnum in expnums:
                if not storage.get_status(
                        dependency, prefix, expnum, version=version, ccd=ccd):
                    raise IOError(
                        35,
                        "Cannot start {} as {} not yet completed for {}{}{}{:02d}"
                        .format(task, dependency, prefix, expnum, version, ccd))
            message = storage.SUCCESS
            idx = 0
            logging.info("Retrieving catalog files to do matching.")
            for expnum in expnums:
                # Catalog basenames: trim ".obj.jmp" / ".obj.matt" suffixes.
                jmp_args.append(
                    storage.get_file(expnum, ccd=ccd, version=version,
                                     ext='obj.jmp', prefix=prefix)[0:-8])
                jmp_trans.append(
                    storage.get_file(expnum, ccd=ccd, version=version,
                                     ext='obj.jmp', prefix=prefix)[0:-8])
                idx += 1
                matt_args.append('-f%d' % idx)
                matt_args.append(
                    storage.get_file(expnum, ccd=ccd, version=version,
                                     ext='obj.matt', prefix=prefix)[0:-9])
            logging.info(
                "Computing the catalog alignment using sources in catalogs.")
            try:
                logging.info(util.exec_prog(jmp_trans))
                if default == "WCS":
                    logging.info("Comparing computed transform to WCS values")
                    logging.info(
                        compute_trans(expnums, ccd, version, prefix,
                                      default=default))
            except Exception as ex:
                # Fall back to the WCS-derived transform when jmp fails.
                logging.info("JMP Trans failed: {}".format(ex))
                logging.info(
                    compute_trans(expnums, ccd, version, prefix,
                                  default="WCS"))
            logging.info("Using transform to match catalogs for three images.")
            logging.info(util.exec_prog(jmp_args))
            logging.info(util.exec_prog(matt_args))
            # check that the shifts from step2 are rational by matching the
            # bright star lists.
            logging.info(
                "Uisng checktrans to ensure that transforms were reasonable.")
            check_args = ['checktrans']
            # checktrans needs a 'proc-these-files' listing; rebuild it fresh.
            if os.access('proc-these-files', os.R_OK):
                os.unlink('proc-these-files')
            ptf = open('proc-these-files', 'w')
            ptf.write(
                "# A dummy file that is created so checktrans could run.\n")
            ptf.write("# Frame FWHM PSF?\n")
            for expnum in expnums:
                filename = os.path.splitext(
                    storage.get_image(expnum, ccd, version=version,
                                      prefix=prefix))[0]
                # checktrans expects .psf catalogs; link the .jmp ones in.
                if not os.access(filename + ".bright.psf", os.R_OK):
                    os.link(filename + ".bright.jmp", filename + ".bright.psf")
                if not os.access(filename + ".obj.psf", os.R_OK):
                    os.link(filename + ".obj.jmp", filename + ".obj.psf")
                ptf.write("{:>19s}{:>10.1f}{:>5s}\n".format(
                    filename, _FWHM, "NO"))
            ptf.close()
            if os.access('BAD_TRANS', os.F_OK):
                os.unlink('BAD_TRANS')
            logging.info(util.exec_prog(check_args))
            # checktrans signals a bad transform by creating BAD_TRANS.
            if os.access('BAD_TRANS', os.F_OK):
                raise OSError(errno.EBADMSG, 'BAD_TRANS')
            if os.access('proc-these-files', os.F_OK):
                os.unlink('proc-these-files')
            if dry_run:
                return
            # Upload the matching products for each exposure.
            for expnum in expnums:
                for ext in ['unid.jmp', 'unid.matt', 'trans.jmp']:
                    uri = storage.dbimages_uri(expnum, ccd=ccd,
                                               version=version, ext=ext,
                                               prefix=prefix)
                    filename = os.path.basename(uri)
                    storage.copy(filename, uri)
        except Exception as ex:
            message = str(ex)
            logging.error(message)
        # Record the outcome (success or the error text) for this task.
        storage.set_status(task, prefix, expnums[0], version, ccd,
                           status=message)
    return
t = numpy.all([datasec[0] < obj_file.data['X'], obj_file.data['X'] < datasec[1], datasec[2] < obj_file.data['Y'], obj_file.data['Y'] < datasec[3]], axis=0) print("Source remaining after datasec cut: {} of {}".format(len(obj_file.data[t]['X']), len(t))) table_hdu = fits.table_to_hdu(obj_file.data[t]) table_hdu.header['CATALOG'] = name table_hdu.header['EXTNAME'] = ext hdu_list.append(table_hdu) del table_hdu del obj_file os.unlink(name) name = "{}p{:02d}.{}".format(expnum, ccd, 'obj.fits') if os.access(name, os.F_OK): os.unlink(name) hdu_list.writeto(name) uri = storage.dbimages_uri(expnum, ccd, 'p', ext=".obj.fits") print(name+" -> "+uri) count = 0 while count < 10: print("Copy attempt {}".format(count)) try: storage.copy(name, uri) os.unlink(name) break except Exception as ex: print(ex) count += 1 continue except Exception as ex: print(ex)
def run(expnum, ccd, version='p', prefix='', dry_run=False, force=False):
    """Build the obj.fits catalog MEF for one exposure/ccd and upload it.

    Records task status (success message or error text) unless dry_run.
    """
    message = 'success'
    # Skip work already recorded as successful, unless forced.
    if storage.get_status(task, prefix, expnum, version=version, ccd=ccd) and not force:
        logging.info("{} completed successfully for {} {} {} {}".format(
            task, prefix, expnum, version, ccd))
        return
    with storage.LoggingManager(task=task, prefix=prefix, expnum=expnum,
                                ccd=ccd, version=version, dry_run=dry_run):
        try:
            # The dependency task must have run for this exposure first.
            if not storage.get_status(dependency, prefix, expnum, "p", ccd=ccd):
                raise IOError("{} not yet run for {}".format(
                    dependency, expnum))
            header = storage.get_astheader(expnum, ccd)
            datasec = storage.datasec_to_list(
                header.get('DATASEC', '[80:2080,30,4160]'))
            try:
                fwhm = "{:5.2f}".format(storage.get_fwhm(expnum, ccd))
            except:
                fwhm = 'unknown'
            for keyword in del_keyword_list:
                try:
                    del (header[keyword])
                except:
                    pass
            header['FWHM'] = (fwhm, 'FWHM in pixels')
            header['EXTNAME'] = 'header'
            primary_hdu = fits.PrimaryHDU(header=header)
            hdu_list = fits.HDUList([
                primary_hdu,
            ])
            for ext in ['jmp', 'matt']:
                extension = 'obj.' + ext
                name = "{}p{:02d}.{}".format(expnum, ccd, extension)
                # Remove any stale local copies before retrieving.
                try:
                    os.unlink(name)
                    os.unlink(name + ".fits")
                except:
                    pass
                logging.info("Retrieving {}".format(name))
                obj_file = mop_file.Parser(expnum, ccd, extension)
                obj_file.parse()
                # Keep only sources whose X/Y fall inside the DATASEC region.
                t = numpy.all([
                    datasec[0] < obj_file.data['X'],
                    obj_file.data['X'] < datasec[1],
                    datasec[2] < obj_file.data['Y'],
                    obj_file.data['Y'] < datasec[3]
                ],
                              axis=0)
                logging.info(
                    "Source remaining after datasec cut: {} of {}".format(
                        len(obj_file.data[t]['X']), len(t)))
                table_hdu = fits.table_to_hdu(obj_file.data[t])
                table_hdu.header['CATALOG'] = name
                table_hdu.header['EXTNAME'] = ext
                hdu_list.append(table_hdu)
                del table_hdu
                del obj_file
                os.unlink(name)
            name = "{}p{:02d}.{}".format(expnum, ccd, 'obj.fits')
            if os.access(name, os.F_OK):
                os.unlink(name)
            hdu_list.writeto(name)
            uri = storage.dbimages_uri(expnum, ccd, 'p', ext=".obj.fits")
            logging.info(name + " -> " + uri)
            # Retry the VOSpace copy; the open() guards that the file exists.
            count = 0
            with open(name):
                while True:
                    count += 1
                    logging.info("Copy attempt {}".format(count))
                    try:
                        storage.copy(name, uri)
                        os.unlink(name)
                        break
                    except Exception as ex:
                        if count > 10:
                            raise ex
            logging.info(message)
        except Exception as e:
            message = str(e)
            logging.error(message)
        # Record the outcome unless this was a dry run.
        if not dry_run:
            storage.set_status(task, prefix, expnum, version=version, ccd=ccd,
                               status=message)
def get_zmag_uri(self):
    """Return the VOSpace URI of the zeropoint-used file for this reading."""
    uri = storage.dbimages_uri(self.expnum,
                               ccd=self.ccdnum,
                               version=self.ftype,
                               prefix=self.fk,
                               ext=storage.ZEROPOINT_USED_EXT)
    return uri
def get_apcor_uri(self):
    """Return the VOSpace URI of the aperture-correction file for this reading."""
    uri = storage.dbimages_uri(self.expnum,
                               ccd=self.ccdnum,
                               version=self.ftype,
                               prefix=self.fk,
                               ext=storage.APCOR_EXT)
    return uri
def run(expnum, ccd, version, dry_run=False, prefix="", force=False):
    """Run the OSSOS jmpmakepsf script. """
    message = storage.SUCCESS
    # Skip work already recorded as successful, unless forced.
    if storage.get_status(task, prefix, expnum, version=version, ccd=ccd) and not force:
        logging.info("{} completed successfully for {} {} {} {}".format(
            task, prefix, expnum, version, ccd))
        return
    with storage.LoggingManager(task, prefix, expnum, ccd, version, dry_run):
        try:
            # The dependency task must have run for this exposure first.
            if not storage.get_status(dependency, prefix, expnum, "p", ccd=ccd):
                raise IOError("{} not yet run for {}".format(
                    dependency, expnum))
            # confirm destination directory exists.
            destdir = os.path.dirname(
                storage.dbimages_uri(expnum, ccd, prefix=prefix,
                                     version=version, ext='fits'))
            if not dry_run:
                storage.mkdir(destdir)
            # get image from the vospace storage area
            logging.info("Getting fits image from VOSpace")
            filename = storage.get_image(expnum, ccd, version=version,
                                         prefix=prefix)
            # get mopheader from the vospace storage area
            logging.info("Getting mopheader from VOSpace")
            mopheader_filename = storage.get_file(expnum, ccd, version=version,
                                                  prefix=prefix,
                                                  ext='mopheader')
            # run mkpsf process
            logging.info("Running mkpsf on %s %d" % (expnum, ccd))
            logging.info(util.exec_prog(['jmpmakepsf.csh', './', filename,
                                         'yes', 'yes']))
            if dry_run:
                return
            # place the results into VOSpace
            basename = os.path.splitext(filename)[0]
            for ext in ('mopheader', 'psf.fits', 'zeropoint.used', 'apcor',
                        'fwhm', 'phot'):
                dest = storage.dbimages_uri(expnum, ccd, prefix=prefix,
                                            version=version, ext=ext)
                source = basename + "." + str(ext)
                # Retry each upload; the open() guards that the file exists.
                count = 0
                with open(source, 'r'):
                    while True:
                        count += 1
                        try:
                            logging.info("Attempt {} to copy {} -> {}".format(
                                count, source, dest))
                            storage.copy(source, dest)
                            break
                        except Exception as ex:
                            if count > 10:
                                raise ex
            # set some data parameters associated with the image, determined
            # in this step.
            storage.set_status('fwhm', prefix, expnum, version=version,
                               ccd=ccd,
                               status=str(storage.get_fwhm(
                                   expnum, ccd=ccd, prefix=prefix,
                                   version=version)))
            storage.set_status('zeropoint', prefix, expnum, version=version,
                               ccd=ccd,
                               status=str(storage.get_zeropoint(
                                   expnum, ccd=ccd, prefix=prefix,
                                   version=version)))
            logging.info(message)
        except Exception as e:
            message = str(e)
            logging.error(message)
        # Record the outcome (success or the error text) for this task.
        storage.set_status(task, prefix, expnum, version, ccd=ccd,
                           status=message)
    return
def main(expnum, ccd):
    """Build an <expnum>p<ccd>.obj.fits MEF catalog and upload it to VOSpace.

    Combines the 'jmp' and 'matt' object catalogs (cut to the DATASEC region)
    with the astrometric header into one FITS file, then copies it to the
    dbimages area with retries.

    :param expnum: exposure number
    :param ccd: CCD number within the exposure
    """
    header = storage.get_astheader(expnum, ccd)
    datasec = storage.datasec_to_list(
        header.get('DATASEC', '[80:2080,30,4160]'))
    # FIX: bare "except:" also swallows SystemExit/KeyboardInterrupt; catch
    # narrower exception types throughout.
    try:
        fwhm = "{:5.2f}".format(storage.get_fwhm(expnum, ccd))
    except Exception:
        fwhm = 'unknown'
    for keyword in del_keyword_list:
        try:
            del header[keyword]
        except KeyError:
            pass
    header['FWHM'] = (fwhm, 'FWHM in pixels')
    header['EXTNAME'] = 'header'
    primary_hdu = fits.PrimaryHDU(header=header)
    hdu_list = fits.HDUList([primary_hdu, ])
    for ext in ['jmp', 'matt']:
        extension = 'obj.' + ext
        name = "{}p{:02d}.{}".format(expnum, ccd, extension)
        # Remove any stale local copies before retrieving.
        try:
            os.unlink(name)
            os.unlink(name + ".fits")
        except OSError:
            pass
        logging.info("Retrieving {}".format(name))
        obj_file = mop_file.Parser(expnum, ccd, extension)
        obj_file.parse()
        # Keep only sources whose X/Y fall inside the DATASEC region.
        t = numpy.all([
            datasec[0] < obj_file.data['X'],
            obj_file.data['X'] < datasec[1],
            datasec[2] < obj_file.data['Y'],
            obj_file.data['Y'] < datasec[3]
        ], axis=0)
        logging.info("Source remaining after datasec cut: {} of {}".format(
            len(obj_file.data[t]['X']), len(t)))
        table_hdu = fits.table_to_hdu(obj_file.data[t])
        table_hdu.header['CATALOG'] = name
        table_hdu.header['EXTNAME'] = ext
        hdu_list.append(table_hdu)
        del table_hdu
        del obj_file
        os.unlink(name)

    name = "{}p{:02d}.{}".format(expnum, ccd, 'obj.fits')
    if os.access(name, os.F_OK):
        os.unlink(name)
    hdu_list.writeto(name)
    uri = storage.dbimages_uri(expnum, ccd, 'p', ext=".obj.fits")
    logging.info(name + " -> " + uri)
    # Retry the VOSpace copy; give up after the counter passes 10.
    count = 0
    while True:
        print("Copy attempt {}".format(count))
        try:
            storage.copy(name, uri)
            os.unlink(name)
            break
        except Exception as ex:
            if count > 10:
                raise ex
            count += 1
# Script body: swap an exposure's header for a fresh one and (optionally)
# push the updated image back into dbimages, recording task status.
logging.basicConfig(level=logging.INFO, format="%(message)s")
message = storage.SUCCESS
try:
    # Use a locally writable copy of the exposure if one exists; otherwise
    # fetch the image from storage.  (and/or short-circuits pick the first
    # usable candidate.)
    image = (os.access(args.expnum, os.W_OK) and args.expnum) or (
        storage.get_image(args.expnum))
    # Same pattern for the header: an explicit --header argument (local file
    # or stored 'head' product) wins; otherwise use the exposure's own head.
    header = (args.header is not None and (
        (os.access(args.header, os.W_OK) and args.header) or (
            storage.get_image(args.header, ext='head')))) or (
        storage.get_image(args.expnum, ext='head'))
    logging.info("Swapping header for %s for contents in %s \n" % (image,
                                                                   header))
    run_update_header(image, header)
    if args.replace:
        # Fall back to the EXPNUM keyword when no exposure number was given.
        expnum = args.expnum or fits.open(image)[0].header['EXPNUM']
        dest = storage.dbimages_uri(expnum)
        storage.copy(image, dest)
        # NOTE(review): assumed set_status belongs to the replace branch and
        # sys.exit(0) runs for every successful path — confirm nesting.
        storage.set_status(args.expnum, 36, 'update_header', message)
    sys.exit(0)
except Exception as e:
    logging.error("Error replacing header for %s" % (args.expnum))
    logging.error(str(e))
    message = str(e)
    storage.set_status(args.expnum, 36, 'update_header', message)
    sys.exit(2)
def step2(expnums, ccd, version, prefix=None, dry_run=False, default="WCS"):
    """Run the MOP step2 (source matching) programs for a set of exposures.

    Builds the argument lists for the step2ajmp / step2bjmp / step2matt_jmp
    external programs, runs them, sanity-checks the computed shifts with
    checktrans and, unless dry_run, copies the result files to VOSpace.

    :param expnums: list of exposure numbers making up the set
    :param ccd: CCD number to process
    :param version: processing version of the images ('p', 's', ...)
    :param prefix: optional filename prefix (e.g. 'fk' for fake sources)
    :param dry_run: when True, skip the final copy to VOSpace
    :param default: when 'WCS', seed the transforms from the image WCS
    :raises ValueError: when checktrans leaves a BAD_TRANS marker file
    """
    jmp_trans = ['step2ajmp']
    jmp_args = ['step2bjmp']
    matt_args = ['step2matt_jmp']
    for idx, expnum in enumerate(expnums, start=1):
        # get_file also downloads the file; call once and reuse the basename
        # (the original fetched the same obj.jmp twice per exposure)
        jmp_base = storage.get_file(expnum, ccd=ccd, version=version,
                                    ext='obj.jmp', prefix=prefix)[0:-8]
        jmp_args.append(jmp_base)
        jmp_trans.append(jmp_base)
        matt_args.append('-f%d' % idx)
        matt_args.append(
            storage.get_file(expnum, ccd=ccd, version=version,
                             ext='obj.matt', prefix=prefix)[0:-9])
    logging.info(util.exec_prog(jmp_trans))
    if default == "WCS":
        logging.info(
            compute_trans(expnums, ccd, version, prefix, default=default))
    logging.info(util.exec_prog(jmp_args))
    logging.info(util.exec_prog(matt_args))
    # check that the shifts from step2 are rational
    check_args = ['checktrans']
    if os.access('proc-these-files', os.R_OK):
        os.unlink('proc-these-files')
    with open('proc-these-files', 'w') as ptf:
        ptf.write("# A dummy file that is created so checktrans could run.\n")
        ptf.write("# Frame FWHM PSF?\n")
        for expnum in expnums:
            filename = os.path.splitext(
                storage.get_image(expnum, ccd, version=version,
                                  prefix=prefix))[0]
            # checktrans expects .psf files; link the .jmp ones when absent
            if not os.access(filename + ".bright.psf", os.R_OK):
                os.link(filename + ".bright.jmp", filename + ".bright.psf")
            if not os.access(filename + ".obj.psf", os.R_OK):
                os.link(filename + ".obj.jmp", filename + ".obj.psf")
            ptf.write("{:>19s}{:>10.1f}{:>5s}\n".format(filename, _FWHM, "NO"))
    if os.access('BAD_TRANS', os.F_OK):
        os.unlink('BAD_TRANS')
    logging.info(util.exec_prog(check_args))
    if os.access('BAD_TRANS', os.F_OK):
        # checktrans signals an unreasonable shift by creating BAD_TRANS.
        # NOTE(review): errno.EBADEXEC exists only on BSD/macOS builds of
        # Python — this raise would itself AttributeError on Linux; confirm.
        raise ValueError(errno.EBADEXEC, 'BAD_TRANS')
    if os.access('proc-these-files', os.F_OK):
        os.unlink('proc-these-files')
    if dry_run:
        return
    for expnum in expnums:
        for ext in ['unid.jmp', 'unid.matt', 'trans.jmp']:
            uri = storage.dbimages_uri(expnum, ccd=ccd, version=version,
                                       ext=ext, prefix=prefix)
            filename = os.path.basename(uri)
            storage.copy(filename, uri)
    return
def run(expnum, ccd, version, dry_run=False, prefix="", force=False):
    """Run the OSSOS jmpmakepsf script for one exposure/ccd.

    Fetches the image and mopheader from VOSpace, runs jmpmakepsf.csh,
    uploads the PSF products, and records fwhm/zeropoint status tags.

    :param expnum: exposure number to build a PSF for
    :param ccd: CCD number within the exposure
    :param version: image version ('p', 's', ...)
    :param dry_run: when True, run mkpsf locally but upload nothing
    :param prefix: filename prefix (e.g. 'fk' for fake-source images)
    :param force: re-run even if a previous success is recorded
    """
    message = storage.SUCCESS
    # already completed and not forced? nothing to do.
    if storage.get_status(task, prefix, expnum, version=version, ccd=ccd) and not force:
        logging.info("{} completed successfully for {} {} {} {}".format(
            task, prefix, expnum, version, ccd))
        return
    with storage.LoggingManager(task, prefix, expnum, ccd, version, dry_run):
        try:
            # the upstream task (module-level `dependency`) must have run first
            if not storage.get_status(
                    dependency, prefix, expnum, version, ccd=ccd):
                raise IOError("{} not yet run for {}".format(
                    dependency, expnum))
            # confirm destination directory exists.
            destdir = os.path.dirname(
                storage.dbimages_uri(expnum, ccd, prefix=prefix,
                                     version=version, ext='fits'))
            if not dry_run:
                storage.mkdir(destdir)
            # get image from the vospace storage area
            logging.info("Getting fits image from VOSpace")
            filename = storage.get_image(expnum, ccd, version=version,
                                         prefix=prefix)
            # get mopheader from the vospace storage area
            # (the local download is a side effect jmpmakepsf.csh relies on;
            # the returned name is otherwise unused)
            logging.info("Getting mopheader from VOSpace")
            mopheader_filename = storage.get_file(expnum, ccd,
                                                  version=version,
                                                  prefix=prefix,
                                                  ext='mopheader')
            # run mkpsf process
            logging.info("Running mkpsf on %s %d" % (expnum, ccd))
            logging.info(
                util.exec_prog(
                    ['jmpmakepsf.csh', './', filename, 'yes', 'yes']))
            if dry_run:
                return
            # place the results into VOSpace
            basename = os.path.splitext(filename)[0]
            for ext in ('mopheader', 'psf.fits', 'zeropoint.used', 'apcor',
                        'fwhm', 'phot'):
                dest = storage.dbimages_uri(expnum, ccd, prefix=prefix,
                                            version=version, ext=ext)
                source = basename + "." + str(ext)
                count = 0
                # the open() asserts the product exists before copying;
                # the VOSpace copy itself is retried up to 10 times.
                with open(source, 'r'):
                    while True:
                        count += 1
                        try:
                            logging.info("Attempt {} to copy {} -> {}".format(
                                count, source, dest))
                            storage.copy(source, dest)
                            break
                        except Exception as ex:
                            if count > 10:
                                raise ex
            # set some data parameters associated with the image,
            # determined in this step.
            storage.set_status('fwhm', prefix, expnum, version=version,
                               ccd=ccd,
                               status=str(
                                   storage.get_fwhm(expnum, ccd=ccd,
                                                    prefix=prefix,
                                                    version=version)))
            storage.set_status('zeropoint', prefix, expnum, version=version,
                               ccd=ccd,
                               status=str(
                                   storage.get_zeropoint(expnum, ccd=ccd,
                                                         prefix=prefix,
                                                         version=version)))
            logging.info(message)
        except Exception as e:
            message = str(e)
            logging.error(message)
        # record final status: SUCCESS or the captured error message.
        # NOTE(review): reconstructed indentation places this after the
        # try/except so failures are recorded too — confirm against VCS.
        storage.set_status(task, prefix, expnum, version, ccd=ccd,
                           status=message)
    return
# NOTE(review): orphaned fragment — this span begins mid-expression
# (`axis=0)`), so the start of its enclosing function is not present in
# this chunk. It appears to be the tail of an older, print-based variant
# of main() above. Left byte-identical pending recovery of the header;
# do not attempt to run this section as-is.
axis=0) print(("Source remaining after datasec cut: {} of {}".format( len(obj_file.data[t]['X']), len(t)))) table_hdu = fits.table_to_hdu(obj_file.data[t]) table_hdu.header['CATALOG'] = name table_hdu.header['EXTNAME'] = ext hdu_list.append(table_hdu) del table_hdu del obj_file os.unlink(name) name = "{}p{:02d}.{}".format(expnum, ccd, 'obj.fits') if os.access(name, os.F_OK): os.unlink(name) hdu_list.writeto(name) uri = storage.dbimages_uri(expnum, ccd, 'p', ext=".obj.fits") print((name + " -> " + uri)) count = 0 while count < 10: print(("Copy attempt {}".format(count))) try: storage.copy(name, uri) os.unlink(name) break except Exception as ex: print(ex) count += 1 continue except Exception as ex: print(ex)
def main():
    """Refresh an exposure's astrometric header and optionally push the
    updated image back to VOSpace (--replace).

    :return: 0 on success (or when skipped), otherwise the error message;
        the caller is expected to hand this to sys.exit().
    """
    parser = argparse.ArgumentParser(description='replace image header')
    parser.add_argument('--extname', help='name of extension to in header')
    parser.add_argument('expnum', type=str, help='exposure to update')
    parser.add_argument('-r', '--replace', action='store_true',
                        help='store modified image back to VOSpace?')
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--force', action='store_true',
                        help="Re-run even if previous success recorded")
    parser.add_argument('--dbimages', help="VOSpace DATA storage area.",
                        default="vos:OSSOS/dbimages")
    args = parser.parse_args()
    task = util.task()
    prefix = ""
    storage.DBIMAGES = args.dbimages
    # near-silent by default; -v / --debug raise the verbosity
    level = logging.CRITICAL
    message_format = "%(message)s"
    if args.verbose:
        level = logging.INFO
    if args.debug:
        level = logging.DEBUG
        message_format = "%(module)s %(funcName)s %(lineno)s %(message)s"
    logging.basicConfig(level=level, format=message_format)
    storage.set_logger(task, prefix, args.expnum, None, None, False)
    message = storage.SUCCESS
    expnum = args.expnum
    exit_status = 0
    try:
        # skip if already succeeded and not in force mode
        # (sys.exit raises SystemExit, which `except Exception` won't catch)
        if storage.get_status(task, prefix, expnum, "p", 36) and not args.force:
            logging.info("Already updated, skipping")
            sys.exit(0)
        image_hdulist = storage.get_image(args.expnum, return_file=False)
        ast_hdulist = storage.get_astheader(expnum, ccd=None)
        run_update_header(image_hdulist, ast_hdulist)
        image_filename = os.path.basename(storage.get_uri(expnum))
        image_hdulist.writeto(image_filename)
        if args.replace:
            dest = storage.dbimages_uri(expnum)
            storage.copy(image_filename, dest)
            # NOTE(review): literal 'update_header' here vs `task` in the
            # error path — presumably identical; confirm util.task() value.
            storage.set_status('update_header', "", expnum, 'p', 36, message)
    except Exception as e:
        message = str(e)
        if args.replace:
            storage.set_status(task, prefix, expnum, 'p', 36, message)
        # a string exit status signals failure to the shell wrapper
        exit_status = message
        logging.error(message)
    return exit_status
def parse(self, ssos_result_filename_or_lines):
    """
    Given an SSOS result table (filename or list of lines), build the
    'source' objects and return them wrapped in an SSOSData container.

    :param ssos_result_filename_or_lines: tab-separated SSOS query
        result; a filename or anything astropy.io.ascii can read.
    :return: SSOSData(observations, sources, provisional_name)
    """
    # local import: this block predates the py3 port of the module
    from io import BytesIO

    table_reader = ascii.get_reader(Reader=ascii.Basic)
    table_reader.inconsistent_handler = self._skip_missing_data
    table_reader.header.splitter.delimiter = '\t'
    table_reader.data.splitter.delimiter = '\t'
    table = table_reader.read(ssos_result_filename_or_lines)
    sources = []
    observations = []
    source_readings = []
    ref_pvwcs = None
    downloader = Downloader()
    warnings.filterwarnings('ignore')
    for row in table:
        # check if a dbimages object exists
        ccd = int(row['Ext']) - 1
        expnum = row['Image'].rstrip('p')
        # ADDING THIS TEMPORARILY TO GET THE NON-OSSOS DATA OUT OF THE WAY WHILE DEBUGGING
        # ('Insturment' is the actual SSOS column spelling — do not "fix")
        if (row['Telescope_Insturment'] != 'CFHT/MegaCam') or (row['Filter'] != 'r.MP9601'):
            continue
        # it's fine for OSSOS, go get the image
        image_uri = storage.dbimages_uri(expnum=expnum,
                                         ccd=None,
                                         version='p',
                                         ext='.fits',
                                         subdir=None)
        logger.info('Trying to access %s\n%s' % (row.data, image_uri))
        if not storage.exists(image_uri, force=False):
            logger.warning('Image not in dbimages? Trying subdir.')
            image_uri = storage.dbimages_uri(expnum=expnum, ccd=ccd, version='p')
            if not storage.exists(image_uri, force=False):
                logger.warning("Image doesn't exist in ccd subdir. %s" % image_uri)
                continue
        if row['X'] == -9999 or row['Y'] == -9999:
            logger.warning("Skipping %s as x/y not resolved." % (row['Image']))
            continue
        mopheader_uri = storage.dbimages_uri(expnum=expnum,
                                             ccd=ccd,
                                             version='p',
                                             ext='.mopheader')
        if mopheader_uri not in mopheaders:
            if not storage.exists(mopheader_uri, force=False):
                logger.warning('mopheader missing, but images exists')
                continue
            # raise flag if no MOPHEADER
            # py3 fix: BytesIO replaces the py2-only cStringIO; the FITS
            # payload read from VOSpace is binary — TODO confirm
            # open_vos_or_local returns bytes under py3.
            mopheader_fpt = BytesIO(storage.open_vos_or_local(mopheader_uri).read())
            mopheader = fits.open(mopheader_fpt)
            mopheaders[mopheader_uri] = mopheader
        mopheader = mopheaders[mopheader_uri]
        # Build astrom.Observation
        observation = astrom.Observation(expnum=str(expnum),
                                         ftype='p',
                                         ccdnum=str(ccd),
                                         fk="")
        observation.rawname = os.path.splitext(
            os.path.basename(image_uri))[0] + str(ccd).zfill(2)
        observation.header = mopheader[0].header
        MJD_OBS_CENTER = mpc.Time(observation.header['MJD-OBSC'],
                                  format='mjd',
                                  scale='utc',
                                  precision=5).replicate(format='mpc')
        observation.header['MJD_OBS_CENTER'] = str(MJD_OBS_CENTER)
        observation.header['MAXCOUNT'] = MAXCOUNT
        observation.header['SCALE'] = observation.header['PIXSCALE']
        # observation.header['CHIP'] = str(observation.header['CHIPNUM']).zfill(2)
        observation.header['NAX1'] = observation.header['NAXIS1']
        observation.header['NAX2'] = observation.header['NAXIS2']
        observation.header['MOPversion'] = observation.header['MOP_VER']
        observation.header['FWHM'] = 4
        # a download pixel 1,1 of this data to due offsets with.
        x_cen = int(min(max(1, row['X']), observation.header['NAX1']))
        y_cen = int(min(max(1, row['Y']), observation.header['NAX2']))
        if image_uri not in astheaders:
            hdulist = downloader.download_hdulist(
                uri=image_uri,
                view='cutout',
                cutout='[{}][{}:{},{}:{}]'.format(ccd + 1, x_cen, x_cen,
                                                  y_cen, y_cen))
            astheaders[image_uri] = hdulist
        hdulist = astheaders[image_uri]
        pvwcs = wcs.WCS(hdulist[0].header)
        (ra, dec) = pvwcs.xy2sky(x_cen, y_cen)
        # the first usable row defines the reference frame for x0/y0
        if ref_pvwcs is None:
            ref_pvwcs = pvwcs
            xref = row['X']
            yref = row['Y']
        (x0, y0) = ref_pvwcs.sky2xy(ra, dec)
        x0 += row['X'] - x_cen
        y0 += row['Y'] - y_cen
        # Build astrom.SourceReading
        observations.append(observation)
        from_input_file = observation.rawname in self.input_rawnames
        null_observation = observation.rawname in self.null_observations
        # py3 fix: was a py2 print statement; output format is unchanged
        print(observation.rawname, observation.header['MJD_OBS_CENTER'],
              null_observation, from_input_file)
        source_reading = astrom.SourceReading(x=row['X'], y=row['Y'],
                                              xref=xref, yref=yref,
                                              x0=x0, y0=y0,
                                              ra=row['Object_RA'],
                                              dec=row['Object_Dec'],
                                              obs=observation,
                                              ssos=True,
                                              from_input_file=from_input_file,
                                              null_observation=null_observation)
        # if observation.rawname in self.input_rawnames:
        #     source_readings.insert(0, source_reading)
        # else:
        source_readings.append(source_reading)
    # build our array of SourceReading objects
    sources.append(source_readings)
    warnings.filterwarnings('once')
    return SSOSData(observations, sources, self.provisional_name)
def step2(expnums, ccd, version, prefix=None, dry_run=False, default="WCS"):
    """Run step2 (source matching) over the given exposures/ccd.

    Assembles the command lines for the step2ajmp / step2bjmp /
    step2matt_jmp external programs, executes them, validates the derived
    shifts via checktrans, then (unless dry_run) uploads the products.

    :param expnums: exposure numbers making up the set
    :param ccd: CCD number to process
    :param version: image processing version ('p', 's', ...)
    :param prefix: optional filename prefix (e.g. 'fk')
    :param dry_run: when True, do everything except the VOSpace upload
    :param default: 'WCS' seeds the transforms from the image WCS
    :raises OSError: when checktrans leaves a BAD_TRANS marker file
    """
    jmp_trans = ['step2ajmp']
    jmp_args = ['step2bjmp']
    matt_args = ['step2matt_jmp']
    for idx, expnum in enumerate(expnums, start=1):
        # fetch the obj.jmp file once and reuse its basename for both
        # argument lists (the original downloaded it twice per exposure)
        jmp_base = storage.get_file(expnum, ccd=ccd, version=version,
                                    ext='obj.jmp', prefix=prefix)[0:-8]
        jmp_args.append(jmp_base)
        jmp_trans.append(jmp_base)
        matt_args.append('-f%d' % idx)
        matt_args.append(
            storage.get_file(expnum, ccd=ccd, version=version,
                             ext='obj.matt', prefix=prefix)[0:-9])
    logging.info(util.exec_prog(jmp_trans))
    if default == "WCS":
        logging.info(compute_trans(expnums, ccd, version, prefix,
                                   default=default))
    logging.info(util.exec_prog(jmp_args))
    logging.info(util.exec_prog(matt_args))
    # check that the shifts from step2 are rational
    check_args = ['checktrans']
    if os.access('proc-these-files', os.R_OK):
        os.unlink('proc-these-files')
    with open('proc-these-files', 'w') as ptf:
        ptf.write("# A dummy file that is created so checktrans could run.\n")
        ptf.write("# Frame FWHM PSF?\n")
        for expnum in expnums:
            filename = os.path.splitext(
                storage.get_image(expnum, ccd, version=version,
                                  prefix=prefix))[0]
            # checktrans wants .psf files; hard-link .jmp ones when absent
            if not os.access(filename + ".bright.psf", os.R_OK):
                os.link(filename + ".bright.jmp", filename + ".bright.psf")
            if not os.access(filename + ".obj.psf", os.R_OK):
                os.link(filename + ".obj.jmp", filename + ".obj.psf")
            ptf.write("{:>19s}{:>10.1f}{:>5s}\n".format(filename, _FWHM, "NO"))
    if os.access('BAD_TRANS', os.F_OK):
        os.unlink('BAD_TRANS')
    logging.info(util.exec_prog(check_args))
    if os.access('BAD_TRANS', os.F_OK):
        # checktrans flags an unreasonable shift by creating BAD_TRANS
        raise OSError(errno.EBADMSG, 'BAD_TRANS')
    if os.access('proc-these-files', os.F_OK):
        os.unlink('proc-these-files')
    if dry_run:
        return
    for expnum in expnums:
        for ext in ['unid.jmp', 'unid.matt', 'trans.jmp']:
            uri = storage.dbimages_uri(expnum, ccd=ccd, version=version,
                                       ext=ext, prefix=prefix)
            filename = os.path.basename(uri)
            storage.copy(filename, uri)
    return
# Top-level update_header driver (older variant): swap in the new header
# and optionally push the image back to VOSpace, recording task status.
# NOTE(review): depends on `args`, run_update_header and storage from
# earlier in the file; indentation reconstructed from flattened source —
# confirm the success-path set_status sits inside `if args.replace:`.
message = storage.SUCCESS
try:
    # prefer a writable local copy of the image over a VOSpace fetch
    image = (os.access(args.expnum, os.W_OK) and args.expnum) or (
        storage.get_image(args.expnum))
    # header source: local --header file, its VOSpace .head, or the
    # exposure's own .head file
    header = (args.header is not None and
              ((os.access(args.header, os.W_OK) and args.header) or
               (storage.get_image(args.header, ext='head')))) or (
        storage.get_image(args.expnum, ext='head'))
    logging.info("Swapping header for %s for contents in %s \n" % (image, header))
    run_update_header(image, header)
    if args.replace:
        # fall back to the EXPNUM header card when given a filename
        expnum = args.expnum or fits.open(image)[0].header['EXPNUM']
        dest = storage.dbimages_uri(expnum)
        storage.copy(image, dest)
        storage.set_status(args.expnum, 36, 'update_header', message)
    sys.exit(0)
except Exception as e:
    logging.error("Error replacing header for %s" % (args.expnum))
    logging.error(str(e))
    message = str(e)
    # store the failure message as the task status, then exit non-zero
    storage.set_status(args.expnum, 36, 'update_header', message)
    sys.exit(2)
def main():
    """Command-line entry point: rebuild the exposure's header from the
    stored astrometric solution; with --replace, upload the new image.

    :return: 0 on success or skip; the error message string on failure
        (intended to be passed to sys.exit()).
    """
    parser = argparse.ArgumentParser(description='replace image header')
    parser.add_argument('--extname', help='name of extension to in header')
    parser.add_argument('expnum', type=str, help='exposure to update')
    parser.add_argument('-r', '--replace', action='store_true',
                        help='store modified image back to VOSpace?')
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--force', action='store_true',
                        help="Re-run even if previous success recorded")
    parser.add_argument('--dbimages', help="VOSpace DATA storage area.",
                        default="vos:OSSOS/dbimages")
    args = parser.parse_args()
    task = util.task()
    prefix = ""
    storage.DBIMAGES = args.dbimages
    # only critical messages unless -v/--debug was requested
    level = logging.CRITICAL
    message_format = "%(message)s"
    if args.verbose:
        level = logging.INFO
    if args.debug:
        level = logging.DEBUG
        message_format = "%(module)s %(funcName)s %(lineno)s %(message)s"
    logging.basicConfig(level=level, format=message_format)
    storage.set_logger(task, prefix, args.expnum, None, None, False)
    message = storage.SUCCESS
    expnum = args.expnum
    exit_status = 0
    try:
        # skip if already succeeded and not in force mode; SystemExit from
        # sys.exit(0) is not caught by the `except Exception` below
        if storage.get_status(task, prefix, expnum, "p", 36) and not args.force:
            logging.info("Already updated, skipping")
            sys.exit(0)
        image_hdulist = storage.get_image(args.expnum, return_file=False)
        ast_hdulist = storage.get_astheader(expnum, ccd=None)
        run_update_header(image_hdulist, ast_hdulist)
        image_filename = os.path.basename(storage.get_uri(expnum))
        image_hdulist.writeto(image_filename)
        if args.replace:
            dest = storage.dbimages_uri(expnum)
            storage.copy(image_filename, dest)
            # NOTE(review): status recorded under the literal name
            # 'update_header' while the error path uses `task`; presumably
            # the same value — verify util.task().
            storage.set_status('update_header', "", expnum, 'p', 36, message)
    except Exception as e:
        message = str(e)
        if args.replace:
            storage.set_status(task, prefix, expnum, 'p', 36, message)
        # string exit status marks failure for the calling shell
        exit_status = message
        logging.error(message)
    return exit_status