def register_images(files, params):
    """Register the images in the given files list.

    Registered images are placed in the loc_output dir with prefix r_.
    """
    # Have we specified a registration template?
    if params.registration_image:
        reg = DS.Observation(params.registration_image, params)
    else:
        reg = DS.EmptyBase()
        reg.fw = 999.0
        for f in files:
            if (f.fw < reg.fw) and (f.fw > 1.2):
                reg = f
    print('Registration image:', reg.name)

    # Register images
    for f in files:
        if f == reg:
            f.image = f.data
            rf = params.loc_output + os.path.sep + 'r_' + f.name
            IO.write_image(f.image, rf)
        else:
            f.register(reg, params)
        # delete image arrays to save memory
        del f.image
        del f.mask
        del f.inv_variance
    del reg.data
    del reg.image
    del reg.mask
    del reg.inv_variance

    return files
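# Example usage of register_images (a sketch, not part of the pipeline; the
# image names and 'template.fits' are placeholders, and params is assumed to
# be the same DIA parameters object used throughout this module):
#
#   params.registration_image = 'template.fits'  # otherwise the best-seeing image is chosen
#   obs_list = get_observation_list(['img_0001.fits', 'img_0002.fits'], params)
#   registered = register_images(obs_list, params)  # writes r_<name> into params.loc_output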
def make_ref_image(params):
    """Make a reference image if it doesn't exist.

    Requires a list of input images to combine, in the file given by
    params.ref_image_list (default "ref_image_list.txt"). Assumes these
    images are already registered and trimmed, and can be found in the
    params.loc_trim directory.

    Returns the ref image as a DIA data_structures Observation object.
    """
    refimname = params.ref_image
    refimpath = os.path.join(params.loc_output, refimname)
    if os.path.exists(refimpath):
        refim = DS.Observation(refimpath, params)
        print("Ref image %s exists. Not clobbering." % refimpath)
        return refim

    # Read in the list of input images for creating the ref image
    if not os.path.exists(params.ref_image_list):
        print("Missing ref image list file %s" % params.ref_image_list)
        return None
    fin = open(params.ref_image_list, 'r')
    ref_input_filenames = [f.strip() for f in fin.readlines()]
    fin.close()
    ref_image_list = get_observation_list(ref_input_filenames, params)
    registered_ref_image_list = register_images(ref_image_list, params)

    # Make the photometric reference image if we don't have it.
    # Find stamp positions if required.
    stamp_positions = DIA.make_reference(registered_ref_image_list, params,
                                         reference_image=refimname)

    refim = DS.Observation(refimpath, params)

    # Register the newly made reference image
    # TODO: ref image registration is superfluous? Maybe a symlink would work?
    registered_refimlist = register_images([refim], params)

    # TODO: we need to add a header and WCS?
    copy_wcs(ref_image_list[0].fullname, refimpath)
    copy_wcs(ref_image_list[0].fullname, registered_refimlist[0].fullname)

    return refim
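# Minimal sketch of driving make_ref_image (assumes params already carries
# ref_image, ref_image_list, loc_trim and loc_output; the list file holds one
# input image filename per line):
#
#   params.ref_image = 'ref.fits'
#   params.ref_image_list = 'ref_image_list.txt'
#   refim = make_ref_image(params)   # returns a DS.Observation, or None on failure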
def get_observation_list(filenamelist, params):
    """From a given list of filenames, make a list of images as pyDIA
    Observation objects.

    If the user has specified a full path to an input image file, use that
    full path. If not, check whether the input image exists as a trimmed
    version, and use that. Otherwise, fall back to the original untrimmed
    image.
    """
    observationslist = []
    for filename in filenamelist:
        if filename.endswith('_trim.fits'):
            ftrimname = os.path.basename(filename)
        else:
            ftrimname = os.path.basename(filename).replace('.fits',
                                                           '_trim.fits')
        trimfile = os.path.join(params.loc_trim, ftrimname)
        imfile = os.path.join(params.loc_data, os.path.basename(filename))
        for f in [filename, trimfile, imfile]:
            if os.path.exists(f):
                g = DS.Observation(f, params)
                observationslist.append(g)
                break
    return observationslist
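# Resolution order used above for each input name (illustrative summary;
# 'field_001.fits' is a placeholder):
#   1. the filename exactly as given (full or relative path),
#   2. params.loc_trim/field_001_trim.fits,
#   3. params.loc_data/field_001.fits.
# The first existing path becomes a DS.Observation; names that resolve to no
# existing file are skipped silently.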
def do_photometry(params, extname='newflux', star_file='star_positions',
                  psf_file='psf.fits', star_positions=None,
                  reference_image='ref.fits'):
    #
    # Determine our list of files
    #
    all_files = os.listdir(params.loc_data)
    all_files.sort()
    files = []
    for f in all_files:
        if fnmatch.fnmatch(f, params.name_pattern):
            g = DS.Observation(params.loc_data + os.path.sep + f, params)
            if g.fw > 0.0:
                files.append(g)
    ref = DS.Observation(params.loc_output + os.path.sep + reference_image,
                         params)
    ref.register(ref, params)

    #
    # Detect stars and compute the PSF if necessary
    #
    psf_file = params.loc_output + os.path.sep + psf_file
    star_file = params.loc_output + os.path.sep + star_file
    print(psf_file)
    print(os.path.exists(psf_file))
    print(star_file)
    print(os.path.exists(star_file))
    if not os.path.exists(psf_file) or not os.path.exists(star_file):
        stars = PH.compute_psf_image(params, ref, psf_image=psf_file)
    if star_positions is None:
        if os.path.exists(star_file):
            star_positions = np.genfromtxt(star_file)[:, :2]
        else:
            star_positions = stars[:, 0:2]

    #
    # Group the stars by location
    #
    star_group_boundaries = None
    detector_mean_positions_x = None
    detector_mean_positions_y = None
    star_sort_index, star_group_boundaries, detector_mean_positions_x, \
        detector_mean_positions_y = PH.group_stars_ccd(
            params, star_positions,
            params.loc_output + os.path.sep + reference_image)
    star_positions = star_positions[star_sort_index]
    star_unsort_index = np.argsort(star_sort_index)

    #
    # Process the reference image
    #
    print('Processing', reference_image)
    ref = DS.Observation(params.loc_output + os.path.sep + reference_image,
                         params)
    # reg = Observation(params.loc_data+os.path.sep+
    #                   params.registration_image, params)
    mask, _ = IO.read_fits_file(params.loc_output + os.path.sep + 'mask_' +
                                reference_image)
    variance, _ = IO.read_fits_file(params.loc_output + os.path.sep + 'var_' +
                                    reference_image)
    ref.mask = mask
    ref.inv_variance = 1.0 / variance + (1 - mask)
    ref.register(ref, params)
    smask = IM.compute_saturated_pixel_mask(ref.image, params)
    ref.inv_variance += (1 - (smask * mask)) * 1.e-12
    ktable = params.loc_output + os.path.sep + 'k_' + os.path.basename(
        reference_image)
    kernelIndex, extendedBasis, c, params = IO.read_kernel_table(ktable, params)
    kernelRadius = np.max(kernelIndex[:, 0]) + 1
    if np.sum(extendedBasis) > 0:
        kernelRadius += 1
    print('kernelIndex', kernelIndex)
    print('extendedBasis', extendedBasis)
    print('coeffs', c)
    print('kernelRadius', kernelRadius)
    print('star_positions', star_positions.shape)
    phot_target, _ = IO.read_fits_file(params.loc_output + os.path.sep +
                                       'clean_' + reference_image)
    ref.flux, ref.dflux = CIF.photom_all_stars_simultaneous(
        phot_target, ref.inv_variance, star_positions, psf_file, c,
        kernelIndex, extendedBasis, kernelRadius, params,
        star_group_boundaries, detector_mean_positions_x,
        detector_mean_positions_y)
    if isinstance(ref.flux, np.ndarray):
        if not params.use_GPU:
            print('ungrouping fluxes')
            ref.flux = ref.flux[star_unsort_index].copy()
            ref.dflux = ref.dflux[star_unsort_index].copy()
    print(ref.flux.shape, star_positions.shape)
    np.savetxt(params.loc_output + os.path.sep + reference_image + '.' + extname,
               np.vstack((ref.flux, ref.dflux)))

    #
    # Process difference images
    #
    for f in files:
        if not os.path.exists(params.loc_output + os.path.sep + f.name + '.' +
                              extname):
            print('Processing', f.name)
            target = f.name
            dtarget = params.loc_output + os.path.sep + 'd_' + \
                os.path.basename(target)
            ntarget = params.loc_output + os.path.sep + 'n_' + \
                os.path.basename(target)
            ztarget = params.loc_output + os.path.sep + 'z_' + \
                os.path.basename(target)
            ktable = params.loc_output + os.path.sep + 'k_' + \
                os.path.basename(target)
            if os.path.exists(dtarget) and os.path.exists(ntarget) and \
                    os.path.exists(ktable):
                norm, h = IO.read_fits_file(ntarget)
                diff, h = IO.read_fits_file(dtarget)
                mask, h = IO.read_fits_file(ztarget)
                inv_var = (norm / diff)**2 + (1 - mask)
                kernelIndex, extendedBasis, c, params = IO.read_kernel_table(
                    ktable, params)
                kernelRadius = np.max(kernelIndex[:, 0]) + 1
                if np.sum(extendedBasis) > 0:
                    kernelRadius += 1
                print('kernelIndex', kernelIndex)
                print('extendedBasis', extendedBasis)
                print('coeffs', c)
                print('kernelRadius', kernelRadius)
                IO.write_image(diff,
                               params.loc_output + os.path.sep + 'diff1.fits')
                diff = IM.undo_photometric_scale(diff, c, params.pdeg)
                IO.write_image(diff,
                               params.loc_output + os.path.sep + 'diff2.fits')
                IO.write_image(inv_var,
                               params.loc_output + os.path.sep + 'inv_var.fits')
                IO.write_kernel_table(
                    params.loc_output + os.path.sep + 'ktable.fits',
                    kernelIndex, extendedBasis, c, params)
                flux, dflux = CI.photom_all_stars(
                    diff, inv_var, star_positions, psf_file, c, kernelIndex,
                    extendedBasis, kernelRadius, params,
                    star_group_boundaries, detector_mean_positions_x,
                    detector_mean_positions_y)
                print('flux[100:110]:')
                print(flux[100:110])
                if isinstance(flux, np.ndarray):
                    if not params.use_GPU:
                        print('ungrouping fluxes')
                        flux = flux[star_unsort_index].copy()
                        dflux = dflux[star_unsort_index].copy()
                        print('unsort flux[100:110]:')
                        print(flux[100:110])
                    np.savetxt(
                        params.loc_output + os.path.sep + f.name + '.' + extname,
                        np.vstack((flux, dflux)).T)
                sys.exit(0)
def imsub_all_fits(params, reference='ref.fits'):
    #
    # Create the output directory if it doesn't exist
    #
    if not os.path.exists(params.loc_output):
        os.mkdir(params.loc_output)

    #
    # The degree of spatial shape changes has to be at least as
    # high as the degree of spatial photometric scale
    #
    if params.sdeg < params.pdeg:
        print('Increasing params.sdeg to ', params.pdeg)
        params.sdeg = params.pdeg

    #
    # Print out the parameters for this run.
    #
    print('Parameters:')
    for par in dir(params):
        print(par, getattr(params, par))
    print()

    #
    # Determine our list of images
    #
    all_files = os.listdir(params.loc_data)
    all_files.sort()
    files = []
    for f in all_files:
        print('file', f)
        if fnmatch.fnmatch(f, params.name_pattern):
            g = DS.Observation(params.loc_data + os.path.sep + f, params)
            del g.data
            del g.mask
            print('fw', g.fw)
            if g.fw > 0.0:
                files.append(g)
                print(g.name, 'accepted')
    if len(files) < 3:
        print('Only', len(files), 'files found matching', params.name_pattern)
        print('Exiting')
        sys.exit(0)

    #
    # Have we specified a registration template?
    #
    if params.registration_image:
        reg = DS.Observation(params.registration_image, params)
    else:
        reg = DS.EmptyBase()
        reg.fw = 999.0
        for f in files:
            if (f.fw < reg.fw) and (f.fw > params.reference_min_seeing) and \
                    (f.sky < params.registration_max_background):
                reg = f
    print('Registration image:', reg.name)

    #
    # Register images
    #
    print('Registering images')
    files_copy = [f for f in files]
    for f in files:
        print(f.name)
        if f == reg:
            f.image = f.data
            rf = params.loc_output + os.path.sep + 'r_' + f.name
            IO.write_image(f.image, rf)
        else:
            if not f.register(reg, params):
                files_copy.remove(f)
        # delete image arrays to save memory
        del f.image
        del f.mask
        del f.inv_variance
    del reg.data
    del reg.image
    del reg.mask
    del reg.inv_variance
    files = files_copy

    #
    # Write image names and dates to a file
    #
    if params.image_list_file:
        try:
            with open(params.loc_output + os.path.sep + params.image_list_file,
                      'w') as fid:
                for f in files:
                    date = None
                    if params.datekey:
                        date = IO.get_date(
                            params.loc_data + os.path.sep + f.name,
                            key=params.datekey) - 2450000
                    if date:
                        fid.write(f.name + ' %10.5f\n' % date)
                    else:
                        fid.write(f.name)
        except:
            raise

    #
    # Make the photometric reference image if we don't have it.
    # Find stamp positions if required.
    #
    if not os.path.exists(params.loc_output + os.path.sep + reference):
        print('Reg = ', reg.name)
        stamp_positions = make_reference(files, reg, params,
                                         reference_image=reference)
        ref = DS.Observation(params.loc_output + os.path.sep + reference,
                             params)
        mask, _ = IO.read_fits_file(params.loc_output + os.path.sep + 'mask_' +
                                    reference)
        variance, _ = IO.read_fits_file(params.loc_output + os.path.sep +
                                        'var_' + reference)
        ref.mask = mask
        ref.inv_variance = 1.0 / variance
        ref.register(reg, params)
    else:
        ref = DS.Observation(params.loc_output + os.path.sep + reference,
                             params)
        if os.path.exists(params.loc_output + os.path.sep + 'mask_' + reference):
            mask, _ = IO.read_fits_file(params.loc_output + os.path.sep +
                                        'mask_' + reference)
        else:
            mask = np.ones_like(ref.data)
        ref.mask = mask
        ref.register(reg, params)
        stamp_positions = None
        if params.use_stamps:
            stamp_file = params.loc_output + os.path.sep + 'stamp_positions'
            if os.path.exists(stamp_file):
                stamp_positions = np.genfromtxt(stamp_file)
            else:
                stars = PF.choose_stamps(ref, params)
                stamp_positions = stars[:, 0:2]
                np.savetxt(stamp_file, stamp_positions)

    pm = params.pixel_max
    params.pixel_max *= 0.9
    ref.mask *= IM.compute_saturated_pixel_mask(ref.image, params)
    params.pixel_max = pm
    ref.blur = IM.boxcar_blur(ref.image)
    if params.mask_cluster:
        ref.mask *= IM.mask_cluster(ref.image, ref.mask, params)

    #
    # Detect stars and compute the PSF if we are doing photometry
    #
    star_positions = None
    sky = 0.0
    if params.do_photometry:
        star_file = params.loc_output + os.path.sep + 'star_positions'
        psf_file = params.loc_output + os.path.sep + 'psf.fits'
        if not os.path.exists(psf_file) or not os.path.exists(star_file):
            stars = PH.compute_psf_image(params, ref, psf_image=psf_file)
            star_positions = stars[:, 0:2]
            star_sky = stars[:, 4]
        if os.path.exists(star_file):
            star_positions = np.genfromtxt(star_file)
            star_sky = star_positions[:, 0] * 0.0
        else:
            np.savetxt(star_file, star_positions)
    print('sky =', sky)

    #
    # If we have pre-determined star positions
    #
    if params.star_file:
        stars = np.genfromtxt(params.star_file)
        star_positions = stars[:, 1:3]
        if params.star_reference_image:
            star_ref, h = IO.read_fits_file(params.star_reference_image)
            offset, _, _ = register_translation(star_ref, ref.image, 1000)
            dy, dx = offset
            # dy, dx = IM.positional_shift(ref.image, star_ref)
            print('position shift =', dx, dy)
            star_positions[:, 0] -= dx
            star_positions[:, 1] -= dy
        np.savetxt(star_file, star_positions)

    #
    # If we are using a CPU, group the stars by location
    #
    print('Group_check')
    print('params.do_photometry', params.do_photometry)
    print('params.use_GPU', params.use_GPU)
    if params.do_photometry:
        star_group_boundaries = None
        detector_mean_positions_x = None
        detector_mean_positions_y = None
        star_unsort_index = None
        star_sort_index, star_group_boundaries, detector_mean_positions_x, \
            detector_mean_positions_y = PH.group_stars_ccd(
                params, star_positions,
                params.loc_output + os.path.sep + reference)
        star_positions = star_positions[star_sort_index]
        star_sky = star_sky[star_sort_index]
        star_unsort_index = np.argsort(star_sort_index)

    #
    # Do photometry of the reference image
    #
    if params.do_photometry:
        ref_flux_file = params.loc_output + os.path.sep + 'ref.flux'
        if not os.path.exists(ref_flux_file):
            result = difference_image(
                ref, ref, params,
                stamp_positions=stamp_positions,
                psf_image=psf_file,
                star_positions=star_positions,
                star_group_boundaries=star_group_boundaries,
                detector_mean_positions_x=detector_mean_positions_x,
                detector_mean_positions_y=detector_mean_positions_y,
                star_sky=star_sky)
            if isinstance(result.flux, np.ndarray):
                print('ungrouping fluxes')
                result.flux = result.flux[star_unsort_index].copy()
                result.dflux = result.dflux[star_unsort_index].copy()
                np.savetxt(ref_flux_file,
                           np.vstack((result.flux, result.dflux)).T)

    #
    # Process images
    #
    if params.make_difference_images:
        if not params.use_GPU and params.n_parallel > 1:
            pool = Pool(params.n_parallel)
            pool.map(
                process_image_helper,
                zip(files,
                    itertools.repeat(
                        (ref, params, stamp_positions, star_positions,
                         star_group_boundaries, star_unsort_index,
                         detector_mean_positions_x,
                         detector_mean_positions_y))))
        else:
            for f in files:
                process_image(
                    f, (ref, params, stamp_positions, star_positions,
                        star_group_boundaries, star_unsort_index,
                        detector_mean_positions_x, detector_mean_positions_y))

    return files
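# Typical driver sketch for imsub_all_fits (hedged example; assumes a
# parameters object such as the one produced by set_default_parameters(),
# with loc_data and loc_output pointing at existing directories):
#
#   params = set_default_parameters()
#   params.loc_data = 'raw/'
#   params.loc_output = 'dia_out/'
#   params.name_pattern = '*.fits'
#   files = imsub_all_fits(params, reference='ref.fits')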
def do_photometry(params, extname='newflux', star_file='star_positions',
                  psf_file='psf.fits', star_positions=None,
                  reference_image='ref.fits'):
    #
    # Determine our list of files
    #
    all_files = os.listdir(params.loc_data)
    all_files.sort()
    files = []
    for f in all_files:
        if fnmatch.fnmatch(f, params.name_pattern):
            g = DS.Observation(params.loc_data + os.path.sep + f, params)
            if g.fw > 0.0:
                files.append(g)
    ref = DS.Observation(params.loc_output + os.path.sep + reference_image,
                         params)
    ref.register(ref, params)

    #
    # Detect stars and compute the PSF if necessary
    #
    if params.do_photometry:
        psf_file = params.loc_output + os.path.sep + psf_file
        if os.path.exists(params.star_file):
            star_pos = np.genfromtxt(params.star_file)[:, 1:3]
            if not os.path.exists(psf_file):
                stars = PH.compute_psf_image(params, ref, psf_image=psf_file)
        else:
            if not os.path.exists(star_file):
                stars = PH.compute_psf_image(params, ref, psf_image=psf_file)
                star_pos = stars[:, 0:2]
                np.savetxt(star_file, star_pos)
            else:
                star_pos = np.genfromtxt(star_file)
                if not os.path.exists(psf_file):
                    stars = PH.compute_psf_image(params, ref,
                                                 psf_image=psf_file)

    #
    # Have we been passed an array of star positions?
    #
    if star_positions is None:
        star_positions = star_pos

    #
    # If we are using a CPU, group the stars by location
    #
    star_group_boundaries = None
    detector_mean_positions_x = None
    detector_mean_positions_y = None
    if not params.use_GPU:
        star_sort_index, star_group_boundaries, detector_mean_positions_x, \
            detector_mean_positions_y = PH.group_stars_ccd(
                params, star_positions,
                params.loc_output + os.path.sep + reference_image)
        star_positions = star_positions[star_sort_index]
        star_unsort_index = np.argsort(star_sort_index)

    #
    # Process the reference image
    #
    print('Processing', reference_image)
    ref = DS.Observation(params.loc_output + os.path.sep + reference_image,
                         params)
    # reg = Observation(params.loc_data+os.path.sep+
    #                   params.registration_image, params)
    ref.register(ref, params)
    smask = IM.compute_saturated_pixel_mask(ref.image, 6, params)
    ref.inv_variance += 1 - smask
    ktable = params.loc_output + os.path.sep + 'k_' + os.path.basename(
        reference_image)
    kernelIndex, extendedBasis, c, params = IO.read_kernel_table(ktable, params)
    kernelRadius = np.max(kernelIndex[:, 0]) + 1
    if np.sum(extendedBasis) > 0:
        kernelRadius += 1
    print('kernelIndex', kernelIndex)
    print('extendedBasis', extendedBasis)
    print('coeffs', c)
    print('kernelRadius', kernelRadius)
    phot_target = ref.image
    ref.flux, ref.dflux = CI.photom_all_stars(
        phot_target, ref.inv_variance, star_positions, psf_file, c,
        kernelIndex, extendedBasis, kernelRadius, params,
        star_group_boundaries, detector_mean_positions_x,
        detector_mean_positions_y)
    if isinstance(ref.flux, np.ndarray):
        if not params.use_GPU:
            print('ungrouping fluxes')
            ref.flux = ref.flux[star_unsort_index].copy()
            ref.dflux = ref.dflux[star_unsort_index].copy()
        np.savetxt(
            params.loc_output + os.path.sep + reference_image + '.' + extname,
            np.vstack((ref.flux, ref.dflux)).T)

    #
    # Process difference images
    #
    for f in files:
        if not os.path.exists(params.loc_output + os.path.sep + f.name + '.' +
                              extname):
            print('Processing', f.name)
            target = f.name
            dtarget = params.loc_output + os.path.sep + 'd_' + \
                os.path.basename(target)
            ntarget = params.loc_output + os.path.sep + 'n_' + \
                os.path.basename(target)
            ztarget = params.loc_output + os.path.sep + 'z_' + \
                os.path.basename(target)
            ktable = params.loc_output + os.path.sep + 'k_' + \
                os.path.basename(target)
            if os.path.exists(dtarget) and os.path.exists(ntarget) and \
                    os.path.exists(ktable):
                norm, h = IO.read_fits_file(ntarget)
                diff, h = IO.read_fits_file(dtarget)
                mask, h = IO.read_fits_file(ztarget)
                inv_var = (norm / diff)**2 + (1 - mask)
                kernelIndex, extendedBasis, c, params = IO.read_kernel_table(
                    ktable, params)
                kernelRadius = np.max(kernelIndex[:, 0]) + 1
                if np.sum(extendedBasis) > 0:
                    kernelRadius += 1
                print('kernelIndex', kernelIndex)
                print('extendedBasis', extendedBasis)
                print('coeffs', c)
                print('kernelRadius', kernelRadius)
                diff = IM.undo_photometric_scale(diff, c, params.pdeg)
                flux, dflux = PH.photom_all_stars(
                    diff, inv_var, star_positions, psf_file, c, kernelIndex,
                    extendedBasis, kernelRadius, params,
                    star_group_boundaries, detector_mean_positions_x,
                    detector_mean_positions_y)
                if isinstance(flux, np.ndarray):
                    if not params.use_GPU:
                        print('ungrouping fluxes')
                        flux = flux[star_unsort_index].copy()
                        dflux = dflux[star_unsort_index].copy()
                    np.savetxt(
                        params.loc_output + os.path.sep + f.name + '.' + extname,
                        np.vstack((flux, dflux)).T)
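# Sketch of running this photometry stage on an existing output directory
# (assumes imsub_all_fits has already produced ref.fits, the d_/n_/z_ images
# and the k_ kernel tables in params.loc_output):
#
#   do_photometry(params, extname='newflux', reference_image='ref.fits')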
def do_everything(params=None):
    if params is None:
        params = set_default_parameters()

    # Create the output directory if it doesn't exist
    if not os.path.exists(params.loc_output):
        os.mkdir(params.loc_output)

    # The degree of spatial shape changes has to be at least as
    # high as the degree of spatial photometric scale
    if params.sdeg < params.pdeg:
        print('Increasing params.sdeg to ', params.pdeg)
        params.sdeg = params.pdeg

    # Print out the parameters for this run.
    print('Parameters:')
    for par in dir(params):
        print(par, getattr(params, par))

    print('Determining list of input images')
    input_file_basenames = os.listdir(params.loc_input)
    input_file_paths = []
    for f in input_file_basenames:
        if fnmatch.fnmatch(f, params.name_pattern):
            input_file_paths.append(os.path.join(params.loc_input, f))

    if params.dospintrim:
        print("Spinning and trimming input images")
        if not params.wcs_ref_image:
            refimname = params.ref_image
            refimpath = os.path.join(params.loc_output, refimname)
            if os.path.exists(refimpath):
                params.wcs_ref_image = refimpath
        if not params.wcs_ref_image:
            params.wcs_ref_image = input_file_paths[0]
        spin_and_trim(input_file_paths, params.wcs_ref_image, params.trimfrac)

    if params.dorefim:
        print("Making reference image")
        refim = make_ref_image(params)
    else:
        refimpath = os.path.join(params.loc_output, params.ref_image)
        refim = DS.Observation(refimpath, params)

    trim_file_basenames = os.listdir(params.loc_trim)
    trim_file_paths = []

    # Read in the list of input images used for creating the ref image
    ref_input_filenames = []
    if os.path.exists(params.ref_image_list):
        fin = open(params.ref_image_list, 'r')
        ref_input_filenames = [os.path.basename(f.strip())
                               for f in fin.readlines()]
        fin.close()

    for f in trim_file_basenames:
        if fnmatch.fnmatch(f, params.name_pattern):
            if f not in ref_input_filenames:
                trim_file_paths.append(os.path.join(params.loc_trim, f))
    print("Trimmed files to subtract: %s" % str(trim_file_paths))

    if params.dodiffs:
        print("Making diff images")
        make_diff_images(trim_file_paths, refim, params)

    return
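# End-to-end usage sketch for do_everything (assumptions: set_default_parameters()
# as used above, and loc_input/loc_trim/loc_output point at real directories;
# the dospintrim/dorefim/dodiffs flags gate the three stages):
#
#   params = set_default_parameters()
#   params.loc_input = 'incoming/'
#   params.loc_trim = 'trimmed/'
#   params.loc_output = 'dia_out/'
#   params.dospintrim = True
#   params.dorefim = True
#   params.dodiffs = True
#   do_everything(params)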
def make_diff_images(filenamelist, refim, params):
    """Make a diff image for each file in filenamelist: file - refim.

    filenamelist : list of filenames for 'target' images
    refim        : reference image, either a filename or a DIA Observation object
    params       : DIA parameters object
    """
    star_group_boundaries = None
    detector_mean_positions_x = None
    detector_mean_positions_y = None
    star_unsort_index = None
    star_positions = None
    stamp_positions = None
    sky = 0.0

    if isinstance(refim, str) and os.path.exists(refim):
        refim = DS.Observation(refim, params)

    # TODO: investigate what is really being done here:
    # Apply saturation mask and boxcar blurring to reference image
    mask, _ = IO.read_fits_file(
        params.loc_output + os.path.sep + 'mask_' + refim.name)
    refim.mask = mask
    pm = params.pixel_max
    params.pixel_max *= 0.9
    refim.mask *= IM.compute_saturated_pixel_mask(refim.image, 4, params)
    params.pixel_max = pm
    refim.blur = IM.boxcar_blur(refim.image)
    if params.mask_cluster:
        refim.mask *= IM.mask_cluster(refim.image, refim.mask, params)

    # For each given filename, get a pyDIA Observation object
    image_list = get_observation_list(filenamelist, params)

    # Register the images, using the ref image as the registration template,
    # unless the user has specified otherwise
    if not params.registration_image:
        params.registration_image = refim.fullname
    registered_image_list = register_images(image_list, params)

    # Make diff images: im - ref
    for im in registered_image_list:
        result = DIA.difference_image(
            refim, im, params,
            stamp_positions=stamp_positions,
            psf_image=params.loc_output + os.path.sep + 'psf.fits',
            star_positions=star_positions,
            star_group_boundaries=star_group_boundaries,
            detector_mean_positions_x=detector_mean_positions_x,
            detector_mean_positions_y=detector_mean_positions_y)
        del im.image
        del im.mask
        del im.inv_variance

        hdr = fits.getheader(im.fullname)
        # TODO: use astropy fits to propagate header with WCS from parent image

        # Save output images to files
        if isinstance(result.diff, np.ndarray):
            IO.write_image(result.diff,
                           params.loc_output + os.path.sep + 'd_' + im.name,
                           header=hdr)
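# Example call pattern for make_diff_images (a sketch; the target filenames
# are placeholders, and refim may be either a path or a DS.Observation as
# described in the docstring):
#
#   refimpath = os.path.join(params.loc_output, 'ref.fits')
#   targets = ['obs_0001_trim.fits', 'obs_0002_trim.fits']
#   make_diff_images(targets, refimpath, params)   # writes d_<name> images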