def register_images(files, params):
    """Register the images in the given files list.

    Registered images are placed in the loc_output dir with prefix r_.
    """
    # Have we specified a registration template?
    if params.registration_image:
        reg = DS.Observation(params.registration_image, params)
    else:
        reg = DS.EmptyBase()
        reg.fw = 999.0
        for f in files:
            if (f.fw < reg.fw) and (f.fw > 1.2):
                reg = f
    print('Registration image:', reg.name)

    # Register images
    for f in files:
        if f == reg:
            f.image = f.data
            rf = params.loc_output + os.path.sep + 'r_' + f.name
            IO.write_image(f.image, rf)
        else:
            f.register(reg, params)
        # delete image arrays to save memory
        del f.image
        del f.mask
        del f.inv_variance
    del reg.data
    del reg.image
    del reg.mask
    del reg.inv_variance

    return files
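# Usage sketch (illustrative only, not part of the pipeline): register_images
# expects a list of DS.Observation objects and a params object providing at
# least loc_data, loc_output, name_pattern and registration_image.  The list
# construction below is an assumption about how such a list might be built.
#
#   obs_list = [DS.Observation(params.loc_data + os.path.sep + f, params)
#               for f in sorted(os.listdir(params.loc_data))
#               if fnmatch.fnmatch(f, params.name_pattern)]
#   obs_list = register_images(obs_list, params)   # writes r_<name> to loc_output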
def imsub_all_fits(params, reference='ref.fits'):

    #
    # Create the output directory if it doesn't exist
    #
    if not os.path.exists(params.loc_output):
        os.mkdir(params.loc_output)

    #
    # The degree of spatial shape changes has to be at least as
    # high as the degree of spatial photometric scale
    #
    if params.sdeg < params.pdeg:
        print('Increasing params.sdeg to', params.pdeg)
        params.sdeg = params.pdeg

    #
    # Print out the parameters for this run.
    #
    print('Parameters:')
    for par in dir(params):
        print(par, getattr(params, par))
    print()

    #
    # Determine our list of images
    #
    all_files = os.listdir(params.loc_data)
    all_files.sort()
    files = []
    for f in all_files:
        print('file', f)
        if fnmatch.fnmatch(f, params.name_pattern):
            g = DS.Observation(params.loc_data + os.path.sep + f, params)
            del g.data
            del g.mask
            print('fw', g.fw)
            if g.fw > 0.0:
                files.append(g)
                print(g.name, 'accepted')
    if len(files) < 3:
        print('Only', len(files), 'files found matching', params.name_pattern)
        print('Exiting')
        sys.exit(0)

    #
    # Have we specified a registration template?
    #
    if params.registration_image:
        reg = DS.Observation(params.registration_image, params)
    else:
        reg = DS.EmptyBase()
        reg.fw = 999.0
        for f in files:
            if (f.fw < reg.fw) and (f.fw > params.reference_min_seeing) and (
                    f.sky < params.registration_max_background):
                reg = f
    print('Registration image:', reg.name)

    #
    # Register images
    #
    print('Registering images')
    files_copy = [f for f in files]
    for f in files:
        print(f.name)
        if f == reg:
            f.image = f.data
            rf = params.loc_output + os.path.sep + 'r_' + f.name
            IO.write_image(f.image, rf)
        else:
            if not f.register(reg, params):
                files_copy.remove(f)
        # delete image arrays to save memory
        del f.image
        del f.mask
        del f.inv_variance
    del reg.data
    del reg.image
    del reg.mask
    del reg.inv_variance
    files = files_copy

    #
    # Write image names and dates to a file
    #
    if params.image_list_file:
        try:
            with open(params.loc_output + os.path.sep +
                      params.image_list_file, 'w') as fid:
                for f in files:
                    date = None
                    if params.datekey:
                        date = IO.get_date(
                            params.loc_data + os.path.sep + f.name,
                            key=params.datekey) - 2450000
                    if date:
                        fid.write(f.name + ' %10.5f\n' % date)
                    else:
                        fid.write(f.name)
        except:
            raise

    #
    # Make the photometric reference image if we don't have it.
    # Find stamp positions if required.
    #
    if not os.path.exists(params.loc_output + os.path.sep + reference):
        print('Reg =', reg.name)
        stamp_positions = make_reference(files, reg, params,
                                         reference_image=reference)
        ref = DS.Observation(params.loc_output + os.path.sep + reference,
                             params)
        mask, _ = IO.read_fits_file(
            params.loc_output + os.path.sep + 'mask_' + reference)
        variance, _ = IO.read_fits_file(
            params.loc_output + os.path.sep + 'var_' + reference)
        ref.mask = mask
        ref.inv_variance = 1.0 / variance
        ref.register(reg, params)
    else:
        ref = DS.Observation(params.loc_output + os.path.sep + reference,
                             params)
        if os.path.exists(params.loc_output + os.path.sep + 'mask_' +
                          reference):
            mask, _ = IO.read_fits_file(
                params.loc_output + os.path.sep + 'mask_' + reference)
        else:
            mask = np.ones_like(ref.data)
        ref.mask = mask
        ref.register(reg, params)
        stamp_positions = None
        if params.use_stamps:
            stamp_file = params.loc_output + os.path.sep + 'stamp_positions'
            if os.path.exists(stamp_file):
                stamp_positions = np.genfromtxt(stamp_file)
            else:
                stars = PF.choose_stamps(ref, params)
                stamp_positions = stars[:, 0:2]
                np.savetxt(stamp_file, stamp_positions)

    pm = params.pixel_max
    params.pixel_max *= 0.9
    ref.mask *= IM.compute_saturated_pixel_mask(ref.image, params)
    params.pixel_max = pm
    ref.blur = IM.boxcar_blur(ref.image)
    if params.mask_cluster:
        ref.mask *= IM.mask_cluster(ref.image, ref.mask, params)

    #
    # Detect stars and compute the PSF if we are doing photometry
    #
    star_positions = None
    sky = 0.0
    if params.do_photometry:
        star_file = params.loc_output + os.path.sep + 'star_positions'
        psf_file = params.loc_output + os.path.sep + 'psf.fits'
        if not os.path.exists(psf_file) or not os.path.exists(star_file):
            stars = PH.compute_psf_image(params, ref, psf_image=psf_file)
            star_positions = stars[:, 0:2]
            star_sky = stars[:, 4]
        if os.path.exists(star_file):
            star_positions = np.genfromtxt(star_file)
            star_sky = star_positions[:, 0] * 0.0
        else:
            np.savetxt(star_file, star_positions)
        print('sky =', sky)

        #
        # If we have pre-determined star positions
        #
        if params.star_file:
            stars = np.genfromtxt(params.star_file)
            star_positions = stars[:, 1:3]
            if params.star_reference_image:
                star_ref, h = IO.read_fits_file(params.star_reference_image)
                offset, _, _ = register_translation(star_ref, ref.image, 1000)
                dy, dx = offset
                # dy, dx = IM.positional_shift(ref.image, star_ref)
                print('position shift =', dx, dy)
                star_positions[:, 0] -= dx
                star_positions[:, 1] -= dy
            np.savetxt(star_file, star_positions)

    #
    # If we are using a CPU, group the stars by location
    #
    print('Group_check')
    print('params.do_photometry', params.do_photometry)
    print('params.use_GPU', params.use_GPU)
    if params.do_photometry:
        star_group_boundaries = None
        detector_mean_positions_x = None
        detector_mean_positions_y = None
        star_unsort_index = None
        (star_sort_index, star_group_boundaries, detector_mean_positions_x,
         detector_mean_positions_y) = PH.group_stars_ccd(
             params, star_positions,
             params.loc_output + os.path.sep + reference)
        star_positions = star_positions[star_sort_index]
        star_sky = star_sky[star_sort_index]
        star_unsort_index = np.argsort(star_sort_index)

    #
    # Do photometry of the reference image
    #
    if params.do_photometry:
        ref_flux_file = params.loc_output + os.path.sep + 'ref.flux'
        if not os.path.exists(ref_flux_file):
            result = difference_image(
                ref, ref, params,
                stamp_positions=stamp_positions,
                psf_image=psf_file,
                star_positions=star_positions,
                star_group_boundaries=star_group_boundaries,
                detector_mean_positions_x=detector_mean_positions_x,
                detector_mean_positions_y=detector_mean_positions_y,
                star_sky=star_sky)
            if isinstance(result.flux, np.ndarray):
                print('ungrouping fluxes')
                result.flux = result.flux[star_unsort_index].copy()
                result.dflux = result.dflux[star_unsort_index].copy()
                np.savetxt(ref_flux_file,
                           np.vstack((result.flux, result.dflux)).T)

    #
    # Process images
    #
    if params.make_difference_images:
        if not params.use_GPU and (params.n_parallel > 1):
            pool = Pool(params.n_parallel)
            pool.map(
                process_image_helper,
                zip(files,
                    itertools.repeat(
                        (ref, params, stamp_positions, star_positions,
                         star_group_boundaries, star_unsort_index,
                         detector_mean_positions_x,
                         detector_mean_positions_y))))
        else:
            for f in files:
                process_image(
                    f, (ref, params, stamp_positions, star_positions,
                        star_group_boundaries, star_unsort_index,
                        detector_mean_positions_x, detector_mean_positions_y))

    return files
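# Usage sketch (illustrative only): imsub_all_fits is the top-level driver, so
# a minimal run just needs a fully populated params object.  The attribute
# values shown here are assumptions; any object exposing the parameters read
# above (loc_data, loc_output, name_pattern, do_photometry, use_GPU, ...)
# will do.
#
#   params.loc_data = 'raw'        # directory of input FITS images
#   params.loc_output = 'output'   # where ref.fits, r_* and difference products go
#   params.name_pattern = '*.fits'
#   params.do_photometry = True
#   files = imsub_all_fits(params, reference='ref.fits')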
def difference_image(ref, target, params, stamp_positions=None,
                     psf_image=None, star_positions=None,
                     star_group_boundaries=None,
                     detector_mean_positions_x=None,
                     detector_mean_positions_y=None, star_sky=None,
                     kernelRadius=None, kernel_inner_rad=7):

    from scipy.linalg import lu_solve, lu_factor, LinAlgError

    start = time.time()
    print('difference_image', ref.name, target.name)

    #
    # Set the kernel size based on the difference in seeing from the reference
    #
    # kernelRadius = min(params.kernel_maximum_radius,
    #                    max(params.kernel_minimum_radius,
    #                        np.abs(target.fw - ref.fw) * params.fwhm_mult))
    if kernelRadius is None:
        kernelRadius = min(
            params.kernel_maximum_radius,
            max(params.kernel_minimum_radius,
                np.sqrt(np.abs(target.fw**2 - ref.fw**2)) * params.fwhm_mult))

    #
    # Mask saturated pixels
    #
    # print('Masking', target.name, time.time() - start)
    # smask = compute_saturated_pixel_mask(target.image, kernelRadius, params)

    #
    # Define the kernel basis functions
    #
    print('Defining kernel pixels', time.time() - start)
    if params.use_fft_kernel_pixels:
        kernelIndex, extendedBasis = IM.define_kernel_pixels_fft(
            ref, target, kernelRadius + 2, INNER_RADIUS=20,
            threshold=params.fft_kernel_threshold)
    else:
        kernelIndex, extendedBasis = IM.define_kernel_pixels(
            kernelRadius, INNER_RADIUS=kernel_inner_rad)
    nKernel = kernelIndex.shape[0]

    #
    # We don't want to use bad pixels in either the target or reference image
    #
    smask = target.mask * ref.mask
    bmask = np.ones(smask.shape, dtype=bool)

    g = DS.EmptyBase()

    for iteration in range(params.iterations):

        print('Computing matrix', time.time() - start)
        tmask = bmask * smask

        #
        # Compute the matrix and vector
        #
        H, V, texref = CI.compute_matrix_and_vector_cuda(
            ref.image, ref.blur, target.image, target.inv_variance, tmask,
            kernelIndex, extendedBasis, kernelRadius, params,
            stamp_positions=stamp_positions)

        #
        # Solve the matrix equation to find the kernel coefficients
        #
        print('Solving matrix equation', time.time() - start)
        try:
            lu, piv = lu_factor(H)
            c = lu_solve((lu, piv), V).astype(np.float32).copy()
        except (LinAlgError, ValueError):
            print('LU decomposition failed')
            g.model = None
            g.flux = None
            g.diff = None
            print('H')
            print(H)
            sys.stdout.flush()
            return g

        #
        # Compute the model image
        #
        print('Computing model', time.time() - start)
        g.model = CI.compute_model_cuda(ref.image.shape, texref, c,
                                        kernelIndex, extendedBasis, params)
        edges = np.where(ref.image < 1.0)
        g.model[edges] = 0.0

        #
        # Compute the difference image
        #
        difference = (target.image - g.model)
        g.norm = difference * np.sqrt(target.inv_variance)

        #
        # Recompute the variance image from the model
        #
        # target.inv_variance = 1.0 / (g.model / params.gain +
        #                              (params.readnoise / params.gain)**2) + (1 - smask)
        mp = np.where(tmask == 0)
        if len(mp[0]) > 0:
            target.inv_variance[mp] = 1.e-12

        #
        # Mask pixels that disagree with the model
        #
        if iteration > 2:
            bmask = IM.kappa_clip(smask, g.norm,
                                  params.pixel_rejection_threshold)

        print('Iteration', iteration, 'completed', time.time() - start)

    #
    # Delete the target image array to save memory
    #
    del target.image

    #
    # Save the kernel coefficients to a file
    #
    if params.do_photometry and psf_image:
        kf = params.loc_output + os.path.sep + 'k_' + os.path.basename(
            target.name)
        IO.write_kernel_table(kf, kernelIndex, extendedBasis, c, params)

    print('coeffs', c)

    g.norm = difference * np.sqrt(target.inv_variance)
    g.variance = 1.0 / target.inv_variance
    g.mask = tmask

    #
    # Do the photometry if requested
    #
    g.flux = None
    if params.do_photometry and psf_image:
        print('star_positions', star_positions.shape)
        print('star_group_boundaries', star_group_boundaries)
        if ref.name == target.name:
            sky_image, _ = IO.read_fits_file(
                params.loc_output + os.path.sep + 'temp.sub2.fits')
            phot_target = ref.image - sky_image
            IO.write_image(
                phot_target,
                params.loc_output + os.path.sep + 'clean_' + ref.name)
            g.flux, g.dflux = CIF.photom_all_stars_simultaneous(
                phot_target, target.inv_variance, star_positions, psf_image,
                c, kernelIndex, extendedBasis, kernelRadius, params,
                star_group_boundaries, detector_mean_positions_x,
                detector_mean_positions_y)
        else:
            phot_target = difference
            g.flux, g.dflux = CI.photom_all_stars(
                phot_target, target.inv_variance, star_positions, psf_image,
                c, kernelIndex, extendedBasis, kernelRadius, params,
                star_group_boundaries, detector_mean_positions_x,
                detector_mean_positions_y)
        print('Photometry completed', time.time() - start)

    #
    # Apply the photometric scale factor to the difference image.
    # We don't do this prior to the photometry because the PSF is
    # being convolved by the kernel, which already includes the
    # photometric scale factor.
    #
    g.diff = IM.apply_photometric_scale(difference, c, params.pdeg)

    sys.stdout.flush()
    return g
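# Usage sketch (illustrative only): within this module, difference_image is
# called by imsub_all_fits for the reference image.  A standalone call assumes
# both images are registered and that ref.blur, ref.mask and
# target.inv_variance have already been set up as in imsub_all_fits; the
# 'diff_' output name below is arbitrary, not a pipeline convention.
#
#   result = difference_image(ref, target, params,
#                             stamp_positions=stamp_positions,
#                             psf_image=params.loc_output + os.path.sep + 'psf.fits',
#                             star_positions=star_positions)
#   if result.diff is not None:
#       IO.write_image(result.diff,
#                      params.loc_output + os.path.sep + 'diff_' + target.name)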