def choose_master_dark(exptime, extname, gccdtemp):
    """Select the master dark from the library best matching an exposure.

    Parameters
    ----------
    exptime : float
        Exposure time; only master darks with exactly matching ORIGTIME
        are considered.
    extname : str
        GFA camera extension name, matched against the index's EXTNAME.
    gccdtemp : float
        CCD temperature; among matching darks the one with the closest
        GCCDTEMP is chosen.

    Returns
    -------
    str or None
        Full path of the chosen master dark file, or None when no master
        dark in the library has ORIGTIME == exptime for this camera.
    """
    par = common.gfa_misc_params()

    # eventually could cache the index of master darks...
    fname_index = os.path.join(os.environ[par['meta_env_var']],
                               par['dark_index_filename'])

    print('Reading master dark index table : ' + fname_index)

    assert(os.path.exists(fname_index))

    # renamed from 'str' -- the original shadowed the builtin str
    tab = fits.getdata(fname_index)

    # cases of potential bad readout should already be removed, but just in case
    tab = tab[tab['READWARN'] == 0]

    tab = tab[(tab['ORIGTIME'] == exptime) & (tab['EXTNAME'] == extname)]

    # this is the case where EXPTIME does not have any available
    # master darks in the library of master darks
    if len(tab) == 0:
        return None

    indmin = np.argmin(np.abs(tab['GCCDTEMP'] - gccdtemp))

    fname = tab[indmin]['FNAME_FULL'].replace(' ', '').split('/')[-1]

    # let os.path.join assemble the subdirectory rather than '+' concatenation
    fname = os.path.join(os.environ[par['meta_env_var']],
                         'master_dark_library', fname)

    return fname
def nominal_pixel_sidelen_arith():
    """Return the nominal pixel side length in arcseconds, taken as the
    arithmetic mean of the x and y platescales."""
    par = common.gfa_misc_params()
    platescales_deg = [par['nominal_mer_cd'], par['nominal_sag_cd']]
    return 3600.0 * np.mean(platescales_deg)
def check_image_level_outputs_exist(outdir, fname_in, gzip=True,
                                    cube_index=None):
    """Construct the output file name for every reduced-image flavor,
    discarding the results; presumably reduced_image_fname itself flags
    problems with the would-be outputs -- TODO confirm against callee."""
    par = common.gfa_misc_params()
    for flavor_name in par['reduced_image_flavors']:
        reduced_image_fname(outdir, fname_in, flavor_name, gzip=gzip,
                            cube_index=cube_index, outdir_not_needed=True)
def segmentation_map(image, extname, get_kernel=False):
    """Compute a photutils segmentation map of a 2D image.

    in this context image means a 2D numpy array rather than a GFA_image
    object; extname is currently unused.  Returns segm, or (segm, kernel)
    when get_kernel is True.
    """
    par = common.gfa_misc_params()

    fwhm_pix = par['nominal_fwhm_asec'] / util.nominal_pixel_sidelen_arith()

    threshold = detect_threshold(image, snr=2.0)

    sigma = gaussian_fwhm_to_sigma * fwhm_pix
    ksize = int(np.round(fwhm_pix))
    kernel = Gaussian2DKernel(sigma, x_size=ksize, y_size=ksize)
    kernel.normalize()

    segm = detect_sources(image, threshold, npixels=5, filter_kernel=kernel)

    # add my own dilation of segm.array ?
    # incorporate masking based on master flat/bias in this analysis ?

    if get_kernel:
        return segm, kernel
    return segm
def __init__(self, image):
    """Cut out per-amp prescan/overscan regions from a raw GFA frame and
    compute bad-pixel counts and median levels.

    image should be a 2D numpy array with dimensions 2248 x 1032 in the
    case of DESI GFA cameras.
    """
    par = common.gfa_misc_params()

    sh = image.shape
    assert(sh[0] == par['height_with_prescan_overscan'])
    assert(sh[1] == par['width_with_prescan_overscan'])

    amps = common.valid_amps_list()

    self.overscan_cutouts = {}
    self.prescan_cutouts = {}
    for amp in amps:
        obdy = common.overscan_bdy_coords(amp)
        self.overscan_cutouts[amp] = image[obdy['y_l']:obdy['y_u'],
                                           obdy['x_l']:obdy['x_u']]
        pbdy = common.prescan_bdy_coords(amp)
        self.prescan_cutouts[amp] = image[pbdy['y_l']:pbdy['y_u'],
                                          pbdy['x_l']:pbdy['x_u']]

    self.n_badpix_overscan = self.count_badpixels()
    self.n_badpix_prescan = self.count_badpixels(prescan=True)

    # still per-amp but summing prescan and overscan counts together
    self.n_badpix = {amp: self.n_badpix_overscan[amp] +
                          self.n_badpix_prescan[amp] for amp in amps}

    # including all amps and lumping together prescan and overscan
    self.n_badpix_all = np.sum(list(self.n_badpix.values()))

    # units are raw ADU
    self.overscan_medians = {amp: np.median(self.overscan_cutouts[amp])
                             for amp in amps}

    # units are raw ADU
    self.prescan_medians = {amp: np.median(self.prescan_cutouts[amp])
                            for amp in amps}
def nominal_pixel_area_sq_asec(extname):
    """Return the nominal GFA pixel area in square arcseconds.

    extname is currently unused; the same nominal platescales are
    applied regardless of camera.
    """
    par = common.gfa_misc_params()
    asec_per_pix_x = par['nominal_mer_cd'] * 3600.0
    asec_per_pix_y = par['nominal_sag_cd'] * 3600.0
    return asec_per_pix_x * asec_per_pix_y
def read_dark_image(extname, exptime, t_celsius):
    """Read the master dark for one GFA camera, overscan removed.

    Tries for a master dark whose ORIGTIME exactly matches exptime;
    failing that, falls back to the default master dark file.
    Returns (dark, header, dark_fname).
    """
    assert(common.is_valid_extname(extname))

    par = common.gfa_misc_params()

    # try getting a master dark with an exactly matching integration time
    dark_fname = choose_master_dark(exptime, extname, t_celsius)

    # if no master dark has an exactly matching integration time
    # then just go back to some 'standard' 5 s master dark
    # REVISIT THIS LATER TO DO BETTER
    if dark_fname is None:
        print('could not find a master dark with ORIGTIME matching EXPTIME')
        dark_fname = os.path.join(os.environ[par['meta_env_var']],
                                  par['master_dark_filename'])

    print(f'Attempting to read master dark : {dark_fname}'
          f', extension name : {extname}')

    assert(os.path.exists(dark_fname))

    dark, hdark = fits.getdata(dark_fname, extname=extname, header=True)

    dark = load_calibs.remove_overscan(dark)

    return dark, hdark, dark_fname
def pmgstars_forced_phot(xcentroid, ycentroid, image, elg=False, bgs=False):
    """Forced circular-aperture photometry at a list of centroids.

    xcentroid, ycentroid : nonempty sequences of pixel coordinates.
    image : 2D pixel array; optionally convolved with an exponential
        (elg=True) or de Vaucouleurs (bgs=True) kernel beforehand.
    Returns the background-subtracted aperture fluxes as a numpy array.
    """
    assert(len(xcentroid) > 0)
    assert(len(ycentroid) > 0)

    print('Attempting to do forced aperture photometry')

    # shouldn't happen...
    assert(not (elg and bgs))

    # optionally smooth by a galaxy-profile kernel before measuring
    if elg or bgs:
        par = common.gfa_misc_params()
        param = 'exp_kernel_filename' if elg else 'devauc_kernel_filename'
        fname = os.path.join(os.environ[par['meta_env_var']], par[param])
        kern = fits.getdata(fname)  # non-optimal to repeatedly read this...
        image = ndimage.convolve(image, kern, mode='constant')

    positions = list(zip(xcentroid, ycentroid))

    # the 1.52/1.462 factor comes from David Schlegel's request
    # to have gfa_reduce fiber flux fraction related quantities
    # be referenced to a 1.52 asec diameter aperture, even though
    # the angular diameter corresponding to a 107 um fiber at the
    # GFA focal plane position is smaller (1.462 asec using GFA
    # platescale geometric mean); see SurveySpeed wiki page for 1.52 value
    radius = 3.567*(1.52/1.462)  # pixels

    apertures = CircularAperture(positions, r=radius)
    annulus_apertures = CircularAnnulus(positions, r_in=60.0, r_out=65.0)

    bkg_median = []
    for annulus_mask in annulus_apertures.to_mask(method='center'):
        ann_pixels = annulus_mask.multiply(image)
        ann_pixels_1d = ann_pixels[annulus_mask.data > 0]
        # this sigma_clipped_stats call is actually the slow part !!
        _, med_sigclip, _ = sigma_clipped_stats(ann_pixels_1d)
        bkg_median.append(med_sigclip)

    bkg_median = np.array(bkg_median)

    phot = aperture_photometry(image, apertures)

    aper_bkg_tot = bkg_median*_get_area_from_ap(apertures[0])

    return np.array(phot['aperture_sum']) - aper_bkg_tot
def gfa_center_pix_coords():
    """Return (x, y) pixel coordinates of the exact image center at
    native binning; this lands at the corner of four pixels because
    the sidelengths are even."""
    par = common.gfa_misc_params()
    xc = 0.5 * par['width_pix_native'] + 0.5
    yc = 0.5 * par['height_pix_native'] + 0.5
    return xc, yc
def bgs_convolution(self):
    """Convolve the PSF image with the de Vaucouleurs kernel, storing
    the result in self.smoothed_psf_image_bgs."""
    par = common.gfa_misc_params()
    kern_path = os.path.join(os.environ[par['meta_env_var']],
                             par['devauc_kernel_filename'])
    kern = fits.getdata(kern_path)
    self.smoothed_psf_image_bgs = ndimage.convolve(self.psf_image, kern,
                                                   mode='constant')
def create_satmask(im, extname):
    """Return a boolean saturation mask for a 2D pixel array.

    im is just a 2D array of pixels, not a GFA_image object; True marks
    pixels at or above the full-well threshold in ADU.
    """
    gain = common.gfa_camera_gain(extname)
    par = common.gfa_misc_params()
    sat_thresh_adu = par['full_well_electrons'] / gain
    return im >= sat_thresh_adu
def load_lst():
    """Read and return the ephemeris table from disk."""
    par = common.gfa_misc_params()
    fname = os.path.join(os.environ[par['meta_env_var']],
                         par['ephem_filename'])
    print('READING EPHEMERIS FILE : ', fname)
    assert(os.path.exists(fname))
    return fits.getdata(fname)
def zenith_zeropoint_photometric_1amp(extname, amp):
    """Look up the zenith zeropoint (ADU per second) for one
    camera/amplifier combination; exactly one table row must match."""
    par = common.gfa_misc_params()
    fname = os.path.join(os.environ[par['meta_env_var']],
                         par['zp_filename'])
    # would be better to cache this, but it's of order ~10 kb ..
    tab = fits.getdata(fname)
    match = (tab['EXTNAME'] == extname) & (tab['AMP'] == amp)
    assert(np.sum(match) == 1)
    return tab[match][0]['ZP_ADU_PER_S']
def gaia_chunknames(ipix, ps1=False):
    """Build per-HEALPix chunk file names for the Gaia (or, with
    ps1=True, PS1) catalog directory.

    could add checks to make sure that all ipix values are
    sane HEALPix pixel indices
    RIGHT NOW THIS ASSUMES IPIX IS AN ARRAY !!
    should eventually make this also work for scalar ipix
    """
    par = common.gfa_misc_params()
    env_var = par['ps1_env_var'] if ps1 else par['gaia_env_var']
    catalog_dir = os.environ[env_var]
    return [os.path.join(catalog_dir, f'chunk-{str(i).zfill(5)}.fits')
            for i in ipix]
def read_bias_image(extname):
    """Read the master bias image for one camera, overscan removed."""
    assert(common.is_valid_extname(extname))
    par = common.gfa_misc_params()
    bias_fname = os.path.join(os.environ[par['meta_env_var']],
                              par['master_bias_filename'])
    print(f'Attempting to read master bias : {bias_fname}'
          f', extension name : {extname}')
    assert(os.path.exists(bias_fname))
    return remove_overscan(fits.getdata(bias_fname, extname=extname))
def read_static_mask_image(extname):
    """Read the static bad pixel mask for one camera, overscan removed."""
    assert(common.is_valid_extname(extname))
    par = common.gfa_misc_params()
    mask_fname = os.path.join(os.environ[par['meta_env_var']],
                              par['static_mask_filename'])
    print(f'Attempting to read static bad pixel mask : {mask_fname}'
          f', extension name : {extname}')
    assert(os.path.exists(mask_fname))
    return remove_overscan(fits.getdata(mask_fname, extname=extname))
def gfa_pixel_ymin(pix_center=False, quadrant=None):
    """ "y" here is in GFA pixel coordinates """
    # edge of the first pixel, or its center when pix_center is set
    ymin = 0.0 if pix_center else -0.5
    if quadrant in (1, 2):
        par = common.gfa_misc_params()
        # haven't thought about whether assumption of even width matters here
        ymin += par['height_pix_native'] / 2
    return ymin
def gfa_pixel_ymax(pix_center=False, quadrant=None):
    """ "y" here is in GFA pixel coordinates """
    par = common.gfa_misc_params()
    # right edge of the last pixel, or its center when pix_center is set
    ymax = par['height_pix_native'] - (1.0 if pix_center else 0.5)
    if quadrant in (3, 4):
        # haven't thought about whether assumption of even width matters here
        ymax -= par['height_pix_native'] / 2
    return ymax
def local_tan_wcs(telra, teldec, extname):
    """Construct a WCS recentered on the CCD center of the nominal WCS
    for telescope pointing (telra, teldec), using the local SIP header
    templates on disk."""
    wcs_big = nominal_tan_wcs(telra, teldec, extname)
    crval1, crval2 = ccd_center_radec(wcs_big)
    par = common.gfa_misc_params()
    fname = os.path.join(os.environ[par['meta_env_var']],
                         'dummy_with_headers_local_SIP.zenith.fits.gz')
    header = fits.getheader(fname, extname=extname)
    header['CRVAL1'] = float(crval1)
    header['CRVAL2'] = float(crval2)
    return wcs.WCS(header)
def read_flat_image(extname):
    """Read the master flat image for one camera, overscan removed.

    at some point should add option to return master flat's
    inverse variance as well
    """
    assert(common.is_valid_extname(extname))
    par = common.gfa_misc_params()
    flat_fname = os.path.join(os.environ[par['meta_env_var']],
                              par['master_flat_filename'])
    print(f'Attempting to read master flat : {flat_fname}'
          f', extension name : {extname}')
    assert(os.path.exists(flat_fname))
    return remove_overscan(fits.getdata(flat_fname, extname=extname))
def nominal_tan_wcs(telra, teldec, extname):
    """Create the nominal WCS for one camera at telescope pointing
    (telra, teldec), from the pickled per-camera header templates.

    Parameters
    ----------
    telra, teldec : float
        Telescope pointing, written into CRVAL1/CRVAL2.
    extname : str
        Camera extension name used to pick the header template.

    Returns
    -------
    astropy.wcs.WCS
    """
    # Create a new WCS object.  The number of axes must be set
    # from the start
    par = common.gfa_misc_params()

    fname = os.path.join(os.environ[par['meta_env_var']],
                         par['wcs_templates_filename'])

    # context manager so the template file handle is closed promptly
    # (previously pickle.load(open(...)) leaked the open file)
    with open(fname, 'rb') as f:
        templates = pickle.load(f)

    h = templates[extname]

    h['CRVAL1'] = telra
    h['CRVAL2'] = teldec

    w = wcs.WCS(h)

    return w
def gfa_pixel_xmax(pix_center=False, quadrant=None):
    """ "x" here is in GFA pixel coordinates

    could imagine adding a "binfac" keyword here for use in processing
    steps where I've performed an integer downbinning
    """
    par = common.gfa_misc_params()
    # right edge of the last pixel, or its center when pix_center is set
    xmax = par['width_pix_native'] - (1.0 if pix_center else 0.5)
    if quadrant in (2, 3):
        # haven't thought about whether assumption of even width matters here
        xmax -= par['width_pix_native'] / 2
    return xmax
def zp_photometric_at_airmass(extname, airmass, amp=None):
    """Zeropoint expected in photometric conditions at a given airmass,
    per camera, or per amplifier when amp is given.

    for now don't worry about vectorization
    """
    assert(airmass > 0.99)  # allow for some roundoff to < 1
    if amp is not None:
        zp_zenith = zenith_zeropoint_photometric_1amp(extname, amp)
    else:
        zp_zenith = median_zenith_camera_zeropoint(extname)
    par = common.gfa_misc_params()
    # account for airmass (k term from DESI-5418-v2)
    # "photometric" here means 'in photometric conditions' at this airmass
    return zp_zenith - (airmass - 1) * par['kterm']
def _zenith_distance(ra, dec, lst_deg):
    """Angular distance in degrees from (ra, dec) to the KPNO zenith at
    local sidereal time lst_deg; NaN if any input is NaN.

    for now assume scalar inputs, can work on vectorization later if desired
    """
    if np.isnan(ra) or np.isnan(dec) or np.isnan(lst_deg):
        return np.nan
    kpno_lat = common.gfa_misc_params()['kpno_lat_deg']
    target = SkyCoord(ra * u.deg, dec * u.deg)
    zenith = SkyCoord(lst_deg * u.deg, kpno_lat * u.deg)
    return target.separation(zenith).deg
def gfa_boundary_pixel_coords(pix_center=True):
    """Return (x_bdy, y_bdy) coordinate arrays tracing the GFA image
    boundary (left, top, right, bottom edges concatenated in that order)."""
    par = common.gfa_misc_params()

    # with pixel edges (pix_center=False) each side has one extra sample
    pad = 0 if pix_center else 1

    xmin = gfa_pixel_xmin(pix_center=pix_center)
    xmax = gfa_pixel_xmax(pix_center=pix_center)
    ymin = gfa_pixel_ymin(pix_center=pix_center)
    ymax = gfa_pixel_ymax(pix_center=pix_center)

    x_top = np.arange(xmin, xmax + 1)
    y_left = np.arange(ymin, ymax + 1)

    x_left = np.zeros(par['height_pix_native'] + pad) + xmin
    y_bottom = np.zeros(par['width_pix_native'] + pad) + ymin

    y_top = y_bottom + par['height_pix_native'] - 1 + pad
    x_right = x_left + par['width_pix_native'] - 1 + pad

    y_right = y_left[::-1]
    x_bottom = x_top[::-1]

    x_bdy = np.concatenate((x_left, x_top, x_right, x_bottom))
    y_bdy = np.concatenate((y_left, y_top, y_right, y_bottom))

    return x_bdy, y_bdy
def __init__(self, image_list, exp_header=None, bintables=None,
             max_cbox=31, pmgstars=None):
    """Assemble an exposure object from a list of per-camera GFA_image
    objects; self.images becomes a dictionary of GFA_image objects."""
    par = common.gfa_misc_params()

    names = sorted(im.header['EXTNAME'] for im in image_list)
    self.images = dict(zip(names, par['n_cameras'] * [None]))

    self.dark_current_objs = dict(
        zip(common.valid_image_extname_list(), par['n_cameras'] * [None]))

    self.assign_image_list(image_list)

    # exposure-level header
    self.exp_header = exp_header

    # hack for 20210106
    if self.exp_header is not None:
        if self.exp_header['SKYRA'] is None:
            print('REPLACING GUIDER SKYRA WITH REQRA')
            self.exp_header['SKYRA'] = self.exp_header['REQRA']
        if self.exp_header['SKYDEC'] is None:
            print('REPLACING GUIDER SKYDEC WITH REQDEC')
            self.exp_header['SKYDEC'] = self.exp_header['REQDEC']

    self.pixels_calibrated = None
    self.bintables = bintables
    self.max_cbox = max_cbox
    self.assign_max_cbox()  # to the per-camera images ...
    self.pmgstars = pmgstars

    # eventually may get assigned to be the _ccds summary table
    # object for this exposure
    self.ccds = None
def write_image_level_outputs(exp, outdir, proc_obj, gzip=True,
                              cube_index=None, dont_write_invvar=False,
                              compress_reduced_image=False,
                              write_detmap=False):
    """Write the per-flavor image-level outputs for one exposure.

    Parameters
    ----------
    exp : GFA_exposure object
    outdir : str
        The output directory.
    proc_obj : object
        Provides fname_in (input file name) and gitrev (written into
        each output HDU header as GITREV).
    gzip, cube_index : passed through to reduced_image_fname.
    dont_write_invvar : bool
        Skip the INVVAR flavor.
    compress_reduced_image : bool
        Gzip setting used for the REDUCED flavor instead of `gzip`.
    write_detmap : bool
        Include the DETMAP flavor (skipped by default).
    """
    par = common.gfa_misc_params()

    # copy the flavor list: the original code aliased the list stored in
    # the shared params dict, so .remove() below permanently mutated
    # par['reduced_image_flavors'] for every later caller
    flavors_list = list(par['reduced_image_flavors'])

    if not write_detmap:
        flavors_list.remove('DETMAP')

    if dont_write_invvar:
        flavors_list.remove('INVVAR')

    # iterate the filtered list (the original looped over the shared
    # list, which only worked because of the aliasing bug fixed above)
    for flavor in flavors_list:
        _gzip = (gzip if (flavor != 'REDUCED') else compress_reduced_image)
        outname = reduced_image_fname(outdir, proc_obj.fname_in, flavor,
                                      gzip=_gzip, cube_index=cube_index)

        hdulist = exp.to_hdulist(flavor=flavor)

        for hdu in hdulist:
            hdu.header['GITREV'] = proc_obj.gitrev

        print('Attempting to write ' + flavor + ' image output to ' +
              outname)

        _atomic_write(hdulist, outname)

        print('Successfully wrote ' + flavor + ' image output to ' +
              outname)
def __init__(self, image_list, exp_header=None, bintables=None,
             max_cbox=31):
    """Assemble an exposure object from a list of per-camera GFA_image
    objects; self.images becomes a dictionary of GFA_image objects."""
    par = common.gfa_misc_params()

    n = par['n_cameras']
    self.images = dict(zip(common.valid_image_extname_list(), n * [None]))
    self.dark_current_objs = dict(
        zip(common.valid_image_extname_list(), n * [None]))

    self.assign_image_list(image_list)

    # exposure-level header
    self.exp_header = exp_header

    self.pixels_calibrated = None
    self.bintables = bintables
    self.max_cbox = max_cbox
    self.assign_max_cbox()  # to the per-camera images ...
def gfa_downbinned_shape(binfac):
    """Return the (height, width) image shape after integer downbinning.

    Parameters
    ----------
    binfac : int or float
        Downbinning factor, applied in both dimensions; must be integer
        valued and must divide both native dimensions evenly.

    Returns
    -------
    (int, int)
        note Python convention for (height, width)
    """
    # assume integer rebinning until I come across a case where
    # arbitrary rebinning would be valuable
    # assume same rebinning factor in both dimensions for now, until
    # I come across a case where I would want otherwise
    # isinstance is the idiomatic type check; the original compared
    # type(binfac).__name__ to 'int', which rejects int subclasses
    assert(isinstance(binfac, int) or binfac.is_integer())

    par = common.gfa_misc_params()

    width_native = par['width_pix_native']
    height_native = par['height_pix_native']

    width_downbinned = float(width_native) / float(binfac)
    height_downbinned = float(height_native) / float(binfac)

    assert(width_downbinned.is_integer())
    assert(height_downbinned.is_integer())

    return int(height_downbinned), int(width_downbinned)
def adu_to_surface_brightness(sky_adu_1pixel, acttime, extname):
    """
    convert from ADU (per pixel) to mag per square asec (AB)

    note that this is meant to be applied to an average sky value
    across an entire GFA camera; this function does not take into
    account platescale variations within a camera

    Returns NaN for a non-positive sky level or exposure time.
    """
    if (sky_adu_1pixel <= 0) or (acttime <= 0):
        return np.nan

    par = common.gfa_misc_params()

    pix_area = util.nominal_pixel_area_sq_asec(extname)
    gain = common.gfa_camera_gain(extname)

    # ADU/pix -> ADU/asec^2 -> ADU/s/asec^2 -> e-/s/asec^2
    sky_e_per_sec_sq_asec = sky_adu_1pixel / pix_area / acttime * gain

    return par['nominal_zeropoint'] - 2.5*np.log10(sky_e_per_sec_sq_asec)