def create_master_bias(bias_files):
    """Creates a master bias frame from all files matching bias_files."""
    bias_frames = []
    if len(bias_files) > 0:
        for bias_file in glob.glob('*{0}*.f*t*'.format(bias_files)):
            fits = pf.open(bias_file, memmap=False)
            # Apply the BZERO/BSCALE scaling manually; fall back to the raw
            # data if the keywords are missing.
            try:
                bias_frames.append(float(fits[0].header['BZERO']) +
                                   float(fits[0].header['BSCALE']) * fits[0].data)
            except KeyError:
                bias_frames.append(fits[0].data)
            fits.close()
    if len(bias_frames) > 0:
        if master_bias_method == 'mean':
            master_bias = np.mean(bias_frames, 0)
        else:
            # 'median' and any unrecognised method default to a median stack.
            master_bias = np.median(bias_frames, 0)
    else:
        master_bias = 0.0
    return master_bias
def create_master_flat(flat_files):
    """Creates a master flat frame from all files matching flat_files."""
    flat_frames = []
    if len(str(flat_files)) > 0:
        for flat_file in glob.glob('*{0}*.f*t*'.format(flat_files)):
            fits = pf.open(flat_file, memmap=False)
            try:
                flat_frame = (float(fits[0].header['BZERO']) +
                              float(fits[0].header['BSCALE']) * fits[0].data)
            except KeyError:
                flat_frame = fits[0].data
            # TODO: read the exposure time from the FITS header when
            # defining im_details instead of assuming it here.
            flat_frames.append(flat_frame - master_bias -
                               im_details.exposure_time * master_dark)
            fits.close()
    if len(flat_frames) > 0:
        if master_flat_method == 'mean':
            master_flat = np.mean(flat_frames, 0)
        else:
            # 'median' and any unrecognised method default to a median stack
            # of the individually normalised flats.
            flat_frames = [ff / np.median(ff) for ff in flat_frames]
            master_flat = np.median(flat_frames, 0)
        master_flat = master_flat / np.median(master_flat)
    else:
        master_flat = 1.0
    return master_flat
def create_master_dark(dark_files):
    """Creates a master dark frame (per second of exposure) from all files
    matching dark_files."""
    dark_frames = []
    if len(str(dark_files)) > 0:
        for dark_file in glob.glob('*{0}*.f*t*'.format(dark_files)):
            fits = pf.open(dark_file, memmap=False)
            try:
                dark_frame = (float(fits[0].header['BZERO']) +
                              float(fits[0].header['BSCALE']) * fits[0].data)
            except KeyError:
                dark_frame = fits[0].data
            # Bias-subtract and normalise to a dark rate per second.
            dark_frames.append((dark_frame - master_bias) / im_details.exposure_time)
            fits.close()
    if len(dark_frames) > 0:
        if master_dark_method == 'mean':
            master_dark = np.mean(dark_frames, 0)
        else:
            # 'median' and any unrecognised method default to a median stack.
            master_dark = np.median(dark_frames, 0)
    else:
        master_dark = 0.0
    return master_dark
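# A minimal usage sketch for the three calibration builders above, assuming
# the module-level globals they rely on (glob, pf, np, im_details and the
# master_*_method settings) are already defined. The patterns 'bias', 'dark'
# and 'flat' are placeholders for whatever naming convention the files use.
# Ordering matters: create_master_dark subtracts master_bias and
# create_master_flat subtracts both, so build bias -> dark -> flat.
master_bias = create_master_bias('bias')
master_dark = create_master_dark('dark')
master_flat = create_master_flat('flat')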
def _renormalize_weight(self):
    """Renormalizes the weight image of the stack to a binary (0/1) map."""
    fits = astropy.io.fits.open(self._coadd_weightpath)
    image = fits[0].data
    image[image > 0.] = 1.
    fits[0].data = image
    fits.writeto(self._coadd_weightpath, overwrite=True)
    fits.close()
def find_point_sources(image, seeing_pix=4, threshold_sigma=3, size_lim=5,
                       out_file=None, plot=False):
    """Takes a direct image and finds all sources above some detection
    threshold in it. Used by find_sources_in_direct_image.

    Inputs:
        image: an array representing the direct image
        seeing_pix: seeing size in pixels
        threshold_sigma: detection threshold, in sigmas above the standard
            deviation of the background fluctuation
        size_lim: limit on the fitted stddev, in pixels, above which the fit
            is rejected as a source
        out_file: if given, a .reg file for ds9 is written in the XY format
        plot: if True, show each cutout as it is fitted
    """
    threshold = threshold_sigma * np.sqrt(np.var(np.nan_to_num(image)))
    # Get all cutouts around candidate detections.
    cutouts = pointFinder(image, seeing_pix, threshold)
    all_results = []
    # Loop through cutouts and fit a Gaussian to locate each source.
    for i in cutouts:
        # A legitimate cutout shouldn't be too large or too small.
        if 10 < len(np.ravel(image[i])) < 200:
            if plot:
                plt.imshow(image[i], interpolation='none')
            res = fit_gaussian_to_cutout(image[i], seeing_pix)
            # Convert the fitted centre back to full-image coordinates.
            x = i[1].start + res[0].x_mean.value
            y = i[0].start + res[0].y_mean.value
            x_stddev = res[0].x_stddev.value
            y_stddev = res[0].y_stddev.value
            # Filter out bad fits: the stddev should be reasonably small.
            if x_stddev < size_lim and y_stddev < size_lim:
                all_results += [(y, x, y_stddev, x_stddev)]
    if out_file is not None:
        with open(out_file, mode='w') as f:
            for i in all_results:
                f.write(str(i[1] + 1) + ' ' + str(i[0] + 1) + '\n')
    return all_results
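# Hypothetical use of find_point_sources: load a direct image, detect sources
# and write a ds9 region file. The file names and tuning values here are
# illustrative, not taken from the pipeline above.
from astropy.io import fits as apfits

image = apfits.getdata('direct_image.fits')
sources = find_point_sources(image, seeing_pix=4, threshold_sigma=5,
                             out_file='sources.reg')
for y, x, y_std, x_std in sources:
    print('source at x=%.1f, y=%.1f (stddev %.2f, %.2f)' % (x, y, x_std, y_std))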
def _parse_fits(uri):
    fits = pyfits.open(uri, memmap=False)
    # Prefer the first extension's table data; fall back to the first row of
    # the primary HDU for single-HDU files.
    if len(fits) > 1:
        dat = fits[1].data
    else:
        dat = fits[0].data[0]
    fits.close()
    return dat.tolist()
def filter_list(fullframepath, imdrizzlepath):
    """Returns a dictionary containing a file list for each filter/grism in
    the dataset.

    INPUTS
    ------
    fullframepath : string
        path to full frame data
    imdrizzlepath : string
        path where image files will be copied to; created if it does not exist
    """
    # Get a list of all fullframe files.
    files = glob.glob(os.path.join(fullframepath, "*f_flt.fits"))
    # Populate the filter dictionary with file lists.
    filters = {}
    for file in files:
        fits = pyfits.open(file)
        filt = fits[0].header["FILTER"]
        fits.close()
        if filt not in filters:
            filters[filt] = []
        filters[filt].append(file)
    # For each filter write the file list to a text file with extension .lis,
    # and place images into a directory to be drizzled (created if missing).
    if not os.path.exists(imdrizzlepath):
        os.makedirs(imdrizzlepath)
    for filt in filters:
        # Create a list file for that filter.
        f = open(os.path.join(fullframepath, filt + ".lis"), 'w')
        for file in filters[filt]:
            (path, fname) = os.path.split(file)
            f.write(fname + "\n")
            if filt[0].lower() == 'f':
                # This is an image, not a spectrum: copy it to imdrizzlepath.
                shutil.copy(file, os.path.join(imdrizzlepath, fname))
        f.close()
        if filt[0].lower() == 'f':
            # If this is an image, also copy the list file to imdrizzlepath.
            shutil.copy(os.path.join(fullframepath, filt + '.lis'),
                        os.path.join(imdrizzlepath, filt + '.lis'))
    return filters
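# Hedged usage sketch for filter_list: group the *f_flt.fits frames by their
# FILTER keyword and stage the imaging filters for drizzling. Both paths are
# placeholders.
filters = filter_list('data/fullframe', 'data/imdrizzle')
for filt, frame_files in sorted(filters.items()):
    print(filt, ':', len(frame_files), 'frames')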
def _parse_fits_metadata(uri, metadata):
    fits = pyfits.open(uri, memmap=False)
    # Multi-HDU files carry the metadata in the first extension header;
    # single-HDU files carry it in the primary header. Either way, write a
    # CSV line of (name, ra, dec) to the metadata file.
    if len(fits) > 1:
        header = fits[1].header
        print(header['TITLE'], header['RA'], header['DEC'],
              sep=',', file=metadata)
    else:
        header = fits[0].header
        print(header['FILENAME'], header['RA'], header['DEC'],
              sep=',', file=metadata)
    fits.close()
    return header
def _createStarFromFITS(self, fits):
    DB_NAME_END = "_name"
    DB_IDENT_SEP = "_id_"
    prim_hdu = fits[0].header
    ra = prim_hdu.get(self.FITS_RA)
    dec = prim_hdu.get(self.FITS_DEC)
    ra_unit = prim_hdu.get(self.FITS_RA_UNIT)
    dec_unit = prim_hdu.get(self.FITS_DEC_UNIT)
    star = Star(name=prim_hdu.get(self.FITS_NAME),
                coo=(ra, dec, (ra_unit, dec_unit)),
                starClass=prim_hdu.get(self.FITS_CLASS))
    ident = {}
    more = {}
    for db_name_key in list(prim_hdu.keys()):
        if db_name_key.endswith(DB_NAME_END):
            # '<db>_name' keywords give the star's name in database <db>.
            db_name = db_name_key[:-len(DB_NAME_END)]
            ident.setdefault(db_name, {})
            ident[db_name]["name"] = prim_hdu[db_name_key]
        elif DB_IDENT_SEP in db_name_key:
            # '<db>_id_<key>' keywords give database identifiers; setdefault
            # guards against an id keyword appearing before the name keyword.
            db_name, ident_key = db_name_key.split(DB_IDENT_SEP)
            ident.setdefault(db_name, {})
            if not ident[db_name].get("db_ident"):
                ident[db_name]["db_ident"] = {}
            ident[db_name]["db_ident"][ident_key] = prim_hdu[db_name_key]
        elif db_name_key not in ["SIMPLE", "BITPIX", "NAXIS", "EXTEND",
                                 self.FITS_RA, self.FITS_DEC,
                                 self.FITS_RA_UNIT, self.FITS_DEC_UNIT,
                                 self.FITS_NAME, self.FITS_CLASS]:
            # Anything else is kept as free-form metadata.
            more[db_name_key.lower()] = prim_hdu[db_name_key]
    star.ident = ident
    star.more = more
    # Every extension HDU holds one light curve.
    for lc_hdu in fits[1:]:
        star.putLightCurve(self._createLcFromFits(lc_hdu))
    fits.close()
    return star
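# Illustrative primary-header layout that _createStarFromFITS can parse,
# assuming the FITS_NAME/FITS_RA/... attributes resolve to keywords like the
# ones below. All keyword names and values here are invented.
#
#   NAME    = 'AB Dor'            -> star name
#   RA      = 123.45   DEC = -54.3
#   VSX_name = 'AB Doradus'       -> ident['VSX']['name']
#   VSX_id_oid = 12345            -> ident['VSX']['db_ident']['oid']
#   PERIOD  = 0.514               -> star.more['period']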
def populate_database_from_fits_file(
    db: BundleDB, os_filepath: str, fits_product_lidvid: str
) -> None:
    file_basename = basename(os_filepath)
    try:
        fits = astropy.io.fits.open(os_filepath)
        try:
            db.create_fits_file(
                os_filepath, file_basename, fits_product_lidvid, len(fits)
            )
            _populate_hdus_associations_and_cards(
                db, fits, file_basename, fits_product_lidvid
            )
        finally:
            fits.close()
    except OSError as e:
        db.create_bad_fits_file(os_filepath, file_basename,
                                fits_product_lidvid, str(e))
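# Hedged usage sketch: how populate_database_from_fits_file might be called.
# The BundleDB constructor arguments and the LIDVID string are assumptions
# made for illustration, not taken from the code above.
db = BundleDB('bundle.db')  # hypothetical way to open the database
populate_database_from_fits_file(
    db,
    '/data/hst_09059/u2no0401t_raw.fits',                      # placeholder
    'urn:nasa:pds:hst_09059:data_acs_raw:u2no0401t_raw::1.0',  # placeholder
)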
def testExposure(self):
    """Test that we load the Wcs from the binary table instead of headers
    when possible."""
    self.addSipMetadata()
    wcsIn = lsst.afw.geom.makeSkyWcs(self.metadata)
    dim = lsst.afw.geom.Extent2I(20, 30)
    expIn = lsst.afw.image.ExposureF(dim)
    expIn.setWcs(wcsIn)
    with lsst.utils.tests.getTempFilePath(".fits") as fileName:
        expIn.writeFits(fileName)
        # Manually mess up the headers, so we'd know if we were loading the
        # Wcs from them; when there is a WCS in the header and a WCS in the
        # FITS table, we should use the latter, because the former might just
        # be an approximation.
        fits = astropy.io.fits.open(fileName)
        fits[1].header.remove("CTYPE1")
        fits[1].header.remove("CTYPE2")
        fits.writeto(fileName, overwrite=True)
        fits.close()
        # Now load it using afw.
        expOut = lsst.afw.image.ExposureF(fileName)
        wcsOut = expOut.getWcs()
        self.assertEqual(wcsIn, wcsOut)
def testExposure(self):
    """Test that we load the Wcs from the binary table instead of headers
    when possible."""
    self.addSipMetadata()
    wcsIn = lsst.afw.geom.makeSkyWcs(self.metadata)
    dim = lsst.geom.Extent2I(20, 30)
    expIn = lsst.afw.image.ExposureF(dim)
    expIn.setWcs(wcsIn)
    with lsst.utils.tests.getTempFilePath(".fits") as fileName:
        expIn.writeFits(fileName)
        # Manually mess up the headers, so we'd know if we were loading the
        # Wcs from them; when there is a WCS in the header and a WCS in the
        # FITS table, we should use the latter, because the former might just
        # be an approximation.
        fits = astropy.io.fits.open(fileName)
        fits[1].header.remove("CTYPE1")
        fits[1].header.remove("CTYPE2")
        fits.writeto(fileName, overwrite=True)
        fits.close()
        # Now load it using afw.
        expOut = lsst.afw.image.ExposureF(fileName)
        wcsOut = expOut.getWcs()
        self.assertEqual(wcsIn, wcsOut)
def _make_temp_images(self):
    """Make pixel coverage and variance images."""
    cov_paths, var_paths = [], []
    for sigma_path, weight_path in zip(self._paths, self._weight_paths):
        coverage_path = ".".join((os.path.splitext(sigma_path)[0],
                                  "coverage.fits"))
        var_path = ".".join((os.path.splitext(sigma_path)[0], "var.fits"))
        fits = astropy.io.fits.open(sigma_path)
        wfits = astropy.io.fits.open(weight_path)
        # Make a binary coverage image from the weight map.
        wfits[0].data[wfits[0].data > 0.] = 1.
        wfits[0].data[wfits[0].data <= 0.] = 0.
        wfits.writeto(coverage_path, overwrite=True)
        # Make a variance map from the sigma image, zeroed where there is
        # no coverage.
        fits[0].data = fits[0].data ** 2.
        fits[0].data[wfits[0].data == 0.] = 0.  # FIXME do NaNs propagate badly?
        fits.writeto(var_path, overwrite=True)
        fits.close()
        wfits.close()
        cov_paths.append(coverage_path)
        var_paths.append(var_path)
    return cov_paths, var_paths
def fits(zl, pzl, zs, pzs, filename):
    # Start from a clean output file.
    try:
        os.remove(filename)
        print('Removing file', filename)
    except OSError:
        pass
    fits = fio.FITS(filename, 'rw')
    # Lens n(z) extension.
    hdr = {'extname': 'NZ_POSITION', 'NZDATA': True}
    out = np.empty(np.shape(zl), dtype=[('Z_MID', 'f8')])
    out['Z_MID'] = zl
    fits.write(out, header=hdr)
    fits[-1].insert_column('Z_LOW', np.abs(zl - (zl[1] - zl[0]) / 2.))
    fits[-1].insert_column('Z_HIGH', zl + (zl[1] - zl[0]) / 2.)
    if 'notomo' in filename:
        fits[-1].insert_column('BIN1', pzl)
    else:
        for i in range(len(pzl)):
            fits[-1].insert_column('BIN' + str(i + 1), pzl[i, :])
    # Source n(z) extension.
    hdr = {'extname': 'NZ_SHEAR', 'NZDATA': True}
    out = np.empty(np.shape(zs), dtype=[('Z_MID', 'f8')])
    out['Z_MID'] = zs
    fits.write(out, header=hdr)
    fits[-1].insert_column('Z_LOW', np.abs(zs - (zs[1] - zs[0]) / 2.))
    fits[-1].insert_column('Z_HIGH', zs + (zs[1] - zs[0]) / 2.)
    if 'notomo' in filename:
        fits[-1].insert_column('BIN1', pzs)
    else:
        for i in range(len(pzs)):
            fits[-1].insert_column('BIN' + str(i + 1), pzs[i, :])
    fits.close()
    return
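# Illustrative call to the n(z) writer above; only the array shapes matter,
# the numbers are made up. Each row of pzl/pzs is one tomographic bin.
zl = np.linspace(0.05, 1.45, 15)     # lens bin midpoints
pzl = np.random.rand(3, 15)          # 3 lens n(z) histograms
zs = np.linspace(0.05, 2.45, 25)     # source bin midpoints
pzs = np.random.rand(3, 25)          # 3 source n(z) histograms
fits(zl, pzl, zs, pzs, 'two_point_nz.fits')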
def plot_diff(diff_path, median, sigma, plot_path):
    """Plot a histogram of the difference image."""
    fits = astropy.io.fits.open(diff_path)
    pixels = fits[0].data
    pixels = pixels[np.isfinite(pixels)].ravel()
    fig = Figure(figsize=(3.5, 3.5))
    canvas = FigureCanvas(fig)
    gs = gridspec.GridSpec(1, 1,
                           left=0.15, right=0.95, bottom=0.15, top=0.95,
                           wspace=None, hspace=None,
                           width_ratios=None, height_ratios=None)
    ax = fig.add_subplot(gs[0])
    ax.hist(pixels, 1000, histtype='stepfilled',
            edgecolor='None', facecolor='dodgerblue')
    # Mark the median and +/- one sigma.
    ax.axvline(median, ls='-', c='k', lw=2)
    ax.axvline(median - sigma, ls='--', c='k', lw=1)
    ax.axvline(median + sigma, ls='--', c='k', lw=1)
    ax.text(0.1, 0.9, r"$%.2f \pm %.2f$" % (median, sigma),
            ha='left', va='top', transform=ax.transAxes)
    ax.set_xlim(median - 3 * sigma, median + 3 * sigma)
    gs.tight_layout(fig, pad=1.08, h_pad=None, w_pad=None, rect=None)
    canvas.print_figure(plot_path + ".pdf", format="pdf")
    fits.close()
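# Hypothetical call to plot_diff: histogram a difference image against
# previously measured statistics. The file names and numbers are made up.
plot_diff('diff_image.fits', median=0.02, sigma=0.15, plot_path='diff_hist')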
def reduce_observations(self, observation_files):
    """Reduces the observations.

    Args:
        observation_files (str): identifying name of the observation files
    """
    observation_files = list(Frame.objects.filter(campaign_id=1,
                                                  observation_type='S'))
    observation_files.sort()  # assumes Frame instances define an ordering
    percent = 0
    lt0 = time.time()
    for counter, science_file in enumerate(observation_files):
        # Correct the frame with the master bias, master dark and master flat.
        # NOTE: science_file is a Frame instance here; pf.open and the path
        # handling below presumably need the frame's file path.
        fits = pf.open(science_file, memmap=False)
        try:
            data_frame = (float(fits[0].header['BZERO']) +
                          float(fits[0].header['BSCALE']) * fits[0].data)
        except KeyError:
            data_frame = fits[0].data
        # TODO: replace every use of exposure_time_key with im_details.exposure.
        fits[0].data = (data_frame - master_bias -
                        fits[0].header[exposure_time_key] * master_dark) / master_flat
        fits[0].header.set('BZERO', 0.0)
        fits[0].header.set('BSCALE', 1.0)
        # Estimate the sky background level and spread.
        norm, floor, mean, std = fit_distribution1d_gaussian(
            fits[0].data, binning=fits[0].data.size / bin_to)
        if np.isnan(norm):
            mean = np.mean(fits[0].data)
            std = np.std(fits[0].data)
        # NOTE: local_time must already hold the frame's raw observation time
        # at this point; it is only reformatted for the file name below.
        julian_date = (ephem.julian_date(float(ephem.Date(local_time))) +
                       im_details.exposure / (2.0 * 60.0 * 60.0 * 24.0))
        ra_target, dec_target = target_ra_dec.split()
        heliocentric_julian_date = jd_to_hjd(ra_target, dec_target, julian_date)
        self.testx.append(heliocentric_julian_date)
        self.testy.append(mean)
        self.testz.append(std)
        fits[0].header.set(mean_key, mean)
        fits[0].header.set(std_key, std)
        # Write the new fits file, named after the local observation time.
        if observation_date_key == observation_time_key:
            local_time = fits[0].header[observation_date_key]
            local_time = '{0}_'.format(
                local_time.replace('-', '_').replace('T', '_').replace(':', '_'))
        else:
            local_time = '{0}_{1}_'.format(
                fits[0].header[observation_date_key].split('T')[0].replace('-', '_'),
                fits[0].header[observation_time_key].replace(':', '_'))
        try:
            hdu = pf.CompImageHDU(header=fits[0].header, data=fits[0].data)
        except Exception:
            hdu = pf.ImageHDU(header=fits[0].header, data=fits[0].data)
        hdu.writeto('{0}{1}{2}{3}{4}'.format(reduction_directory, os.sep,
                                             reduction_prefix, local_time,
                                             science_file.split(os.sep)[-1]))
        if counter == 0:
            ax.cla()
            ax.imshow(fits[0].data[::2, ::2], origin='lower', cmap=cm.Greys_r,
                      vmin=fits[0].header[mean_key] +
                      frame_low_std * fits[0].header[std_key],
                      vmax=fits[0].header[mean_key] +
                      frame_upper_std * fits[0].header[std_key])
            ax.axis('off')
            canvas.show()
        fits.close()
def _resize_resampled_images(self, target_fits_path, resampled_paths):
    """Ensures that the CRPIX, CRVAL and NAXIS1/2 values of the resampled
    images match the target, and crops/pads if not. Swarp *should* do this
    properly, but sometimes does not.
    """
    target_fits = astropy.io.fits.open(target_fits_path)
    rNAXIS1 = target_fits[0].header['NAXIS1']
    rNAXIS2 = target_fits[0].header['NAXIS2']
    rCRPIX1 = target_fits[0].header['CRPIX1']
    rCRPIX2 = target_fits[0].header['CRPIX2']
    for path in resampled_paths:
        touched = False  # toggled True if the image is modified
        print("path", path)
        fits = astropy.io.fits.open(path)
        image = fits[0].data
        print("orig shape", image.shape)
        # x-axis
        if rCRPIX1 > fits[0].header['CRPIX1']:
            # Pad from the left.
            print("CRPIX1 conflict %i %i" % (rCRPIX1, fits[0].header['CRPIX1']))
            dx = rCRPIX1 - fits[0].header['CRPIX1']
            print("Pad left by %i" % dx)
            pad = np.ones((image.shape[0], dx)) * np.nan
            image = np.hstack((pad, image))
            print(image.shape)
            touched = True
        elif rCRPIX1 < fits[0].header['CRPIX1']:
            # Trim from the left.
            print("CRPIX1 conflict %i %i" % (rCRPIX1, fits[0].header['CRPIX1']))
            dx = fits[0].header['CRPIX1'] - rCRPIX1
            print("Trim left by %i" % dx)
            image = image[:, dx:]
            print(image.shape)
            touched = True
        if rNAXIS1 > image.shape[1]:
            # Pad to the right.
            print("NAXIS1 conflict %i %i" % (rNAXIS1, image.shape[1]))
            dx = rNAXIS1 - image.shape[1]
            print("Pad from right by %i" % dx)
            pad = np.ones((image.shape[0], dx)) * np.nan
            image = np.hstack((image, pad))
            print(image.shape)
            touched = True
        elif rNAXIS1 < image.shape[1]:
            # Trim from the right.
            print("NAXIS1 conflict %i %i" % (rNAXIS1, image.shape[1]))
            dx = image.shape[1] - rNAXIS1
            print("Trim from right by %i" % dx)
            image = image[:, :-dx]
            print(image.shape)
            touched = True
        # y-axis
        crpix2 = fits[0].header['CRPIX2']
        if rCRPIX2 > crpix2:
            # Pad from the bottom (low index in the image array).
            print("pad from bottom")
            dx = rCRPIX2 - crpix2
            pad = np.ones((dx, image.shape[1])) * np.nan
            image = np.vstack((pad, image))
            touched = True
        elif rCRPIX2 < crpix2:
            # Trim from the bottom (low index in the image array).
            print("trim from bottom")
            dx = crpix2 - rCRPIX2
            image = image[dx:, :]
            touched = True
        if rNAXIS2 > image.shape[0]:
            # Pad from the top (high index in the image array).
            print("pad from top")
            dx = rNAXIS2 - image.shape[0]
            pad = np.ones((dx, image.shape[1])) * np.nan
            image = np.vstack((image, pad))
            touched = True
        elif rNAXIS2 < image.shape[0]:
            # Trim from the top (high index in the image array).
            print("trim from top")
            dx = image.shape[0] - rNAXIS2
            image = image[:-dx, :]
            touched = True
        if touched:
            fits[0].data = image
            print("image.shape", image.shape)
            fits[0].header.set('NAXIS1', image.shape[1])
            fits[0].header.set('NAXIS2', image.shape[0])
            fits[0].header.set('CRPIX1', rCRPIX1)
            fits[0].header.set('CRPIX2', rCRPIX2)
            fits.writeto(path, overwrite=True)
        fits.close()
    target_fits.close()
def sigma_l2fits(filename, nchains, burnin, path, outname, save=True):
    """
    Converts a c3 h5 dataset to fits for c1 BR and GBR estimator analysis.

    ex. c3pp sigma-l2fits chains_v1/chain 5 10 cmb_sigma_l_GBRlike.fits

    If given "chain_c0001.h5", filename is cut to "chain" and will look in the
    same directory for "chain_c*****.h5".
    See comm_like_tools for further information about BR and GBR post processing.
    """
    click.echo("{:-^48}".format("Formatting sigma_l data to fits file"))
    import h5py

    if filename.endswith(".h5"):
        filename = filename.rsplit("_", 1)[0]

    # First pass: find the longest chain so the output array can be allocated.
    temp = np.zeros(nchains)
    for nc in range(1, nchains + 1):
        with h5py.File(filename + "_c" + str(nc).zfill(4) + ".h5", "r") as f:
            groups = list(f.keys())
            temp[nc - 1] = len(groups)
    nsamples_max = int(max(temp[:]))
    click.echo(f"Largest chain has {nsamples_max} samples, using burnin {burnin}\n")

    # Second pass: read every chain into dset[sample, chain, spectrum, ell].
    for nc in range(1, nchains + 1):
        fn = filename + "_c" + str(nc).zfill(4) + ".h5"
        with h5py.File(fn, "r") as f:
            click.echo(f"Reading {fn}")
            groups = list(f.keys())
            nsamples = len(groups)
            if nc == 1:
                dset = np.zeros((
                    nsamples_max + 1,
                    1,
                    len(f[groups[0] + "/" + path]),
                    len(f[groups[0] + "/" + path][0]),
                ))
                nspec = len(f[groups[0] + "/" + path])
                lmax = len(f[groups[0] + "/" + path][0]) - 1
            else:
                dset = np.append(
                    dset,
                    np.zeros((nsamples_max + 1, 1, nspec, lmax + 1)),
                    axis=1,
                )
            click.echo(
                f"Dataset: {path} \n# samples: {nsamples} \n# spectra: {nspec} \nlmax: {lmax}"
            )
            for i in range(nsamples):
                for j in range(nspec):
                    dset[i + 1, nc - 1, j, :] = np.asarray(
                        f[groups[i] + "/" + path][j][:])
            click.echo("")

    # Convert C_l to D_l = l(l+1)C_l / 2pi. Optimize with jit?
    ell = np.arange(lmax + 1)
    for nc in range(1, nchains + 1):
        for i in range(1, nsamples_max + 1):
            for j in range(nspec):
                dset[i, nc - 1, j, :] = (dset[i, nc - 1, j, :] * ell[:] *
                                         (ell[:] + 1.0) / 2.0 / np.pi)
    # Slot 0 carries the post-burnin sample count (nsamples here is from the
    # last chain read).
    dset[0, :, :, :] = nsamples - burnin

    if save:
        click.echo(f"Dumping fits file: {outname}")
        dset = np.asarray(dset, dtype="f4")
        from astropy.io import fits

        head = fits.Header()
        head["FUNCNAME"] = ("Gibbs sampled power spectra", "Full function name")
        head["LMAX"] = (lmax, "Maximum multipole moment")
        head["NUMSAMP"] = (nsamples_max, "Number of samples")
        head["NUMCHAIN"] = (nchains, "Number of independent chains")
        head["NUMSPEC"] = (nspec, "Number of power spectra")
        # FITSIO-based saving is deprecated; use astropy.
        fits.writeto(outname, dset, head, overwrite=True)
    return dset
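# Besides the CLI form shown in the docstring, sigma_l2fits can be called
# directly; the dataset path below ('cls/sigma_l') is a guess at the h5
# layout, not taken from the code.
dset = sigma_l2fits('chains_v1/chain', nchains=5, burnin=10,
                    path='cls/sigma_l', outname='cmb_sigma_l_GBRlike.fits')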