def makeMasterFlat(images, master_bias):
    """Create (or load a cached) median-combined master flat.

    Flats are corrected for their bias level when ``master_bias`` is given.

    Parameters
    ----------
    images : ccdproc.ImageFileCollection
        Collection searched with ``files_filtered(imagetyp=FLAT_KEYWORD)``.
    master_bias : CCDData or None
        Master bias to subtract from each flat; skipped when falsy.

    Returns
    -------
    CCDData or None
        The master flat, or None when no flats are found.
    """
    try:
        # Reuse a previously saved master flat if one exists on disk.
        master_flat = CCDData.read('master_flat.fits', unit=u.adu)
        return master_flat
    except FileNotFoundError:
        # create the master flat field
        flat_list = []
        print('Reducing flats')
        for f in images.files_filtered(imagetyp=FLAT_KEYWORD):
            print(f)
            # The original also read EXPTIME here but never used it;
            # that dead read has been removed.
            ccd = CCDData.read(f, unit=u.adu)
            if master_bias:
                ccd = subtract_bias(ccd, master_bias)
            else:
                print('No master bias, skipping correction...')
            flat_list.append(ccd)
        try:
            master_flat = combine(flat_list, method='median')
            # 'clobber' was removed from astropy; 'overwrite' is the
            # supported keyword (already used elsewhere in this file).
            master_flat.write('master_flat.fits', overwrite=True)
            return master_flat
        except IndexError:
            # combine() raises IndexError on an empty list.
            print('There are no flats, skipping...')
            return None
def load_image(filename):
    """Read *filename* into a CCDData, retrying with an explicit unit.

    ``CCDData.read`` raises ValueError when the FITS header carries no
    unit; in that case retry with ``u.dyn`` (kept from the original —
    presumably a deliberate placeholder unit; verify with callers).

    Returns
    -------
    CCDData
    """
    try:
        data = CCDData.read(filename)
    except ValueError:
        # No (or unparsable) unit in the header: retry with an explicit
        # one.  The original wrapped this retry in `try/except: raise`,
        # which is a no-op and has been removed.
        data = CCDData.read(filename, unit=u.dyn)
    return data
def reduce_image(imagefile, dark=None, flat=None):
    """Basic reduction: optional dark subtraction and flat correction.

    Parameters
    ----------
    imagefile : str
        Path to the raw image (read as ADU).
    dark : str, optional
        Path to a dark frame; subtracted directly with no exposure-time
        scaling — assumes matching exposure times (TODO confirm).
    flat : str, optional
        Path to a master flat handed to ``flat_correct``.

    Returns
    -------
    CCDData
        The reduced image.
    """
    im = CCDData.read(imagefile, unit='adu')
    if dark is not None:
        dark = CCDData.read(dark, unit='adu')
        im = im.subtract(dark)
    if flat is not None:
        # masterflat = CCDData.read(flat, unit='adu')
        # Read via fits directly so no uncertainty plane is attached.
        hdul = fits.open(flat)
        masterflat = CCDData(data=hdul[0].data, uncertainty=None,
                             meta=hdul[0].header, unit='adu')
        im = flat_correct(im, masterflat)
    return im
def _save_stack(self, stack_arr, stack_name, master_hdr):
    """Write *stack_arr* into the output directory with *master_hdr*.

    The stack is first written via CCDData (mask/uncertainty HDUs
    suppressed), then reopened in update mode so the primary header can
    be replaced with the supplied master header.
    """
    out_path = os.path.join(self.output_directory, stack_name)
    CCDData.write(stack_arr, out_path, hdu_mask=None,
                  hdu_uncertainty=None, overwrite=True)
    # Context manager guarantees the updated file is flushed AND closed;
    # the original left the handle open (resource leak).
    with fits.open(out_path, mode='update') as f:
        f[0].header = master_hdr
        f.verify('silentfix')
        f.flush()
    self.info('Saving stack {} finished'.format(stack_name))
def test_combine_masked(self):
    # Median-combining three identical masked frames must reproduce the
    # input: row 5 is forced to zero and masked out, sigma clipping is
    # applied, and the median combine should return the original data.
    x = np.random.normal(size=(10, 10))
    x[5, :] = 0
    x = np.ma.masked_where(x == 0, x)
    ccd1 = CCDData(x, unit='adu')
    ccd2 = ccd1.copy()
    ccd3 = ccd1.copy()
    combiner = Combiner([ccd1, ccd2, ccd3])
    combiner.sigma_clipping(low_thresh=2, high_thresh=5)
    combined_data = combiner.median_combine()
    np.testing.assert_equal(combined_data.data, ccd1.data)
def combineflat(cal_dir="../Data/20181207/cals", mast_dir=".", filt=[], binning=2):
    """Generate a master flat file from a calibration directory.

    Requires existing master bias and dark frames under
    ``mast_dir``/master.  Each FLAT frame is rebinned if needed,
    bias/dark corrected, median-normalised, then median combined.

    Parameters
    ----------
    cal_dir : str
        Directory containing the raw calibration frames.
    filt : list
        Filter list forwarded to ``gen.getimages``.
    binning : int
        Target binning; finer-binned frames are rebinned down.

    Returns
    -------
    bool
        True on success, False when inputs are missing or unusable.
    """
    # Require the master bias/dark produced by earlier steps.
    # (Python-2 `print` statements converted to the print() function
    # used everywhere else in this file.)
    if not os.path.isfile(mast_dir + "/master/master_bias.FIT"):
        print("No master bias file")
        return False
    if not os.path.isfile(mast_dir + "/master/master_dark.FIT"):
        print("No master dark file")
        return False
    master_bias = CCDData.read(mast_dir + "/master/master_bias.FIT")
    master_dark = CCDData.read(mast_dir + "/master/master_dark.FIT")
    # Generate image list
    imagelist = gen.getimages(cal_dir, filt=filt)
    flat_list = []
    for img in imagelist:
        ccd = CCDData.read(cal_dir + '/' + img, unit=u.adu)
        if ccd.header["IMAGETYP"].strip() == "FLAT":
            # Rebin images if needed; a frame binned coarser than the
            # target cannot be recovered.
            if ccd.header["XBINNING"] > binning:
                print("ERROR: Binning too low")
                return False
            elif ccd.header["XBINNING"] < binning:
                ccd.data = gen.rebin(ccd.data,
                                     oldbin=ccd.header["XBINNING"],
                                     newbin=binning)
                ccd.header["XBINNING"] = binning
                ccd.header["YBINNING"] = binning
            # Remove bias and dark effects, then normalise to the median.
            ccd = ccdproc.subtract_bias(ccd, master_bias)
            ccd = ccdproc.subtract_dark(
                ccd, master_dark,
                dark_exposure=master_dark.header["EXPTIME"] * u.s,
                data_exposure=ccd.header["EXPTIME"] * u.s, scale=True)
            ccd.data = ccd.data / np.median(ccd.data)
            flat_list.append(ccd)
    if len(flat_list) == 0:
        print("ERROR: no flat files")
        return False
    # Generate master file.  NOTE(review): the output name carries a
    # trailing underscore ("master_flat_.FIT"); a filter name was
    # probably meant to go there — confirm before relying on it.
    master_flat = ccdproc.combine(flat_list, method='median', dtype="float32")
    master_flat.write(mast_dir + "/master/master_flat_" + ".FIT",
                      overwrite=True)
    return True
def ccdproc_images_filter(list_files, image_filter=None, master_flat=None, master_bias=None, fits_section=None, gain=None, readnoise=None, error=False, sky=True, dout=None, cosmic=False, mbox=15, rbox=15, gbox=11, cleantype="medmask", cosmic_method='lacosmic', sigclip=5, key_filter='filter', dfilter={'imagetyp':'LIGHT'}, mask=None, key_find='find', invert_find=False, **kwargs):
    """Reduce a list of science frames with ccdproc.

    Each image is trimmed / gain-corrected / bias- and flat-calibrated
    via ``ccdproc.ccd_process``; optionally cosmic-ray cleaned and sky
    subtracted; then written to disk with a 'c' prefix.

    Parameters (selection)
    ----------
    list_files : list
        Candidate files; filtered by ``dfilter`` / ``mask``.
    image_filter : str, optional
        Filter name merged into ``dfilter`` under ``key_filter``.
    error : bool
        Compute an uncertainty frame; requires ``gain`` and ``readnoise``.
    dout : str, optional
        Output directory joined onto the prefixed file name.

    Returns
    -------
    dict or None
        Mapping of output file name -> reduced CCDData; None when the
        error options are inconsistent.
    """
    if error and (gain is None or readnoise is None):
        print('WARNING: You need to provide "gain" and "readnoise" to compute the error!')
        return
    # Promote bare numbers to proper astropy quantities.
    if gain is not None and not isinstance(gain, u.Quantity):
        gain = gain * u.electron / u.adu
    if readnoise is not None and not isinstance(readnoise, u.Quantity):
        readnoise = readnoise * u.electron
    if dfilter is not None and key_filter is not None and image_filter is not None:
        dfilter = addKeysListDict(dfilter, {key_filter: image_filter})
    list_files = getListFiles(list_files, dfilter, mask, key_find=key_find,
                              invert_find=invert_find)
    dccd = {}
    for filename in list_files:
        ccd = CCDData.read(filename, unit=u.adu)
        nccd = ccdproc.ccd_process(ccd, trim=fits_section, gain=gain,
                                   master_bias=master_bias,
                                   master_flat=master_flat,
                                   readnoise=readnoise, error=error)
        # ccd_process drops keywords it does not know; copy them back.
        for key in ccd.header:
            if key not in nccd.header:
                nccd.header[key] = ccd.header[key]
        # Better get rid of the cosmic rays BEFORE subtracting the
        # global sky background
        if cosmic:
            nccd = cleanCosmic(nccd, mbox=mbox, rbox=rbox, gbox=gbox,
                               sigclip=sigclip, cleantype=cleantype,
                               cosmic_method=cosmic_method)
        if sky:
            nccd = subtract_sky_ccd(nccd, **kwargs)
        # Provenance: record which master calibrations were applied.
        addKeyHdr(nccd.header, 'MBIAS', getFilename(master_bias))
        addKeyHdr(nccd.header, 'MFLAT', getFilename(master_flat))
        filename = 'c%s' % os.path.basename(filename)
        dccd[filename] = nccd
        filename = join_path(filename, dout)
        nccd.header['FILENAME'] = os.path.basename(filename)
        nccd.header['CCDVER'] = VERSION
        nccd.header = ammendHeader(nccd.header)
        # 'clobber' was removed from astropy; 'overwrite' is the
        # supported keyword.
        nccd.write(filename, overwrite=True)
    return dccd
def main():
    """Command-line entry point: LACosmic-clean a list of FITS images."""
    parser = argparse.ArgumentParser(description='Perform LACosmic cleaning of images')
    parser.add_argument('filenames', nargs='+', help='List of files to clean.')
    parser.add_argument('-odir', metavar='outdir', required=True, type=str,
                        help='Output directory for files.')
    parser.add_argument('-sclip', metavar='sigclip', type=float, default=5,
                        help='Laplacian-to-noise limit for cosmic ray detection. Lower values will flag more pixels as cosmic rays (default=5).')
    parser.add_argument('-sfrac', metavar='sigfrac', type=float, default=0.3,
                        help='Fractional detection limit for neighboring pixels. For cosmic ray neighbor pixels, a Laplacian-to-noise detection limit of sigfrac * sigclip will be used. (default=0.3).')
    parser.add_argument('-objlim', type=float, default=5,
                        help='Minimum contrast between Laplacian image and the fine structure image. Increase this value if cores of bright stars are flagged as cosmic rays (default=5).')
    parser.add_argument('-satlevel', type=float, default=65535,
                        help='Saturation level of the image (electrons). This value is used to detect saturated stars and pixels at or above this level are added to the mask (default=65535)')
    # Typo fixed: 'umber' -> 'Number'.
    parser.add_argument('-niter', type=int, default=5,
                        help='Number of iterations of the LA Cosmic algorithm to perform (default=5).')
    # Help text re-joined onto one line (it previously contained a raw
    # line break).
    parser.add_argument('-njobs', type=int, default=1,
                        help='Process images in parallel. "-1" is all CPUs (default=1).')
    parser.add_argument('--c', action='store_true',
                        help='Clobber (overwrite) on output')
    args = parser.parse_args()

    # Lazily read each frame; Parallel consumes the generator.
    ccds = (CCDData.read(fname, unit='adu') for fname in args.filenames)
    with Parallel(args.njobs, verbose=11) as parallel:
        cleaned = parallel(delayed(cosmicray_lacosmic)(
            ccd, sigclip=args.sclip, sigfrac=args.sfrac, niter=args.niter,
            objlim=args.objlim, satlevel=args.satlevel) for ccd in ccds)
    outfiles = (os.path.join(args.odir, os.path.basename(fname))
                for fname in args.filenames)
    for hdu, outfile in zip(cleaned, outfiles):
        if isinstance(hdu, CCDData):
            hdu = hdu.to_hdu(hdu_mask=None, hdu_uncertainty=None)
        header = hdu[0].header
        header.add_history('clean.py - %s' % Time(Time.now(), format='fits'))
        header['CLEANED'] = (True, 'Cleaned with LACosmics')
        header['CLNMTHD'] = (CLNMTHD, 'Method used to clean')
        try:
            hdu.writeto(outfile, overwrite=args.c)
        except OSError as e:
            raise OSError("File '%s' already exists. Re-run with --c flag to overwrite existing files." % outfile) from e
def setUp(self):
    """Build a DCR-parameter generator and a minimal Red-camera frame."""
    self.create = GenerateDcrParFile()
    self.ccd = CCDData(data=np.ones((100, 100)),
                       meta=fits.Header(),
                       unit='adu')
    # Header values the generator reads: Red camera, 1x1 binning.
    self.ccd.header.set('INSTCONF', value='Red')
    self.ccd.header.set('CCDSUM', value='1 1')
def setUp(self):
    """Fake frame plus the trace-keyword fixture used by the tests."""
    self.ccd = CCDData(data=np.ones((800, 2000)),
                       meta=fits.Header(),
                       unit='adu')
    # Keywords expected to appear in the header after tracing.
    self.all_keywords = ['GSP_TMOD',
                         'GSP_TORD',
                         'GSP_TC00',
                         'GSP_TC01',
                         'GSP_TC02',
                         'GSP_TERR']
    # Keyword -> [value, comment].  'Polinomial1D' is kept verbatim —
    # presumably it matches the (misspelled) string the pipeline writes;
    # confirm before "fixing" the spelling.
    self.trace_info = collections.OrderedDict()
    self.trace_info['GSP_TMOD'] = ['Polinomial1D',
                                   'Model name used to fit trace']
    self.trace_info['GSP_TORD'] = [2, 'Degree of the model used to fit '
                                      'target trace']
    self.trace_info['GSP_TC00'] = [500, 'Parameter c0']
    self.trace_info['GSP_TC01'] = [1, 'Parameter c1']
    self.trace_info['GSP_TC02'] = [2, 'Parameter c2']
    self.trace_info['GSP_TERR'] = [0.5, 'RMS error of target trace']
def create_master_bias(list_files, fitsfile=None, fits_section=None, gain=None, method='median', dfilter={'imagetyp':'bias'}, mask=None, key_find='find', invert_find=False, sjoin=','):
    """Combine bias frames into a master bias.

    Parameters
    ----------
    list_files : list
        Candidate files; filtered by ``dfilter`` / ``mask``.
    fitsfile : str, optional
        When given, the master bias is also written to this path.
    fits_section : str, optional
        Trim section applied to every frame.
    gain : number or Quantity, optional
        Gain for electron conversion; plain numbers become e-/adu.
    method : str
        Combination method passed to ``ccdproc.combine``.
    sjoin : str or None
        Separator used to record the input file names in LBIAS.

    Returns
    -------
    CCDData
        The combined master bias.
    """
    if gain is not None and not isinstance(gain, u.Quantity):
        gain = gain * u.electron / u.adu
    lbias = []
    list_files = getListFiles(list_files, dfilter, mask, key_find=key_find,
                              invert_find=invert_find)
    for filename in list_files:
        ccd = CCDData.read(filename, unit=u.adu)
        trimmed = fits_section is not None
        ccd = ccdproc.trim_image(ccd, fits_section=fits_section,
                                 add_keyword={'trimmed': trimmed})
        if gain is not None:
            ccd = ccdproc.gain_correct(ccd, gain)
        lbias.append(ccd)
    # Local renamed from "combine" so it no longer shadows ccdproc.combine.
    master = ccdproc.combine(lbias, method=method)
    if gain is not None and 'GAIN' not in master.header:
        # Third argument of Header.set is the comment; the unit is
        # recorded there (as in the original).
        master.header.set('GAIN', gain.value, gain.unit)
    master.header['CGAIN'] = True if gain is not None else False
    master.header['IMAGETYP'] = 'BIAS'
    master.header['CMETHOD'] = method
    master.header['CCDVER'] = VERSION
    if sjoin is not None:
        # Loop variable renamed from "fits", which shadowed the
        # astropy.io.fits module name.
        master.header['LBIAS'] = sjoin.join(
            [os.path.basename(fname) for fname in list_files])
    master.header['NBIAS'] = len(list_files)
    if fitsfile is not None:
        master.header['FILENAME'] = os.path.basename(fitsfile)
        # 'clobber' was removed from astropy; 'overwrite' is the
        # supported keyword.
        master.write(fitsfile, overwrite=True)
    return master
def __call__(self, in_file, save=False):
    """Plot the wavelength-calibrated spectrum stored in *in_file*.

    Parameters
    ----------
    in_file : str
        FITS file with a wavelength solution in its header.
    save : bool
        When False, attach the key-press handler and show interactively.
    """
    self.file = in_file
    self.fig, self.ax = plt.subplots()
    # read data and get its wavelength solution
    ccd = CCDData.read(self.file, unit=u.adu)
    wcs_reader = ReadWavelengthSolution(header=ccd.header, data=ccd.data)
    wavelength, intensity = wcs_reader()
    manager = plt.get_current_fig_manager()
    manager.window.showMaximized()
    plt.title('{:s}\n{:s}'.format(self.file, ccd.header['OBJECT']))
    self.ax.plot(wavelength, intensity, color='k', label='Data')
    # Reference marker at 6562.8 Angstrom (H-alpha).
    self.ax.axvline(6562.8, color='r')
    self.ax.set_xlim((wavelength[0], wavelength[-1]))
    self.ax.set_ylabel('Intensity (ADU)')
    self.ax.set_xlabel('Wavelength (Angstrom)')
    plt.legend(loc='best')
    plt.subplots_adjust(left=0.05, right=0.99, top=0.96, bottom=0.04,
                        hspace=0.17, wspace=0.11)
    # plt.tight_layout()
    if not save:
        self.fig.canvas.mpl_connect('key_press_event', self.key_pressed)
        plt.show()
def setUp(self):
    """Fake COMP frame plus trace/profile fixtures for extraction tests."""
    self.fake_image = CCDData(data=np.ones((100, 100)),
                              meta=fits.Header(),
                              unit='adu')
    self.fake_image.header.set('NAXIS', value=2)
    self.fake_image.header.set('NAXIS1', value=100)
    self.fake_image.header.set('NAXIS2', value=100)
    self.fake_image.header.set('OBSTYPE', value='COMP')
    self.fake_image.header['GSP_FNAM'] = 'fake-image.fits'
    # Create model aligned with pixels - represents the trace
    self.target_trace = models.Linear1D(slope=0, intercept=50.3)
    # Fixture constants: profile width (stddev), extraction half-width
    # in stddev units, and background distance from the trace centre.
    self.stddev = 8.4
    self.n_stddev = 2
    self.distance = 1
    self.target_profile = models.Gaussian1D(amplitude=1, mean=50.3,
                                            stddev=self.stddev)
    # Expected extraction: flat spectrum equal to stddev * n_stddev.
    self.reference_result = np.ones(100) * self.stddev * self.n_stddev
def __init__(self, filename, ccd_gain, ccd_readnoise,  # ccd properties
             oscan_idx, oscan_size,  # overscan region
             plot_path=None, zscaler=None, cmap=None,  # for plotting
             **read_kwargs):
    """Load a CCD frame and record its reduction/plotting settings.

    Parameters
    ----------
    filename : str
        FITS file passed to ``CCDData.read`` along with **read_kwargs.
    ccd_gain, ccd_readnoise
        Detector properties stored for downstream processing.
    oscan_idx, oscan_size
        Overscan region location and extent.
    plot_path : str, optional
        Destination for diagnostic plots; None disables saving.
    zscaler : optional
        Display interval scaler; defaults to a ZScaleInterval.
    cmap : str, optional
        Matplotlib colormap name; defaults to 'Greys_r'.
    """
    # read CCD frame
    self.ccd = CCDData.read(filename, **read_kwargs)
    self.filename = filename
    self._filename_base = path.splitext(path.basename(self.filename))[0]
    self._obj_name = self.ccd.header['OBJECT']
    # CCD properties
    self.ccd_gain = ccd_gain
    self.ccd_readnoise = ccd_readnoise
    self.oscan_idx = oscan_idx
    self.oscan_size = oscan_size
    # Plot settings
    self.plot_path = plot_path
    if zscaler is None:
        zscaler = ZScaleInterval(32768, krej=5., max_iterations=16)
    self.zscaler = zscaler
    if cmap is None:
        cmap = 'Greys_r'
    self.cmap = cmap
def __call__(self, *args, **kwargs):
    """Display every file in ``self.file_list`` as image or spectrum."""
    for file_name in self.file_list:
        ccd = CCDData.read(file_name, unit='adu')
        # 'light' selects matplotlib defaults; any other value (including
        # 'dark') selects the dark background style.
        style = 'default' if self.args.style == 'light' else 'dark_background'
        plt.style.use(style)
        fig, ax = plt.subplots(figsize=(16, 9))
        fig.canvas.set_window_title(file_name)
        ax.set_title(file_name)
        naxis = ccd.header['NAXIS']
        if naxis == 2:
            # 2-D frame: z-scaled image with a colorbar on the right.
            zlow, zhigh = self.scale.get_limits(ccd.data)
            image = ax.imshow(ccd.data,
                              cmap=self.args.cmap,
                              clim=(zlow, zhigh))
            divider = make_axes_locatable(ax)
            color_axis = divider.append_axes('right', size="3%", pad=0.05)
            fig.colorbar(image, cax=color_axis)
        elif naxis == 1:
            # 1-D frame: wavelength-calibrated spectrum.
            wavelength, intensity = self.wcs.read(ccd=ccd)
            ax.plot(wavelength, intensity)
            ax.set_ylabel('Intensity')
            ax.set_xlabel('Wavelength')
        plt.tight_layout()
        plt.show()
def setUp(self):
    """Master-flat fixture: header, expected names, and a file on disk."""
    # create a master flat
    self.master_flat = CCDData(data=np.ones((100, 100)),
                               meta=fits.Header(),
                               unit='adu')
    self.master_flat.header.set('GRATING', value='RALC_1200-BLUE')
    self.master_flat.header.set('SLIT', value='0.84" long slit')
    self.master_flat.header.set('FILTER2', value='<NO FILTER>')
    self.master_flat.header.set('WAVMODE', value='1200 m2')
    self.master_flat_name = 'master_flat_1200m2.fits'
    # expected master flat to be retrieved by get_best_flat
    self.reference_flat_name = 'master_flat_1200m2_0.84_dome.fits'
    # location of sample flats
    self.flat_path = 'goodman_pipeline/data/test_data/master_flat'
    # Strip letters/quotes/spaces from the SLIT value
    # ('0.84" long slit' -> '0.84') and build a glob-style name base.
    slit = re.sub('[A-Za-z" ]', '', self.master_flat.header['SLIT'])
    self.flat_name_base = re.sub('.fits', '_' + slit + '*.fits',
                                 self.master_flat_name)
    # save a master flat with some random structure.
    self.master_flat_name_norm = 'flat_to_normalize.fits'
    # add a bias level
    self.master_flat.data += 300.
    # add noise
    self.master_flat.data += np.random.random_sample(
        self.master_flat.data.shape)
    # overwrite=False: the test expects this file not to pre-exist.
    self.master_flat.write(os.path.join(self.flat_path,
                                        self.master_flat_name_norm),
                           overwrite=False)
def setUp(self):
    """Create a fake image on disk plus bookkeeping paths for the test."""
    self.fake_image = CCDData(data=np.ones((100, 100)),
                              meta=fits.Header(),
                              unit='adu')
    self.file_name = 'sample_file.fits'
    self.target_non_zero = 4
    self.current_directory = os.getcwd()
    self.full_path = os.path.join(self.current_directory, self.file_name)
    self.parent_file = 'parent_file.fits'
    # Populate the minimal header the code under test expects.
    for keyword, value in [('CCDSUM', '1 1'),
                           ('OBSTYPE', 'OBJECT'),
                           ('GSP_FNAM', self.file_name),
                           ('GSP_PNAM', self.parent_file)]:
        self.fake_image.header.set(keyword,
                                   value=value,
                                   comment='Fake values')
    self.fake_image.write(self.full_path, overwrite=False)
def swarp(hdus, reference_hdu, rate, hdu_idx=None, stacking_mode="MEAN"):
    """
    Use the WCS to project all images onto the grid of 'reference_hdu',
    shifting the CRVAL of each image by rate*dt.

    :param stacking_mode: what process to use for combining images MEAN or MEDIAN
    :param hdu_idx: which HDU in each HDUList listed in hdus is the ImageData in?
    :param hdus: list of HDUList
    :param reference_hdu: reference HDUList in hdus
    :param rate: dictionary with the ra/dec shift rates ('dra', 'ddec').
    :return: fits.HDUList when rate is given, otherwise the list of
        projected CCDData frames.
    """
    # Project the input images to the same grid using interpolation
    if stacking_mode not in ['MEDIAN', 'MEAN']:
        logging.warning(
            f'{stacking_mode} not available for swarp stack. Setting to MEAN')
        stacking_mode = 'MEAN'
    if hdu_idx is None:
        hdu_idx = HSC_HDU_MAP
    reference_date = mid_exposure_mjd(reference_hdu[0])
    # The reference WCS lives in extension 1; HDUList itself has no
    # .header attribute (the original WCS(reference_hdu.header) raised
    # AttributeError).
    reference_wcs = WCS(reference_hdu[1].header)
    stack_input = []
    logging.info(f'stacking at rate/angle set: {rate}')
    ccd_data = {}
    for hdu in hdus:
        wcs_header = hdu[1].header.copy()
        dt = (mid_exposure_mjd(hdu[0]) - reference_date)
        if rate is not None:
            # Shift the projection centre by the motion rate times the
            # time offset from the reference exposure.
            wcs_header['CRVAL1'] += (rate['dra'] * dt)
            wcs_header['CRVAL2'] += (rate['ddec'] * dt)
        for layer in hdu_idx:
            data = hdu[hdu_idx[layer]].data
            if layer == 'variance':
                data = VarianceUncertainty(data)
            elif layer == 'mask':
                data = bitfield_to_boolean_mask(data,
                                                ignore_flags=STACK_MASK,
                                                flip_bits=True)
            ccd_data[layer] = data
        logging.info(f'Adding {hdu[0]} to projected stack.')
        stack_input.append(
            wcs_project(CCDData(ccd_data['image'],
                                mask=ccd_data['mask'],
                                header=wcs_header,
                                wcs=WCS(wcs_header),
                                unit='adu',
                                uncertainty=ccd_data['variance']),
                        reference_wcs))
        logging.debug(f'{stack_input[-1].header}')
    if rate is not None:
        combiner = Combiner(stack_input)
        if stacking_mode == 'MEDIAN':
            stacked_image = combiner.median_combine()
        else:
            stacked_image = combiner.average_combine()
        # PrimaryHDU takes a Header, not an HDU: pass the reference
        # primary *header* (the original passed reference_hdu[0]).
        return fits.HDUList([
            fits.PrimaryHDU(header=reference_hdu[0].header),
            fits.ImageHDU(data=stacked_image.data,
                          header=reference_hdu[1].header)])
    else:
        return stack_input
def photometry(path, basename, db, config, logger):
    """Master Photometry Program.

    Decides which stages (background, catalog photometry, calibration)
    need to run for one image — based on the obs database and on-disk
    products — then runs them in order.

    Parameters
    ----------
    path, basename : str
        Directory and file stem; the image is <path>/<basename>.fits.
    db : database connection exposing ``execute``.
    config : options object (reprocess_* flags, masking settings).
    logger : logging.Logger
    """
    imf = os.sep.join((path, basename)) + '.fits'
    bgf = imf.replace('.fits', '.bg.fits')
    catf = imf.replace('.fits', '.cat.fits')

    # Background check: rerun when never measured, forced, or when the
    # background product is missing.
    row = db.execute(
        ''' SELECT count() FROM obs WHERE bg IS NOT null AND filename=? ''',
        [basename]).fetchone()
    count = row[0]
    run_bg = any((count == 0, config.reprocess_bg, not os.path.exists(bgf)))

    # Photometry check: photometry must rerun whenever the background
    # did; calibration whenever photometry did.
    run_phot = any((run_bg, not os.path.exists(catf), config.reprocess_phot))
    run_cal = any((run_phot, config.reprocess_cal))
    if not any((run_bg, run_phot, run_cal)):
        return

    logger.info(basename)
    ccd = border_mask(CCDData.read(imf), config)
    # Mask NaNs so they are excluded from all measurements.
    ccd.mask[np.isnan(ccd.data)] = True

    if run_bg:
        background.background(ccd, bgf, db, config, logger)
    if run_phot:
        catalog(ccd, bgf, catf, db, config, logger)
    if run_cal:
        calibrate(ccd, basename, imf, catf, db, config, logger)
def fits2CCDData(lfits, key_unit='BUNIT', key_file='FILENAME', unit=None, single=False):
    """Read FITS file(s) into CCDData objects, resolving units from headers.

    Parameters
    ----------
    lfits : str or list of str
        FITS file name(s); a bare string is treated as a one-item list.
    key_unit : str
        Header keyword holding the data unit (used when ``unit`` is None).
    key_file : str
        Header keyword stamped with the base file name when absent.
    unit : astropy unit, optional
        Fallback unit applied when the header provides none.
    single : bool
        When True and exactly one file was read, return it bare instead
        of wrapped in a list.

    Returns
    -------
    list of CCDData, a single CCDData, or None when nothing was read.
    """
    lccd = []
    if not isinstance(lfits, (tuple, list)):
        lfits = [lfits]
    # Loop variable renamed from "fits", which shadowed the module name.
    for fname in lfits:
        fits_unit = unit
        if os.path.exists(fname):
            hdr = pyfits.getheader(fname)
        else:
            print('>>> WARNING: File "%s" NOT found' % os.path.basename(fname))
            continue
        if key_unit is not None and key_unit in hdr:
            try:
                # Parse the unit string safely; the original used
                # eval('u.%s' % ...), which executes arbitrary header
                # contents as Python.
                fits_unit = u.Unit(hdr[key_unit])
            except Exception:
                pass
        if fits_unit is None:
            if key_unit is not None:
                sys.exit('>>> Units NOT found in header ("%s") of image "%s". Specify one in "unit" variable' % (key_unit, os.path.basename(fname)))
            else:
                print('>>> WARNING: "key_unit" not specified')
        ccd = CCDData.read(fname, unit=fits_unit)
        if key_file is not None and key_file not in ccd.header:
            ccd.header[key_file] = os.path.basename(fname)
        lccd.append(ccd)
    if len(lccd) == 0:
        print('>>> WARNING: NO files found!')
        return
    if single and len(lccd) == 1:
        lccd = lccd[0]
    return lccd
def reduce(filename):
    """POST header metadata of *filename* to the files API endpoint.

    Returns a human-readable status string.  NOTE(review): the return
    placement (inside the isfile branch) is assumed from the flattened
    source — confirm whether a missing file should return None.
    """
    if os.path.isfile(filename):
        ccd = CCDData.read(filename, unit='adu')
        # Payload: file name plus a fixed set of header keywords.
        raw_data = {}
        raw_data["file_name"] = os.path.basename(filename)
        raw_data["date"] = ccd.header['DATE']
        raw_data["ut_time"] = ccd.header['UT']
        raw_data["instrument"] = ccd.header['INSTRUME']
        raw_data["camera"] = ccd.header['INSTCONF']
        raw_data["object"] = ccd.header['OBJECT']
        raw_data["obstype"] = ccd.header['OBSTYPE']
        raw_data["ra"] = ccd.header['RA']
        raw_data["dec"] = ccd.header['DEC']
        raw_data['airmass'] = ccd.header['AIRMASS']
        raw_data['seeing'] = ccd.header['SEEING']
        raw_data['filter'] = ccd.header['FILTER']
        raw_data['filter2'] = ccd.header['FILTER2']
        raw_data['grating'] = ccd.header['GRATING']
        raw_data['slit'] = ccd.header['SLIT']
        raw_data['wavmode'] = ccd.header['WAVMODE']
        raw_data['exptime'] = ccd.header['EXPTIME']
        json_data = json.dumps(raw_data)
        post_header = {"Content-type": "application/json"}
        r = requests.post('http://api:8080/api/files',
                          data=json_data,
                          headers=post_header)
        print(r.status_code, r.reason)
        print(json_data)
        return "Reducing {}".format(filename)
def reduceframes(img_dir="../Data/20181207/imgs", mast_dir=".", mast_cal_dir=False):
    """Remove bias/dark, subtract sky background and normalise by exposure.

    Calibrated frames are written to ``mast_dir``/frames with a
    '-calibrated.FIT' suffix.

    Parameters
    ----------
    img_dir : str
        Directory with raw science frames.
    mast_dir : str
        Output root; default location of the master calibrations.
    mast_cal_dir : str or False
        Alternate master-calibration directory; falls back to
        ``mast_dir`` when falsy.

    Returns
    -------
    bool
        True on success, False when master calibrations are missing.
    """
    # Get master bias and dark frames.  (Python-2 `print` statements
    # converted to the print() function used elsewhere in this file.)
    if not mast_cal_dir:
        mast_cal_dir = mast_dir
    if not os.path.isfile(mast_cal_dir + "/master/master_bias.FIT"):
        print("No master bias file")
        return False
    if not os.path.isfile(mast_cal_dir + "/master/master_dark.FIT"):
        print("No master dark file")
        return False
    master_bias = CCDData.read(mast_cal_dir + "/master/master_bias.FIT")
    master_dark = CCDData.read(mast_cal_dir + "/master/master_dark.FIT")
    # Reduce images
    raw_image_names = gen.getimages(img_dir, filt=[])
    for img in raw_image_names:
        print(img)
        ccd = CCDData.read(img_dir + '/' + img, unit=u.adu)
        ccd = ccdproc.subtract_bias(ccd, master_bias)
        ccd = ccdproc.subtract_dark(
            ccd, master_dark,
            dark_exposure=master_dark.header["EXPTIME"] * u.s,
            data_exposure=ccd.header["EXPTIME"] * u.s, scale=True)
        # Sky level from sigma-clipped stats; subtract and scale to ADU/s.
        # NOTE(review): `iters=` is the deprecated spelling of
        # `maxiters=` in newer astropy — confirm the pinned version.
        mean, background, std = sigma_clipped_stats(ccd.data, sigma=3.0,
                                                    iters=5)
        ccd.data = ccd.data - background
        ccd.data = ccd.data / ccd.header["EXPTIME"]
        ccd.unit = u.adu / u.s
        # Add info about background and raw image name to header
        ccd.header['SKY'] = background
        ccd.header['RAWFILE'] = img
        # Save calibrated frame
        ccd.write(mast_dir + '/frames/' + img[:-4] + '-calibrated.FIT',
                  overwrite=True)
    print("Created all calibrated frames in " + mast_dir + '/frames')
    return True
def setUp(self):
    """Wavelength-calibration fixture: CLI args, object and lamp frames."""
    self.file_list = []
    argument_list = [
        '--data-path', os.getcwd(),
        '--proc-path', os.getcwd(),
        '--search-pattern', 'cfzsto',
        '--output-prefix', 'w',
        '--extraction', 'fractional',
        '--reference-files', 'data/ref_comp',
        '--max-targets', '3',
    ]
    arguments = get_args(argument_list)
    # NOTE(review): "arguments" is parsed but never used; the sibling
    # fixture constructs WavelengthCalibration(args=arguments) — confirm
    # which construction is intended here.
    self.wc = WavelengthCalibration()
    self.ccd = CCDData(data=np.random.random_sample(200),
                       meta=fits.Header(),
                       unit='adu')
    self.ccd = add_wcs_keys(ccd=self.ccd)
    self.ccd.header.set('SLIT',
                        value='1.0_LONG_SLIT',
                        comment="slit [arcsec]")
    self.ccd.header.set('GSP_FNAM',
                        value='some_name.fits',
                        comment='Name of the current file')
    self.ccd.header.set('OBSTYPE',
                        value='SPECTRUM',
                        comment='Obstype')
    self.ccd.header.set('OBJECT',
                        value='An X Object',
                        comment='Some random object name')
    self.ccd.header.set('GSP_FLAT',
                        value='some_flat_file.fits',
                        comment='The name of the flat')
    self.ccd.header.set('CCDSUM', value='1 1', comment='Binning')
    self.ccd.header.set('WAVMODE', value='400 M1', comment='wavmode')
    # The comparison lamp shares the object's header apart from
    # obstype/object.
    self.lamp = self.ccd.copy()
    self.lamp.header.set('OBSTYPE',
                         value='COMP',
                         comment='Comparison lamp obstype')
    self.lamp.header.set('OBJECT', value='HgArNe')
def create_ccd(size=50, scale=1.0, mean=0.0, seed=123):
    """Build a square CCDData frame of Gaussian noise for tests.

    Parameters
    ----------
    size : int
        Edge length of the square frame.
    scale : float
        Standard deviation of the noise.
    mean : float
        Mean level of the noise.
    seed : int
        RNG seed, so the data are reproducible.

    Returns
    -------
    CCDData
        A (size, size) frame in ADU.
    """
    with NumpyRNGContext(seed):
        noise = np.random.normal(loc=mean, size=[size, size], scale=scale)
    return CCDData(noise, unit=u.adu)
def test_fit_linear(self):
    """A linear fit of the recovered lines must yield an astropy Model."""
    path = os.path.join(self.data_path, 'goodman_comp_400M1_HgArNe.fits')
    frame = CCDData.read(path, unit='adu')
    pixel, angstrom = self._recover_lines(ccd=frame)
    fitted = self.wcs.fit(physical=pixel,
                          wavelength=angstrom,
                          model_name='linear')
    self.assertIsInstance(fitted, Model)
def setUp(self):
    """Fake frame carrying the header keys SpectroscopicMode reads."""
    self.sm = SpectroscopicMode()
    self.ccd = CCDData(data=np.ones((800, 2000)),
                       meta=fits.Header(),
                       unit='adu')
    for keyword, value in [('GRATING', 'SYZY_400'),
                           ('CAM_TARG', '16.1'),
                           ('GRT_TARG', '7.5'),
                           ('FILTER2', 'GG455')]:
        self.ccd.header.set(keyword, value=value)
def test_data_classifier_mixed_instconf(self):
    """Mixed INSTCONF values across the data set must make the
    classifier exit."""
    sample_file = os.listdir(self.raw_path)[0]
    full_path = os.path.join(self.raw_path, sample_file)
    frame = CCDData.read(full_path, unit='adu')
    # Force a camera value inconsistent with the rest of the files.
    frame.header['INSTCONF'] = 'Blue'
    frame.write(full_path, overwrite=True)
    with self.assertRaises(SystemExit):
        self.data_classifier(raw_path=self.raw_path)
def test_read__non_linear_legendre(self):
    """A Legendre non-linear solution reads into a Legendre1D model."""
    path = os.path.join(self.data_path,
                        'non-linear_fits_solution_legendre.fits')
    self.assertTrue(os.path.isfile(path))
    frame = CCDData.read(path, unit='adu')
    self.wcs.read(ccd=frame)
    self.assertIsInstance(self.wcs.model, Model)
    self.assertEqual(self.wcs.model.__class__.__name__, 'Legendre1D')
def test_read__non_linear_cspline(self):
    """Cubic-spline solutions are explicitly unsupported."""
    path = os.path.join(self.data_path,
                        'non-linear_fits_solution_cubic-spline.fits')
    self.assertTrue(os.path.isfile(path))
    frame = CCDData.read(path, unit='adu')
    self.assertRaises(NotImplementedError, self.wcs.read, frame)
    self.assertRaisesRegex(NotImplementedError,
                           'Cubic spline is not implemented',
                           self.wcs.read,
                           frame)
def test_read__invalid(self):
    """An unrecognised CTYPE must raise NotImplementedError."""
    path = os.path.join(self.data_path, 'linear_fits_solution.fits')
    self.assertTrue(os.path.isfile(path))
    frame = CCDData.read(path, unit='adu')
    # Corrupt the WCS axis type so the reader cannot recognise it.
    frame.wcs.wcs.ctype[0] = 'INVALID'
    self.assertRaisesRegex(NotImplementedError,
                           'CTYPE INVALID is not recognized',
                           self.wcs.read,
                           frame)
    self.assertRaises(NotImplementedError, self.wcs.read, frame)
def test_read__linear(self):
    """A linear solution reads into a two-element result and a model."""
    path = os.path.join(self.data_path, 'linear_fits_solution.fits')
    self.assertTrue(os.path.isfile(path))
    frame = CCDData.read(path, unit='adu')
    outcome = self.wcs.read(ccd=frame)
    self.assertIsInstance(outcome, list)
    self.assertEqual(len(outcome), 2)
    self.assertIsInstance(self.wcs.get_model(), Model)
def test_read_gsp_wcs(self):
    """Reading the GSP WCS keywords yields a two-element result and a
    model."""
    path = os.path.join(self.data_path, 'goodman_comp_400M1_HgArNe.fits')
    self.assertTrue(os.path.isfile(path))
    frame = CCDData.read(path, unit='adu')
    outcome = self.wcs.read_gsp_wcs(ccd=frame)
    self.assertIsInstance(outcome, list)
    self.assertEqual(len(outcome), 2)
    self.assertIsInstance(self.wcs.get_model(), Model)
def setUp(self):
    """Red-camera frame with gain/readnoise for SaturationValues tests."""
    self.ccd = CCDData(data=np.ones((100, 100)),
                       meta=fits.Header(),
                       unit='adu')
    self.ccd.header.set('INSTCONF', value='Red')
    self.ccd.header.set('GAIN', value=1.48)
    self.ccd.header.set('RDNOISE', value=3.89)
    # Expected half-full-well for this gain/readnoise combination —
    # presumably from the instrument's saturation table (verify).
    self.half_full_well = 69257
    self.saturation_values = SaturationValues(ccd=self.ccd)
def create_plot(mode):
    """Plot every lamp spectrum matching *mode* with its line markers.

    Parameters
    ----------
    mode : str
        Substring used to select lamp files under data/lamps/.
    """
    data_path = os.path.dirname(__import__('goodman_lamps').__file__)
    search_pattern = os.path.join(data_path,
                                  'data/lamps/*{:s}*.fits'.format(mode))
    print(data_path)
    print(search_pattern)
    file_list = glob.glob(search_pattern)
    for _file in file_list:
        print(_file)
    for file_name in file_list:
        fig, ax = plt.subplots(figsize=(20, 7))
        ccd = CCDData.read(file_name, unit='adu')
        line_list = LineList()
        line_list.import_from_file(ccd=ccd)
        wavelength, intensity = goodman_wcs.read_gsp_wcs(ccd=ccd)
        # Leave headroom above the spectrum for the rotated labels.
        top_lim = 1.4 * ccd.data.max()
        bottom_lim = ccd.data.min() - 0.05 * ccd.data.max()
        plt.ylim((bottom_lim, top_lim))
        plt.xlim((wavelength[0], wavelength[-1]))
        ax.set_title('{:s} - {:s}'.format(ccd.header['object'],
                                          ccd.header['wavmode']))
        ax.set_xlabel('Wavelength (Angstrom)')
        ax.set_ylabel('Intensity (ADU)')
        ax.plot(wavelength, intensity)
        # Loop variable renamed from "wavelength": the original clobbered
        # the wavelength array used for the plot limits above.
        for pixel, line_wavelength, spectrum in line_list.lines:
            text = '{:.4f} - {:s}'.format(line_wavelength, spectrum)
            print(text)
            plt.axvline(line_wavelength, alpha=0.1, color='k')
            text_x = line_wavelength
            # Label height: intensity at the nearest pixels to the line.
            text_y = np.max((ccd.data[int(np.floor(pixel))],
                             ccd.data[int(np.ceil(pixel))]))
            y_offset = 0.05 * ccd.data.max()
            plt.text(text_x, text_y + y_offset, text, rotation=90,
                     verticalalignment='bottom',
                     horizontalalignment='center')
        plt.tight_layout()
        plt.show()
    print('END. {}'.format(mode))
def test_write_gsp_wcs(self):
    """Writing a fitted model into a blank header must reproduce the
    GSP keywords of the reference file."""
    test_file = os.path.join(self.data_path,
                             'goodman_comp_400M1_HgArNe.fits')
    ccd = CCDData.read(test_file, unit='adu')
    pixel, angstrom = self._recover_lines(ccd=ccd)
    model = self.wcs.fit(physical=pixel, wavelength=angstrom)
    self.assertIsInstance(model, Model)
    # Start from a frame with no wavelength solution at all.
    blank_ccd = CCDData(data=np.ones(ccd.data.shape),
                        meta=fits.Header(),
                        unit='adu')
    blank_ccd.header.set('GSP_WREJ', value=None, comment='empty')
    new_ccd = self.wcs.write_gsp_wcs(ccd=blank_ccd, model=model)
    # Function/order/npix keywords and every coefficient must match.
    self.assertEqual(new_ccd.header['GSP_FUNC'], ccd.header['GSP_FUNC'])
    self.assertEqual(new_ccd.header['GSP_ORDR'], ccd.header['GSP_ORDR'])
    self.assertEqual(new_ccd.header['GSP_NPIX'], ccd.header['GSP_NPIX'])
    for i in range(model.degree + 1):
        self.assertAlmostEqual(new_ccd.header['GSP_C{:03d}'.format(i)],
                               ccd.header['GSP_C{:03d}'.format(i)])
def test_fit_invalid(self):
    """Requesting an unknown model name must raise NotImplementedError."""
    path = os.path.join(self.data_path, 'goodman_comp_400M1_HgArNe.fits')
    frame = CCDData.read(path, unit='adu')
    pixel, angstrom = self._recover_lines(ccd=frame)
    self.assertRaisesRegex(NotImplementedError,
                           'The model invalid is not implemented',
                           self.wcs.fit,
                           pixel,
                           angstrom,
                           'invalid')
    self.assertRaises(NotImplementedError,
                      self.wcs.fit,
                      pixel,
                      angstrom,
                      'invalid')
def test_fit_chebyshev(self):
    """The default fit reproduces the reference solution stored in the
    GSP header keywords."""
    path = os.path.join(self.data_path, 'goodman_comp_400M1_HgArNe.fits')
    frame = CCDData.read(path, unit='adu')
    pixel, angstrom = self._recover_lines(ccd=frame)
    fitted = self.wcs.fit(physical=pixel, wavelength=angstrom)
    self.assertIsInstance(fitted, Model)
    self.assertEqual(fitted.__class__.__name__, frame.header['GSP_FUNC'])
    self.assertEqual(fitted.degree, frame.header['GSP_ORDR'])
    # Every fitted coefficient must match the stored one.
    for index in range(fitted.degree + 1):
        coefficient = fitted.__getattr__('c{:d}'.format(index)).value
        self.assertAlmostEqual(coefficient,
                               frame.header['GSP_C{:03d}'.format(index)])
def makeMasterBias(images):
    """Make (or load a cached) median-combined master bias.

    Parameters
    ----------
    images : ccdproc.ImageFileCollection
        Collection searched with ``files_filtered(imagetyp=BIAS_KEYWORD)``.

    Returns
    -------
    CCDData or None
        The master bias, or None when no bias frames exist.
    """
    try:
        # Reuse a previously saved master bias if one exists on disk.
        master_bias = CCDData.read('master_bias.fits', unit=u.adu)
        return master_bias
    except FileNotFoundError:
        bias_list = []
        for f in images.files_filtered(imagetyp=BIAS_KEYWORD):
            print(f)
            ccd = CCDData.read(f, unit=u.adu)
            bias_list.append(ccd)
        try:
            master_bias = combine(bias_list, method='median')
            # 'clobber' was removed from astropy; 'overwrite' is the
            # supported keyword (already used elsewhere in this file).
            master_bias.write('master_bias.fits', overwrite=True)
            return master_bias
        except IndexError:
            # combine() raises IndexError on an empty list.
            return None
def create_master_flat(filepath='../../../KeckData/MOSFIRE_FCS/',
                       flatfiles=['m180130_0320.fits',
                                  'm180130_0321.fits',
                                  'm180130_0322.fits',
                                  'm180130_0323.fits',
                                  'm180130_0324.fits',
                                  ],
                       darkfile='m180130_0001.fits',
                       ):
    """Build a sigma-clipped, scaled, median-combined master flat.

    Each flat is dark-subtracted (no exposure scaling — assumes the
    dark matches the flat exposures; TODO confirm), scaled to unit
    average, sigma-clipped, median combined and written to
    'masterflat.fits'.

    NOTE: the mutable list default for ``flatfiles`` is kept for
    backward compatibility; it is never mutated here.
    """
    dark = CCDData.read(os.path.join(filepath, darkfile), unit='adu')
    flats = []
    # The original enumerate() index was unused; plain iteration suffices.
    for file in flatfiles:
        flat = CCDData.read(os.path.join(filepath, file), unit='adu')
        flats.append(flat.subtract(dark))
    flat_combiner = Combiner(flats)
    flat_combiner.sigma_clipping()
    # Scale every frame so its (masked) average is 1 before combining.
    scaling_func = lambda arr: 1 / np.ma.average(arr)
    flat_combiner.scaling = scaling_func
    masterflat = flat_combiner.median_combine()
    masterflat.write('masterflat.fits', overwrite=True)
def __call__(self):
    """Run the tool for all the images matching `search_pattern`"""
    for fits_file in self.file_list:
        print(fits_file)
        self.ccd = CCDData.read(fits_file, unit=u.adu)
        if not self.threads:
            # Identification runs in a worker thread while the plot is
            # created on the main thread; join before writing the file.
            # plot_thread = Thread(target=self._create_plot)
            # plot_thread.start()
            id_thread = Thread(target=self.identify_matching_line)
            id_thread.start()
            # self.identify_matching_line()
            # self.threads.
            self._create_plot()
            id_thread.join()
        # NOTE(review): when self.threads is truthy nothing is computed
        # before the (possibly unmodified) frame is rewritten — confirm
        # that branch is intentional.
        self.ccd.write(fits_file, overwrite=True)
def __init__(self, reference_dir):
    """Interactive line-list cleaner for reference lamp files.

    For every FITS file in *reference_dir* the spectrum is plotted with
    its catalogue lines; mouse clicks (``on_click``) collect the lines
    to keep, which are pickled next to the data.
    """
    self.cleaned_list = []
    reference_data = ReferenceData(reference_dir)
    file_list = glob.glob(os.path.join(reference_dir, '*fits'))
    for lfile in file_list:
        self.fig, self.ax = plt.subplots()
        ccd = CCDData.read(lfile, unit=u.adu)
        read_wavelength = ReadWavelengthSolution(ccd.header, ccd.data)
        wavelength, intensity = read_wavelength()
        self.line_list = reference_data.get_line_list_by_name(
            ccd.header['OBJECT'])
        file_name = lfile.split('/')[-1]
        pickle_file_name = re.sub('.fits', '_list.pkl', file_name)
        manager = plt.get_current_fig_manager()
        manager.window.showMaximized()
        if os.path.isfile(pickle_file_name):
            # A previously cleaned list exists: show it in solid red.
            with open(pickle_file_name, 'rb') as pickled_file:
                line_list = pickle.load(pickled_file)
                for ref_line in line_list:
                    self.ax.axvline(ref_line, color='r', alpha=1)
        else:
            # Otherwise show the raw catalogue lines, faded.
            for ref_line in self.line_list:
                self.ax.axvline(ref_line, color='r', alpha=.4)
        self.ax.set_title(file_name)
        self.ax.plot(wavelength, intensity, color='k')
        self.ax.set_xlabel('Wavelength (Angstrom)')
        self.ax.set_ylabel('Intensity (ADU)')
        self.ax.set_xlim((wavelength[0], wavelength[-1]))
        self.fig.canvas.mpl_connect('button_press_event', self.on_click)
        plt.show()
        # NOTE(review): assumed to run once per file, after the plot
        # window closes (pickle_file_name is per-file); the flattened
        # source is ambiguous about this placement — confirm.
        if self.cleaned_list != []:
            with open(pickle_file_name, 'wb') as list_file:
                pickle.dump(self.cleaned_list, list_file,
                            protocol=pickle.HIGHEST_PROTOCOL)
def setUp(self):
    """Create the WavelengthCalibration under test plus a synthetic
    1-D spectrum (200 random ADU samples) with WCS keys and a SLIT
    keyword."""
    cli_args = [
        '--data-path', os.getcwd(),
        '--proc-path', os.getcwd(),
        '--search-pattern', 'cfzsto',
        '--output-prefix', 'w',
        '--extraction', 'fractional',
        '--reference-files', 'data/ref_comp',
        '--max-targets', '3',
    ]
    self.wc = WavelengthCalibration(args=get_args(cli_args))

    fake_spectrum = CCDData(data=np.random.random_sample(200),
                            meta=fits.Header(),
                            unit='adu')
    self.ccd = add_wcs_keys(ccd=fake_spectrum)
    self.ccd.header.set('SLIT',
                        value='1.0" long slit',
                        comment="slit [arcsec]")
def dq_ccd_insert(filename, sdb):
    """Insert CCD information into the database

    Measures sigma-clipped background statistics in a 200x200 box on the
    image and inserts (or updates) a row in PipelineDataQuality_CCD.

    Parameters
    ----------
    filename: str
       Raw file name

    sdb: sdb_user.mysql
       Connection to the sdb database
    """
    logic = "FileName='%s'" % os.path.basename(filename)
    FileData_Id = sdb.select('FileData_Id', 'FileData', logic)[0][0]

    # only extension 0 is handled here
    i = 0
    record = sdb.select('FileData_Id', 'PipelineDataQuality_CCD',
                        'FileData_Id=%i and Extension=%i' % (FileData_Id, i))
    # an existing row means UPDATE instead of INSERT
    update = bool(record)

    # lets measureme the statistics in a 200x200 box in each image
    struct = CCDData.read(filename, unit='adu')
    my, mx = struct.data.shape
    dx1 = int(mx * 0.5)
    dx2 = min(mx, dx1 + 200)
    dy1 = int(my * 0.5)
    dy2 = min(my, dy1 + 200)
    # NOTE(review): `iters` was renamed `maxiters` in newer astropy —
    # confirm the pinned astropy version before changing it here.
    mean, med, sig = stats.sigma_clipped_stats(
        struct.data[dy1:dy2, dx1:dx2], sigma=5, iters=5)

    # overscan statistics are not currently measured
    omean = None
    orms = None

    # Assemble "key=value" fragments and join with commas exactly once.
    # (The previous string-concatenation approach produced a stray double
    # comma on the insert path whenever `sig` was None.)
    fields = []
    if omean is not None:
        fields.append('OverscanMean=%s' % omean)
    if orms is not None:
        fields.append('OverscanRms=%s' % orms)
    if mean is not None:
        fields.append('BkgdMean=%f' % mean)
    if sig is not None:
        fields.append('BkgdRms=%f' % sig)

    if update:
        sdb.update(','.join(fields), 'PipelineDataQuality_CCD',
                   'FileData_Id=%i and Extension=%i' % (FileData_Id, i))
    else:
        fields.append('FileData_Id=%i' % FileData_Id)
        fields.append('Extension=%i' % i)
        sdb.insert(','.join(fields), 'PipelineDataQuality_CCD')
def aller_test(image_list):
    """Read each image, plot its normalized spectrum, then show two
    zoomed panels around 4822A and 4956A for every spectrum.

    Returns the list of [wavelength, normalized intensity, header]
    entries, one per input file.
    """
    wcs = WCS()
    assert isinstance(wcs, WCS)

    all_data = []
    all_dates = []
    for file_name in image_list:
        ccd = CCDData.read(file_name, unit=u.adu)
        print("Master Flat used: {:s}".format(ccd.header['GSP_FLAT']))
        print("{:s} : {:s}".format(ccd.header['DATE-OBS'], file_name))
        # print(ccd.header["GSP_*"])
        wav, intens = wcs.read(ccd=ccd)
        normalized = normalize_data(intens)
        all_data.append([wav, normalized, ccd.header])
        all_dates.append(ccd.header['DATE'])

        plt.title(file_name)
        plt.plot(wav, normalized)
        plt.xlabel("Wavelength (Angstrom)")
        plt.ylabel("Intensity (ADU)")
        plt.show()

    fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    plt.title("Normalized Spectrum")
    ax1.axvline(4822.3787, label='4822.3787A', color='k',
                linestyle='--', alpha=0.7)
    ax2.axvline(4955.91, label='4955.91A', color='k',
                linestyle='--', alpha=0.7)

    for (spec_wav, spec_norm, _hdr), date_label in zip(all_data, all_dates):
        ax1.plot(spec_wav, spec_norm, label=date_label)
        ax1.set_xlim(4817, 4826)
        # ax1.axvline(4822.3787, label='4822.3787A', color='k', linestyle='--')
        ax2.plot(spec_wav, spec_norm, label=date_label)
        ax2.set_xlim(4951, 4958)

    ax1.set_xlabel("Wavelength (Angstrom)")
    ax1.set_ylabel("Intensity (ADU)")
    ax1.legend(loc='best')
    plt.legend(loc='best')
    plt.show()
    return all_data
def wave2Dfit(ccd, order_frame, soldir, outfile):
    """Launch the interactive 2-D wavelength-fitting GUI for a spectrum.

    Parameters
    ----------
    ccd : CCDData
        Extracted HRS spectrum.
    order_frame : CCDData
        Frame mapping pixels to spectral orders.
    soldir : str
        Directory containing the wavelength calibration solutions.
    outfile : str
        File name the GUI writes the fitted spectrum to.

    Returns
    -------
    bool
        Always True once the GUI window is closed.
    """
    # reference arc-line samples (position, wavelength, order)
    l_xarr, l_warr, l_oarr = read_arclines(ccd, order_frame, soldir)
    # extracted spectrum samples (position, flux, order)
    xarr, farr, oarr = read_fits(ccd, order_frame, soldir)
    # spin up a Qt application; exec_() blocks until the window closes
    fitting_app = QtGui.QApplication(sys.argv)
    fitting_window = FitWavelengthWindow(l_xarr, l_warr, l_oarr,
                                         xarr, farr, oarr, outfile)
    fitting_window.raise_()
    fitting_app.exec_()
    fitting_app.deleteLater()
    return True


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("spectrum_fits",
                        help="Fits file with an extracted HRS spectrum",
                        type=str)
    parser.add_argument("order_frame", help="HRS order frame", type=str)
    parser.add_argument("calibration_folder",
                        help="Path to the lr/mr/hr calibration folder",
                        type=str)
    args = parser.parse_args()

    ccd = CCDData.read(args.spectrum_fits)
    order_frame = CCDData.read(args.order_frame, unit=u.adu)
    soldir = args.calibration_folder
    # derive the output name from the input spectrum file
    outfile = (args.spectrum_fits).replace('.fits', '_spec.fits')
    wave2Dfit(ccd, order_frame, soldir, outfile)
class WavelengthCalibrationTests(TestCase):
    """Unit tests for the WavelengthCalibration tool.

    setUp builds the tool from command-line style arguments and a
    synthetic 1-D spectrum; individual tests exercise the public and
    private calibration helpers.
    """

    def setUp(self):
        # command-line style arguments used to construct the tool under test
        argument_list = ['--data-path', os.getcwd(),
                         '--proc-path', os.getcwd(),
                         '--search-pattern', 'cfzsto',
                         '--output-prefix', 'w',
                         '--extraction', 'fractional',
                         '--reference-files', 'data/ref_comp',
                         '--max-targets', '3',
                         ]
        arguments = get_args(argument_list)
        self.wc = WavelengthCalibration(args=arguments)
        # synthetic 1-D spectrum: 200 random samples in ADU
        self.ccd = CCDData(data=np.random.random_sample(200),
                           meta=fits.Header(),
                           unit='adu')
        self.ccd = add_wcs_keys(ccd=self.ccd)
        self.ccd.header.set('SLIT',
                            value='1.0" long slit',
                            comment="slit [arcsec]")

    def test_add_wavelength_solution(self):
        # fabricate a linear wavelength axis and verify the FITS keywords
        # written by add_wavelength_solution
        self.wc.rms_error = 0.1
        self.wc.n_points = 100
        self.wc.n_rejections = 2
        self.wc.calibration_lamp = 'non-existent.fits'
        crval1 = 3977.948
        npix = 4060
        cdelt = 0.9910068
        x_axis = np.linspace(crval1, crval1 + cdelt * npix, npix)
        self.ccd = self.wc.add_wavelength_solution(ccd=self.ccd,
                                                   x_axis=x_axis)
        self.assertEqual(self.ccd.header['CTYPE1'], 'LINEAR')
        self.assertEqual(self.ccd.header['CRVAL1'], crval1)
        self.assertEqual(self.ccd.header['CRPIX1'], 1)
        self.assertAlmostEqual(self.ccd.header['CDELT1'], cdelt, places=3)
        self.assertEqual(self.ccd.header['DCLOG1'],
                         'REFSPEC1 = {:s}'.format(self.wc.calibration_lamp))
        self.assertEqual(self.ccd.header['GSP_WRMS'], self.wc.rms_error)
        self.assertEqual(self.ccd.header['GSP_WPOI'], self.wc.n_points)
        self.assertEqual(self.ccd.header['GSP_WREJ'], self.wc.n_rejections)

    @skip
    def test_automatic_wavelength_solution(self):
        pass

    def test__bin_reference_data(self):
        # binning by factor i must shrink the arrays to floor(len / i)
        wavelength = np.linspace(3000, 7000, 4000)
        intensity = np.random.random_sample(4000)
        for i in range(1, 4):
            self.wc.serial_binning = i
            new_wavelength, new_intensity = self.wc._bin_reference_data(
                wavelength=wavelength, intensity=intensity)
            self.assertEqual(len(wavelength), len(intensity))
            self.assertEqual(len(new_wavelength), len(new_intensity))
            self.assertEqual(len(new_wavelength),
                             np.floor(len(wavelength) / i))

    @skip
    def test__cross_correlation(self):
        # synthetic comb of Gaussian lines shifted by a random offset;
        # the cross correlation must recover that offset for every slit
        self.wc.lamp = self.ccd.copy()
        self.wc.serial_binning = 1
        x_axis = np.arange(0, 4060, 1)
        reference = np.zeros(4060)
        gaussian = models.Gaussian1D(stddev=2)
        for i in sorted(np.random.choice(x_axis, 30)):
            gaussian.mean.value = i
            reference += gaussian(x_axis)
        offset = np.random.choice(range(1, 15), 1)[0]
        for slit in [1, 2, 3, 4, 5]:
            new_array = np.append(reference[offset:], np.zeros(offset))
            if slit > 3:
                # wide slits are smoothed with a box kernel
                # (width presumably in pixels at 0.15"/pix — confirm)
                box_kernel = Box1DKernel(width=slit / 0.15)
                new_array = convolve(new_array, box_kernel)
            self.assertEqual(len(reference), len(new_array))
            self.wc.lamp.header['SLIT'] = '{:d}.0" long slit'.format(slit)
            correlation_value = self.wc._cross_correlation(
                reference=reference, new_array=new_array)
            self.assertEqual(correlation_value, offset)

    def test__evaluate_solution(self):
        # 10 identical residuals, 3 of them masked out
        differences = np.array([0.5] * 10)
        clipped_differences = np.ma.masked_array(differences,
                                                 mask=[0, 0, 1, 0, 0,
                                                       1, 0, 0, 1, 0])
        rms_error, n_points, n_rej = self.wc._evaluate_solution(
            clipped_differences=clipped_differences)
        self.assertEqual(rms_error, 0.5)
        self.assertEqual(n_points, 10)
        self.assertEqual(n_rej, 3)

    @skip
    def test__get_lines_in_lamp(self):
        pass

    def test__get_spectral_characteristics(self):
        # needs grating/camera geometry keywords in the lamp header
        self.ccd.header.set('GRATING', 'SYZY_400')
        self.ccd.header.set('GRT_ANG', 7.5)
        self.ccd.header.set('CAM_ANG', 16.1)
        self.ccd.header.set('CCDSUM', '1 1')
        self.wc.lamp = self.ccd.copy()
        spec_charact = self.wc._get_spectral_characteristics()
        self.assertIsInstance(spec_charact, dict)
        self.assertEqual(len(spec_charact), 7)

    def test_get_wsolution(self):
        # returns None until a wavelength solution model is assigned
        self.assertIsNone(self.wc.get_wsolution())
        self.wc.wsolution = models.Chebyshev1D(degree=2)
        self.wc.wsolution.c0.value = 3977.9485
        self.wc.wsolution.c1.value = 1.00153387
        self.wc.wsolution.c2.value = -1.2891437
        self.assertIsInstance(self.wc.get_wsolution(), Model)
def correctData(filename, master_bias, master_flat, filetype):
    """
    Correct a science image using the available master calibrations.
    Skip a calibration step if the master frame does not exist.

    No reduced file is written in this new scheme.
    Instead, the corrected data is passed directly to the
    phot() routine, photometry is done as per the configuration
    and the photometry is written out only.

    For 'science' frames the mid-exposure time is computed and BJD/HJD/JD
    keywords are added to the header before the trimmed file is written.

    TODO: Finish docstring
    """
    print('Reducing {0:s}...'.format(filename))
    with fits.open(filename) as fitsfile:
        # correct times for science spectra,
        # don't bother for arcs
        hdr = fitsfile[0].header
        if filetype == 'science':
            half_exptime = hdr[EXPTIME_KEYWORD]/2.
            utstart = hdr[UTSTART_KEYWORD]
            dateobs = hdr[DATEOBS_KEYWORD]
            ra = hdr[RA_KEYWORD]
            dec = hdr[DEC_KEYWORD]
            time_start = Time('{}T{}'.format(dateobs, utstart),
                              scale='utc',
                              format='isot',
                              location=OBSERVATORY)
            # correct to mid exposure time
            jd_mid = time_start + half_exptime*u.second
            ltt_bary, ltt_helio = getLightTravelTimes(ra, dec, jd_mid)
            # barycentric time on the TDB scale, heliocentric on UTC
            time_bary = jd_mid.tdb + ltt_bary
            time_helio = jd_mid.utc + ltt_helio
            hdr['BJD-MID'] = time_bary.jd
            hdr['HJD-MID'] = time_helio.jd
            hdr['JD-MID'] = jd_mid.jd
            hdr['UT-MID'] = jd_mid.isot
    ccd = CCDData.read(filename, unit=u.adu)
    if master_bias:
        ccd = subtract_bias(ccd, master_bias)
    else:
        print('No master bias, skipping correction...')
    if master_flat:
        ccd = flat_correct(ccd, master_flat)
    else:
        print('No master flat, skipping correction...')
    # after calibrating we get np.float64 data
    # if there are no calibrations we maintain dtype = np.uint16
    # sep weeps
    # fix this by doing the following
    if ccd.data.dtype == np.uint16:
        ccd.data = ccd.data.astype(np.float64)
    # trim the data
    ccd_trimmed = trim_image(ccd[1000:3001, :])
    # write out the trimmed file and the updated header
    #ccd_trimmed.write(filename, hdr, clobber=True)
    trimmed_filename = '{}_t.fits'.format(filename.split('.')[0])
    fits.writeto(trimmed_filename, ccd_trimmed.data, hdr)
    # remove the old untrimmed data.
    # os.remove replaces the previous `os.system('rm ...')`: no shell is
    # involved, so filenames with spaces/metacharacters are safe and the
    # code is portable beyond POSIX.
    os.remove(filename)
class MasterFlatTest(TestCase):
    """Tests for master-flat retrieval (get_best_flat) and normalization
    (normalize_master_flat).

    setUp fabricates a 100x100 master flat with realistic header
    keywords, adds a bias level plus noise, and writes it to disk;
    tearDown removes everything that was written.
    """

    def setUp(self):
        # create a master flat
        self.master_flat = CCDData(data=np.ones((100, 100)),
                                   meta=fits.Header(),
                                   unit='adu')
        self.master_flat.header.set('GRATING', value='RALC_1200-BLUE')
        self.master_flat.header.set('SLIT', value='0.84" long slit')
        self.master_flat.header.set('FILTER2', value='<NO FILTER>')
        self.master_flat.header.set('WAVMODE', value='1200 m2')
        self.master_flat_name = 'master_flat_1200m2.fits'
        # expected master flat to be retrieved by get_best_flat
        self.reference_flat_name = 'master_flat_1200m2_0.84_dome.fits'
        # location of sample flats
        self.flat_path = 'goodman_pipeline/data/test_data/master_flat'
        # strip letters/quotes/spaces from the SLIT value, e.g.
        # '0.84" long slit' -> '0.84', to build the wildcard pattern
        slit = re.sub('[A-Za-z" ]', '', self.master_flat.header['SLIT'])
        self.flat_name_base = re.sub('.fits', '_' + slit + '*.fits',
                                     self.master_flat_name)
        # save a master flat with some random structure.
        self.master_flat_name_norm = 'flat_to_normalize.fits'
        # add a bias level
        self.master_flat.data += 300.
        # add noise
        self.master_flat.data += np.random.random_sample(
            self.master_flat.data.shape)
        self.master_flat.write(os.path.join(self.flat_path,
                                            self.master_flat_name_norm),
                               overwrite=False)

    def tearDown(self):
        # the flat written in setUp must still exist, then be removed
        full_path = os.path.join(self.flat_path,
                                 self.master_flat_name_norm)
        self.assertTrue(os.path.isfile(full_path))
        if os.path.isfile(full_path):
            os.unlink(full_path)
        self.assertFalse(os.path.isfile(full_path))
        # remove normalized flat
        norm_flat = re.sub('flat_to_', 'norm_flat_to_', full_path)
        if os.path.isfile(norm_flat):
            os.unlink(norm_flat)
        self.assertFalse(os.path.isfile(norm_flat))

    def test_get_best_flat(self):
        # print(self.flat_name_base)
        # the wildcard pattern must resolve to the expected dome flat
        master_flat, master_flat_name = get_best_flat(
            flat_name=self.flat_name_base, path=self.flat_path)
        self.assertIsInstance(master_flat, CCDData)
        self.assertEqual(os.path.basename(master_flat_name),
                         self.reference_flat_name)

    def test_get_best_flat_fail(self):
        # Introduce an error that will never produce a result.
        wrong_flat_name = re.sub('1200m2', '1300m2', self.flat_name_base)
        master_flat, master_flat_name = get_best_flat(
            flat_name=wrong_flat_name, path=self.flat_path)
        self.assertIsNone(master_flat)
        self.assertIsNone(master_flat_name)

    def test_normalize_master_flat(self):
        # every supported method must bring the mean close to 1 and
        # record itself in the GSP_NORM keyword
        methods = ['mean', 'simple', 'full']
        for method in methods:
            self.assertNotAlmostEqual(self.master_flat.data.mean(), 1.)
            normalized_flat, normalized_flat_name = normalize_master_flat(
                master=self.master_flat,
                name=os.path.join(self.flat_path,
                                  self.master_flat_name_norm),
                method=method)
            self.assertAlmostEqual(normalized_flat.data.mean(), 1.,
                                   delta=0.001)
            self.assertEqual(normalized_flat.header['GSP_NORM'], method)
            self.assertIn('norm_', normalized_flat_name)
class FitsFileIOAndOps(TestCase):
    """Tests for FITS read/write helpers and image operations
    (write_fits, read_fits, image_overscan, image_trim, save_extracted).

    setUp writes a 100x100 unit-valued fake image with the header
    keywords the helpers rely on; tearDown removes every file written.
    """

    def setUp(self):
        self.fake_image = CCDData(data=np.ones((100, 100)),
                                  meta=fits.Header(),
                                  unit='adu')
        self.file_name = 'sample_file.fits'
        self.target_non_zero = 4
        self.current_directory = os.getcwd()
        self.full_path = os.path.join(self.current_directory, self.file_name)
        self.parent_file = 'parent_file.fits'
        # keywords required by the helpers under test
        self.fake_image.header.set('CCDSUM',
                                   value='1 1',
                                   comment='Fake values')
        self.fake_image.header.set('OBSTYPE',
                                   value='OBJECT',
                                   comment='Fake values')
        self.fake_image.header.set('GSP_FNAM',
                                   value=self.file_name,
                                   comment='Fake values')
        self.fake_image.header.set('GSP_PNAM',
                                   value=self.parent_file,
                                   comment='Fake values')
        self.fake_image.write(self.full_path, overwrite=False)

    def test_write_fits(self):
        # remove the file from setUp, then re-create it via write_fits
        self.assertTrue(os.path.isfile(self.full_path))
        os.remove(self.full_path)
        write_fits(ccd=self.fake_image,
                   full_path=self.full_path,
                   parent_file=self.parent_file,
                   overwrite=False)
        self.assertTrue(os.path.isfile(self.full_path))

    def test_read_fits(self):
        self.recovered_fake_image = read_fits(self.full_path)
        self.assertIsInstance(self.recovered_fake_image, CCDData)

    def test_image_overscan(self):
        data_value = 100.
        overscan_value = 0.1
        # alter overscan region to a lower number
        self.fake_image.data *= data_value
        self.fake_image.data[:, 0:5] = overscan_value
        # FITS-style 1-based section covering the first 5 columns
        overscan_region = '[1:6,:]'
        self.assertEqual(self.fake_image.data[:, 6:99].mean(), data_value)
        self.assertEqual(self.fake_image.data[:, 0:5].mean(), overscan_value)
        self.fake_image = image_overscan(ccd=self.fake_image,
                                         overscan_region=overscan_region)
        # the overscan mean is subtracted from the data region and the
        # region is recorded in GSP_OVER
        self.assertEqual(self.fake_image.data[:, 6:99].mean(),
                         data_value - overscan_value)
        self.assertEqual(self.fake_image.header['GSP_OVER'],
                         overscan_region)

    def test_image_overscan_none(self):
        # no region given: the image is returned unchanged
        new_fake_image = image_overscan(ccd=self.fake_image,
                                        overscan_region=None)
        self.assertEqual(new_fake_image, self.fake_image)

    def test_image_trim(self):
        self.assertEqual(self.fake_image.data.shape, (100, 100))
        trim_section = '[1:50,:]'
        self.fake_image = image_trim(ccd=self.fake_image,
                                     trim_section=trim_section,
                                     trim_type='trimsec')
        # trimming keeps all rows but only the first 50 columns
        self.assertEqual(self.fake_image.data.shape, (100, 50))
        self.assertEqual(self.fake_image.header['GSP_TRIM'], trim_section)

    def test_save_extracted_target_zero(self):
        # target_number == 0: file name keeps only the prefix
        self.fake_image.header.set('GSP_FNAM', value=self.file_name)
        same_fake_image = save_extracted(ccd=self.fake_image,
                                         destination=self.current_directory,
                                         prefix='e',
                                         target_number=0)
        self.assertEqual(same_fake_image, self.fake_image)
        self.assertTrue(os.path.isfile('e' + self.file_name))

    def test_save_extracted_target_non_zero(self):
        # non-zero target number is appended as _target_N to the name
        self.fake_image.header.set('GSP_FNAM', value=self.file_name)
        same_fake_image = save_extracted(ccd=self.fake_image,
                                         destination=self.current_directory,
                                         prefix='e',
                                         target_number=self.target_non_zero)
        self.assertEqual(same_fake_image, self.fake_image)
        self.assertTrue(os.path.isfile(
            'e' + re.sub('.fits',
                         '_target_{:d}.fits'.format(self.target_non_zero),
                         self.file_name)))

    def test_save_extracted_target_zero_comp(self):
        # comparison lamps (OBSTYPE=COMP) follow a different naming path;
        # the resulting name is taken from the updated GSP_FNAM keyword
        self.fake_image.header.set('GSP_FNAM', value=self.file_name)
        self.fake_image.header.set('OBSTYPE', value='COMP')
        self.fake_image.header.set('GSP_EXTR',
                                   value='100.00:101.00')
        same_fake_image = save_extracted(ccd=self.fake_image,
                                         destination=self.current_directory,
                                         prefix='e',
                                         target_number=0)
        self.assertEqual(same_fake_image, self.fake_image)
        self.assertTrue(os.path.isfile(self.fake_image.header['GSP_FNAM']))

    def tearDown(self):
        files_to_remove = [self.full_path,
                           self.fake_image.header['GSP_FNAM']]
        for _file in files_to_remove:
            if os.path.isfile(_file):
                os.unlink(_file)
def sdiff(
    afile,
    bfile,
    yc=roi['yc'],
    dy=roi['dy'],
    bg1=roi['bg1'],
    bg2=roi['bg2'],
    headertxt='5500',
    wc=5500,
    dw=-0.7,
    xsum=1,
    save=None,
    plot=True,
):
    """Compare two spectra by extracting, background-subtracting, binning
    and ratioing them; optionally save a table and/or plot the result.

    NOTE: this function is Python 2 (print statements); do not run under
    Python 3 without porting.

    Parameters (defaults come from the module-level `roi` dict):
      afile, bfile -- FITS files with the reference and comparison images
      yc, dy       -- extraction window rows: [yc-dy, yc+dy)
      bg1, bg2     -- background window rows
      wc, dw       -- wavelength zero point and dispersion per binned pixel
      xsum         -- spectral binning factor (columns per bin)
      save         -- file name for np.savetxt output, or None
      plot         -- show diagnostic plots when True
    """
    # print '--%s %s--' % (afile, bfile)
    accd = CCDData.read(afile)
    bccd = CCDData.read(bfile)
    y1 = yc - dy
    y2 = yc + dy
    bg1 = bg1
    bg2 = bg2
    xbin = xsum
    # extract signal: sum rows of the window after subtracting the
    # column-wise median of the background window
    aspec = (accd.data[y1:y2,:] - np.median(accd.data[bg1:bg2,:], axis=0)).sum(axis=0)
    bspec = (bccd.data[y1:y2,:] - np.median(bccd.data[bg1:bg2,:], axis=0)).sum(axis=0)
    rspec = bspec/aspec
    # apply spectral binning (xbin columns per bin)
    abin = aspec.reshape(-1,xbin)
    bbin = bspec.reshape(-1,xbin)
    rbin = rspec.reshape(-1,xbin)
    # estimate variance from counts per bin
    # sigma-clip data in each bin for variance calc
    #v = np.apply_along_axis(st.sigmaclip, 1, abin, low=3.0, high=3.0)
    #avar = np.array([np.var(x) for x in v[:,0]])
    #v = np.apply_along_axis(st.sigmaclip, 1, bbin, low=3.0, high=3.0)
    #bvar = np.array([np.var(x) for x in v[:,0]])
    # estimate variance from ratio per bin
    c = np.apply_along_axis(st.sigmaclip, 1, rbin, low=3.0, high=3.0)
    rvar = 2.0/xbin*np.array([np.var(x) for x in c[:,0]])
    # perform sigma clipping on bins, then average the surviving samples
    c = np.apply_along_axis(st.sigmaclip, 1, abin, low=3.0, high=3.0)
    aspec = np.array([np.mean(x) for x in c[:,0]])
    c = np.apply_along_axis(st.sigmaclip, 1, bbin, low=3.0, high=3.0)
    bspec = np.array([np.mean(x) for x in c[:,0]])
    #aspec = abin.mean(axis=1)
    #bspec = bbin.mean(axis=1)
    rspec = bspec/aspec
    # extract variance
    # accdv = np.nan_to_num(accd.uncertainty.array)
    # bccdv = np.nan_to_num(bccd.uncertainty.array)
    # avar = accdv[y1:y2,:].sum(axis=0)
    # bvar = bccdv[y1:y2,:].sum(axis=0)
    # avar = avar.reshape(-1,xbin).mean(axis=1)
    # bvar = bvar.reshape(-1,xbin).mean(axis=1)
    # linear wavelength scale for the binned pixels
    # (Python 2: xbin/2 is integer division for int xbin)
    xarr = np.arange(len(aspec))
    warr = (wc + xbin/2) + dw*xbin*xarr
    print 'ave = %f %%' % (rspec.mean()*100.0)
    print 'stdev = %f %%' % (rspec.std()*100.0)
    print 'range = %f %%' % ((rspec.max() - rspec.min())*100.0)
    #print 'S/N = %f / %f = %f' % (aspec.mean(), avar.mean(), aspec.mean()/avar.mean())
    if save:
        # table columns: wavelength, refspec, compspec, ratio, ratio var;
        # rows sorted by wavelength (dw may be negative)
        oarr = np.array([warr, aspec, bspec, rspec, rvar]).T
        oarr = oarr[oarr[:,0].argsort()]
        hdrtxt = ""
        # "\n%s\t%s\t%s\nwavelength [A]\trefspec [counts]\tcompspec [counts]\n" % (headertxt, afile, bfile)
        np.savetxt(save, oarr, fmt="%10e", delimiter="\t", header=hdrtxt)
    if plot:
        # three stacked panels: raw counts, difference, percent deviation
        pl.figure()
        pl.subplot(311)
        pl.plot(warr, aspec)
        pl.plot(warr, bspec)
        pl.ylabel('Counts', size='x-large')
        pl.subplot(312)
        pl.plot(warr, bspec-aspec)
        pl.ylabel('Diff', size='x-large')
        pl.subplot(313)
        pl.plot(warr, rspec*100.0)
        pl.ylabel('Deviation (%)', size='x-large')
        pl.xlabel('Wavelength', size='x-large')
        pl.show()
return shift_dict, iws if __name__=='__main__': import argparse parser = argparse.ArgumentParser(description='Re-identify SALT HRS arc observations') parser.add_argument('infile', help='SALT HRS image') parser.add_argument('n_order', help='Order for the identification') parser.add_argument('--o', dest='order', help='Master order file') parser.add_argument('--sol', dest='soldir', help='Directory containing the solutions', default='./') parser.add_argument('--t', dest='target', help='Force extraction of "upper or "lower" fiber', default=None) args = parser.parse_args() arc = CCDData.read(args.infile) order_frame = CCDData.read(args.order, unit='electron') n_order = int(args.n_order) soldir = args.soldir shift_dict, ws = pickle.load(open(soldir+'sol_%i.pkl' % n_order)) camera_name = arc.header['DETNAM'].lower() arm, xpos, target, res, w_c, y1, y2 = mode_setup_information(arc.header) if args.target: target = args.target print arm, xpos, target target = 'upper' dc_dict, iws = identify(arc, order_frame, n_order, camera_name, xpos, ws=ws, target=target, interp=True, w_c=w_c,