def run(self): """ Runs the data reduction algorithm. The self.datain is run through the code, the result is in self.dataout. """ self.dataout = DataFits(config=self.config) self.dataout = self.datain.copy() self.astrometrymaster() # Update RA/Dec from astrometry self.dataout.header.update(self.wcs_out) try: w = wcs.WCS(self.dataout.header) n1 = float( self.dataout.header['NAXIS1']/2 ) n2 = float( self.dataout.header['NAXIS2']/2 ) ra, dec = w.all_pix2world(n1, n2, 1) # No update because update may affect accuracy of WCS solution # self.dataout.header['CRPIX1']=n1 # self.dataout.header['CRPIX2']=n2 # self.dataout.header['CRVAL1']=float(ra) # self.dataout.header['CRVAL2']=float(dec) self.dataout.header['RA'] = Angle(ra, u.deg).to_string(unit=u.hour, sep=':') self.dataout.header['Dec'] = Angle(dec, u.deg).to_string(sep=':') except: self.log.error('Run: Could not update RA/Dec from Astrometry') else: self.log.debug('Run: Updated RA/Dec from Astrometry') self.log.debug('Run: Done')
def run(self): """ Runs the combining algorithm. The self.datain is run through the code, the result is in self.dataout. """ # Find master bias to subtract from master dark biaslist = self.loadauxname('bias', multi=False) if (len(biaslist) == 0): self.log.error('No bias calibration frames found.') self.bias = ccdproc.CCDData.read(biaslist, unit='adu', relax=True) # Create empy list for filenames of loaded frames filelist = [] for fin in self.datain: self.log.debug("Input filename = %s" % fin.filename) filelist.append(fin.filename) # Make a dummy dataout self.dataout = DataFits(config=self.config) if len(self.datain) == 0: self.log.error('Flat calibration frame not found.') raise RuntimeError('No flat file(s) loaded') self.log.debug('Creating master flat frame...') # Create master frame: if there is just one file, turn it into master bias or else combine all to make master bias if (len(filelist) == 1): self.dark = ccdproc.CCDData.read(filelist[0], unit='adu', relax=True) self.dark = ccdproc.subtract_bias(self.dark, self.bias, add_keyword=False) else: darklist = [] for i in filelist: dark = ccdproc.CCDData.read(i, unit='adu', relax=True) darksubbias = ccdproc.subtract_bias(dark, self.bias, add_keyword=False) darklist.append(darksubbias) self.dark = ccdproc.combine(darklist, method=self.getarg('combinemethod'), unit='adu', add_keyword=True) # set output header, put image into output self.dataout.header = self.datain[0].header self.dataout.imageset(self.dark) # rename output filename outputfolder = self.getarg('outputfolder') if outputfolder != '': outputfolder = os.path.expandvars(outputfolder) self.dataout.filename = os.path.join(outputfolder, os.path.split(filelist[0])[1]) else: self.dataout.filename = filelist[0] # Add history self.dataout.setheadval('HISTORY', 'MasterDark: %d files used' % len(filelist))
def test(self): """ Test Pipe Step Flat Object: Runs basic tests """ # initial log message self.log.info('Testing pipe step flat') # get testin and a configuration if self.config != None and len( self.config) > 2: # i.e. if real config is loaded testin = DataFits(config=self.config) else: testin = DataFits(config=self.testconf) # load sample data datain = DataFits(config=testin.config) #infile = 'mode_chop/120207_000_00HA012.chop.dmd.fits' infile = 'mode_chop/120306_000_00HA006.chop.dmd.fits' #infile = 'mode_chop/120402_000_00HA035.chop.dmd.fits' #infile = 'sharp/sharc2-048485.dmdsqr.fits' testfile = os.path.join(datain.config['testing']['testpath'], infile) #testfile = '/Users/berthoud/testfit.fits' datain.load(testfile) if False: # change data (make complex number array with # Re=0,1,2,3,4,5,6 . . . in time Im=0) dataval = numpy.ones(datain.image.shape) dataval[..., 1] = 0.0 inclist = numpy.arange(dataval.shape[0]) incshape = [1 + i - i for i in dataval.shape[0:-1]] incshape[0] = dataval.shape[0] inclist.shape = incshape dataval[..., 0] = dataval[..., 0] * inclist #datain.image=dataval # run first flat dataout = self(datain) #print dataout.image[100,...] # print 100th image #print dataout.image[range(0,dataval.shape[0],1000),0,0] # 1 val per img dataout.save() # final log message self.log.info('Testing pipe step flat - Done')
def __init__(self): """ Constructor: Initialize data objects and variables """ # call superclass constructor (calls setup) super(StepFlat, self).__init__() # list of data and flats self.datalist = [] # used in run() for every new input data file # flat values self.flatloaded = 0 # indicates if flat has been loaded self.flats = [] # list containing arrays with flat values self.flatdata = DataFits() # Pipedata object containing the flat file # flat file info and header keywords to fit self.flatfile = '' # name of selected flat file self.fitkeys = [] # FITS keywords that have to fit self.keyvalues = [ ] # values of the keywords (from the first data file) # set configuration self.log.debug('Init: done')
def run(self): """ Runs the combining algorithm. The self.datain is run through the code, the result is in self.dataout. """ filelist = [] for fin in self.datain: self.log.debug("Input filename = %s" % fin.filename) filelist.append(fin.filename) # Make a dummy dataout self.dataout = DataFits(config=self.config) if len(self.datain) == 0: self.log.error('Bias calibration frame not found.') raise RuntimeError('No bias file(s) loaded') # self.log.debug('Creating master bias frame...') # if there is just one, use it as biasfile or else combine all to make a master bias if (len(filelist) == 1): self.bias = ccdproc.CCDData.read(filelist[0], unit='adu', relax=True) else: self.bias = ccdproc.combine(filelist, method=self.getarg('combinemethod'), unit='adu', add_keyword=True) # set output header, put image into output self.dataout.header = self.datain[0].header self.dataout.imageset(self.bias) # rename output filename outputfolder = self.getarg('outputfolder') if outputfolder != '': outputfolder = os.path.expandvars(outputfolder) self.dataout.filename = os.path.join(outputfolder, os.path.split(filelist[0])[1]) else: self.dataout.filename = filelist[0] # Add history self.dataout.setheadval('HISTORY', 'MasterBias: %d files used' % len(filelist))
def run(self): """ Runs the combining algorithm. The self.datain is run through the code, the result is in self.dataout. """ ''' Select 3 input dataset to use, store in datause ''' #Store number of inputs num_inputs = len(self.datain) # Create variable to hold input files # Copy input to output header and filename datause = [] self.log.debug('Number of input files = %d' % num_inputs) # Ensure datause has 3 elements irrespective of number of input files if num_inputs == 0: # Raise exception for no input raise ValueError('No input') elif num_inputs == 1: datause = [self.datain[0], self.datain[0], self.datain[0]] elif num_inputs == 2: datause = [self.datain[0], self.datain[1], self.datain[1]] else: # If inputs exceed 2 in number # Here we know there are at least 3 files ilist = [] # Make empty lists for each filter rlist = [] glist = [] other = [] for element in self.datain: # Loop through the input files and add to the lists fname = element.filename.lower() if 'i-band' in fname or 'iband' in fname or 'iprime' in fname: ilist.append(element) elif 'r-band' in fname or 'rband' in fname or 'rprime' in fname: rlist.append(element) elif 'g-band' in fname or 'gband' in fname or 'gprime' in fname: glist.append(element) else: other.append(element) continue self.log.debug( 'len(ilist) = %d, len(rlist) = %d, len(glist) = %d' % (len(ilist), len(rlist), len(glist))) # If there is at least one i-, r-, and g-band filter found in self.datain (best case) if len(ilist) >= 1 and len(rlist) >= 1 and len(glist) >= 1: # The first image from each filter list will be reduced in the correct order. datause = [ilist[0], rlist[0], glist[0]] elif len(ilist) == 0 and len(rlist) >= 1 and len(glist) >= 1: # Cases where there is no ilist if len(rlist) > len(glist): datause = [rlist[0], rlist[1], glist[0]] else: datause = [rlist[0], glist[0], glist[1]] elif len(glist) == 0 and len(rlist) >= 1 and len(ilist) >= 1: # Cases where there is no glist if len(rlist) > len(ilist): datause = [rlist[0], rlist[1], ilist[0]] else: datause = [rlist[0], ilist[0], ilist[1]] elif len(ilist) == 0 and len(rlist) >= 1 and len(glist) >= 1: # Cases where there is no rlist if len(ilist) > len(glist): datause = [ilist[0], ilist[1], glist[0]] else: datause = [ilist[0], glist[0], glist[1]] elif len(rlist) == 0 and len(glist) == 0: # Case where there is only ilist datause = [ilist[0], ilist[1], ilist[2]] elif len(rlist) == 0 and len(ilist) == 0: # Case where there is only glist datause = [glist[0], glist[1], glist[2]] elif len(ilist) == 0 and len(glist) == 0: # Case where there is only rlist datause = [rlist[0], rlist[1], rlist[2]] self.log.debug( 'Files used: R = %s G = %s B = %s' % (datause[0].filename, datause[1].filename, datause[2].filename)) self.dataout = DataFits(config=self.config) self.dataout.header = datause[0].header self.dataout.filename = datause[0].filename img = datause[0].image img1 = datause[1].image img2 = datause[2].image ''' Finding Min/Max scaling values ''' # Create a Data Cube with floats datacube = numpy.zeros((img.shape[0], img.shape[1], 3), dtype=float) # Enter the image data into the cube so an absolute max can be found datacube[:, :, 0] = img datacube[:, :, 1] = img1 datacube[:, :, 2] = img2 # Find how many data points are in the data cube datalength = img.shape[0] * img.shape[1] * 3 # Create a 1-dimensional array with all the data, then sort it datacube.shape = (datalength, ) datacube.sort() # Now use arrays for each filter to find separate min values rarray = img.copy() garray = img1.copy() barray = img2.copy() # Shape 
and sort the arrays arrlength = img.shape[0] * img.shape[1] rarray.shape = (arrlength, ) rarray.sort() garray.shape = (arrlength, ) garray.sort() barray.shape = (arrlength, ) barray.sort() # Find the min/max percentile values in the data for scaling # Values are determined by parameters in the pipe configuration file minpercent = int(arrlength * self.getarg('minpercent')) maxpercent = int(datalength * self.getarg('maxpercent')) # Find the final data values to use for scaling from the image data rminsv = rarray[minpercent] #sv stands for "scalevalue" gminsv = garray[minpercent] bminsv = barray[minpercent] maxsv = datacube[maxpercent] self.log.info(' Scale min r/g/b: %f/%f/%f' % (rminsv, gminsv, bminsv)) self.log.info(' Scale max: %f' % maxsv) # The same min/max values will be used to scale all filters ''' Finished Finding scaling values ''' ''' Combining Function ''' # Make new cube with the proper data type for color images (uint8) # Use square root (sqrt) scaling for each filter # log or asinh scaling is also available #astropy.vidualizations.SqrtStretch() imgcube = numpy.zeros((img.shape[0], img.shape[1], 3), dtype='uint8') minsv = [rminsv, gminsv, bminsv] for i in range(3): # Make normalization function norm = simple_norm(datause[i].image, 'sqrt', min_cut=minsv[i], max_cut=maxsv) # Apply it imgcube[:, :, i] = norm(datause[i].image) * 255. self.dataout.image = imgcube # Create variable containing all the scaled image data imgcolor = Image.fromarray(self.dataout.image, mode='RGB') # Save colored image as a .tif file (without the labels) imgcolortif = imgcube.copy() imgcolortif.astype('uint16') ### tiff.imsave('%s.tif' % self.dataout.filenamebase, imgcolortif) ''' End of combining function ''' ''' Add a Label to the Image ''' draw = ImageDraw.Draw(imgcolor) # Use a variable to make the positions and size of text relative imgwidth = img.shape[1] imgheight = img.shape[0] # Open Sans-Serif Font with a size relative to the picture size try: # This should work on Linux font = ImageFont.truetype( '/usr/share/fonts/liberation/LiberationSans-Regular.ttf', imgheight // 41) except: try: # This should work on Mac font = ImageFont.truetype('/Library/Fonts/Arial Unicode.ttf', imgheight // 41) except: try: # This should work on Windows font = ImageFont.truetype('C:\\Windows\\Fonts\\arial.ttf', imgheight // 41) except: # This should work in Colab font = ImageFont.truetype( '/usr/share/fonts/truetype/liberation/LiberationSans-Regular.ttf', imgheight // 41) # If this still doesn't work - then add more code to make it run on YOUR system # Use the beginning of the FITS filename as the object name filename = os.path.split(self.dataout.filename)[-1] try: objectname = filename.split('_')[0] objectname = objectname[0].upper() + objectname[1:] except Exception: objectname = 'Unknown.' 
objectname = 'Object: %s' % objectname # Read labels at their respective position (kept relative to image size) # Left corner: object, observer, observatory # Right corner: Filters used for red, green, and blue colors draw.text((imgwidth / 100, imgheight / 1.114), objectname, (255, 255, 255), font=font) # Read FITS keywords for the observer, observatory, and filters if 'OBSERVER' in self.dataout.header: observer = 'Observer: %s' % self.dataout.getheadval('OBSERVER') draw.text((imgwidth / 100, imgheight / 1.073), observer, (255, 255, 255), font=font) if 'OBSERVAT' in self.dataout.header: observatory = 'Observatory: %s' % self.dataout.getheadval( 'OBSERVAT') draw.text((imgwidth / 100, imgheight / 1.035), observatory, (255, 255, 255), font=font) if 'FILTER' in datause[0].header: red = 'R: %s' % datause[0].getheadval('FILTER') draw.text((imgwidth / 1.15, imgheight / 1.114), red, (255, 255, 255), font=font) if 'FILTER' in datause[1].header: green = 'G: %s' % datause[1].getheadval('FILTER') draw.text((imgwidth / 1.15, imgheight / 1.073), green, (255, 255, 255), font=font) if 'FILTER' in datause[2].header: blue = 'B: %s' % datause[2].getheadval('FILTER') draw.text((imgwidth / 1.15, imgheight / 1.035), blue, (255, 255, 255), font=font) # Make image name imgname = self.dataout.filenamebegin if imgname[-1] in '_-,.': imgname = imgname[:-1] imgname += '.jpg' # Save the completed image imgcolor.save(imgname) self.log.info('Saving file %sjpg' % self.dataout.filenamebegin) ''' End of Label Code ''' # Set complete flag self.dataout.setheadval('COMPLETE', 1, 'Data Reduction Pipe: Complete Data Flag')
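# Standalone sketch of the sqrt scaling used above, assuming astropy is available.
# `data`, `minval`, and `maxval` are hypothetical stand-ins for one filter image and its
# per-filter minimum / shared maximum scale values; the result is an 8-bit channel of the
# kind the step stacks into an RGB cube.
import numpy
from astropy.visualization import simple_norm

def scale_channel_sketch(data, minval, maxval):
    """Scale one image to uint8 with a square-root stretch between minval and maxval."""
    norm = simple_norm(data, 'sqrt', min_cut=minval, max_cut=maxval)
    return (norm(data) * 255.).astype('uint8')

# Example with synthetic data:
# channel = scale_channel_sketch(numpy.random.rand(100, 100) * 1000., 10., 900.)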
def run(self): """ Runs the combining algorithm. The self.datain is run through the code, the result is in jpeg_dataout. """ ''' Select 3 input dataset to use, store in datause ''' #Store number of inputs num_inputs = len(self.datain) # Create variable to hold input files # Copy input to output header and filename datause = [None, None, None] self.log.debug('Number of input files = %d' % num_inputs) if num_inputs == 0: # Raise exception for no input raise ValueError('No input') elif num_inputs == 1: datause = [self.datain[0], self.datain[0], self.datain[0]] elif num_inputs == 2: datause = [self.datain[0], self.datain[0], self.datain[1]] else: filterorder_list = self.getarg('filterorder').split('|') filterprefs_list = self.getarg('filterprefs').split('|') datain_filter_list = [ element.getheadval('filter') for element in self.datain ] used_filter_flags = [False] * len(self.datain) if len(filterprefs_list) != 3: self.log.error( 'Invalid number of preferred filters provided (should be 3): ' + self.getarg('filterprefs')) else: # Locate data matching the filters specified in filterprefs for i, preferred_filter in enumerate(filterprefs_list): for j, element in enumerate(self.datain): if element.getheadval('filter') == preferred_filter: datause[i] = element used_filter_flags[j] = True break filterorder_walker = 0 for i, channel in enumerate(datause): if channel == None: for ordered_filter in filterorder_list[ filterorder_walker:]: filterorder_walker = filterorder_walker + 1 if ordered_filter in datain_filter_list: datain_index = datain_filter_list.index( ordered_filter) if not used_filter_flags[datain_index]: datause[i] = self.datain[datain_index] used_filter_flags[datain_index] = True break elif channel.getheadval('filter') in filterorder_list: filterorder_walker = filterorder_list.index( channel.getheadval('filter')) for i, channel in enumerate(datause): if channel == None: for j, datain_filter in enumerate(datain_filter_list): if not used_filter_flags[j]: datause[i] = self.datain[j] used_filter_flags[j] = True break self.log.debug( 'Files used: R = %s G = %s B = %s' % (datause[0].filename, datause[1].filename, datause[2].filename)) jpeg_dataout = DataFits(config=self.config) jpeg_dataout.header = datause[0].header jpeg_dataout.filename = datause[0].filename img = datause[0].image img1 = datause[1].image img2 = datause[2].image ''' Finding Min/Max scaling values ''' # Create a Data Cube with floats datacube = numpy.zeros((img.shape[0], img.shape[1], 3), dtype=float) # Enter the image data into the cube so an absolute max can be found datacube[:, :, 0] = img datacube[:, :, 1] = img1 datacube[:, :, 2] = img2 # Find how many data points are in the data cube datalength = img.shape[0] * img.shape[1] * 3 # Create a 1-dimensional array with all the data, then sort it datacube.shape = (datalength, ) datacube.sort() # Now use arrays for each filter to find separate min values rarray = img.copy() garray = img1.copy() barray = img2.copy() # Shape and sort the arrays arrlength = img.shape[0] * img.shape[1] rarray.shape = (arrlength, ) rarray.sort() garray.shape = (arrlength, ) garray.sort() barray.shape = (arrlength, ) barray.sort() # Find the min/max percentile values in the data for scaling # Values are determined by parameters in the pipe configuration file minpercent = int(arrlength * self.getarg('minpercent')) maxpercent = int(datalength * self.getarg('maxpercent')) # Find the final data values to use for scaling from the image data rminsv = rarray[minpercent] #sv stands for "scalevalue" gminsv = 
garray[minpercent] bminsv = barray[minpercent] maxsv = datacube[maxpercent] self.log.info(' Scale min r/g/b: %f/%f/%f' % (rminsv, gminsv, bminsv)) self.log.info(' Scale max: %f' % maxsv) # The same min/max values will be used to scale all filters ''' Finished Finding scaling values ''' ''' Combining Function ''' # Make new cube with the proper data type for color images (uint8) # Use square root (sqrt) scaling for each filter # log or asinh scaling is also available #astropy.vidualizations.SqrtStretch() imgcube = numpy.zeros((img.shape[0], img.shape[1], 3), dtype='uint8') minsv = [rminsv, gminsv, bminsv] for i in range(3): # Make normalization function norm = simple_norm(datause[i].image, 'sqrt', min_cut=minsv[i], max_cut=maxsv) # Apply it imgcube[:, :, i] = norm(datause[i].image) * 255. jpeg_dataout.image = imgcube # Create variable containing all the scaled image data imgcolor = Image.fromarray(jpeg_dataout.image, mode='RGB') # Save colored image as a .tif file (without the labels) imgcolortif = imgcube.copy() imgcolortif.astype('uint16') ### tiff.imsave('%s.tif' % jpeg_dataout.filenamebase, imgcolortif) ''' End of combining function ''' ''' Add a Label to the Image ''' draw = ImageDraw.Draw(imgcolor) # Use a variable to make the positions and size of text relative imgwidth = img.shape[1] imgheight = img.shape[0] # Open Sans-Serif Font with a size relative to the picture size try: # This should work on Linux font = ImageFont.truetype( '/usr/share/fonts/liberation/LiberationSans-Regular.ttf', imgheight // 41) except: try: # This should work on Mac font = ImageFont.truetype('/Library/Fonts/Arial Unicode.ttf', imgheight // 41) except: try: # This should work on Windows font = ImageFont.truetype('C:\\Windows\\Fonts\\arial.ttf', imgheight // 41) except: # This should work in Colab font = ImageFont.truetype( '/usr/share/fonts/truetype/liberation/LiberationSans-Regular.ttf', imgheight // 41) # If this still doesn't work - then add more code to make it run on YOUR system # Use the beginning of the FITS filename as the object name filename = os.path.split(jpeg_dataout.filename)[-1] try: objectname = filename.split('_')[0] objectname = objectname[0].upper() + objectname[1:] except Exception: objectname = 'Unknown.' 
objectname = 'Object: %s' % objectname # Read labels at their respective position (kept relative to image size) # Left corner: object, observer, observatory # Right corner: Filters used for red, green, and blue colors draw.text((imgwidth / 100, imgheight / 1.114), objectname, (255, 255, 255), font=font) # Read FITS keywords for the observer, observatory, and filters if 'OBSERVER' in jpeg_dataout.header: observer = 'Observer: %s' % jpeg_dataout.getheadval('OBSERVER') draw.text((imgwidth / 100, imgheight / 1.073), observer, (255, 255, 255), font=font) if 'OBSERVAT' in jpeg_dataout.header: observatory = 'Observatory: %s' % jpeg_dataout.getheadval( 'OBSERVAT') draw.text((imgwidth / 100, imgheight / 1.035), observatory, (255, 255, 255), font=font) if 'FILTER' in datause[0].header: red = 'R: %s' % datause[0].getheadval('FILTER') draw.text((imgwidth / 1.15, imgheight / 1.114), red, (255, 255, 255), font=font) if 'FILTER' in datause[1].header: green = 'G: %s' % datause[1].getheadval('FILTER') draw.text((imgwidth / 1.15, imgheight / 1.073), green, (255, 255, 255), font=font) if 'FILTER' in datause[2].header: blue = 'B: %s' % datause[2].getheadval('FILTER') draw.text((imgwidth / 1.15, imgheight / 1.035), blue, (255, 255, 255), font=font) # Make image name imgname = jpeg_dataout.filenamebegin if imgname[-1] in '_-,.': imgname = imgname[:-1] imgname += '.jpg' # Save the completed image imgcolor.save(imgname) self.log.info('Saving file %sjpg' % jpeg_dataout.filenamebegin) # Optional folder output setup baseimgname = os.path.basename(imgname) folderpaths_list = self.getarg('folderpaths').split(':') for path in folderpaths_list: path = time.strftime(path, time.localtime()) if not os.path.exists(path): if self.getarg('createfolders'): os.makedirs(path) self.log.info('Creating directory %s' % path) else: self.log.info('Invalid folder path %s' % path) try: imgcolor.save(os.path.join(path, baseimgname)) except: self.log.exception('Could not save image to directory %s' % path) ''' End of Label Code ''' # Set complete flag jpeg_dataout.setheadval('COMPLETE', 1, 'Data Reduction Pipe: Complete Data Flag') ### Make output data self.dataout = self.datain.copy() self.dataout.append(jpeg_dataout)
def run(self): """ Runs the calibrating algorithm. The calibrated data is returned in self.dataout """ ### Preparation # Load bias files if necessary if not self.biasloaded or self.getarg('reload'): self.loadbias() # Else: check data for correct instrument configuration - currently not in use(need improvement) else: for keyind in range(len(self.biasfitkeys)): if self.biaskeyvalues[keyind] != self.datain.getheadval( self.biasfitkeys[keyind]): self.log.warn( 'New data has different FITS key value for keyword %s' % self.biasfitkeys[keyind]) # Load dark files if necessary if not self.darkloaded or self.getarg('reload'): self.loaddark() # Else: check data for correct instrument configuration else: for keyind in range(len(self.darkfitkeys)): if self.darkkeyvalues[keyind] != self.datain.getheadval( self.darkfitkeys[keyind]): self.log.warn( 'New data has different FITS key value for keyword %s' % self.darkfitkeys[keyind]) # Load flat files if necessary if not self.flatloaded or self.getarg('reload'): self.loadflat() # Else: check data for correct instrument configuration else: for keyind in range(len(self.flatfitkeys)): if self.flatkeyvalues[keyind] != self.datain.getheadval( self.flatfitkeys[keyind]): self.log.warn( 'New data has different FITS key value for keyword %s' % self.flatfitkeys[keyind]) #convert self.datain to CCD Data object image = ccdproc.CCDData(self.datain.image, unit='adu') image.header = self.datain.header #subtract bias from image image = ccdproc.subtract_bias(image, self.bias, add_keyword=False) #subtract dark from image image = ccdproc.subtract_dark(image, self.dark, scale=True, exposure_time='EXPTIME', exposure_unit=u.second, add_keyword=False) #apply flat correction to image image = ccdproc.flat_correct(image, self.flat, add_keyword=False) # copy calibrated image into self.dataout - make sure self.dataout is a pipedata object self.dataout = DataFits(config=self.datain.config) self.dataout.image = image.data self.dataout.header = image.header self.dataout.filename = self.datain.filename ### Finish - cleanup # Update DATATYPE self.dataout.setheadval('DATATYPE', 'IMAGE') # Add bias, dark files to History self.dataout.setheadval('HISTORY', 'BIAS: %s' % self.biasname) self.dataout.setheadval('HISTORY', 'DARK: %s' % self.darkname) self.dataout.setheadval('HISTORY', 'FLAT: %s' % self.flatname)
def run(self): """ Runs the data reduction algorithm. The self.datain is run through the code, the result is in self.dataout. """ ### Preparation # construct a temp file name that astrometry will output fp = tempfile.NamedTemporaryFile(suffix=".fits", dir=os.getcwd()) # split off path name, because a path that is too long causes remap to # crash sometimes outname = os.path.split(fp.name)[1] fp.close() # Add input file path to ouput file and make new name outpath = os.path.split(self.datain.filename)[0] outnewname = os.path.join(outpath, outname.replace('.fits', '.new')) outwcsname = os.path.join(outpath, outname.replace('.fits', '.wcs')) # Make sure input data exists as file if not os.path.exists(self.datain.filename): self.datain.save() # Make command string rawcommand = self.getarg('astrocmd') % (self.datain.filename, outname) # get estimated RA and DEC center values from the config file or input FITS header raopt = self.getarg('ra') if raopt != '': ra = Angle(raopt, unit=u.hour).degree else: try: ra = Angle(self.datain.getheadval('RA'), unit=u.hour).degree except: ra = '' decopt = self.getarg('dec') if decopt != '': dec = Angle(decopt, unit=u.deg).degree else: try: dec = Angle(self.datain.getheadval('DEC'), unit=u.deg).degree except: dec = '' if (ra != '') and (dec != ''): # update command parameters to use these values rawcommand = rawcommand + ' --ra %f --dec %f --radius %f' % ( ra, dec, self.getarg('searchradius')) else: self.log.debug( 'FITS header missing RA/DEC -> searching entire sky') ### Run Astrometry: # This loop tries the downsample and param options until the fit is successful # need either --scale-low 0.5 --scale-high 2.0 --sort-column FLUX # or --guess-scale downsamples = self.getarg('downsample') paramoptions = self.getarg('paramoptions') for option in range(len(downsamples) * len(paramoptions)): #for downsample in self.getarg('downsample'): downsample = downsamples[option % len(downsamples)] paramoption = paramoptions[option // len(downsamples)] # Add options to command command = rawcommand + ' --downsample %d' % downsample + ' ' + paramoption optionstring = "Downsample=%s Paramopts=%s" % (downsample, paramoption[:10]) # Run the process - see note at the top of the file if using cron process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) self.log.debug('running command = %s' % command) # Wait for the process to be finished or timeout to be reached timeout = time.time() + self.getarg('timeout') while time.time() < timeout and process.poll() == None: time.sleep(1) poll = process.poll() if poll == None: process.kill() time.sleep(1) poll = process.poll() self.log.debug('command returns %d' % poll) if poll == 0 and os.path.exists(outnewname): self.log.debug('output file valid -> astrometry successful') break else: self.log.debug('output file missing -> astrometry failed') # Print the output from astrometry (cut if necessary) if self.getarg('verbose'): output = process.stdout.read().decode() if len(output) > 1000: outlines = output.split('\n') output = outlines[:10] + ['...', '...'] + outlines[-7:] output = '\n'.join(output) self.log.debug(output) ### Post processing # Read output file self.dataout = DataFits(config=self.config) self.log.debug('Opening astrometry.net output file %s' % outnewname) try: self.dataout.load(outnewname) self.dataout.filename = self.datain.filename except Exception as error: self.log.error("Unable to open astrometry. 
output file = %s" % outname) raise error self.log.debug('Successful parameter options = %s' % optionstring) # Add history message histmsg = 'Astrometry.Net: At downsample = %d, search took %d seconds' % ( downsample, time.time() - timeout + 300) self.dataout.setheadval('HISTORY', histmsg) # Add RA from astrometry w = wcs.WCS(self.dataout.header) n1 = float(self.dataout.header['NAXIS1'] / 2) n2 = float(self.dataout.header['NAXIS2'] / 2) ra, dec = w.all_pix2world(n1, n2, 1) self.dataout.header['CRPIX1'] = n1 self.dataout.header['CRPIX2'] = n2 self.dataout.header['CRVAL1'] = float(ra) self.dataout.header['CRVAL2'] = float(dec) self.dataout.header['RA'] = Angle(ra, u.deg).to_string(unit=u.hour, sep=':') self.dataout.header['Dec'] = Angle(dec, u.deg).to_string(sep=':') self.dataout.setheadval('HISTORY', 'Astrometry: Paramopts = ' + optionstring) # Delete temporary files if self.getarg('delete_temp'): os.remove(outnewname) os.remove(outwcsname) self.log.debug('Run: Done')
def run(self): """ Runs the mosaicing algorithm. The self.datain is run through the code, the result is in self.dataout. """ #calculate platescale of first input image try: det = np.linalg.det(wcs.WCS(self.datain[0].header).wcs.cd) pscale = np.sqrt(np.abs(det))*3600. except: try: det = np.linalg.det(wcs.WCS(self.datain[0].header).wcs.pc) pscale = np.sqrt(np.abs(det))*3600. except: pscale = self.datain[0].header['PIXSCAL'] #filtering out images which are too far away from the others #passing images added to a list of (image, WCS) tuples ''' image_centers = [] for f in self.datain: image_centers.append((f.header['CRVAL1'], f.header['CRVAL2'])) filtered_datain = [] dist_list = [[[0]*(len(image_centers)-1)]*len(image_centers)] for i in range(len(image_centers)): for j in range(len(image_centers)-1): dist_list[i][j+1] = np.sqrt((image_)**2+()**2) ''' #calculations necessary for updating wcs information px = [] py = [] #in order to avoid NaN interactions, creating weight map weights=[] for f in self.datain: weights.append((np.where(np.isnan(f.image) == True, 0, 1))) for f in self.datain: px.extend(wcs.WCS(f.header).calc_footprint()[:,0]) py.extend(wcs.WCS(f.header).calc_footprint()[:,1]) x0 = (max(px)+min(px))/2. y0 = (max(py)+min(py))/2. sx = (max(px)-min(px))*np.cos(y0/180*np.pi) # arcsec sy = (max(py)-min(py)) # arcsec size = (sx*3600+self.getarg('pad')*2, sy*3600+self.getarg('pad')*2) xpix = size[0]//pscale ypix = size[1]//pscale cdelt = [pscale/3600.]*2 #create self.dataout and give it a copy of an input's header self.dataout = DataFits(config = self.config) self.dataout.header = self.datain[0].header.copy() #update header wcs information self.log.info('Creating new WCS header') self.dataout.header['CRPIX1'] = xpix/2 self.dataout.header['CRPIX2'] = ypix/2 self.dataout.header['CRVAL1'] = x0 self.dataout.header['CRVAL2'] = y0 self.dataout.header['CD1_1'] = -cdelt[0] self.dataout.header['CD1_2'] = self.dataout.header['CD2_1'] = 0. 
self.dataout.header['CD2_2'] = cdelt[1] self.dataout.header['NAXIS1'] = int(xpix) self.dataout.header['NAXIS2'] = int(ypix) self.dataout.header['CTYPE1'] = 'RA---TAN-SIP' self.dataout.header['CTYPE2'] = 'DEC--TAN-SIP' self.dataout.header['RADESYS'] = 'ICRS' self.dataout.header['EQUINOX'] = 2000 self.dataout.header['LATPOLE'] = self.datain[0].header['CRVAL2'] self.dataout.header['LONPOLE'] = 180 self.dataout.header['PIXASEC'] = pscale theta_rad = np.deg2rad(self.getarg('outangle')) rot_matrix = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], [np.sin(theta_rad), np.cos(theta_rad)]]) rot_cd = np.dot(rot_matrix, np.array([[self.dataout.header['CD1_1'], 0.],[0., self.dataout.header['CD2_2']]])) for i in [0,1]: for j in [0,1]: self.dataout.header['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j] #check drizzle arguments if self.getarg('kernel') == 'smoothing': kernel = 'lanczos3' elif self.getarg('kernel') in ['square', 'point', 'gaussian', 'tophat']: kernel = self.getarg('kernel') else: self.log.error('Kernel name not recognized, using default') kernel = 'square' if self.getarg('drizzleweights') == 'uniform': driz_wt = '' elif self.getarg('drizzleweights') in ['exptime', 'expsq']: driz_wt = self.getarg('drizzleweights') else: self.log.error('Drizzle weighting not recognized, using default') driz_wt = '' #create drizzle object and add input images fullwcs = wcs.WCS(self.dataout.header) self.log.info('Starting drizzle') driz = drz.Drizzle(outwcs = fullwcs, pixfrac=self.getarg('pixfrac'), \ kernel=kernel, fillval='10000', wt_scl=driz_wt) for i,f in enumerate(self.datain): self.log.info('Adding %s to drizzle stack' % f.filename) driz.add_image(f.imgdata[0], wcs.WCS(f.header), inwht=weights[i]) try: fillval=float(self.getarg('fillval')) except: fillval=np.nan self.log.error('Fillvalue not recognized or missing, using default') #creates output fits file from drizzle output self.dataout.imageset(np.where(driz.outsci == 10000, fillval, driz.outsci)) self.dataout.imageset(driz.outwht,'OutWeight', self.dataout.header) self.dataout.filename = self.datain[0].filename #add history self.dataout.setheadval('HISTORY','Coadd: %d files combined with %s kernel, pixfrac %f at %f times resolution' \ % (len(self.datain), kernel, self.getarg('pixfrac'), self.getarg('resolution')))
# print(repr(dfits.header))

### OPTIONAL BUT RECOMMENDED: Check if all necessary files exist
error_flag = False
# Check if configuration file exists
if not os.path.exists(baseconfig):
    print('ERROR: The config file you specified, %s,\n does NOT exist on your computer, fix "config" above' % baseconfig)
    error_flag = True
# Check if input files exist
for name in infilenames:
    if not os.path.exists(name):
        print('ERROR: The input file you specified, %s,\n does NOT exist on your computer, fix "inputnames" above' % name)
        error_flag = True
if not error_flag:
    print("All Good")

os.chdir('/Users/josh/pipeline/pipeline/Developments/stepwebastrometry')
step = StepWebAstrometry()
indata = []
for f in infilenames:
    fits = DataFits(config=baseconfig)
    fits.load(f)
    indata.append(fits)
outdata = step(indata[0])
print('Done')
def loaddark(self): """ Loads the dark information for the instrument settings described in the header of self.datain. If an appropriate file can not be found or the file is invalid various warnings and errors are returned. """ ### identify dark file to load, search if requested darkfile = self.getarg('darkfile') if darkfile == 'search' : # get list of keywords to fit fitkeys = self.getarg('fitkeys') # check format (make first element uppercase) try: _ = fitkeys[0].upper() except AttributeError: # AttributeError if it's not a string self.log.error('LoadDark: fitkeys config parameter is ' + 'incorrect format') raise TypeError('fitkeys config parameter is incorrect format') # get keywords from data datakeys=[] for fitkey in fitkeys: datakeys.append(self.datain.getheadval(fitkey)) # get dark files from darkdir folder darkfolder = self.getarg('darkfolder') filelist=[name for name in os.listdir(darkfolder) if name[0] != '.' and name.find('.fit') > -1 ] if len(filelist) < 1: self.log.error('LoadDark: no dark files found in folder ' + darkfolder) raise ValueError('no dark files found in folder ' + darkfolder) # match dark files, return best dark file bestind = 0 # index of file with best match in filelist bestfitn = 0 # number of keywords that match in best match fileind = 0 # index for going through the list while fileind < len(filelist) and bestfitn < len(fitkeys): # load keys of dark file filehead = pyfits.getheader(darkfolder+'/'+filelist[fileind]) filekeys=[] for fitkey in fitkeys: try: filekeys.append(filehead[fitkey]) except KeyError: self.log.warning('LoadDark: missing key [%s] in dark <%s>' % (fitkey, filelist[fileind] ) ) filekeys.append('') # determine number of fitting keywords keyfitn=0 while ( keyfitn < len(fitkeys) and datakeys[keyfitn] == filekeys[keyfitn] ): keyfitn = keyfitn + 1 # compare with previous best find if keyfitn > bestfitn: bestind = fileind bestfitn = keyfitn fileind=fileind+1 darkfile = darkfolder+'/'+filelist[ bestind ] if bestfitn < len(fitkeys): self.log.warn('Could not find perfect dark file match') self.log.warn('Best dark file found is <%s>' % filelist[bestind] ) else: self.log.info('Best dark file found is <%s>' % filelist[bestind] ) self.fitkeys = fitkeys self.keyvalues = datakeys ### load dark data into a DataFits object self.darkfile = darkfile darkdata = DataFits(config = self.config) darkdata.load(self.darkfile) ### find dark image data arrays and store them # get sizes of input data datalist = self.getarg('datalist') if len(datalist) > 0: # There are items in datalist -> loop over items self.darks = [] # Check if necessary number of images in darkdata if len(darkdata.imgdata) < len(datalist): msg = 'Number of images in dark file < ' msg += 'number of entries in datalist' self.log.error('LoadDark: %s' % msg) raise ValueError(msg) # Loop through datalist items for dataind in range(len(datalist)): dataitem = datalist[dataind] # Search for dataitem in self.datain images if dataitem.upper() in self.datain.imgnames: dataimg = self.datain.imageget(dataitem) self.log.debug('LoadDark: Found image <%s> to subtract dark' % dataitem) # Search dataitem in self.table columns else: try: dataimg = self.datain.table[dataitem] self.log.debug('LoadDark: Found column <%s> to subtract dark' % dataitem) except: msg = 'No data found for <%s>' % dataitem self.log.error('LoadDark: %s' % msg) raise ValueError(msg) # Get dimensions - append dark to list datasiz = dataimg.shape if self.getarg('l0method').upper() != 'NO': datasiz = datasiz[1:] darksiz = 
darkdata.imgdata[dataind].shape self.darks.append(darkdata.imgdata[dataind]) # Check dimension with dark data print(datasiz,darksiz) if len(datasiz) >= len(darksiz): # Data has >= dimensions than dark -> compare begind = len(datasiz)-len(darksiz) if datasiz[begind:] != darksiz: msg = 'Dark "%s" does not fit data - A' % dataitem self.log.error('LoadDark: %s' % msg) raise ValueError(msg) else: # More dimensions in dark data -> report error msg = 'Dark "%s" does not fit data - B' % dataitem self.log.error('LoadDark: %s' % dataitem) raise ValueError(msg) else: # Empty datalist -> Subtract dark from first image in data with first dark datasiz = self.datain.image.shape if self.getarg('l0method').upper() != 'NO': datasiz = datasiz[1:] darksiz = darkdata.image.shape if len(datasiz) >= len(darksiz): # Data has >= dimensions than dark -> compare begind = len(datasiz)-len(darksiz) if datasiz[begind:] != darksiz: self.log.error('LoadDark: Dark does not fit data - A') raise ValueError('Dark does not fit data - A') else: # More dimensions in dark data -> report error self.log.error('LoadDark: Dark does not fit data - B') raise ValueError('Dark does not fit data - B') self.log.debug('LoadDark: Subtracting Dark from first data image with first dark') self.darks=[darkdata.image] ### make good pixel map for each detector and add to #darktemp = numpy.abs(data[0,...])+numpy.abs(data[1,...]) #self.goodpixmap = numpy.ones(data.shape[1:]) #self.goodpixmap [ numpy.where(darktemp == 0.0)] = 0.0 # Finish up self.darkloaded = 1 self.log.debug('LoadDark: done')
def run(self):
    self.dataout = DataFits(config=self.config)
    self.dataout.load(self.mask())
def run(self): """ Runs the combining algorithm. The self.datain is run through the code, the result is in self.dataout. """ # Find master dark to subtract from master dark biaslist = self.loadauxname('bias', multi=False) darklist = self.loadauxname('dark', multi=False) if (len(biaslist) == 0): self.log.error('No bias calibration frames found.') if (len(darklist) == 0): self.log.error('No bias calibration frames found.') self.bias = ccdproc.CCDData.read(biaslist, unit='adu', relax=True) self.dark = ccdproc.CCDData.read(darklist, unit='adu', relax=True) # Create empy list for filenames of loaded frames filelist = [] for fin in self.datain: self.log.debug("Input filename = %s" % fin.filename) filelist.append(fin.filename) # Make a dummy dataout self.dataout = DataFits(config=self.config) if len(self.datain) == 0: self.log.error('Flat calibration frame not found.') raise RuntimeError('No flat file(s) loaded') self.log.debug('Creating master flat frame...') # Create master frame: if there is just one file, turn it into master bias or else combine all to make master bias if (len(filelist) == 1): self.flat = ccdproc.CCDData.read(filelist[0], unit='adu', relax=True) self.flat = ccdproc.subtract_bias(self.flat, self.bias, add_keyword=False) self.flat = ccdproc.subtract_dark(self.flat, self.dark, scale=True, exposure_time='EXPTIME', exposure_unit=u.second, add_keyword=False) else: #bias and dark correct frames flatlist = [] for i in filelist: flat = ccdproc.CCDData.read(i, unit='adu', relax=True) flatsubbias = ccdproc.subtract_bias(flat, self.bias, add_keyword=False) flatsubbiasdark = ccdproc.subtract_dark( flatsubbias, self.dark, scale=True, exposure_time='EXPTIME', exposure_unit=u.second, add_keyword=False) flatlist.append(flatsubbiasdark) #scale the flat component frames to have the same mean value, 10000.0 scaling_func = lambda arr: 10000.0 / numpy.ma.median(arr) #combine them self.flat = ccdproc.combine(flatlist, method=self.getarg('combinemethod'), scale=scaling_func, unit='adu', add_keyword=False) # set output header, put image into output self.dataout.header = self.datain[0].header self.dataout.imageset(self.flat) # rename output filename outputfolder = self.getarg('outputfolder') if outputfolder != '': outputfolder = os.path.expandvars(outputfolder) self.dataout.filename = os.path.join(outputfolder, os.path.split(filelist[0])[1]) else: self.dataout.filename = filelist[0] # Add history self.dataout.setheadval('HISTORY', 'MasterFlat: %d files used' % len(filelist))
def run(self): """ Runs the calibrating algorithm. The calibrated data is returned in self.dataout """ ### Preparation # Load bias files if necessary if not self.biasloaded or self.getarg('reload'): self.loadbias() # Else: check data for correct instrument configuration - currently not in use(need improvement) else: for keyind in range(len(self.biasfitkeys)): if self.biaskeyvalues[keyind] != self.datain.getheadval( self.biasfitkeys[keyind]): self.log.warn( 'New data has different FITS key value for keyword %s' % self.biasfitkeys[keyind]) # Load dark files if necessary if not self.darkloaded or self.getarg('reload'): self.loaddark() # Else: check data for correct instrument configuration else: for keyind in range(len(self.darkfitkeys)): if self.darkkeyvalues[keyind] != self.datain.getheadval( self.darkfitkeys[keyind]): self.log.warn( 'New data has different FITS key value for keyword %s' % self.darkfitkeys[keyind]) # Load flat files if necessary if not self.flatloaded or self.getarg('reload'): self.loadflat() # Else: check data for correct instrument configuration else: for keyind in range(len(self.flatfitkeys)): if self.flatkeyvalues[keyind] != self.datain.getheadval( self.flatfitkeys[keyind]): self.log.warn( 'New data has different FITS key value for keyword %s' % self.flatfitkeys[keyind]) # in the config file, set the 'intermediate' variable to either true or false to enable # saving of intermediate steps saveIntermediateSteps = self.config['biasdarkflat']['intermediate'] self.dataout = DataFits(config=self.datain.config) #convert self.datain to CCD Data object image = CCDData(self.datain.image, unit='adu') image.header = self.datain.header # subtract bias from image image = self.subtract_bias(image, self.bias) if (saveIntermediateSteps == "true"): self.dataout.imageset(image.data, imagename="BIAS") # self.dataout.setheadval('DATATYPE','IMAGE', dataname="BIAS") self.dataout.setheadval('HISTORY', 'BIAS: %s' % self.biasname, dataname="BIAS") # subtract dark from image image = self.subtract_dark(image, self.dark, scale=True, exposure_time='EXPTIME', exposure_unit=u.second) if (saveIntermediateSteps == "true"): self.dataout.imageset(image.data, imagename="DARK") # self.dataout.setheadval('DATATYPE','IMAGE', dataname="DARK") self.dataout.setheadval('HISTORY', 'BIAS: %s' % self.biasname, dataname="DARK") self.dataout.setheadval('HISTORY', 'DARK: %s' % self.darkname, dataname="DARK") # apply flat correction to image image = self.flat_correct(image, self.flat) # if separating bias,dark,flat steps , save the flat-corrected portion into its own hdu if (saveIntermediateSteps == "true"): self.dataout.imageset(image.data, imagename="FLAT") # self.dataout.setheadval('DATATYPE','IMAGE', dataname="FLAT") self.dataout.setheadval('HISTORY', 'BIAS: %s' % self.biasname, dataname="FLAT") self.dataout.setheadval('HISTORY', 'DARK: %s' % self.darkname, dataname="FLAT") self.dataout.setheadval('HISTORY', 'FLAT: %s' % self.flatname, dataname="FLAT") else: # copy calibrated image into self.dataout self.dataout.image = image.data self.dataout.header = self.datain.header ### Finish - cleanup # Update DATATYPE self.dataout.setheadval('DATATYPE', 'IMAGE') # Add bias, dark files to History self.dataout.setheadval('HISTORY', 'BIAS: %s' % self.biasname) self.dataout.setheadval('HISTORY', 'DARK: %s' % self.darkname) self.dataout.setheadval('HISTORY', 'FLAT: %s' % self.flatname) self.dataout.filename = self.datain.filename
from darepype.drp import DataFits
from astropy.io import fits

config = '/Users/josh/pipeline/pipeline/Developments/stepwebastrometry/pipeconf_stonedge_auto.txt'
fp = '/Users/josh/Desktop/pipeline_test/data/M5_r-band_60s_bin2_200711_053415_itzamna_seo_0_RAW_TABLE.fits'

fts = DataFits(config=config)
fts.load(fp)

# print(repr(fits.HDUList(file=fp)))
# fts.header['RA'] = 0
# fts.header['Dec'] = 0
print(repr(fts.header))
print(repr(fts.image))
# print(repr(fts.table))
def run(self):
    self.dataout = DataFits(config=self.config)
    self.dataout.load(self.source_extract())
    self.dataout.header['RA'] = self.datain.header['RA']
    self.dataout.header['Dec'] = self.datain.header['Dec']
    self.dataout.save()