def collapse_cube(w1, w2):
    """ Collapse a MUSE data cube slice between wavelengths w1 and w2.

    Arguments
    w1, w2 : Wavelength limits used to name the input slice file, which
             contains both data and stat extensions.
    """
    fits = "slice_w{0}_{1}.fits".format(w1, w2)
    outfits = "collapsed_w{0}_{1}.fits".format(w1, w2)
    data = pf.getdata(fits, 0)
    error = pf.getdata(fits, 1)
    h = pf.getheader(fits, 0)
    h2 = pf.getheader(fits, 1)
    h["NAXIS"] = 2
    del h["NAXIS3"]
    h2["NAXIS"] = 2
    del h2["NAXIS3"]
    print "Starting collapsing process..."
    start = time.time()
    w = wavelength_array(fits)
    # newdata = np.trapz(data, dx=np.diff(w)[0], axis=0)
    # newdata = np.nansum(data, axis=0) * np.diff(w)[0]
    newdata = np.nanmedian(data, axis=0)
    # robust noise estimate: 1.482602 * MAD of the double-difference,
    # divided by sqrt(6) to account for the differencing of three planes
    noise = 1.482602 / np.sqrt(6.) * np.nanmedian(np.abs(
        2. * data - np.roll(data, 2, axis=0) - np.roll(data, -2, axis=0)),
        axis=0)
    end = time.time()
    print "Collapsing lasted {0} minutes.".format((end - start) / 60.)
    hdu = pf.PrimaryHDU(newdata, h)
    hdu2 = pf.ImageHDU(noise, h2)
    hdulist = pf.HDUList([hdu, hdu2])
    hdulist.writeto(outfits, clobber=True)
    return
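# A hedged sanity check (not part of the source) for the MAD-based noise
# estimator used in collapse_cube: 1.482602 * MAD approximates one Gaussian
# sigma, and the sqrt(6) divisor accounts for the variance of the
# 2*x - roll(x, +2) - roll(x, -2) difference (4 + 1 + 1 = 6). Synthetic data
# only; all names here are illustrative.
def _demo_mad_noise_estimator():
    import numpy as np
    x = np.random.normal(0., 1., (1000, 8, 8))
    est = 1.482602 / np.sqrt(6.) * np.nanmedian(
        np.abs(2. * x - np.roll(x, 2, axis=0) - np.roll(x, -2, axis=0)),
        axis=0)
    print(est.mean())  # should be close to the true sigma of 1.0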
def shiftRGB(redF, greenF, blueF, blueshiftr=0, blueshiftc=0,
             greenshiftr=0, greenshiftc=0, redshiftr=0, redshiftc=0,
             ext=None):
    """ Shift the pixels of three R, G, B images.

    The mean pointing of the three images is used as the reference, and
    each image is shifted onto it. Returns the shifted R, G, B images.

    Each row runs along the RA direction and each column along the Dec
    direction:
    CRVAL1 : RA direction
    CRVAL2 : Dec direction
    """
    blueHdr = pf.getheader(blueF, ext)
    greenHdr = pf.getheader(greenF, ext)
    redHdr = pf.getheader(redF, ext)
    # Convert CRVAL (degrees) to pixels, assuming a 0.27 arcsec pixel scale.
    bluerow = blueHdr['crval1'] * 3600. / 0.27
    bluecol = blueHdr['crval2'] * 3600. / 0.27
    greenrow = greenHdr['crval1'] * 3600. / 0.27
    greencol = greenHdr['crval2'] * 3600. / 0.27
    redrow = redHdr['crval1'] * 3600. / 0.27
    redcol = redHdr['crval2'] * 3600. / 0.27
    """
    col0 = int(blueHdr['datasec'].split('[')[1].split(']')[0].split(',')[0].split(':')[0]) - 1
    col1 = int(blueHdr['datasec'].split('[')[1].split(']')[0].split(',')[0].split(':')[1])
    row0 = int(blueHdr['datasec'].split('[')[1].split(']')[0].split(',')[1].split(':')[0]) - 1
    row1 = int(blueHdr['datasec'].split('[')[1].split(']')[0].split(',')[1].split(':')[1])
    """
    blue = pf.getdata(blueF, ext)
    green = pf.getdata(greenF, ext)
    red = pf.getdata(redF, ext)
    ctgreenrow = (bluerow + greenrow + redrow) / 3.
    ctgreencol = (bluecol + greencol + redcol) / 3.
    blue = nd.shift(blue, [bluerow - ctgreenrow + blueshiftr,
                           bluecol - ctgreencol + blueshiftc],
                    mode='nearest', order=1)
    green = nd.shift(green, [greenrow - ctgreenrow + greenshiftr,
                             greencol - ctgreencol + greenshiftc],
                     mode='nearest', order=1)
    red = nd.shift(red, [redrow - ctgreenrow + redshiftr,
                         redcol - ctgreencol + redshiftc],
                   mode='nearest', order=1)
    return red, green, blue
def do_test_3_fqpm_tilt(self):
    """ Test FQPM tilting (no FQPM yet), no field mask. Verify proper
    behavior in Lyot plane"""
    osys = poppy.OpticalSystem("test", oversample=self.oversample)
    osys.addPupil('Circle', radius=6.5/2)
    osys.addPupil('FQPM_FFT_aligner')
    osys.addImage()  # perfect image plane
    osys.addPupil('FQPM_FFT_aligner', direction='backward')
    osys.addPupil('Circle', radius=6.5/2)
    osys.addDetector(pixelscale=self.pixelscale, fov_arcsec=3.0)
    # TODO testing of odd and even focal plane sizes?

    plt.clf()
    poppy._FLUXCHECK = True
    psf = osys.calcPSF(wavelength=self.wavelength, save_intermediates=True,
                       display_intermediates=True)
    psf.writeto('test3a_psf.fits', clobber=True)

    # after the Lyot plane, the wavefront should be all real.
    check_wavefront('wavefront_plane_004.fits', test='is_real',
                    comment='(Lyot Plane)')

    cen = webbpsf.measure_centroid('wavefront_plane_002.fits', boxsize=50)
    head = pyfits.getheader('wavefront_plane_002.fits')
    desired_pos = (head['NAXIS1'] - 1) / 2.0
    # within 1/50th of a pixel of the desired position?
    self.assertAlmostEqual(cen[0], desired_pos, delta=0.025)
    self.assertAlmostEqual(cen[1], desired_pos, delta=0.025)
    # This is likely dominated by uncertainties in the simple center
    # measuring algorithm...

    _log.info("FQPM FFT half-pixel tilting is working properly in intermediate image plane")

    cen2 = webbpsf.measure_centroid('wavefront_plane_005.fits', boxsize=50)
    head2 = pyfits.getheader('wavefront_plane_005.fits')
    desired_pos2 = (head2['NAXIS1'] - 1) / 2.0
    # within 1/20th of a pixel of the desired position?
    self.assertAlmostEqual(cen2[0], desired_pos2, delta=0.05)

    _log.info("FQPM FFT half-pixel tilting is working properly in final image plane")
def imgray2fits(infile, fitsfile='', overwrite=False, headerfile=None,
                flip=False):
    if fitsfile == '':
        fitsfile = decapfile(infile) + '.fits'

    if exists(fitsfile):
        if overwrite:
            delfile(fitsfile)
        else:
            print fitsfile, 'EXISTS'
            sys.exit(1)

    data = loadgray(infile)  # coeim.py

    #hdu = pyfits.PrimaryHDU()
    header = headerfile and pyfits.getheader(headerfile)
    hdu = pyfits.PrimaryHDU(None, header)
    hdulist = pyfits.HDUList([hdu])
    hdulist.writeto(fitsfile)

    try:
        # If there's a 'SCI' extension, then that's where the WCS is
        header = pyfits.getheader(headerfile, 'SCI')
    except:
        pass

    if header is not None:
        if 'EXTNAME' in header.keys():
            del(header['EXTNAME'])

    if flip:
        data = flipud(data)

    pyfits.append(fitsfile, data, header)
    print fitsfile, 'PRODUCED'
def use_correlation(self):
    """ Use correlation data-cube. """
    import numpy
    from pyfits import getdata, getheader, writeto
    from glob import glob
    from os.path import splitext
    from sys import stdout

    self.print("\n A correlation cube will be used.")
    self.print(" Looking for an existing correlation data-cube in the current folder.")
    candidates = glob("*.fits")
    corr_cube = None

    for candidate in candidates:
        if 'CORRFROM' in getheader(candidate):
            if getheader(candidate)['CORRFROM'] == self.input_file:
                self.print(" Correlation cube to be used: %s" % candidate)
                return candidate

    if corr_cube is None:
        self.print(" Correlation cube not found. Creating a new one.")
        data = getdata(self.input_file)
        corr_cube = numpy.empty_like(data)

        x = numpy.arange(self.width)
        y = numpy.arange(self.height)
        X, Y = numpy.meshgrid(x, y)
        x, y = numpy.ravel(X), numpy.ravel(Y)

        for i in range(x.size):
            s = data[:, y[i], x[i]]
            s = s / s.max()   # Normalize
            s = s - s.mean()  # Remove mean to avoid triangular shape
            s = numpy.correlate(s, self.ref_s, mode='same')
            corr_cube[:, y[i], x[i]] = s

            temp = (i + 1) * 100.00 / X.size
            stdout.write('\r %2d%% ' % temp)
            stdout.write(self.loading[int(temp * 10 % 5)])
            stdout.flush()

        self.print(" Done.")
        corr_name = splitext(self.input_file)[0] + '--corrcube.fits'
        self.print(" Saving correlation cube to %s" % corr_name)

        corr_hdr = self.header.copy()
        corr_hdr.set('CORRFROM', self.input_file, 'Cube used for corrcube.')
        corr_hdr.set('', '', before='CORRFROM')
        corr_hdr.set('', '--- Correlation cube ---', before='CORRFROM')

        writeto(corr_name, corr_cube, corr_hdr, clobber=True)

        del corr_hdr
        del corr_cube

    return corr_name
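# A hedged sketch (not from the source) of the per-spaxel correlation step in
# use_correlation, applied to a single synthetic spectrum: normalize, remove
# the mean, cross-correlate against a reference, and read the shift off the
# peak position. Values are illustrative.
def _demo_spectral_correlation():
    import numpy as np
    ref = np.exp(-0.5 * ((np.arange(100) - 50.) / 3.) ** 2)
    s = np.roll(ref, 7)          # spectrum shifted by 7 channels
    s = s / s.max()              # Normalize
    s = s - s.mean()             # Remove mean to avoid triangular shape
    c = np.correlate(s, ref - ref.mean(), mode='same')
    print(np.argmax(c) - 50)     # recovered shift, should print 7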
def test_pv_invert_many(find=True):
    import es_util
    from glob import glob

    sep = '-' * 70

    dir = os.path.expanduser("~/data/astrometry")
    imdir = os.path.join(dir, 'image')
    plotdir = os.path.join(dir, 'plots')
    testdir = os.path.join(dir, 'test')

    x = numpy.array([21.34, 1000.0, 1500.17], dtype='f8')
    y = numpy.array([11.21, 1000.0, 1113.92], dtype='f8')

    pattern = os.path.join(imdir, '*')
    images = glob(pattern)

    hdr = pyfits.getheader(images[0])
    n1 = hdr['naxis1']
    n2 = hdr['naxis2']
    xrang = numpy.array([1.0, n1], dtype='f8')
    yrang = numpy.array([1.0, n2], dtype='f8')

    #n=100
    n = 10
    x, y = es_util.make_xy_grid(n, xrang, yrang)

    rms = []
    xdiffs = []
    ydiffs = []
    for imname in images:
        sys.stdout.write('image: %s\n' % imname)
        sys.stdout.write(sep + '\n')

        hdr = pyfits.getheader(imname)
        wcs = wcsutil.WCS(hdr)

        sys.stdout.write('doing xforms\n')
        ra, dec = wcs.image2sky(x, y)
        xn, yn = wcs.sky2image(ra, dec, find=find)

        xdiff = xn - x
        ydiff = yn - y
        t = (xdiff)**2 + (ydiff)**2
        trms = numpy.sqrt(t.sum() / t.size)
        rms.append(trms)
        xdiffs.append(xdiff)
        ydiffs.append(ydiff)

        sys.stdout.write('rms: %s\n' % trms)

    rms = numpy.array(rms, dtype='f8')
    out = {}
    out['rms'] = rms
    out['x'] = x
    out['y'] = y
    out['xdifflist'] = xdiffs
    out['ydifflist'] = ydiffs
    return out
def imsubtract(image1, image2, outfile=None,
               clobber=False, verbose=False, debug=False):
    """ Construct a simple subtraction: image2 - image1. Guards against
    different sized data arrays by assuming that the lower left pixel (0,0)
    is the anchor point. (i.e. the second image will be trimmed or extended
    if needed.)
    """
    import os
    import pyfits
    from numpy import ndarray
    import exceptions

    if debug:
        import pdb
        pdb.set_trace()

    if outfile:
        if os.path.isfile(outfile) and not clobber:
            print("%s exists. Not clobbering." % outfile)
            return (outfile)

    # read in the images
    if not os.path.isfile(image1):
        raise exceptions.RuntimeError(
            "The image file %s is not valid." % image1)
    im1head = pyfits.getheader(image1)
    im1data = pyfits.getdata(image1)

    if not os.path.isfile(image2):
        raise exceptions.RuntimeError(
            "The image file %s is not valid." % image2)
    im2head = pyfits.getheader(image2)
    im2data = pyfits.getdata(image2)

    # sometimes multidrizzle drops a pixel. Unpredictable.
    nx2, ny2 = im2data.shape
    nx1, ny1 = im1data.shape
    if nx2 > nx1 or ny2 > ny1:
        im2data = im2data[:min(nx1, nx2), :min(ny1, ny2)]
        im1data = im1data[:min(nx1, nx2), :min(ny1, ny2)]
    elif nx2 < nx1 or ny2 < ny1:
        im1data = im1data[:min(nx1, nx2), :min(ny1, ny2)]
        im2data = im2data[:min(nx1, nx2), :min(ny1, ny2)]

    diffim = im2data - im1data

    if not outfile:
        return (diffim)
    else:
        im2head.update("SRCIM1", image1,
                       "First source image = template for subtraction")
        im2head.update("SRCIM2", image2,
                       "Second source image = search epoch image")
        outdir = os.path.split(outfile)[0]
        if outdir and not os.path.isdir(outdir):
            os.makedirs(outdir)
        pyfits.writeto(outfile, diffim, header=im2head, clobber=clobber)
        return (outfile)
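# A hedged illustration (not from the source) of the trimming rule imsubtract
# applies when the two arrays differ in shape: both are cut back to the
# common lower-left region before subtracting. Synthetic arrays only.
def _demo_shape_trim():
    import numpy as np
    a = np.ones((100, 99))
    b = np.ones((99, 100))
    ny, nx = min(a.shape[0], b.shape[0]), min(a.shape[1], b.shape[1])
    diff = b[:ny, :nx] - a[:ny, :nx]
    print(diff.shape)  # -> (99, 99)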
def time_steps(path, num_exp):
    import pyfits as pf
    label = [0] * num_exp
    label[0] = '0 minutes'
    for i in range(num_exp - 1):
        label[i + 1] = str(round((pf.getheader(path, 4 + i + 1)['tai-beg'] -
                                  pf.getheader(path, 4)['tai-beg']) / 60)) \
            + ' minutes'
    return label
def data_collect(file_directory, file_prefix, file_postfix, first_image,
                 num_images, num_parameters):
    # Extracts data and header from the first image block, gets the
    # dimensions of the image block, and determines the total number of
    # frames for the file range
    x = p.getdata(file_directory + file_prefix + str(first_image) +
                  file_postfix)
    h = p.getheader(file_directory + file_prefix + str(first_image) +
                    file_postfix)
    cube_shape = x.shape
    num_frames = num_images * cube_shape[0]

    # Preallocates the 3d data cube
    data_cube = np.zeros((num_frames, cube_shape[1], cube_shape[2]))

    # Preallocates the 2D parameter array: (number of frames, number of
    # parameters)
    photometry = [None] * (num_images * cube_shape[0])
    for i in np.arange(num_frames):
        photometry[i] = [None] * num_parameters

    # Loop for each image cube, extracts frame data to data cube and updates
    # photometry parameters.
    image_num = 0
    for image in np.arange(num_images):
        data = p.getdata(file_directory + file_prefix +
                         str(first_image + image) + file_postfix)
        hdr = p.getheader(file_directory + file_prefix +
                          str(first_image + image) + file_postfix)

        # Reads in time data from header
        time = hdr['UTCS_OBS']
        interval = hdr['FRAMTIME']

        for i in np.arange(cube_shape[0]):
            # Reads in each frame in the image to the data cube
            data_cube[image_num + i] = data[i]
            # Writes image number to photometry parameter 0 for each frame
            photometry[image_num + i][0] = first_image + image_num
            # Writes time the frame was taken to photometry parameter 1
            photometry[image_num + i][1] = (interval * i) + time

        image_num += cube_shape[0]

    return (photometry, data_cube, num_frames, cube_shape)
def imarith(file1, operator, file2, output, verbose=True, keep=1):
    """
    @param file1: Input fits image name 1.
    @param operator: Operator ['+','-','*','/'].
    @param file2: Input fits image name 2.
    @param output: Output image.

    @keyword verbose: Turn verbose on? [True]/False.
    @keyword keep: Choose whether to keep the header from [file1] or file2.
    """
    from pyfits import getdata, getheader, writeto
    from time import strftime

    # Reading data
    data1 = get_fits_data(file1)
    data2 = get_fits_data(file2)
    header = getheader(file2) if keep == 2 else getheader(file1)

    # Applying operation
    if verbose:
        print(" %s %s %s = %s" % (file1, operator, file2, output))

    # Promote a 2-D frame or 1-D spectrum to match a 3-D cube
    if (data1.ndim == 3) and (data2.ndim == 2):
        data2 = data2.reshape((1, data2.shape[0], data2.shape[1]))
        data2 = data2.repeat(data1.shape[0], axis=0)

    if (data1.ndim == 3) and (data2.ndim == 1):
        data2 = data2.reshape((data2.shape[0], 1, 1))
        data2 = data2.repeat(data1.shape[1], axis=1)
        data2 = data2.repeat(data1.shape[2], axis=2)

    assert data1.shape == data2.shape

    # '==' rather than 'is': identity comparison against literals is
    # unreliable
    if operator == '+':
        data1 = data1 + data2
    elif operator == '-':
        data1 = data1 - data2
    elif operator == '*':
        data1 = data1 * data2
    elif operator == '/':
        data1 = data1 / data2
    else:
        raise ValueError("Invalid operator.")

    # Updating header
    try:
        header.set('', '')
    except AttributeError:
        from sys import exit
        from pyfits import __version__
        print(" Sorry, your PyFits version %s is not supported." % __version__)
        print(" Please, download PyFits v3.0 or superior.")
        print(" Leaving now.\n ")
        exit()

    header.set('HISTORY', 'imarith applied on %s'
               % strftime('%Y-%m-%d %H:%M:%S %Z'))
    header.set('HISTORY', file1, 'First file used on imarith.py.')
    header.set('HISTORY', operator, 'Operator used on imarith.py.')
    header.set('HISTORY', file2, 'Second file used on imarith.py.')

    # Writing output
    output = safesave(output)
    writeto(output, data1, header)

    return None
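# A hedged sketch (not from the source) of how imarith promotes a 2-D frame
# to match a 3-D cube by repeating it along the first (spectral) axis.
# Synthetic shapes, illustrative only.
def _demo_imarith_broadcast():
    import numpy as np
    cube = np.zeros((5, 4, 3))
    frame = np.ones((4, 3))
    frame3d = frame.reshape((1, 4, 3)).repeat(cube.shape[0], axis=0)
    assert cube.shape == frame3d.shape
    print((cube + frame3d).shape)  # -> (5, 4, 3)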
def __init__(self, im, spec, verbose=False, instrument="hydra"):
    self.image = im
    self.id = im.replace('.fits', '')
    self.spec = spec
    self.verbose = verbose
    self.data = pf.getdata(spec)
    self.header = pf.getheader(spec)
    self.imheader = pf.getheader(im)
    self.shape_info()
    self.check_fields()
    if instrument == "hydra":
        self.reverse_spec()
    return
def get_spec_parameters(spec_file):
    '''
    This function reads in a spectrum and the primary and first extension
    headers

    Input:
        spec_file: the name of the file
    Output:
        spec: binary fits table
        hdr0: primary header
        hdr1: first extension header
    '''
    spec = pyfits.getdata(spec_file, 1)
    hdr0 = pyfits.getheader(spec_file, 0)
    hdr1 = pyfits.getheader(spec_file, 1)
    return spec, hdr0, hdr1
def get_sismo_data(ID):
    """
    Retrieve CoRoT timeseries from a local data repository.

    The output record array has fields 'HJD', 'flux', 'e_flux', 'flag'.

    @param ID: ID of the target: either an integer (CoRoT ID), an
    SIMBAD-recognised target name, or a valid CoRoT FITS file
    @type ID: int or str
    @return: data, header
    @rtype: numpy recarray, dict
    """
    #-- data on one target can be spread over multiple files: collect the
    #   data
    data = []
    if isinstance(ID, str) and os.path.isfile(ID):
        header = pyfits.getheader(ID)
        times, flux, error, flags = fits.read_corot(ID)
        data.append([times, flux, error, flags])
    else:
        #-- resolve the target's name: it's either a target name or CoRoT ID.
        try:
            ID = int(ID)
        except ValueError:
            info = sesame.search(ID, db='S')
            IDs = [alias for alias in info['alias'] if 'HD' in alias]
            if len(IDs) != 1:
                logger.error("Data retrieval for %s not possible. Reason: no HD number resolved" % (ID))
                return
            ID = IDs[0]
        #-- collect the files containing data on the target
        catfiles = config.glob((os.sep).join(['catalogs', 'corot', 'sismo']),
                               '*.fits')
        for catfile in catfiles:
            try:
                header = pyfits.getheader(catfile)
            except IOError:
                continue
            if header['starname'] == ID or \
                    header['corotid'].replace(' ', '') == '%s' % (ID):
                times, flux, error, flags = fits.read_corot(catfile)
                data.append([times, flux, error, flags])
    #-- now make a record array and sort according to times
    if not data:
        raise ValueError('target {0} not in offline CoRoT data repository'.format(ID))
    data = np.hstack(data)
    data = np.rec.fromarrays(data, dtype=[('HJD', '>f8'), ('flux', '>f8'),
                                          ('e_flux', '>f8'), ('flag', 'i')])
    sa = np.argsort(data['HJD'])
    return data[sa], header
def fitstoarrays(ffile, fmask):
    fitsfile = pyfits.open(ffile)
    data = fitsfile[0].data
    header = pyfits.getheader(ffile)

    naxis1 = header['naxis1']
    naxis2 = header['naxis2']
    cdelt1 = header['cdelt1']
    cdelt2 = header['cdelt2']
    crpix1 = header['crpix1']
    crpix2 = header['crpix2']
    crval1 = header['crval1']
    crval2 = header['crval2']

    X = zeros(data.shape)
    Y = zeros(data.shape)
    for j in range(data.shape[0]):
        for i in range(data.shape[1]):
            X[j, i] = (1 + i) * cdelt1
            Y[j, i] = (1 + j) * cdelt2

    maskfile = pyfits.open(fmask)
    datam = maskfile[0].data
    mask = datam != 0

    #Z = (X**2+Y**2)
    return X[mask], Y[mask], data[mask]
def photo_calib(date, field):
    stfDir = 'starfinder/'

    files = glob.glob('{0}/S{1}*_{2}_cr_0.9_stf.lis'.format(stfDir, date,
                                                            field))

    # Calibrate flags
    flagStr = '-f 1 -c 13 -s 2 '
    flagStr += '-N ngc1815_photo_calib.dat -M 1 -R '

    for _file in files:
        # Fetch the name of the PSF star
        cooFile = _file.replace('_0.9_stf.lis', '.coo')
        _coo = open(cooFile, 'r')
        cooLine = _coo.readline()
        psfStar = cooLine.split()[-1]

        flagStrNow = flagStr + '-I ' + psfStar

        # Fetch the angle of the image.
        fitsFile = _file.replace('_0.9_stf.lis', '.fits')
        hdr = pyfits.getheader(fitsFile)
        angle = hdr['PA']
        angle = 360 - angle

        flagStrNow += ' -T {0:.2f}'.format(angle)
        flagStrNow += ' ' + _file

        calibrate.main(argv=flagStrNow.split())
def getvsopname(gemfile, instrument, path):
    # Defaults
    validInstruments = ('GMOSN', 'GMOSS')
    instDict = {'N': 'GMOSN', 'S': 'GMOSS'}

    # Verify instrument
    if instrument is None:
        if matchInst.match(gemfile) is None:
            errmsg = 'Invalid Gemini frame name, \'' + gemfile + '\''
            raise IOError, errmsg
        instpref = matchInst.sub(r"\1", gemfile)
        instrument = instDict[instpref]
    elif instrument not in validInstruments:
        print '\nUSAGE ERROR: Invalid value for instrument (%s)' % instrument
        p.print_help()
        raise SystemExit

    phu = pyfits.getheader(path + '/' + gemfile, 0)
    dateobs = phu['DATE-OBS']
    timeobs = phu['TIME-OBS']
    (year, month, date) = map(string.atoi, dateobs.split('-'))
    d = datetime.date(year, month, date)
    (hour, minutes, seconds) = map(string.atof, timeobs.split(':'))
    microseconds = round((seconds - int(seconds)) * 1e6)
    if int(microseconds) == 0:
        microseconds += 1
    t = datetime.time(int(hour), int(minutes), int(seconds),
                      int(microseconds))
    dt = datetime.datetime.combine(d, t)

    return instrument + '.' + dt.isoformat()[:-3] + '_s1d.fits'
def get_data(KIC):
    """
    Retrieve Kepler timeseries from a remote data repository.

    Fields are 'HJD','flux','e_flux','bkg','quarter'.

    @param KIC: kic number or list of filenames
    @type KIC: integer or list
    @return: data, header
    @rtype: recarray, dict
    """
    times = []
    flux = []
    e_flux = []
    background = []
    quarter = []
    if isinstance(KIC, str) or isinstance(KIC, int):
        filenames = download_light_curve(KIC)
    else:
        filenames = KIC
    for filename in filenames:
        header = pyfits.getheader(filename)
        data = fits.read2recarray(filename, ext='LIGHTCURVE')
        times.append(data['TIME'] + 2454833.)
        flux.append(data['SAP_FLUX'])
        e_flux.append(data['SAP_FLUX_ERR'])
        background.append(data['SAP_BKG'])
        quarter.append(np.ones(len(data)) * header['quarter'])
    data = np.rec.fromarrays([np.hstack(times), np.hstack(flux),
                              np.hstack(e_flux), np.hstack(background),
                              np.hstack(quarter)],
                             names=['HJD', 'flux', 'e_flux', 'bkg',
                                    'quarter'])
    return data, header
def wavelength_to_pixel(fits, sample_ext, sample_wl):
    slit_header = pyfits.getheader(fits, sample_ext)
    crpix1 = slit_header['CRPIX1']
    cd1_1 = slit_header['CD1_1']
    crval1 = slit_header['CRVAL1']
    sample_pixel = crpix1 + ((sample_wl - crval1) / cd1_1)
    return sample_pixel
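# A hedged companion sketch (not from the source): the inverse mapping,
# pixel -> wavelength, under the same linear-WCS assumption
# (CRPIX1/CRVAL1/CD1_1) that wavelength_to_pixel relies on. Useful for
# round-trip checks; the function name is hypothetical.
def pixel_to_wavelength(fits, sample_ext, sample_pixel):
    slit_header = pyfits.getheader(fits, sample_ext)
    return slit_header['CRVAL1'] + \
        (sample_pixel - slit_header['CRPIX1']) * slit_header['CD1_1']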
def imcopy(infile, outfile, dim=None):
    print >> sys.stdout, 'Copying ', infile, ' ----> ', outfile

    if len(outfile.split('[')) == 1:
        subprocess.call('cp ' + infile + ' ' + outfile, shell=True)
    else:
        if not dim:
            print >> sys.stderr, 'Error : for image section copying, dim parameter cannot be None. Exiting.'
            sys.exit(-1)

        header = pyfits.getheader(infile)
        output = numpy.zeros((dim, dim), dtype=numpy.float32)

        try:
            f1 = pyfits.open(infile)
        except:
            print >> sys.stderr, 'Error : Not able to open ', infile, '. Exiting.'
            sys.exit(-1)

        # parse the "[x1:x2,y1:y2]" section from the output file name
        section = outfile.split('[')[1].replace(']', '')
        x1, x2 = int(section.split(',')[0].split(':')[0]), \
            int(section.split(',')[0].split(':')[1])
        y1, y2 = int(section.split(',')[1].split(':')[0]), \
            int(section.split(',')[1].split(':')[1])

        output[x1:x2, y1:y2] = f1[0].data
        outfile = outfile.split('[')[0]

        subprocess.call('rm -f ' + outfile, shell=True)
        pyfits.writeto(outfile, output, header=header)

    return outfile
def generate_display_id(dataset, version):
    '''
    Generate an ID which all similar stackable data will have in common.

    @param dataset: Input AstroData or fits filename
    @type dataset: list of AstroData instance

    @param version: The version from which to run.
    @type version: string

    @return: A display id.
    @rtype: string
    '''
    if version != version_index['display_id']:
        try:
            # designed to call generateStackableID_<version>
            idFunc = getattr(globals()['IDFactory'],
                             'generateDisplayID_' + version)
        except:
            raise ValueError("Version: '" + version +
                             "' is either invalid or not supported.")
        return idFunc(dataset, version)

    """
    shaObj = hashlib.sha1()
    phu = pf.getheader(dataset[0], 0)
    shaObj.update(phu['OBSID'])
    shaObj.update(phu['OBJECT'])
    """

    if type(dataset) == str:
        phu = pf.getheader(dataset)
        ID = version + "_" + phu['OBSID'] + "_" + phu['OBJECT']
    elif type(dataset) == AstroData:
        ID = version + "_" + dataset.phuValue('OBSID') + "_" + \
            dataset.phuValue('OBJECT')
    return ID
def make_wifes_p08_template(ddir, fn, out_dir, star, rv=0.0):
    """From a p08 file, create a template spectrum for future
    cross-correlation. The template is interpolated onto a 0.1 Angstrom grid
    (to match higher resolution templates).

    Parameters
    ----------
    ddir: string
        Data directory for the p08 file
    fn: string
        p08 fits filename
    out_dir: string
        Output directory
    """
    flux_stamp, wave = read_and_find_star_p08(ddir + '/' + fn)
    heliocentric_correction = pyfits.getheader(ddir + '/' + fn)['RADVEL']
    spectrum, sig = weighted_extract_spectrum(flux_stamp)
    dell_template = 0.1
    wave_template = np.arange(90000) * dell_template + 3000
    spectrum_interp = np.interp(
        wave_template,
        wave * (1 - (rv - heliocentric_correction) / 2.998e5),
        spectrum)
    outfn = out_dir + '/' + star + ':' + fn
    pyfits.writeto(outfn, spectrum_interp, clobber=True)
def guide_star_info():
    """
    Get the guide star information out of the image headers.
    """
    imgDir = ngc1851_data + 'starfinder/'
    imgs = glob.glob(imgDir + '*_G1_1_cr.fits')

    # Keep results in an output file
    f_out = open(ngc1851_data + 'guide_star_info.txt', 'w')
    f_fmt = '{0:8s} {1:15s} '

    for img in imgs:
        hdr = pyfits.getheader(img)
        f_out.write('{0:20s} '.format(os.path.basename(img)))

        # There are 4 possible guide stars. Try to print them all out
        for ii in range(4):
            try:
                cfg = 'GWFS{0}CFG'.format(ii + 1)
                obj = 'GWFS{0}OBJ'.format(ii + 1)
                f_out.write(f_fmt.format(hdr[cfg], hdr[obj]))
            except KeyError:
                f_out.write(f_fmt.format('-', '-'))

        f_out.write('\n')

    f_out.close()
def rdpsfmodel(psfmodelfile):
    """ Read in a psf model from a fits file. Gaussian parameters are in the
    header, and the image array has a lookup table of non-gaussian components
    sub-sampled to a half-pixel grid.

    If the user provides a 4-tuple instead of a filename, then we presume the
    user already has the psf model components, so we just return it back.

    :param psfmodelfile: a fits file containing the psf model
    :return: [gaussparam, lookuptable, psfmag, psfzpt]
    """
    if isinstance(psfmodelfile, str):
        assert os.path.isfile(
            os.path.abspath(os.path.expanduser(psfmodelfile)))
        # read in the psf non-gaussian components array (i.e. the lookup
        # table)
        lookuptable = pyfits.getdata(psfmodelfile)
        # read in the gaussian parameters from the image header
        hdr = pyfits.getheader(psfmodelfile)
        scale = hdr['GAUSS1']   # 'Gaussian Scale Factor'
        xpsf = hdr['GAUSS2']    # 'Gaussian X Position'
        ypsf = hdr['GAUSS3']    # 'Gaussian Y Position'
        xsigma = hdr['GAUSS4']  # 'Gaussian Sigma: X Direction'
        ysigma = hdr['GAUSS5']  # 'Gaussian Sigma: Y Direction'
        psfmag = hdr['PSFMAG']  # aperture magnitude of the PSF star
        psfzpt = hdr['PSFZPT']  # zeropoint used to set PSF star mag scaling
        gaussparam = [scale, xpsf, ypsf, xsigma, ysigma]
    elif np.iterable(psfmodelfile):
        assert len(psfmodelfile) == 4
        gaussparam, lookuptable, psfmag, psfzpt = psfmodelfile
    else:
        raise exceptions.RuntimeError(
            "psfmodel must either be a filename or a 4-tuple giving:"
            "[gaussian parameters, look-up table, psf mag, zpt]")
    return gaussparam, lookuptable, psfmag, psfzpt
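# A hedged usage sketch (not from the source) showing the pass-through
# branch of rdpsfmodel: a prebuilt 4-tuple is returned unchanged, so callers
# can accept either a filename or ready-made model components. Values are
# synthetic.
def _demo_rdpsfmodel_passthrough():
    import numpy as np
    gaussparam = [1.0, 0.0, 0.0, 1.5, 1.5]  # scale, x, y, xsigma, ysigma
    lookuptable = np.zeros((10, 10))
    model = (gaussparam, lookuptable, 20.0, 25.0)
    print(rdpsfmodel(model)[2:])  # -> (20.0, 25.0), psf mag and zeropoint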
def generate_input_image_list(filter, grism):
    """
    Format : [name of grism image] [object catalogue] [name of direct image]

    :todo: this is not robust and may find wrong images!!! Should be redone!
    """
    gr = {}
    im = {}
    dr_image = {}

    #find all grism images
    flts = g.glob('./save/*_flt.fits')
    for f in flts:
        hdr = PF.getheader(f, 0)
        if grism in hdr['FILTER']:
            gr[f] = hdr['DATE-OBS']
        else:
            im[f] = hdr['DATE-OBS']

    #find the closest direct image
    for gt in gr:
        for i in im:
            if gr[gt] == im[i]:
                #match
                dr_image[gt] = i

    out = open(filter + '.lis', 'w')
    for gt in gr:
        line = gt[7:] + ' ' + gt.replace('.fits', '_1.cat')[7:] + ' '
        line += dr_image[gt][7:] + '\n'
        out.write(line)
    out.close()
def PlotTSmap(self):
    """ Gather the results of the evaluation of each pixel and fill a
    fits file"""
    folder = self.config['out']

    # Read the cmap produced before to get the grid for the TS map
    FitRunner = Observation(folder, self.config)
    try:
        header = pyfits.getheader(FitRunner.cmapfile)
    except:
        logging.error('Count map not found.')
        sys.exit(1)
    data = pyfits.getdata(FitRunner.cmapfile) * 0.
    npix_im = min(header['NAXIS1'], header['NAXIS2'])
    npix = min(self.config['TSMap']['npix'], npix_im)
    Xref = header['CRPIX1']
    Yref = header['CRPIX2']
    binsz = header['CDELT1']

    import string
    # read the results
    for i in xrange(npix):
        for j in xrange(npix):
            try:
                lines = open(self._PixelFile(i, j), "r").readlines()
                Value = float(string.split(lines[0])[2])
            except:
                print "Cannot find, open or read ", self._PixelFile(i, j)
                Value = 0.
            # array indices must be integers
            data[int(Xref + (i - npix / 2.))][int(Yref + (j - npix / 2.))] \
                = Value

    # save in a fits file
    pyfits.writeto(folder + "/" + self.TSfits, data, header)
    print "TS Map saved in " + folder + "/" + self.TSfits
def setkeywords(self):
    """
    Set FITS image header keyword parameters.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    header = pyfits.getheader(self.sci_file, ignore_missing_end=True)

    self.nx = header["NAXIS1"]
    self.ny = header["NAXIS2"]
    self.nframes = header["NAXIS3"]
    self.exptime = header["EXPTIME"]
    self.kintime = header["KINCYCTI"]
    self.sn = header["SERIALN"].split("=")[1].strip()
    self.amptype = header["AMPTYPE"].split()[0]
    self.emgain = header["EMGAIN"]
    self.hreadout = header["HREADOUT"].strip()
    self.preampg = header["PREAMPG"].strip()

    utcstart = header["UTCSTART"]
    self.utcstart = self.parser(utcstart)

    return
def get_star_speclist(star=None, abeid=None, normspec=False):
    import glob
    from pyfits import getheader
    import numpy as np
    from astropy.io import ascii

    startable = ascii.read(starcatalog)
    abeID = np.array(startable['ID'])
    tmassID = np.array(startable['2MASS'])

    if normspec is False:
        gdir = specdir
    if normspec is True:
        gdir = normspecdir

    allspectra = np.array(glob.glob(gdir + '*apV*fits'))
    nspec = len(allspectra)

    stars_all = []
    for i in range(nspec):
        head = getheader(allspectra[i], 0)
        stars_all.append(head['objid'])
    # convert to an array so the element-wise comparisons below work
    stars_all = np.array(stars_all)

    if abeid is None:
        gd = np.where(stars_all == star)
        spectra = allspectra[gd]
    else:
        p = np.where(abeID == abeid)
        tm = tmassID[p]
        gd = np.where(stars_all == tm)
        spectra = allspectra[gd]

    return spectra
def Lee_cubo(spectra, XX, YY):
    """ Read a data cube and return the wavelength axis and the spectrum at
    spaxel (XX, YY). """
    global imagen
    imagen = pyfits.getdata(spectra, header=False)
    header = pyfits.getheader(spectra)
    #print len(imagen)

    # empty arrays (note: fluxes are collected in Lambda_t and wavelengths
    # in Flux_t; the names are swapped back before returning)
    Lambda_t = []
    Flux_t = []
    for i in range(len(imagen)):
        y = imagen[i][XX][YY]
        # x = i*header['CDELT1']+header['CRVAL1']
        x = i * header['CD3_3'] + header['CRVAL3']
        Lambda_t.append(float(imagen[i][XX][YY]))
        #Flux_t.append(float(i*header['CDELT1']+header['CRVAL1']))
        Flux_t.append(float(i * header['CD3_3'] + header['CRVAL3']))
        #print x,y

    Flux = np.array(Lambda_t)
    Lambda = np.array(Flux_t)
    x = Lambda
    y = Flux
    return x, y
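# A hedged sketch (not from the source): the wavelength axis Lee_cubo builds
# plane-by-plane can be computed in one vectorized step from the same header
# keywords (CRVAL3/CD3_3). Header values below are illustrative.
def _demo_cube_wavelength_axis():
    import numpy as np
    crval3, cd3_3, nplanes = 4750.0, 1.25, 3681
    wave = crval3 + cd3_3 * np.arange(nplanes)
    print("%s %s" % (wave[0], wave[-1]))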
def maskFits(regFile, imageFile):
    f1 = open(regFile, 'r')
    reg = f1.read()

    hdulist = pyfits.open(imageFile)
    h = pyfits.getheader(imageFile)
    hdr = h.copy()
    matrix = hdulist[0].data

    r = pyregion.parse(reg)
    Xrange = matrix.shape[0]
    Yrange = matrix.shape[1]
    mask = r.get_mask(hdu=hdulist[0])

    new_matrix = np.zeros((Xrange, Yrange))
    mask_matrix = np.ones((Xrange, Yrange))
    for i in range(Xrange):
        for j in range(Yrange):
            if mask[i, j] != True:
                # outside the region: keep the data, zero the mask
                mask_matrix[i, j] = 0
                new_matrix[i, j] = matrix[i, j]

    # write out the new fits file.
    imageName = imageFile.split('.')
    outName = imageName[0] + "_masked." + imageName[1]
    hdu = fits.PrimaryHDU(new_matrix)
    hdu.writeto(outName, clobber=True)

    hdu_mask = fits.PrimaryHDU(mask_matrix)
    hdu_mask.writeto("mask.fits", clobber=True)

    f1.close()
    hdulist.close()
def image_info():
    """
    Get the exposure time and filter info for each image. Store it in an
    output file called image_info.txt.
    """
    imgDir = ngc1851_data + 'starfinder/'
    imgs = glob.glob(imgDir + '*_G1_1_cr.fits')

    # Keep results in an output file
    f_out = open(ngc1851_data + 'image_info.txt', 'w')
    f_fmt = '{img:15s} {xoff:7.2f} {yoff:7.2f} {pa:5.1f} {filt:12s} '
    f_fmt += '{exp:6.2f} {coad:2d} {tot:7.2f}\n'

    for img in imgs:
        hdr = pyfits.getheader(img)

        xoff = hdr['XOFFSET']
        yoff = hdr['YOFFSET']
        pa = hdr['PA']
        filt = hdr['FILTER1']
        exp = hdr['EXPTIME']
        coadd = hdr['COADDS']

        f_out.write(f_fmt.format(img=os.path.basename(img),
                                 xoff=xoff, yoff=yoff, pa=pa, filt=filt,
                                 exp=exp, coad=coadd, tot=exp * coadd))

    f_out.close()
def mkFakeCoordFile(imfile, coofile=None):
    """ extract x,y coordinates of fake SNe from the imfile header """
    import pyfits
    if not coofile:
        coofile = imfile.replace('.fits', '.coo')
    fout = open(coofile, 'w')
    hdr = pyfits.getheader(imfile)
    Nfake = hdr['NFAKESNE']
    for i in range(Nfake):
        x = hdr['FAKE%03iX' % i]
        y = hdr['FAKE%03iY' % i]
        print >> fout, '%12.3f %12.3f' % (x, y)
    fout.close()
    return (coofile)
def makeCorrection(filename):
    base, ext = os.path.splitext(filename)

    header = pyfits.getheader(filename)
    xsize = header['NAXIS1']
    ysize = header['NAXIS2']

    model = illummodels.findChipModel(filename)
    correction = interpolateModel(model, xrange(xsize), xrange(ysize))

    if correction.dtype != float32:
        hdu = pyfits.PrimaryHDU(correction.astype(float32))
    else:
        hdu = pyfits.PrimaryHDU(correction)

    hdu.writeto('%s.illumcor.fits' % base, clobber=True)
def get_lc(files, key, limit=False):
    times = []
    values = []
    i = 0
    for file in files:
        print("Opening file " + file + " (" + str(i) + "/" +
              str(len(files)) + ")")
        header = pyfits.getheader(file)
        date = datetime.strptime(header.get('DATE_OBS'),
                                 '%a %b %d %H:%M:%S %Y') \
            + timedelta(seconds=header.get('TIME-FRACTION') / 1e9)
        times.append(date)
        values.append(header.get(key))
        i += 1
        if type(limit) == type(1):
            if i >= limit:
                break
    lc = pandas.Series(values, times)
    return lc
def total_expTime(fitsFiles, verbose=True):
    """
    Calculate total exposure time for a group of images
    """
    expTime = 0

    for ii in range(len(fitsFiles)):
        hdr = pyfits.getheader(fitsFiles[ii])
        if verbose:
            print('{0}: Exposure Time = {1} s'.format(fitsFiles[ii],
                                                      hdr['EXPTIME']))
        expTime += hdr['EXPTIME']

    print('**** Total Exposure Time: {0} ****'.format(expTime))

    return
def getFlux(f):
    hdr = pyfits.getheader(f)
    nreads = hdr.get('NFRAMES')
    imtype = hdr.get('IMAGETYP')
    if imtype not in ["QuartzFlat", "InternalFlat", "DomeFlat"]:
        return " - "
    dat = pyfits.getdata(f, 0) / nreads
    dat = numpy.array(dat)
    y = 1024
    x = 1024
    dat = dat[(y - 200):(y + 200), (x - 100):(x + 100)]
    med = numpy.mean(dat)
    if imtype == "QuartzFlat":
        nrm = 167.05
    elif imtype == "InternalFlat":
        nrm = 94.05
    elif imtype == "DomeFlat":
        nrm = 81.525
    else:
        nrm = None
    if nrm is not None:
        return "%3i" % (med / nrm * 100)
    else:
        return " ? "
def perform_metadata_tasks(self, fullname, do_update, update_info):
    """ Read metadata from file, updating file values

    Parameters
    ----------
    fullname : str
        The name of the file to gather data from

    do_update : bool
        Whether to update the metadata of the file from update_info

    update_info : dict
        The data to update the header with

    Returns
    -------
    dict containing the metadata
    """
    if miscutils.fwdebug_check(3, 'FTMGMT_DEBUG'):
        miscutils.fwdebug_print("INFO: beg")

    # open file
    #hdulist = pyfits.open(fullname, 'update')
    primary_hdr = pyfits.getheader(fullname, 0)
    prihdu = pyfits.PrimaryHDU(header=primary_hdr)
    hdulist = pyfits.HDUList([prihdu])

    # read metadata and call any special calc functions
    metadata, _ = self._gather_metadata_file(fullname, hdulist=hdulist)
    if miscutils.fwdebug_check(6, 'FTMGMT_DEBUG'):
        miscutils.fwdebug_print("INFO: file=%s" % (fullname))

    # call function to update headers
    if do_update:
        miscutils.fwdebug_print("WARN: cannot update a raw file's metadata")

    # close file
    hdulist.close()

    if miscutils.fwdebug_check(3, 'FTMGMT_DEBUG'):
        miscutils.fwdebug_print("INFO: end")
    return metadata
def nirc2log(directory):
    """Make an electronic NIRC2 log for all files in the specified
    directory. Output is a file called nirc2.log."""
    if not os.access(directory, os.F_OK):
        print 'Cannot access directory ' + directory

    files = glob.glob(directory + '/*.fits')
    files.sort()

    f = open(directory + '/nirc2.log', 'w')

    for file in files:
        hdr = pyfits.getheader(file, ignore_missing_end=True)

        # First column is frame number
        frame = (hdr['filename'].strip())[0:5]
        f.write('%5s ' % frame)

        # Second column is object name
        f.write('%-16s ' % hdr['object'].replace(' ', ''))

        # Next is integration time, coadds, sampmode, multisam
        f.write('%8.3f %3d ' % (hdr['itime'], hdr['coadds']))
        f.write('%1d x %2d ' % (hdr['sampmode'], hdr['multisam']))

        # Filter
        filter1 = hdr['fwiname']
        filter2 = hdr['fwoname']
        filter = filter1
        if filter1.startswith('PK'):
            filter = filter2
        f.write('%-10s ' % filter)

        # Camera name
        f.write('%-6s ' % hdr['camname'])

        # Shutter state
        f.write('%-6s ' % hdr['shrname'])

        # End of this line
        f.write('\n')

    f.close()
def loadFitsHeader(self, filename, extension=0, removeEmpty=0):
    """
    Loads the header information from a Fits file

    Parameters
    ---------------
    filename : string
        Filename of the Fits file from which the header should be loaded.
        The full path to the file can be given.

    extension : integer, optional
        Extension of the Fits file from which the header shall be read

    removeEmpty : integer (0 or 1), optional
        Removes empty entries from the header if set to 1.
    """
    self._header = pyfits.getheader(filename, ext=extension)
    self._cardlist = self._header.ascardlist()
    self._origin = filename
    if removeEmpty == 1:
        self.removeHdrEntries()
def recordDir(dir):
    '''record information for all fits files of images in the given
    directory and subdirectories'''
    logger.info("Recording observations in " + dir)
    obsDB.login()
    for root, dirs, files in os.walk(dir):
        logger.info("root = " + root)
        for f in files:
            fullPath = os.path.join(root, f)
            if isFits(fullPath):
                #logger.info("Attempting to record observation for "+f)
                try:
                    header = pyfits.getheader(fullPath)
                    if frameTypes.getFrameType(header) != 'object':
                        continue
                    recordObservation(header, fullPath)
                except Exception as e:
                    logger.error(traceback.format_exc())
                    continue  # keep going and record everything else
def chi_tiptilttest(seqnums):
    print "seqnums: " + str(seqnums)
    ttmode = np.zeros(len(seqnums))
    emavgcts = np.zeros(len(seqnums))
    date = '130921'
    fdir = '/raw/mir7/' + date + '/'
    for i in range(len(seqnums)):
        hd = pf.getheader(fdir + 'chi' + date + '.' + str(seqnums[i]) +
                          '.fits')
        ttcom = hd['comment', 2]
        if ttcom.split(' ')[2] == 'OFF':
            ttmode[i] = 0
        elif ttcom.split(' ')[2] == 'ON':
            ttmode[i] = 1
        else:
            ttmode[i] = -1
        emavgcts[i] = hd['EMAVG']
        print str(seqnums[i]) + ' ' + str(emavgcts[i])
    return emavgcts, ttmode
def process_output(imgfile, catfile, clean=False):
    from scipy import ndimage
    import numpy as np

    # ----------
    # Catalog
    catalog_file = catfile
    os.system("sed -e 's/^[ ]*//' %s | tr -s ' ' > %s"
              % ('data/' + catfile, catalog_file))
    columns = ['X', 'Y', 'ecc', 'lambda1', 'lambda2', 'A', 'B', 'theta',
               'size']
    Dcatin = ascii_data.dict_from_csv(catalog_file, columns,
                                      header_lines=10, delimiter=' ')

    # Lenzen outputs the 'Y' positions inverted. Lets fix that:
    hdr = pyfits.getheader('data/' + imgfile)
    y_size = hdr['naxis1']
    x_size = hdr['naxis2']
    Y_inv = Dcatin['Y']

    finalimg = re.sub("_out_img.fits", "_out_result.fits", imgfile)

    if len(Y_inv):
        Dcatin['Y'] = [str(y_size - int(i)) for i in Y_inv]
        Catin = fits_data.dict_to_tbHDU(Dcatin)

        # Select entries to output arc data
        Catout = fits_data.sample_entries(Catin, ecc=0.7)
        Catout.name = "Lenzen_arcsfound"

        finalcat = catalog_file[:-4] + '.fits'
        Catout.writeto(finalcat, clobber=True)

        # Image
        img = ndimage.imread('data/result.ppm')
        comb = np.maximum(img[..., 0], img[..., 1])
        diff = img[..., 2] - comb
        pyfits.writeto(finalimg, diff[::-1], clobber=True)
    else:
        blank_array = np.zeros((y_size, x_size))
        pyfits.writeto(finalimg, blank_array, clobber=True)

    if clean:
        os.system('rm -rf temp/ %s %s' % (imgfile, catfile))

    return
def read(cls, fname, sid, **kwargs):
    ftype = 'sap_flux' if kwargs.get('type', 'sap').lower() == 'sap' \
        else 'pdcsap_flux'
    try:
        epic = int(re.findall('ktwo([0-9]+)-c', basename(fname))[0])
    except:
        # for smear data
        epic = int(re.findall('C([0-9]+)_smear', basename(fname))[0][2:])
    data = pf.getdata(fname, 1)
    head = pf.getheader(fname, 0)
    return K2Data(epic,
                  time=data['time'],
                  cadence=data['cadenceno'],
                  quality=data['sap_quality'],
                  fluxes=data[ftype],
                  errors=data[ftype + '_err'],
                  x=data['pos_corr1'],
                  y=data['pos_corr2'],
                  sap_header=head)
def get_header_info(file):
    # Gets the appropriate header info for image file
    header = pyfits.getheader(file)
    GAIN = header['GAIN']
    if 'CDELT1' in header:
        CDELT1 = header['CDELT1']
        CDELT2 = header['CDELT2']
    else:
        CDELT1 = header['CD1_1']
        CDELT2 = header['CD2_2']
    EXPTIME = header['EXPTIME']
    PIXSCALE = (abs(float(CDELT1)) + abs(float(CDELT2))) / 2.0 * 3600.0
    return GAIN, PIXSCALE, EXPTIME
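# A hedged numeric check (not from the source) of the pixel-scale formula in
# get_header_info: average the absolute CDELT/CD values and convert degrees
# to arcseconds. The header values below are illustrative.
def _demo_pixscale():
    cdelt1, cdelt2 = -7.5e-5, 7.5e-5   # degrees per pixel
    pixscale = (abs(float(cdelt1)) + abs(float(cdelt2))) / 2.0 * 3600.0
    print(pixscale)  # -> 0.27 arcsec/pixel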
def raw_view(request, filename='', channel=0, size=0, processed=False,
             type='jpeg'):
    base = fix_remote_path(settings.BASE_RAW, channel_id=channel)
    fullname = posixpath.join(base, filename)

    image = pyfits.getdata(fullname, -1)
    header = pyfits.getheader(fullname, -1)
    time = postprocess.get_time_from_filename(posixpath.split(filename)[-1])

    if processed:
        darkname = find_image(time, 'dark', channel)
        darkname = fix_remote_path(darkname, channel)
        if posixpath.exists(darkname):
            dark = pyfits.getdata(darkname, -1)
            image -= dark

    if type == 'jpeg':
        img = fitsimage.FitsImageFromData(
            image, image.shape[1], image.shape[0],
            contrast="percentile",
            contrast_opts={'max_percent': 99.9},
            scale="linear")
        if size:
            img.thumbnail((size, size))  #, resample=fitsimage.Image.ANTIALIAS)

        # now what?
        response = HttpResponse(content_type="image/jpeg")
        img.save(response, "JPEG", quality=95)
    elif type == 'fits':
        response = HttpResponse(
            FileWrapper(file(posixpath.join(base, filename))),
            content_type='application/octet-stream')
        response['Content-Disposition'] = ('attachment; filename=' +
                                           os.path.split(filename)[-1])
        response['Content-Length'] = os.path.getsize(
            posixpath.join(base, filename))

    return response
def extractBiasInfo(file):
    """
    extract CCD information from a fits file
    Input:  file        --- fits file name
    Output: fep_id      --- FEP ID
            datamode    --- DATAMODE
            start_row   --- STARTROW
            row_cnt     --- ROWCNT
            orc_mode    --- ORC_MODE
            deagain     --- DEAGAIN
            biasalg     --- BIASALG
            biasarg#    --- BIASARG#   #: 0 - 3
            overclock_# --- INITOCL#   #: A, B, C, D
    """
    #
    #--- read fits file header
    #
    hdr = pyfits.getheader(file)

    fep_id = hdr['FEP_ID']
    datamode = hdr['DATAMODE']
    start_row = hdr['STARTROW']
    row_cnt = hdr['ROWCNT']
    orc_mode = hdr['ORC_MODE']
    deagain = hdr['DEAGAIN']
    biasalg = hdr['BIASALG']
    biasarg0 = hdr['BIASARG0']
    biasarg1 = hdr['BIASARG1']
    biasarg2 = hdr['BIASARG2']
    biasarg3 = hdr['BIASARG3']
    overclock_a = hdr['INITOCLA']
    overclock_b = hdr['INITOCLB']
    overclock_c = hdr['INITOCLC']
    overclock_d = hdr['INITOCLD']

    return [fep_id, datamode, start_row, row_cnt, orc_mode, deagain,
            biasalg, biasarg0, biasarg1, biasarg2, biasarg3,
            overclock_a, overclock_b, overclock_c, overclock_d]
def draw(*args):
    from full_func import full
    from deredshift import deredshift
    from time_steps import time_steps
    import matplotlib.pyplot as plt
    import matplotlib.cm as cmap
    import pyfits as pf

    # get the path of the fits file
    if type(args[0]) != str:
        plate = str(args[0]).zfill(4)
        mjd = args[1]
        fiber = str(args[2]).zfill(4)
        path_fits = '/Users/sarelg/Documents/NLR/new_fits/sdss/files/spec-%s-%d-%s.fits' % (
            plate, mjd, fiber)
    else:
        path_fits = '/Users/sarelg/gastro/sdss/files/%s' % args[0]

    # get the number of exposures
    num_exp = int((pf.getheader(path_fits, 0)['nexp']) / 2)

    # create the data, a list of all the sub-exposures
    data = [0] * num_exp
    for i in range(num_exp):
        x, y = full(i + 4, i + 4 + num_exp, path_fits)
        x = deredshift(path_fits, x)
        tup = x, y
        data[i] = tup

    # plot all of the exposures using a different color for each one
    colorm = cmap.get_cmap('autumn')
    cm_num = int(256 / num_exp)
    label = time_steps(path_fits, num_exp)
    for i in range(len(data)):
        plt.step(data[i][0], data[i][1], color=colorm(i * cm_num),
                 label=label[i], linewidth=0.5)
    plt.legend(prop={'size': 11})
    plt.show()
def write_to_cal_log(master_frame_path, frame_list):
    ###########################################################################
    # When called with the pathname of a master calibration frame and a
    # Python list of frames from which the master frame was made,
    # write_to_cal_log records the log information about the creation of the
    # master frame in an already existing calibration log. The log records
    # the name of the master calibration frame, its path, the time of
    # creation, and the names of the frames from which it was generated.
    master_frame = os.path.basename(master_frame_path)
    master_frame_header = pyfits.getheader(master_frame_path)
    # Date-time string of when the master frame file was created
    date_of_creation = master_frame_header['date']

    # The "cal_log" file object already exists because it is created in code
    # that is executed before this function is called
    cal_log.write('Master frame ' + master_frame + ' with path\n')
    cal_log.write(master_frame_path + '\n')
    cal_log.write('was created on ' + date_of_creation + ', UTC,\n')
    cal_log.write('from ' + str(len(frame_list)) + ' frames:\n')
    for i in range(len(frame_list)):
        cal_log.write(frame_list[i] + '\n')
    cal_log.write('\n\n')
def __init__(self, cube_file):
    """
    Extract the cube_data from the cube_file, set max and min values (in
    sky coordinates)
    """
    import pyfits, pywcs

    # Put the cube in RA - DEC - RM order and save it
    Cube.__init__(self, np.transpose(pyfits.getdata(cube_file), (2, 1, 0)))
    self.wcs = pywcs.WCS(pyfits.getheader(cube_file))

    sky0 = self.pix2sky([0, 0, 0])
    skyN = self.pix2sky([self.x_max, self.y_max, self.z_max])

    self.ra_min = min(sky0[0], skyN[0])
    self.ra_max = max(sky0[0], skyN[0])
    self.ra_step = (self.ra_max - self.ra_min) / self.x_max

    self.dec_min = min(sky0[1], skyN[1])
    self.dec_max = max(sky0[1], skyN[1])
    self.dec_step = (self.dec_max - self.dec_min) / self.y_max

    self.fd_min = min(sky0[2], skyN[2])
    self.fd_max = max(sky0[2], skyN[2])
    self.fd_step = (self.fd_max - self.fd_min) / self.z_max
def filter_segmap(segimage, id_keep, output, blur_kernel="", threshold=0.1):
    """
    Specify a list of ID numbers to keep, and zero-out the rest of the
    segmentation map.
    """
    seg = pyfits.getdata(segimage)
    mask = np.zeros(seg.shape, 'int')
    # Loop through all IDs... is there a better way??
    for x in id_keep:
        mask = np.where(seg == x, 1, mask)
    seg_masked = np.where(mask == 1, 1, 0)
    if os.path.exists(output):
        os.system('rm %s' % output)
    # Now convolve with a blurring kernel if desired
    if len(blur_kernel):
        mask = blur_mask(mask, blur_kernel, threshold=threshold)
        # k = pyfits.getdata(blur_kernel)
        # mask = hconvolve.hconvolve(mask, )
    pyfits.append(output, data=seg_masked,
                  header=pyfits.getheader(segimage))
    return mask
def findExposureModel(filename):
    # need to know cluster, filter
    try:
        header = pyfits.getheader(filename)
        object = header['OBJNAME']
        filter = header['FILTER']
        pprun = header['PPRUN']
        rotation = header['ROTATION']
    except KeyError:
        raise UnreadableException('Cannot Read Header, Skipping: %s'
                                  % filename)

    fit = illumcorutils.get_fits(object, filter, pprun)
    coeffs = readChebyCoeffs(fit, rotation)

    return Chebeyshev3Model(coeffs)
def esosecondheader(img, _telescope, header_name, headerupdate):
    from pyfits import getheader
    import string, re, sys

    if img[-5:] != '.fits':
        img = img + '.fits'
    a = getheader(img)

    HIERARCH = {}
    HIERARCH['hed_object'] = 'HIERARCH ESO DPR TECH'
    HIERARCH['hed_imagetyp'] = 'HIERARCH ESO DPR CATG'
    HIERARCH['hed_filter1'] = 'HIERARCH ESO INS FILT1 NAME'
    HIERARCH['hed_grism'] = 'HIERARCH ESO INS GRIS1 NAME'
    HIERARCH['hed_slitw'] = 'HIERARCH ESO INS SLIT1 NAME'
    HIERARCH['hed_airmass'] = 'HIERARCH ESO TEL AIRM START'
    HIERARCH['hed_UT'] = 'UTC'

    if not HIERARCH[header_name]:
        print 'WARNING: no header in hierarch28 table '
        sys.exit()
        return ''
    else:
        return a[HIERARCH[header_name]]
def _file_2_arrays(fits_image, use_header, params, args, preset):
    """SExtract the Image and read the outputs to arrays."""
    out = sextractor.run_segobj(fits_image, params, args, preset=preset)
    if out == False:
        print >> sys.stderr, "Error: SExtractor returned an error code during segmentation. Finishing run."
        return False

    objimg = pyfits.getdata(out['OBJECTS'])
    segimg = pyfits.getdata(out['SEGMENTATION'])
    tbhdu = pyfits.open(out['CATALOG'])[1]

    if use_header:
        header = pyfits.getheader(fits_image)
    else:
        header = None

    return (objimg, segimg, header, tbhdu)
def imWeightedAve(image1, image2, weight1, weight2, outfile,
                  clobber=False, verbose=False):
    """ construct a weighted average of image1 and image2:
       (weight1*image1 + weight2*image2) / (weight1+weight2)
    Mean image is written to outfile.
    """
    import os
    import pyfits
    from numpy import ndarray, nan_to_num
    import exceptions

    if os.path.isfile(outfile):
        if clobber:
            os.unlink(outfile)
        else:
            print("%s exists. Not clobbering." % outfile)
            return (outfile)

    # read in the sci and wht images
    im1hdr = pyfits.getheader(image1)
    im1 = pyfits.getdata(image1)
    im2 = pyfits.getdata(image2)
    wht1 = pyfits.getdata(weight1)
    wht2 = pyfits.getdata(weight2)

    meanim = nan_to_num((wht1 * im1 + wht2 * im2) / (wht1 + wht2))

    # TODO : make a useful header
    outdir = os.path.dirname(outfile)
    if outdir and not os.path.isdir(outdir):
        os.makedirs(outdir)
    pyfits.writeto(outfile, meanim, header=im1hdr)
    return (outfile)
def binCube(input_cubename, bin_size, output_cubename=None, verbose=False,
            keep_header=False):
    """
    Apply spatial binning on a data-cube.

    @param input_cubename: path containing the name of the input FITS
        data-cube.
    @param bin_size: bin size in pixel units.
    @keyword keep_header: Keep header untouched?
    @keyword output_cubename: path containing the name of the output binned
        FITS data-cube.
    @keyword verbose: turn on verbose mode.
    """
    import numpy
    import os
    import pyfits

    v = verbose
    if v:
        print(" Loading the following file:\n %s" % input_cubename)
    cube = pyfits.getdata(input_cubename)

    if v:
        print(" Allocating memory.")
    temp = numpy.zeros((cube.shape[0], cube.shape[1] // bin_size,
                        cube.shape[2] // bin_size))

    if v:
        print(" Binning cube.")
    # Note: this samples only the diagonal offsets (i, i) within each bin
    # and averages over bin_size samples, rather than over the full
    # bin_size x bin_size block.
    for i in range(bin_size):
        temp += cube[:, i::bin_size, i::bin_size]
    temp = temp / bin_size

    # read the header up front so it is defined even when keep_header is set
    header = pyfits.getheader(input_cubename)
    if not keep_header:
        if v:
            print(" Fixing header.")
        try:
            header['CDELT1'] = header['CDELT1'] * bin_size
            header['CDELT2'] = header['CDELT2'] * bin_size
        except KeyError:
            header['CDELT1'] = bin_size
            header['CDELT2'] = bin_size

    if v:
        print(" Writing output file")
    if output_cubename is None:
        output_cubename = os.path.splitext(input_cubename)[0] + \
            "_binned.fits"
    output_cubename = safesave(output_cubename)
    pyfits.writeto(output_cubename, temp, header)
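# A hedged alternative (not from the source) for comparison with binCube:
# a full block average over every bin_size x bin_size neighbourhood, rather
# than the diagonal (i, i) sampling above. Assumes the spatial dimensions
# are divisible by the bin size; synthetic data only.
def _demo_full_block_bin():
    import numpy as np
    cube = np.arange(2 * 4 * 4, dtype='f8').reshape((2, 4, 4))
    b = 2
    nz, ny, nx = cube.shape
    binned = cube.reshape((nz, ny // b, b, nx // b, b)).mean(axis=(2, 4))
    print(binned.shape)  # -> (2, 2, 2)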
def make_irac_lightmap(id_keep, hr_segmap, hr_mask, irac_psf, irac_drz,
                       irac_output, blur_threshold=0.1, sigma=1.0):
    """
    Make an IRAC map for cluster members (or an arbitrary set of ID numbers).

    id_keep: ID numbers in the high-res segmentation map to keep
    hr_segmap: high-res segmentation map
    hr_mask: high-res mask image (an intermediate product)
    irac_psf: PSF in IRAC, used to blur the high-res mask image
    irac_drz: IRAC science image, onto which we drizzle the high-res mask
        image
    irac_output: file name of the output IRAC light map
    blur_threshold: the threshold (between 0 and 1) in the step of blurring
        the high-res mask image.
    """
    # First step, zero-out the non cluster members
    mask = filter_segmap(hr_segmap, id_keep, hr_mask, blur_kernel=irac_psf,
                         threshold=blur_threshold)
    # Now we have a mask image in high-res, drizzle the pixels onto the
    # low-res pixel grid
    if os.path.exists("irac_mask.fits"):
        os.system('rm irac_mask.fits')
    drizzle_mask(hr_mask, irac_drz, "irac_mask.fits")
    irac_input = pyfits.getdata(irac_drz)
    irac_mask = pyfits.getdata("irac_mask.fits")
    irac_map = np.where(irac_mask > 0, irac_input, 0.)
    # Also smooth the output light map with a Gaussian kernel
    if sigma > 0:
        print "Smoothing the IRAC mask..."
        irac_map = filters.gaussian_filter(irac_map, sigma)
    irac_hdr = pyfits.getheader(irac_drz)
    os.system('rm %s' % irac_output)
    pyfits.append(irac_output, data=irac_map, header=irac_hdr)
    print "Done."
def __init__(self, im):
    """ Set attributes of canvas image.

    ----------------
    Input Parameters
    ----------------
    image : string
        Image to be used. Options are "vband", "residual", "dss" and
        "xrays". Other images can be registered in the function set_input.
    """
    self.imtype = im
    self.set_input()
    self.D = 50.7  # Mpc
    self.data = pf.getdata(self.image)
    self.header = pf.getheader(self.image)
    self.wcs = pywcs.WCS(self.header)
    self.set_center()
    self.calc_extent()
    self.slits = Slitlets()
    self.slit_arrays()
    self.rescale()
def main(argv=sys.argv):
    if len(argv) != 4:
        print "wcsconvertregions.py coaddimage regionfile extension"
        sys.exit(1)

    coaddimage = argv[1]
    regionfile = argv[2]
    extension = argv[3]

    coadd_wcs = pywcs.WCS(pyfits.getheader(coaddimage))
    regions = wrf.parseRegionFile(open(regionfile).readlines(), coadd_wcs)

    images = findInputImages(coaddimage, extension)
    regionfiledir = makeRegionFileDir(coaddimage)

    for image in images:
        scampheader, outputregion = findLocalFiles(image, extension,
                                                   regionfiledir)
        processConversion(regions, image, scampheader, outputregion)
def findCorr(infile):
    m = getheader(infile)  # read the header
    RA = m['RA'] / radFrac
    DEC = m['DEC'] / radFrac
    # the MJD keyword name varies between instruments
    try:
        MJD = m['MJD-OBS']
    except KeyError:
        pass
    try:
        MJD = m['MJD_OBS']
    except KeyError:
        pass
    try:
        MJD = m['MJDOBS']
    except KeyError:
        pass
    JD = MJD + 2400000.
    hVelo, bVelo = findVelo(JD, 2000)
    vhelio = (bVelo[0] * cos(DEC) * cos(RA) +
              bVelo[1] * cos(DEC) * sin(RA) +
              bVelo[2] * sin(DEC))
    return vhelio
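# A hedged sketch (not from the source) of the velocity projection used in
# findCorr: the barycentric velocity vector is projected onto the unit
# vector toward (RA, DEC). All numbers below are illustrative.
def _demo_velocity_projection():
    from math import cos, sin, radians
    bVelo = (10.0, 20.0, 5.0)                # km/s, synthetic
    RA, DEC = radians(150.0), radians(-30.0)
    vhelio = (bVelo[0] * cos(DEC) * cos(RA) +
              bVelo[1] * cos(DEC) * sin(RA) +
              bVelo[2] * sin(DEC))
    print(vhelio)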
def weight2rms(weight_file, data_file, rms_file):
    """
    Take a sextractor exposure weight file and transform to rms.
    """
    print """Making rms image: %s
    from weight image %s""" % (rms_file, weight_file)
    dat = pyfits.getdata(data_file)
    hdr = pyfits.getheader(data_file)
    wt = pyfits.getdata(weight_file)
    try:
        sky_levl = hdr['SKYLEV']
    except KeyError:
        sky_levl = 0.
    rms = make_rms(dat, wt, sky_levl=sky_levl)
    if os.path.exists(rms_file):
        os.remove(rms_file)
    pyfits.writeto(rms_file, rms, hdr)
def fitshead(imgname):
    """
    Read CHIMERA FITS image header.

    Parameters
    ----------
    imgname : string
        FITS image name

    Returns
    -------
    img_header : pyfits header object
        Image header keywords
    """
    try:
        img_header = pyfits.getheader(imgname, ignore_missing_end=True)
        return img_header
    except IOError:
        print "FITSHEAD: Unable to open FITS image %s. Stopping." % imgname
        return