def _deripple(infile, outfile, start, end, period):
    """
      Script to run deripple under pyraf.

      # Start pyraf
      pyraf
    
      # Set deripplepath to where the deripple.py module is
      # located.
      set deripplepath=/home/nzarate/deripple/

      # Define the pyraf task
      task deripple = deripplepath$deripple.cl
      
      # Load the task into the pyraf environment
      deripple

      # Run the task
      deripple(infile='data/xobj_comb.fits',outfile='out.fits',start=550 ,end=610 ,period=2)
    """
      
    
    infits = pf.open(infile)
    spectrum = infits['SCI'].data
    print 'SPEin',spectrum.shape

    outspectrum = deripple(spectrum, start, end, period)

    # Create a new file with the input PHU and the SCI header
    phu = infits[0]
    pf.writeto(outfile, None, header=infits[0].header, clobber=True)
    pf.append(outfile, outspectrum, header=infits['SCI'].header)

    infits.close()
    print 'SPEout:::',outspectrum.shape
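
For reference, the task can also be driven directly from Python rather than through PyRAF; a minimal sketch reusing the file names from the docstring above:

# Direct call, mirroring the PyRAF invocation in the docstring.
_deripple('data/xobj_comb.fits', 'out.fits', start=550, end=610, period=2)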
Example #2
def segmap_to_30mas(segimage):
    """
    Resample the 60mas seg map onto the 30mas pixel grid.
    (In the commented-out wdrizzle call below, whtimage is the weight of the
    segmentation map and should be 1.0 everywhere.)
    """
    output = os.path.splitext(segimage)[0] + '_30mas.fits'
    if os.path.exists(output):
        os.remove(output)
    hdr = pyfits.getheader(segimage)
    seg = pyfits.getdata(segimage)
    nx = hdr['NAXIS1']
    ny = hdr['NAXIS2']
    # iraf.wdrizzle(data=segimage, outdata=output, outweig='weight_seg_60mas.fits',
    #               in_mask=whtimage, outnx=2*nx, outny=2*ny, kernel='point',
    #               scale=0.5, xsh=0.0, ysh=0.0, shft_un='output',
    #               shft_fr='output')
    # iraf.imlintran(input=segimage, output=output, xrotation=0., yrotation=0.,
    #                xmag=0.5, ymag=0.5, ncols=2*nx, nlines=2*ny,
    #                interpolant='drizzle[1.0]')
    seg2 = zoom(seg, 2.0, order=0).astype('int16')
    print seg2.shape
    # Now update the WCS header keywords
    # wcs = pywcs.WCS(hdr)
    hdr['crpix1'] = hdr['crpix1'] * 2
    hdr['crpix2'] = hdr['crpix2'] * 2
    hdr['cd1_1'] = hdr['cd1_1'] / 2.
    hdr['cd2_2'] = hdr['cd2_2'] / 2.
    pyfits.append(output, seg2, hdr)
    return output
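
A minimal usage sketch (the input file name is hypothetical); order=0 requests nearest-neighbor resampling, which preserves integer segment IDs instead of blending them:

# Writes 'f160w_seg_30mas.fits' next to the (hypothetical) input map.
out = segmap_to_30mas('f160w_seg.fits')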
Example #3
    def save_fits(self, fname=None):
        """
        Save the spectrum into a 2D FITS file.
        """
        if fname is None:
            fname = setup.folder + self.name + '.fits'
        hdu = pyfits.PrimaryHDU(self.f)
        hdu.writeto(fname)
        hdulist = pyfits.open(fname, mode='update')
        prihdr = hdulist[0].header
        prihdr['COMMENT'] = 'File written by galah_tools.py'
        prihdr.set('CRVAL1', self.l[0])
        prihdr.set('CDELT1', self.l[1] - self.l[0])
        prihdr.set('CRPIX1', 1)
        prihdr.set('CUNIT1', 'Angstroms')
        hdulist.flush()
        hdulist.close()
        pyfits.append(fname, self.fe)
        hdulist = pyfits.open(fname, mode='update')
        prihdr = hdulist[1].header
        prihdr['COMMENT'] = 'File written by galah_tools.py'
        prihdr.set('CRVAL1', self.l[0])
        prihdr.set('CDELT1', self.l[1] - self.l[0])
        prihdr.set('CRPIX1', 1)
        prihdr.set('CUNIT1', 'Angstroms')
        hdulist.flush()
        hdulist.close()
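
The file written above holds the flux in the primary HDU and the error spectrum in extension 1, both described by the same linear wavelength WCS. A read-back sketch, assuming numpy is imported as np and using a hypothetical file name:

hdulist = pyfits.open('spectrum.fits')
flux = hdulist[0].data
err = hdulist[1].data
hdr = hdulist[0].header
# Rebuild the wavelength axis from the linear WCS keywords set above.
wave = hdr['CRVAL1'] + hdr['CDELT1'] * np.arange(len(flux))
hdulist.close()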
Example #5
 def addsimulated(self, band, save=0):
    """
    Add the noiseless images of artificial galaxies to the real images.
    """
    # simulation = root+'_sim.fits'
    assert os.path.exists(self.noiselessimages[band]), \
       "Noiseless image with artificial galaxies not calculated."
    broot = self.root + '_%s' % band
    outimage = broot + '.fits'
    if os.path.exists(outimage):
       os.remove(outimage)
    noiseless_img = pyfits.getdata(self.noiselessimages[band])
    realimage_img = pyfits.getdata(self.realimages[band])
    hdr = pyfits.getheader(self.realimages[band])
    simulated_img = realimage_img + noiseless_img
    if self.psfmatch:
       if band == self.detect_band:
          pass
       else:
          assert band in self.psfmatch_kernels.keys(), "PSF-match kernel for %s does not exist." % band
          kernel = pyfits.getdata(self.psfmatch_kernels[band])
          print "Convolving with PSF-match kernel in %s..." % band
          # if _hconvolve:
          #    simulated_img = hconvolve(simulated_img, kernel)
          # else:
          # Have to use scipy for this one... otherwise there is some weird
          # artifacts from convolution
          simulated_img = fftconvolve(simulated_img, kernel, mode='same')
    pyfits.append(outimage, simulated_img, hdr)
    self.fakeimages[band] = outimage
    # iraf.imcalc(realimage+","+simulation,outimage,"im1+im2")
    if not save:
       os.remove(self.noiselessimages[band])
Example #6
def imgray2fits(infile, fitsfile='', overwrite=False, headerfile=None, flip=False):
    if fitsfile == '':
        fitsfile = decapfile(infile) + '.fits'

    if exists(fitsfile):
        if overwrite:
            delfile(fitsfile)
        else:
            print fitsfile, 'EXISTS'
            sys.exit(1)
    
    data = loadgray(infile)  # coeim.py
    
    #hdu = pyfits.PrimaryHDU()
    header = headerfile and pyfits.getheader(headerfile)
    hdu = pyfits.PrimaryHDU(None, header)
    hdulist = pyfits.HDUList([hdu])
    hdulist.writeto(fitsfile)

    try:  # If there's a 'SCI' extension, then that's where the WCS is
        header = pyfits.getheader(headerfile, 'SCI')
    except:
        pass
    
    if header is not None:
        if 'EXTNAME' in header.keys():
            del(header['EXTNAME'])
    
    if flip:
        data = flipud(data)
    
    pyfits.append(fitsfile, data, header)
    
    print fitsfile, 'PRODUCED'
Example #7
def cutout_30mas_v1(h_seg, v_drz):
    """
   Because the v1.0, 30mas version of the F606W mosaic includes the parallel 
   fields and therefore covers a larger area than the CANDELS footprint 
   (defined by the v0.5 mosaic), I am making a cutout from the 30mas mosaic
   to cover the same area as the v0.5 60mas mosaics.
   """
    hdr1 = pyfits.getheader(h_seg)
    hdr2 = pyfits.getheader(v_drz)
    nx1 = hdr1['naxis1']
    ny1 = hdr1['naxis2']
    # Now calculate the corners of the cutout in the 30mas frame; 1=60mas frame,
    # 2 = 30mas frame
    wcs1 = pywcs.WCS(hdr1)
    wcs2 = pywcs.WCS(hdr2)
    sky00 = wcs1.wcs_pix2sky([[1, 1]], 1)
    corner00 = np.floor(wcs2.wcs_sky2pix(sky00, 1)).astype('int')[0]
    sky11 = wcs1.wcs_pix2sky([[nx1, ny1]], 1)
    corner11 = np.ceil(wcs2.wcs_sky2pix(sky11, 1)).astype('int')[0]
    xlo, ylo = corner00
    xhi, yhi = corner11
    print "xlo, xhi, ylo, yhi", xlo, xhi, ylo, yhi
    output = os.path.splitext(v_drz)[0] + '_center.fits'
    v_drz_array = pyfits.getdata(v_drz)
    v_drz_hdr = pyfits.getheader(v_drz)
    v_drz_hdr['crpix1'] = v_drz_hdr['crpix1'] - xlo
    v_drz_hdr['crpix2'] = v_drz_hdr['crpix2'] - ylo
    v_drz_array_new = v_drz_array[ylo:yhi + 1, xlo:xhi + 1]
    pyfits.append(output, v_drz_array_new, v_drz_hdr)
Example #8
def main(argv=None):
    if argv is None:
        argv = sys.argv
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "hi:o:v",
                                       ["help", "inputfile=", "output="])
        except getopt.error, msg:
            raise Usage(msg)

        # option processing
        for option, value in opts:
            if option == "-v":
                verbose = True
            if option in ("-h", "--help"):
                raise Usage(help_message)
            if option in ("-o", "--output"):
                outputFile = value
            if option in ("-i", "--inputfile"):
                inputfile = value

        hdulist = pf.open(inputfile)  # Read in input file
        # startingPixel = int(hdulist[0].header['CRPIX1'])
        # print "Starting pixel: ", startingPixel
        startingWavelength = hdulist[0].header['CRVAL1']
        print "Starting wavelength: ", startingWavelength
        stepSize = hdulist[0].header['CDELT1']
        print "Step size: ", stepSize

        fluxArray = hdulist[0].data
        wavelengthArray = np.arange(
            startingWavelength,
            len(fluxArray) * stepSize + startingWavelength, stepSize)

        if len(wavelengthArray) == len(fluxArray):
            print "Wavelength and flux arrays are the same length"
        else:
            print "Problems: wavelength and flux arrays are unequal lengths!"
            if len(wavelengthArray) > len(fluxArray):
                repeat = len(wavelengthArray) - len(fluxArray)
                wavelist = list(wavelengthArray)
                for x in range(repeat):
                    wavelist.pop(-1)
                wavelengthArray = np.array(wavelist)
            print len(wavelengthArray)
            print len(fluxArray)

        Overlap = 50.0  # Angstroms
        orders = 20  # number of orders to break it into.
        slicelength = len(wavelengthArray) / orders
        wav = []
        flx = []
        err = []
        outputFile = str("jw_" + inputfile)
        for i in range(orders):
            temp1 = np.array([wavelengthArray[slicelength * i: slicelength*(i+1) + 5000], \
              fluxArray[slicelength * i: slicelength*(i+1) + 5000],\
              np.sqrt(np.abs(fluxArray[slicelength * i: slicelength*(i+1) + 5000]))])
            pf.append(outputFile, temp1)
    except Usage, err:
        # Report usage errors on stderr and exit with a nonzero status.
        print >> sys.stderr, str(err.msg)
        return 2
Example #9
def update_srcs(infn, srcs=None, header=None, outfn=None):
    """Updates a srcs FITS file"""
    inf = pyfits.open(infn)
    if outfn is None:
        outfn = "updated-" + os.path.basename(infn)
    pyfits.writeto(outfn, inf[0].data, inf[0].header)
    pyfits.append(outfn, srcs, inf[1].header)
    inf.close()
Example #10
 def make_flg(self, flgimage, flgvalue=1):
    """
    Make a flag image out of a weight image.
    """
    flg = np.where(self.data > 0, 0, 1).astype('int16')
    if os.path.exists(flgimage):
       os.remove(flgimage)
    pyfits.append(flgimage, flg, self.header)
Example #11
 def test_append(self):
     hdul = fits.open(self.data('tb.fits'))
     hdul.writeto(self.temp('tmp.fits'), clobber=True)
     n = np.arange(100)
     fits.append(self.temp('tmp.fits'), n, checksum=True)
     hdul.close()
     hdul = fits.open(self.temp('tmp.fits'), checksum=True)
     assert hdul[0]._checksum is None
     hdul.close()
Example #12
def scipy_imfftconvolve(image1, image2, output_image):
    # Arguments are FITS image file names; convolve the two arrays via FFT.
    array1 = pyfits.getdata(image1)
    array2 = pyfits.getdata(image2)
    output = scipy_fftconvolve(array1, array2)
    if os.path.exists(output_image):
        os.remove(output_image)
    hdr1 = pyfits.getheader(image1)
    pyfits.append(output_image, output, hdr1)
Example #13
 def convolve(self, kernel, output):
     """
   Convolves self with a kernel, and save the output.
   """
     kernelimg = pyfits.getdata(kernel)
     convimg = hconvolve(self.data, kernelimg)
     if os.path.exists(output):
         os.remove(output)
     pyfits.append(output, convimg, self.hdr)
Example #14
 def test_append(self):
     hdul = pyfits.open(self.data('tb.fits'))
     hdul.writeto(self.temp('tmp.fits'), clobber=True)
     n = np.arange(100)
     pyfits.append(self.temp('tmp.fits'), n, checksum=True)
     hdul.close()
     hdul = pyfits.open(self.temp('tmp.fits'), checksum=True)
     assert_equal(hdul[0]._checksum, None)
     hdul.close()
Example #15
 def swarp(self):
    ### Runs Swarp on high-res images; can be run on more than 2 bands.
    # for sp in self.swarp_params:
    #    align_images.swarp_images(sp)
    for b in self.hr_bands:
       sp = {}
       sp['swarp_file'] = self.swarp_file
       sp['hires_dir'] = self.hr_dir
       # sp['lores_dir'] = os.path.join(self.lr_dir, self.lr_bands[0])
       sp['lores_dir'] = self.lr_dir
       sp['hires_input_drz'] = self.hr_input_drz[b]
       sp['hires_input_wht'] = self.hr_input_wht[b]
       # sp['hires_output_drz'] = self.hr_resample_drz[b]
       # sp['hires_output_wht'] = self.hr_resample_wht[b]
       sp['hires_output_drz'] = self.hr_output_drz[b]
       sp['hires_output_wht'] = self.hr_output_wht[b]
       sp['hr_scale'] = self.hr_scale
       sp['lores_drz'] = self.lr_drz[self.lr_bands[0]]
       sp['lores_unc'] = self.lr_unc[self.lr_bands[0]]
       sw_name = '%s_%s.swarp.yml' % (b, self.cluster_name.lower())
       yaml.dump(sp, open(sw_name, 'wb'), default_flow_style=False)
       align_images.swarp_images(sw_name)
       # Make flag image for photometry band
       if b == self.hr_bands[1]:
          ### Calculate flag image for the detection band (=self.hr_bands[1])
          if os.path.exists(self.hr_output_flg):
             os.remove(self.hr_output_flg)
          # wht_img = pyfits.getdata(self.hr_resample_wht[b]).astype(np.float)
          wht_img = pyfits.getdata(self.hr_output_wht[b]).astype(np.float)
          print "wht_img.dtype", wht_img.dtype
          # wht_hdr = pyfits.getheader(self.hr_resample_wht[b])
          wht_hdr = pyfits.getheader(self.hr_output_wht[b])
          flg_img = np.where(wht_img > 0, 0, 1).astype(np.int32)
          print "flg_img.dtype", flg_img.dtype
          # pyfits.append(self.hr_resample_flg, flg_img, wht_hdr)
          pyfits.append(self.hr_output_flg, flg_img, wht_hdr)
          # flg_hdu = pyfits.PrimaryHDU(flg_img)
          # for k in wht_hdr.keys():
          #    if k not in flg_hdu.header.keys():
          #       try:
          #          flg_hdu.header.set(k, wht_hdr[k])
          #       except:
          #          print "Unable to add keyword %s to the header; skip..." % k
          # flg_hdulist = pyfits.HDUList([flg_hdu])
          # flg_hdulist.writeto(self.hr_resample_flg)
    # Now shift the WCS for the other low-res bands as well
    lr_hdr = pyfits.getheader(os.path.join(self.lr_dir, self.lr_drz[self.lr_bands[0]]))
    crval1 = lr_hdr['crval1']
    crval2 = lr_hdr['crval2']
    crpix1 = lr_hdr['crpix1']
    crpix2 = lr_hdr['crpix2']
    crvals_new = [crval1, crval2]
    crpixs_new = [crpix1, crpix2]
    for lb in self.lr_bands[1:]:
       align_images.update_crvals_crpixs(os.path.join(self.lr_dir, self.lr_drz[lb]), crvals_new, crpixs_new)
       align_images.update_crvals_crpixs(os.path.join(self.lr_dir, self.lr_unc[lb]), crvals_new, crpixs_new)
Example #17
def saveTable(filepath,table,log=default_log,append=False,dtype=None):

    log = setLog(log)

    import numpy
    formats = { numpy.dtype('int64') : '% 12d' ,
                numpy.dtype('int32') : '% 12d' ,
                numpy.dtype('float32') : '% .10e' ,
                numpy.dtype('float64') : '% .10e' ,
                numpy.dtype('>i8') : '% 12d',
                numpy.dtype('>i4') : '% 12d',
                numpy.dtype('>f8') : '% .10f',
                numpy.dtype('S1024') : '%s',
                numpy.dtype('S64') : '%s',
                numpy.dtype('S32') : '%s',
                numpy.dtype('S16') : '%s'}


    if dtype is not None:
        table = array2recarray(table,dtype=dtype)

    if filepath.split('.')[-1] == 'pp':

        if append:
            log.error('appending a pickle not supported yet')
            raise Exception('appending a pickle not supported yet')

        import cPickle as pickle
        file_pickle = open(filepath,'w')
        pickle.dump(table,file_pickle,protocol=2)
        file_pickle.close()



    elif filepath.split('.')[-1] == 'fits' or filepath.split('.')[-2] == 'fits':
        
        import pyfits
        if append:
            pyfits.append(filepath,table)
            log.info('appended table %s %d rows' % (filepath,len(table)))
        else:
            try: 
                if len(table.dtype.names) > 0:
                    pyfits.writeto(filepath, table, clobber=True)
                    return

            except Exception,errmsg:
                pass

            if type(table) is pyfits.core.HDUList:
                fits_obj_to_write = table
            else:
                fits_obj_to_write = getFITSTable(table)
            fits_obj_to_write.writeto(filepath,clobber=True)
            log.info('saved table %s %d rows' % (filepath,len(table)))
Example #18
    def test_append_uint_data(self):
        """Regression test for #56 (BZERO and BSCALE added in the wrong location
        when appending scaled data)
        """

        pyfits.writeto(self.temp('test_new.fits'), data=np.array([],
                       dtype='uint8'))
        d = np.zeros([100, 100]).astype('uint16')
        pyfits.append(self.temp('test_new.fits'), data=d)
        f = pyfits.open(self.temp('test_new.fits'), uint=True)
        assert_equal(f[1].data.dtype, 'uint16')
Example #19
def _save_derived_units(filename, du, comm):
    if not du:
        return
    if comm is None:
        return
    if comm.Get_rank() == 0:
        buffer = StringIO.StringIO()
        pickle.dump(du, buffer, pickle.HIGHEST_PROTOCOL)
        data = np.frombuffer(buffer.getvalue(), np.uint8)
        header = create_fitsheader(fromdata=data, extname='derived_units')
        pyfits.append(filename, data, header)
    comm.Barrier()
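
Reading the derived units back reverses the round-trip: the uint8 extension data is reinterpreted as a pickle byte stream. A sketch, assuming the 'derived_units' extension written above (the file name is hypothetical):

import cPickle as pickle
raw = pyfits.getdata('tod_products.fits', 'derived_units')
du = pickle.loads(raw.tostring())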
Example #20
 def median_filter(self, skyimagename, newimagename, size=51):
     """
   Perform median filtering to estimate the ICL, then subtract it from the
   original image.
   """
     self.skyimage = signal.medfilt2d(self.image, (size, size))
     if os.path.exists(skyimagename):
         os.remove(skyimagename)
     if os.path.exists(newimagename):
         os.remove(newimagename)
     pyfits.append(skyimagename, self.skyimage, self.hdr)
     pyfits.append(newimagename, self.image - self.skyimage, self.hdr)
Example #22
def sharpen_iracpsf_zoom(psfname, zoom_factor):
    """
   Sharpen PSF using scipy.ndimage.interpolation.zoom.
   zoom < 1.0 makes PSF SHARPER.
   """
    newpsf = os.path.splitext(psfname)[0] + '_zoom%.2f.fits' % zoom_factor
    psf = pyfits.getdata(psfname)
    hdr = pyfits.getheader(psfname)
    zoomed = zoom(psf, zoom_factor)
    if os.path.exists(newpsf):
        os.remove(newpsf)
    pyfits.append(newpsf, zoomed, hdr)
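
One caveat: spline interpolation in zoom() does not exactly conserve total flux, so if the PSF must stay normalized, renormalize after zooming. A sketch, assuming the input PSF is normalized to unit sum (the file name is hypothetical):

psf = pyfits.getdata('irac_ch1_psf.fits')
zoomed = zoom(psf, 0.9)     # zoom < 1.0 sharpens the PSF
zoomed /= zoomed.sum()      # restore unit total flux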
Example #23
 def export(self, name=None):
     """
   Export current header and data to a file.
   """
     assert name, "Invalid file name to export to."
     if os.path.exists(name):
         h = pyfits.open(name, mode='update')
         h[0].header = self.hdr
         h[0].data = self.data
         h.flush()
         h.close()
     else:
         pyfits.append(name, self.data, self.hdr)
Example #24
 def make_rms(self, rmsimage, cval=1.e10, norm=False):
    """
    Make an RMS image out of a weight image.
    """
    if norm:
       wht = self.data / self.header['exptime']  
       # remember to divide by exposure time, otherwise the unit of rms will 
       # be wrong
    else:
       wht = self.data
    rms = np.where(wht>0, 1./np.sqrt(wht), cval)
    if os.path.exists(rmsimage):
       os.remove(rmsimage)
    pyfits.append(rmsimage, rms, self.header)
Example #25
 def make_flag(self, overwrite=False):
     """
     Make flag images from input weight images. Assumes that input weight
     images have file names like *_wht.fits.
     """
     for f in self.filters:
         w = self.wht_images[f]
         hdr = pyfits.getheader(w)
         wht_array = pyfits.getdata(w)
         flg_name = w.replace('wht', 'flg')
          if overwrite or not os.path.exists(flg_name):
             flg_array = np.where(wht_array > 0, 0, 1).astype('int16')
             pyfits.append(flg_name, flg_array, hdr)
             print "Made flag image from %s..." % w
Example #26
def createVoronoiInput():
    # makes a version of the median flux map that is all positive, and a
    # version of the error array that is directly scaled from this new
    # flux map and the SNR map created empirically (via ifu.map_snr)
    cubefits = pyfits.open(datadir + cuberoot + '.fits')
    
    cube = cubefits[0].data
    hdr = cubefits[0].header
    errors = cubefits[1].data
    quality = cubefits[2].data
    nframes = cubefits[3].data

    #snrimg = pyfits.getdata(datadir + cuberoot + '_snr.fits')
    snrimg = pyfits.getdata(datadir+'m31_all_scalederr_cleanhdr_snr.fits')
    
    xx = np.arange(cube.shape[0])
    yy = np.arange(cube.shape[1])
    imgShape = (cube.shape[0],cube.shape[1])

    cubeVor = np.zeros(imgShape, dtype=float)
    errVor = np.zeros(imgShape, dtype=float)

    for nx in xx:
        for ny in yy:
            tmpcube = cube[nx,ny,:]
            # add a constant to the spectrum to make it above 0
            #print "old cube mean is %f " % tmpcube.mean()
            minFlux = tmpcube.mean() - (1.0 * tmpcube.std())
            #print "minflux is %f" % minFlux
            tmpcube += np.abs(minFlux)
            #print "new cube mean is %f " % tmpcube.mean()
            tmpcubeavg = tmpcube.mean()
            #tmpsnr = np.abs(snrimg[ny,nx])
            tmpsnr = np.abs(snrimg[nx,ny])
            # calc errors for tessellation based on the empirical
            # S/N already calculated
            tmperr = tmpcubeavg/tmpsnr
            cubeVor[nx,ny] = tmpcubeavg
            errVor[nx,ny] = tmperr
            #if ny==71:
            #    print 'ny = 71'
            #    if nx==28:
            #        pdb.set_trace()
            
    # change NaN to 0
    errVor = np.nan_to_num(errVor)
    
    outfile = datadir + cuberoot + '_vor.fits'
    pyfits.writeto(outfile, cubeVor, header=hdr)
    pyfits.append(outfile, errVor)
Example #27
 def writeto(self, name=None, overwrite=False):
    # update the FITS image with the current values of data and header
    if name is None:
       # overwrite the FITS file
       h = pyfits.open(self.filename, mode='update')
       h[0].data = self.data
       h[0].header = self.header
       h.flush()
       h.close()
    else:
       if os.path.exists(name):
          if (overwrite==False):
             raise ValueError, "%s already exists. Set overwrite to True?" % name
          else:
             os.remove(name)
       pyfits.append(name, self.data, self.header)
Example #28
def imcopy(input_file, section, output_file):
    if os.path.exists(output_file):
        raise IOError, "%s already exists." % output_file
    try:
        xmin, xmax, ymin, ymax = section
    except ValueError:
        raise ValueError, "Please give a list or array for the second argument."
    hdr = pyfits.getheader(input_file)
    hdr['crpix1'] = hdr['crpix1'] - (xmin - 1)
    hdr['crpix2'] = hdr['crpix2'] - (ymin - 1)
    input_image = pyfits.getdata(input_file)
    xmin = np.maximum(xmin, 1)
    ymin = np.maximum(ymin, 1)
    print xmin, xmax, ymin, ymax
    new_image = input_image[ymin - 1:ymax, xmin - 1:xmax]
    pyfits.append(output_file, new_image, hdr)
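
The section argument uses IRAF-style 1-based, inclusive pixel ranges ordered (xmin, xmax, ymin, ymax), while the array slice itself is y-first. A usage sketch with hypothetical file names:

# Copy the 1-based pixel range x=101..600, y=201..700 into a new file.
imcopy('mosaic.fits', [101, 600, 201, 700], 'mosaic_cutout.fits')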
Example #29
def imhconvolve(imagefile, kernelfile, outputfile, pad=True, overwrite=False):
   """
   Arguments are FITS images instead of just arrays. It is a wrapper
   around the function hconvolve.
   """
   assert os.path.exists(imagefile), "File %s does not exist." % imagefile
   if os.path.exists(outputfile):
      if not overwrite:
         raise NameError, "Image %s already exists; set overwrite=True to overwrite it." % outputfile
      else:
         os.remove(outputfile)
   image = pyfits.getdata(imagefile)
   kernel = pyfits.getdata(kernelfile)
   header = pyfits.getheader(imagefile)
   conved = hconvolve(image,kernel,pad=pad)
   pyfits.append(outputfile, conved, header)
Example #30
def imfftconvolve(imagefile, kernelfile, outputfile, overwrite=False):
   """
   Use scipy.signal.fftconvolve instead of anfft or pyfftw. It is slower, but 
   it gives more robust results...
   """
   assert os.path.exists(imagefile)
   if os.path.exists(outputfile):
      if not overwrite:
         raise NameError, "Image %s already exists; set overwrite=True to overwrite it." % outputfile
      else:
         os.remove(outputfile)
   image = pyfits.getdata(imagefile)
   kernel = pyfits.getdata(kernelfile)
   header = pyfits.getheader(imagefile)
   conved = signal.fftconvolve(image, kernel, mode='same')
   pyfits.append(outputfile, conved, header)
Example #31
def psfmatch(psf_ref, psf_2match, kernelname):
   """
   Derive the kernel that matches psf_2match to psf_ref, so that psf_2match,
   when convolved with the kernel, gives psf_ref.
   Make sure that both PSF images have the same pixel scales, are centered, and
   have the same image size.
   """
   psf1 = pyfits.getdata(psf_ref)
   psf2 = pyfits.getdata(psf_2match)
   kernel = fftdeconvolve(psf1, psf2)   
   # normalize the kernel
   kernel = kernel / kernel.sum()
   hdr2 = pyfits.getheader(psf_2match)
   if os.path.exists(kernelname):
      os.remove(kernelname)
   pyfits.append(kernelname, kernel.real, hdr2)
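
Chained with imhconvolve from Example #29, the derived kernel degrades the sharper image to the reference PSF. A sketch with hypothetical file names:

psfmatch('psf_f160w.fits', 'psf_f606w.fits', 'kernel_f606w_to_f160w.fits')
imhconvolve('f606w_drz.fits', 'kernel_f606w_to_f160w.fits', 'f606w_drz_matched.fits')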
Example #32
def im2rgbfits(infile,
               rgbfile='',
               overwrite=False,
               headerfile=None,
               flip=False):
    if rgbfile == '':
        rgbfile = decapfile(infile) + '_RGB.fits'

    if exists(rgbfile):
        if overwrite:
            delfile(rgbfile)
        else:
            print rgbfile, 'EXISTS'
            sys.exit(1)

    #im = Image.open(infile)
    #print 'Loading data...'
    #data = array(im.getdata())
    #nxc, nyc = im.size
    #data.shape = (nyc,nxc,3)
    #data = transpose(data, (2,0,1))
    data = loadrgb(infile)

    #hdu = pyfits.PrimaryHDU()
    header = headerfile and pyfits.getheader(headerfile)
    hdu = pyfits.PrimaryHDU(None, header)
    hdulist = pyfits.HDUList([hdu])
    hdulist.writeto(rgbfile)

    try:  # If there's a 'SCI' extension, then that's where the WCS is
        header = pyfits.getheader(headerfile, 'SCI')
    except:
        pass

    if header is not None:
        if 'EXTNAME' in header.keys():
            del (header['EXTNAME'])

    for i in range(3):
        print 'RGB'[i]
        data1 = data[i]
        if flip:
            data1 = flipud(data1)
        pyfits.append(rgbfile, data1, header)

    print rgbfile, 'NOW READY FOR "Open RGB Fits Image" in ds9'
Example #33
    def test_structured(self):
        fname = self.data('stddata.fits')

        print_('Reading from ', fname)
        data1, h1 = fits.getdata(fname, ext=1, header=True)
        data2, h2 = fits.getdata(fname, ext=2, header=True)

        st = get_test_data()

        outfile = self.temp('test.fits')
        print_('Writing to file data1:', outfile)
        fits.writeto(outfile, data1, clobber=True)
        print_('Appending to file: data2', outfile)
        fits.append(outfile, data2)

        print_('Appending to file: st', outfile)
        fits.append(outfile, st)
        print_(st.dtype.descr)
        print_(st)
        assert st.dtype.isnative
        assert np.all(st['f1'] == [1, 3, 5])

        print_('Reading data back')
        data1check, h1check = fits.getdata(outfile, ext=1, header=True)
        data2check, h2check = fits.getdata(outfile, ext=2, header=True)
        stcheck, sthcheck = fits.getdata(outfile, ext=3, header=True)

        if not compare_arrays(data1, data1check, verbose=True):
            raise ValueError('Fail')
        if not compare_arrays(data2, data2check, verbose=True):
            raise ValueError('Fail')
        print_(st, stcheck)
        if not compare_arrays(st, stcheck, verbose=True):
            raise ValueError('Fail')

        # try reading with view
        print_('Reading with ndarray view')
        dataviewcheck, hviewcheck = fits.getdata(outfile,
                                                 ext=2,
                                                 header=True,
                                                 view=np.ndarray)
        if not compare_arrays(data2, dataviewcheck, verbose=True):
            raise ValueError('Fail')
Example #35
def filter_segmap(segimage, id_keep, output, blur_kernel="", threshold=0.1):
    """
   Specify a list of ID numbers to keep, and zero-out the rest of the 
   segmentation map.
   """
    seg = pyfits.getdata(segimage)
    mask = np.zeros(seg.shape, 'int')
    # Loop through all IDs... is there a better way??
    for x in id_keep:
        mask = np.where(seg == x, 1, mask)
    seg_masked = np.where(mask == 1, 1, 0)
    if os.path.exists(output):
        os.system('rm %s' % output)
    # Now convolve with a blurring kernel if desired
    if len(blur_kernel):
        mask = blur_mask(mask, blur_kernel, threshold=threshold)
        # k = pyfits.getdata(blur_kernel)
        # mask = hconvolve.hconvolve(mask, )
    pyfits.append(output, data=seg_masked, header=pyfits.getheader(segimage))
    return mask
Example #36
 def addsimulated(self,
                  galfile,
                  root,
                  realimage,
                  gain=1.0,
                  psffile="",
                  save=0):
     # simulation = root+'_sim.fits'
      assert self.noiseless is not None, "Noiseless image with artificial galaxies not calculated."
     outimage = root + '.fits'
     if os.path.exists(outimage):
         os.remove(outimage)
     # makenoiselessimage(galfile,root+'_sim',magz,xmax,ymax,
     #    save=save,gain=gain,psffile=psffile)
     noiseless_img = pyfits.getdata(self.noiseless)
     simulated_img = self.data + noiseless_img
     pyfits.append(outimage, simulated_img, self.hdr)
     # iraf.imcalc(realimage+","+simulation,outimage,"im1+im2")
     if not save:
         os.remove(self.noiseless)
Example #38
def make_irac_lightmap(id_keep,
                       hr_segmap,
                       hr_mask,
                       irac_psf,
                       irac_drz,
                       irac_output,
                       blur_threshold=0.1,
                       sigma=1.0):
    """
   Make an IRAC map for cluster members (or an arbitrary set of ID numbers).
   id_keep: ID numbers in the high-res segmentation map to keep
   hr_segmap: high-res segmentation map
   hr_mask: high-res mask image (an intermediate product)
   irac_psf: PSF in IRAC, used to blur the high-res mask image
   irac_drz: IRAC science image, onto which we drizzle the high-res mask image
   irac_output: file name of the output IRAC light map
   blur_threshold: the threshold (between 0 and 1) in the step of blurring the 
                   high-res mask image.
   """
    # First step, zero-out the non cluster members
    mask = filter_segmap(hr_segmap,
                         id_keep,
                         hr_mask,
                         blur_kernel=irac_psf,
                         threshold=blur_threshold)
    # Now we have a mask image in high-res, drizzle the pixels onto the low-res
    # pixel grid
    if os.path.exists("irac_mask.fits"):
        os.system('rm irac_mask.fits')
    drizzle_mask(hr_mask, irac_drz, "irac_mask.fits")
    irac_input = pyfits.getdata(irac_drz)
    irac_mask = pyfits.getdata("irac_mask.fits")
    irac_map = np.where(irac_mask > 0, irac_input, 0.)
    # Also smooth the output light map with a Gaussian kernel
    if sigma > 0:
        print "Smoothing the IRAC mask..."
        irac_map = filters.gaussian_filter(irac_map, sigma)
    irac_hdr = pyfits.getheader(irac_drz)
    os.system('rm %s' % irac_output)
    pyfits.append(irac_output, data=irac_map, header=irac_hdr)
    print "Done."
Example #39
 def addsimulated_galfit(self, band, save=0):
     """
   Add the noiseless images of artificial galaxies to the real images. 
   Specifically for GALFIT measurement images with different pixel scales 
   as the detection image.
   """
     # simulation = root+'_sim.fits'
     assert os.path.exists(self.noiselessimages[band]), \
        "Noiseless image with artificial galaxies not calculated."
     broot = self.root + '_%s' % band
     outimage = broot + '_galfit.fits'
     if os.path.exists(outimage):
         os.remove(outimage)
     noiseless_img = pyfits.getdata(self.noiselessimages[band])
     realimage_img = pyfits.getdata(self.realimages_galfit[band])
     hdr = pyfits.getheader(self.realimages_galfit[band])
     simulated_img = realimage_img + noiseless_img
     pyfits.append(outimage, simulated_img, hdr)
     self.fakeimages_galfit[band] = outimage
     if not save:
         os.remove(self.noiselessimages[band])
Example #40
def createVoronoiOutput(inputFile=datadir+cuberoot+'.fits',inputVoronoiFile=datadir+'voronoi_2d_binning_output.txt'):
    cubefits = pyfits.open(inputFile)
    
    cube = cubefits[0].data
    hdr = cubefits[0].header
    errors = cubefits[1].data
    quality = cubefits[2].data
    nframes = cubefits[3].data

    cubeShape = (cube.shape[0],cube.shape[1],cube.shape[2])

    yy, xx, binnum = np.loadtxt(inputVoronoiFile,unpack=True)
    xx = xx.astype(int)
    yy = yy.astype(int)
    binnum = binnum.astype(int)

    newCube = np.zeros(cubeShape, dtype=float)
    newErr = np.zeros(cubeShape, dtype=float)

    for nb in range(binnum.max()+1):
        idx = np.where(binnum == nb)
        nx = xx[idx]
        ny = yy[idx]
        nbins = len(idx[0])
        tmpCube = np.sum(cube[nx,ny,:],axis=0)/nbins
        tmpErr = np.sqrt(np.sum(errors[nx,ny,:]**2,axis=0))/nbins
        newCube[nx,ny,:] = tmpCube
        newErr[nx,ny,:] = tmpErr

    #pdb.set_trace()
    outfile = datadir + cuberoot + '_vorcube.fits'
    pyfits.writeto(outfile,newCube,header=hdr)
    pyfits.append(outfile,newErr)
    pyfits.append(outfile,quality)
    pyfits.append(outfile,nframes)
Example #41
def match_images(inputimage, refimage, output=None):
    # def match_images(segimage, drzimage_v2, output=None):
    """
   Match the footprint of inputimage to that of refimage.
   """
    hdr1 = pyfits.getheader(inputimage)
    hdr2 = pyfits.getheader(refimage)
    wcs1 = pywcs.WCS(hdr1)
    wcs2 = pywcs.WCS(hdr2)
    sky00 = wcs2.wcs_pix2sky([[1, 1]], 1)
    corner00 = wcs1.wcs_sky2pix(sky00, 1)[0]
    corner00 = np.around(corner00).astype('int')
    nx2 = hdr2['naxis1']
    ny2 = hdr2['naxis2']
    sky11 = wcs2.wcs_pix2sky([[nx2, ny2]], 1)
    corner11 = wcs1.wcs_sky2pix(sky11, 1)[0]
    corner11 = np.around(corner11).astype('int')
    xlo1, ylo1 = corner00
    xhi1, yhi1 = corner11
    print "[xlo:xhi, ylo:yhi]", xlo1, xhi1, ylo1, yhi1
    if output is None:
        return xlo1, xhi1, ylo1, yhi1
    newshape = (xhi1 - xlo1 + 1, yhi1 - ylo1 + 1)
    assert newshape == (
        nx2, ny2
    ), "Shape of new seg map does not match the shape of the input drz image..."
    # Now make a cutout of the seg map
    if os.path.exists(output):
        os.remove(output)
    im1 = pyfits.getdata(inputimage)
    im1_new = im1[ylo1:yhi1 + 1, xlo1:xhi1 + 1]
    # hdr1['crpix1'] = hdr1['crpix1'] - xlo1
    # hdr1['crpix2'] = hdr1['crpix2'] - ylo1
    hdr1['crpix1'] = hdr2['crpix1']
    hdr1['crpix2'] = hdr2['crpix2']
    print "shape of GOODS/ACS v2 mosaic: [%d, %d]" % (hdr2['naxis1'],
                                                      hdr2['naxis2'])
    print "new shape of the segmap: [%d, %d]" % (im1_new.shape[1],
                                                 im1_new.shape[0])
    pyfits.append(output, im1_new, hdr1)
Example #42
def write_fits(filename, data, header, shape_global, extension, comm,
               extname=None):
    """Write local images into a FITS file"""

    if comm is None:
        comm = MPI.COMM_SELF

    if not extension:
        try:
            os.remove(filename)
        except:
            pass

    if header is None:
        header = create_fitsheader(shape_global[::-1], data.dtype,
                                   extname=extname)

    rank = comm.Get_rank()
    size = comm.Get_size()
    files = comm.allgather(filename)
    allsame = all([f == files[0] for f in files])
    alldiff = len(files) == len(np.unique(files))
    if not alldiff and not allsame:
        raise ValueError('Some target filenames are equal, but not all.')
    if alldiff or size == 1:
        if not extension:
            hdu = pyfits.PrimaryHDU(data, header)
            hdu.writeto(filename, clobber=True)
        else:
            pyfits.append(filename, data, header)
        return

    if rank == 0:
        shdu = pyfits.StreamingHDU(filename, header)
        data_loc = shdu._datLoc
        shdu.close()
    else:
        data_loc = None
    data_loc = comm.bcast(data_loc)

    # get a communicator excluding the processes which have no work to do
    # (Create_subarray does not allow 0-sized subarrays)
    nglobal = shape_global[0]
    chunk = np.product(shape_global[1:])
    s = split_work(nglobal)
    nlocal = s.stop - s.start
    nmax = int(np.ceil(nglobal / size))
    rank_nowork = int(np.ceil(nglobal / nmax))
    group = comm.Get_group()
    group = group.Incl(range(rank_nowork))
    newcomm = comm.Create(group)
    
    if rank < rank_nowork:
        mtype = {1:MPI.BYTE, 4: MPI.FLOAT, 8:MPI.DOUBLE}[data.dtype.itemsize]
        ftype = mtype.Create_subarray([nglobal*chunk], [nlocal*chunk],
                                      [s.start*chunk])
        ftype.Commit()
        f = MPI.File.Open(newcomm, filename, amode=MPI.MODE_APPEND+MPI.MODE_WRONLY+MPI.MODE_CREATE)
        f.Set_view(data_loc, mtype, ftype, 'native', MPI.INFO_NULL)
        # mpi4py 1.2.2: pb with viewing data as big endian KeyError '>d'
        if sys.byteorder == 'little' and data.dtype.byteorder in ('=', '<'):
            data = data.byteswap()
        else:
            data = data.newbyteorder('=')
        f.Write_all(data[0:nlocal])
        f.Close()

    if rank == 0:
        shdu._ffo = pyfits.core._File(filename, 'append')
        shdu._ffo.getfile().seek(0,2)
        pyfitstype = {8:'uint8', 16:'int16', 32:'int32', 64:'int64', -32:'float32', -64:'float64'}[header['BITPIX']]
        completed = shdu.write(np.empty(0, dtype=pyfitstype))
        shdu.close()
        if not completed:
            raise RuntimeError('File is not completely written')

    comm.Barrier()
Example #43
def keppixseries(infile,outfile,plotfile,plottype,filter,function,cutoff,clobber,verbose,logfile,status, cmdLine=False): 

# input arguments

    status = 0
    seterr(all="ignore") 

# log the call 

    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPPIXSERIES -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'plotfile='+plotfile+' '
    call += 'plottype='+plottype+' '
    filt = 'n'
    if (filter): filt = 'y'
    call += 'filter='+filt+ ' '
    call += 'function='+function+' '
    call += 'cutoff='+str(cutoff)+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time

    kepmsg.clock('KEPPIXSERIES started at',logfile,verbose)

# test log file

    logfile = kepmsg.test(logfile)

# clobber output file

    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile): 
        message = 'ERROR -- KEPPIXSERIES: ' + outfile + ' exists. Use --clobber'
        status = kepmsg.err(logfile,message,verbose)

# open TPF FITS file

    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \
            kepio.readTPF(infile,'TIME',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \
            kepio.readTPF(infile,'TIMECORR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \
            kepio.readTPF(infile,'CADENCENO',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \
            kepio.readTPF(infile,'FLUX',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \
            kepio.readTPF(infile,'FLUX_ERR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, qual, status = \
            kepio.readTPF(infile,'QUALITY',logfile,verbose)

# read mask definition data from TPF file

    if status == 0:
        maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(infile,logfile,verbose)

# print target data

    if status == 0:
        print ''
        print '      KepID:  %s' % kepid
        print ' RA (J2000):  %s' % ra
        print 'Dec (J2000):  %s' % dec
        print '     KepMag:  %s' % kepmag
        print '   SkyGroup:    %2s' % skygroup
        print '     Season:    %2s' % str(season)
        print '    Channel:    %2s' % channel
        print '     Module:    %2s' % module
        print '     Output:     %1s' % output
        print ''

# how many quality = 0 rows?

    if status == 0:
        npts = 0
        nrows = len(fluxpixels)
        for i in range(nrows):
            if qual[i] == 0 and \
                    numpy.isfinite(barytime[i]) and \
                    numpy.isfinite(fluxpixels[i,ydim*xdim/2]):
                npts += 1
        time = empty((npts))
        timecorr = empty((npts))
        cadenceno = empty((npts))
        quality = empty((npts))
        pixseries = empty((ydim,xdim,npts))
        errseries = empty((ydim,xdim,npts))

# construct output light curves

    if status == 0:
        np = 0
        for i in range(ydim):
            for j in range(xdim):
                npts = 0
                for k in range(nrows):
                    if qual[k] == 0 and \
                    numpy.isfinite(barytime[k]) and \
                    numpy.isfinite(fluxpixels[k,ydim*xdim/2]):
                        time[npts] = barytime[k]
                        timecorr[npts] = tcorr[k]
                        cadenceno[npts] = cadno[k]
                        quality[npts] = qual[k]
                        pixseries[i,j,npts] = fluxpixels[k,np]
                        errseries[i,j,npts] = errpixels[k,np]
                        npts += 1
                np += 1

# define data sampling

    if status == 0 and filter:
        tpf, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0 and filter:
        cadence, status = kepkey.cadence(tpf[1],infile,logfile,verbose)     
        tr = 1.0 / (cadence / 86400)
        timescale = 1.0 / (cutoff / tr)

# define convolution function

    if status == 0 and filter:
        if function == 'boxcar':
            filtfunc = numpy.ones(numpy.ceil(timescale))
        elif function == 'gauss':
            timescale /= 2
            dx = numpy.ceil(timescale * 10 + 1)
            filtfunc = kepfunc.gauss()
            filtfunc = filtfunc([1.0,dx/2-1.0,timescale],linspace(0,dx-1,dx))
        elif function == 'sinc':
            dx = numpy.ceil(timescale * 12 + 1)
            fx = linspace(0,dx-1,dx)
            fx = fx - dx / 2 + 0.5
            fx /= timescale
            filtfunc = numpy.sinc(fx)
        filtfunc /= numpy.sum(filtfunc)

# pad time series at both ends with noise model

    if status == 0 and filter:
        for i in range(ydim):
            for j in range(xdim):
                ave, sigma  = kepstat.stdev(pixseries[i,j,:len(filtfunc)])
                padded = numpy.append(kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \
                                                            numpy.ones(len(filtfunc)) * sigma), pixseries[i,j,:])
                ave, sigma  = kepstat.stdev(pixseries[i,j,-len(filtfunc):])
                padded = numpy.append(padded, kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \
                                                                    numpy.ones(len(filtfunc)) * sigma))

# convolve data

                if status == 0:
                    convolved = convolve(padded,filtfunc,'same')

# remove padding from the output array

                if status == 0:
                    outdata = convolved[len(filtfunc):-len(filtfunc)]
            
# subtract low frequencies

                if status == 0:
                    outmedian = median(outdata)
                    pixseries[i,j,:] = pixseries[i,j,:] - outdata + outmedian

# construct output file

    if status == 0 and ydim*xdim < 1000:
        instruct, status = kepio.openfits(infile,'readonly',logfile,verbose)
        status = kepkey.history(call,instruct[0],outfile,logfile,verbose)
        hdulist = HDUList(instruct[0])
        cols = []
        cols.append(Column(name='TIME',format='D',unit='BJD - 2454833',disp='D12.7',array=time))
        cols.append(Column(name='TIMECORR',format='E',unit='d',disp='E13.6',array=timecorr))
        cols.append(Column(name='CADENCENO',format='J',disp='I10',array=cadenceno))
        cols.append(Column(name='QUALITY',format='J',array=quality))
        for i in range(ydim):
            for j in range(xdim):
                colname = 'COL%d_ROW%d' % (i+column,j+row)
                cols.append(Column(name=colname,format='E',disp='E13.6',array=pixseries[i,j,:]))
        hdu1 = new_table(ColDefs(cols))
        try:
            hdu1.header.update('INHERIT',True,'inherit the primary header')
        except:
            status = 0
        try:
            hdu1.header.update('EXTNAME','PIXELSERIES','name of extension')
        except:
            status = 0
        try:
            hdu1.header.update('EXTVER',instruct[1].header['EXTVER'],'extension version number (not format version)')
        except:
            status = 0
        try:
            hdu1.header.update('TELESCOP',instruct[1].header['TELESCOP'],'telescope')
        except:
            status = 0
        try:
            hdu1.header.update('INSTRUME',instruct[1].header['INSTRUME'],'detector type')
        except:
            status = 0
        try:
            hdu1.header.update('OBJECT',instruct[1].header['OBJECT'],'string version of KEPLERID')
        except:
            status = 0
        try:
            hdu1.header.update('KEPLERID',instruct[1].header['KEPLERID'],'unique Kepler target identifier')
        except:
            status = 0
        try:
            hdu1.header.update('RADESYS',instruct[1].header['RADESYS'],'reference frame of celestial coordinates')
        except:
            status = 0
        try:
            hdu1.header.update('RA_OBJ',instruct[1].header['RA_OBJ'],'[deg] right ascension from KIC')
        except:
            status = 0
        try:
            hdu1.header.update('DEC_OBJ',instruct[1].header['DEC_OBJ'],'[deg] declination from KIC')
        except:
            status = 0
        try:
            hdu1.header.update('EQUINOX',instruct[1].header['EQUINOX'],'equinox of celestial coordinate system')
        except:
            status = 0
        try:
            hdu1.header.update('TIMEREF',instruct[1].header['TIMEREF'],'barycentric correction applied to times')
        except:
            status = 0
        try:
            hdu1.header.update('TASSIGN',instruct[1].header['TASSIGN'],'where time is assigned')
        except:
            status = 0
        try:
            hdu1.header.update('TIMESYS',instruct[1].header['TIMESYS'],'time system is barycentric JD')
        except:
            status = 0
        # Copy the remaining time and instrument keywords from the input
        # table header, skipping any keyword missing from the input file.
        keywords = [
            ('BJDREFI', 'integer part of BJD reference date'),
            ('BJDREFF', 'fraction of the day in BJD reference date'),
            ('TIMEUNIT', 'time unit for TIME, TSTART and TSTOP'),
            ('TSTART', 'observation start time in BJD-BJDREF'),
            ('TSTOP', 'observation stop time in BJD-BJDREF'),
            ('LC_START', 'mid point of first cadence in MJD'),
            ('LC_END', 'mid point of last cadence in MJD'),
            ('TELAPSE', '[d] TSTOP - TSTART'),
            ('LIVETIME', '[d] TELAPSE multiplied by DEADC'),
            ('EXPOSURE', '[d] time on source'),
            ('DEADC', 'deadtime correction'),
            ('TIMEPIXR', 'bin time beginning=0 middle=0.5 end=1'),
            ('TIERRELA', '[d] relative time error'),
            ('TIERABSO', '[d] absolute time error'),
            ('INT_TIME', '[s] photon accumulation time per frame'),
            ('READTIME', '[s] readout time per frame'),
            ('FRAMETIM', '[s] frame time (INT_TIME + READTIME)'),
            ('NUM_FRM', 'number of frames per time stamp'),
            ('TIMEDEL', '[d] time resolution of data'),
            ('DATE-OBS', 'TSTART as UTC calendar date'),
            ('DATE-END', 'TSTOP as UTC calendar date'),
            ('BACKAPP', 'background is subtracted'),
            ('DEADAPP', 'deadtime applied'),
            ('VIGNAPP', 'vignetting or collimator correction applied'),
            ('GAIN', '[electrons/count] channel gain'),
            ('READNOIS', '[electrons] read noise'),
            ('NREADOUT', 'number of read per cadence'),
            ('TIMSLICE', 'time-slice readout sequence section'),
            ('MEANBLCK', '[count] FSW mean black level')]
        for keyword, comment in keywords:
            try:
                hdu1.header.update(keyword,instruct[1].header[keyword],comment)
            except:
                status = 0
        hdulist.append(hdu1)
        hdulist.writeto(outfile)
        status = kepkey.new('EXTNAME','APERTURE','name of extension',instruct[2],outfile,logfile,verbose)
        pyfits.append(outfile,instruct[2].data,instruct[2].header)
        status = kepio.closefits(instruct,logfile,verbose)
    else:
        message = 'WARNING -- KEPPIXSERIES: output FITS file requires > 999 columns. Non-compliant with FITS convention.'

        kepmsg.warn(logfile,message)

# plot style

    if status == 0:
        try:
            params = {'axes.linewidth': 2.0,
                      'axes.labelsize': 32,
                      'font.family': 'sans-serif',
                      'font.weight': 'bold',
                      'font.size': 8,
                      'legend.fontsize': 8,
                      'xtick.labelsize': 12,
                      'ytick.labelsize': 12}
            pylab.rcParams.update(params)
        except:
            pass

# plot pixel array

    fmin = 1.0e33
    fmax = -1.0e33
    if status == 0:
        pylab.figure(num=None,figsize=[12,12])
        pylab.clf()
        dx = 0.93 / xdim
        dy = 0.94 / ydim
        ax = pylab.axes([0.06,0.05,0.93,0.94])
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
        pylab.gca().yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=12)
        pylab.xlim(numpy.min(pixcoord1) - 0.5,numpy.max(pixcoord1) + 0.5)
        pylab.ylim(numpy.min(pixcoord2) - 0.5,numpy.max(pixcoord2) + 0.5)
        pylab.xlabel('time', {'color' : 'k'})
        pylab.ylabel('arbitrary flux', {'color' : 'k'})
        for i in range(ydim):
            for j in range(xdim):
                tmin = amin(time)
                tmax = amax(time)
                try:
                    numpy.isfinite(amin(pixseries[i,j,:]))
                    numpy.isfinite(amax(pixseries[i,j,:]))
                    fmin = amin(pixseries[i,j,:])
                    fmax = amax(pixseries[i,j,:])
                except:
                    # keep the previous limits if this pixel series is empty
                    pass
                xmin = tmin - (tmax - tmin) / 40
                xmax = tmax + (tmax - tmin) / 40
                ymin = fmin - (fmax - fmin) / 20
                ymax = fmax + (fmax - fmin) / 20
                if kepstat.bitInBitmap(maskimg[i,j],2):
                    pylab.axes([0.06+float(j)*dx,0.05+i*dy,dx,dy],axisbg='lightslategray')
                elif maskimg[i,j] == 0:
                    pylab.axes([0.06+float(j)*dx,0.05+i*dy,dx,dy],axisbg='black')
                else:
                    pylab.axes([0.06+float(j)*dx,0.05+i*dy,dx,dy])
                # all branches hid the tick labels, so do it unconditionally
                pylab.setp(pylab.gca(),xticklabels=[],yticklabels=[])
                ptime = time * 1.0
                ptime = numpy.insert(ptime,[0],ptime[0])
                ptime = numpy.append(ptime,ptime[-1])
                pflux = pixseries[i,j,:] * 1.0
                pflux = numpy.insert(pflux,[0],-1000.0)
                pflux = numpy.append(pflux,-1000.0)
                pylab.plot(time,pixseries[i,j,:],color='#0000ff',linestyle='-',linewidth=0.5)
                if not kepstat.bitInBitmap(maskimg[i,j],2):
                    pylab.fill(ptime,pflux,fc='lightslategray',linewidth=0.0,alpha=1.0)
                pylab.fill(ptime,pflux,fc='#FFF380',linewidth=0.0,alpha=1.0)
                if 'loc' in plottype:
                    pylab.xlim(xmin,xmax)
                    pylab.ylim(ymin,ymax)
                if 'glob' in plottype:
                    pylab.xlim(xmin,xmax)
                    pylab.ylim(1.0e-10,numpy.nanmax(pixseries) * 1.05)
                if 'full' in plottype:
                    pylab.xlim(xmin,xmax)
                    pylab.ylim(1.0e-10,ymax * 1.05)

# render plot

        if cmdLine: 
            pylab.show()
        else: 
            pylab.ion()
            pylab.plot([])
            pylab.ioff()	
        if plotfile.lower() != 'none':
            pylab.savefig(plotfile)

# stop time

    if status == 0:
        kepmsg.clock('KEPPIXSERIES ended at',logfile,verbose)

    return
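
The mask test above uses kepstat.bitInBitmap from the PyKE helpers. A minimal
sketch of what such a test could look like, assuming the second argument is the
flag value being tested (the code above uses 2); the _sketch name marks it as
illustrative, not the PyKE definition:

def bitInBitmap_sketch(bitmap, bit):
    # True if the flag value 'bit' is set in the integer bitmap
    return bool(int(bitmap) & int(bit))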
Exemple #44
0
def cutout(filename, ra_or_l, dec_or_b, coordsys, radius, outfile, clobber=True):
     """
     Inputs:
         file  - .fits filename or pyfits HDUList. If HDU is 3d (data-cube), the 3rd dimension
                 (the one which is not a sky coordinate) will be kept untouched. Dimensions > 3
                 are not supported
         ra_or_l,dec_or_b - Longitude and latitude of the center of the new image. The longitude
                            coordinate (R.A. or L) goes from 0 to 360, while the latitude coordinate
                            goes from -90 to 90.
         coordsys - Coordinate system for the center of the new image ('galactic' or 'equatorial')
         radius - radius of the region of interest (deg)
         outfile - output file
         clobber - overwrite the file if existing? (True or False)
     """
     
     if coordsys not in ['equatorial','galactic']:
       raise ValueError("Unknown coordinate system '%s'" %(coordsys))
     
     if(ra_or_l < 0 or ra_or_l > 360):
       raise RuntimeError("The longitude coordinate must be 0 < longitude < 360")
     
     if(dec_or_b < -90 or dec_or_b > 90):
       raise RuntimeError("The latitude coordinate must be -90 < latitude < 90")
         
     with pyfits.open(filename) as f:
       
       if(f[0].data.ndim==3):
         isCube                     = True
       elif(f[0].data.ndim==2):
         isCube                     = False
       else:
         raise RuntimeError("Do not know how to handle a cube with %s dimensions" %(f[0].data.shape[0]))
       pass
       
       head                         = f[0].header.copy()
       
       cd1                          = head.get('CDELT1') if head.get('CDELT1') else head.get('CD1_1')
       cd2                          = head.get('CDELT2') if head.get('CDELT2') else head.get('CD2_2')
       if cd1 is None or cd2 is None:
           raise Exception("Missing CD or CDELT keywords in header")
       
       wcs                         = pywcs.WCS(head)
       
       #Ensure that the center is expressed in the same coordinate system as the original
       #image
       if coordsys=='equatorial' and wcs.wcs.lngtyp=='GLON':
       
             #Convert RA,Dec in Galactic coordinates
             sdir                        = SkyDir(ra_or_l,dec_or_b,SkyDir.EQUATORIAL)
             xc,yc                       = (sdir.l(),sdir.b())
       
       elif coordsys=='galactic' and wcs.wcs.lngtyp=='RA':
             
             #Convert L,B in Equatorial coordinates
             sdir                        = SkyDir(ra_or_l,dec_or_b,SkyDir.EQUATORIAL)
             xc,yc                       = (sdir.ra(),sdir.dec())
 
       else:
       
            #Image and input are in the same system already
            xc,yc                        = (ra_or_l,dec_or_b)
       
       pass
       
       #Find the pixel corresponding to the center
       if(isCube):
         #Data cube
         coord                      = numpy.array([[xc],[yc],[1]]).T
         xx,yy,z                    = wcs.wcs_sky2pix(coord,0)[0]
         shapez,shapey,shapex       = f[0].data.shape
         
         #Compute the sky coordinates of all pixels 
         #(will use them later for the angular distance)
         
         #The code below is much faster, but does the same thing as this
         #one here:
         
         #coord                        = numpy.zeros((shapex*shapey,3))
         #h                            = 0
         #for i in range(shapex):
         #  for j in range(shapey):
         #    coord[h]                 = [i+1,j+1,1]
         #    h                       += 1
         
         coord                        = numpy.ones((shapex*shapey,3))
         firstColBase                 = numpy.arange(shapex)+1
         firstColumn                  = numpy.repeat(firstColBase,shapey)
         secondColumn                 = numpy.array(range(shapey)*shapex)+1
         coord[:,0]                   = firstColumn
         coord[:,1]                   = secondColumn
                         
         #Note that pix2sky always returns the longitude in the range 0 to 360 deg
         res                          = wcs.wcs_pix2sky(coord,1)
         ras                          = res[:,0]
         decs                         = res[:,1]
         
       else:
         #Normal image
         coord                       = numpy.array([[xc],[yc]]).T
         xx,yy                       = wcs.wcs_sky2pix(coord,0)[0]
         shapey,shapex               = f[0].data.shape
         
         #Compute the sky coordinates of all pixels 
         #(will use them later for the angular distance)
         coord                        = numpy.ones((shapex*shapey,2))
         firstColBase                 = numpy.arange(shapex)+1
         firstColumn                  = numpy.repeat(firstColBase,shapey)
         secondColumn                 = numpy.array(range(shapey)*shapex)+1
         coord[:,0]                   = firstColumn
         coord[:,1]                   = secondColumn
                         
         #Note that pix2sky always returns the longitude in the range 0 to 360 deg
         res                          = wcs.wcs_pix2sky(coord,1)
         ras                          = res[:,0]
         decs                         = res[:,1]
       pass
         
       print("Center is (%s,%s) pixel, (%s,%s) sky" %(xx,yy,xc,yc))
       
       #Cannot deal with fractional pixel
       if(xx - int(xx) >= 1e-4):
         print("Approximating the X pixel: %s -> %s" %(xx,int(xx)))
         xx                          = int(xx)
       if(yy - int(yy) >= 1e-4):
         print("Approximating the Y pixel: %s -> %s" %(yy,int(yy)))
         yy                          = int(yy)
       pass
       
       #Now select the pixels to keep
       #Pre-select according to a bounding box
       #(huge gain of speed in the computation of distances)
       ra_min_,ra_max_,dec_min_,dec_max_,pole = getBoundingCoordinates(xc,yc,radius)
       
       if(pole):
         #Nothing we can really do, except masking out (zeroing) all the useless parts
         print("\nOne of the poles is within the region. Cannot cut the image. I will zero-out useless parts")
         img                           = f[0].data
         
         #Find the pixel corresponding to (xc,yc-radius)
         coord                         = numpy.array([[xc],[yc-radius],[1]]).T
         res                           = wcs.wcs_sky2pix(coord,0)[0]
         mask_ymax                     = int(numpy.ceil(res[1]))
         
         #Now find the pixel corresponding to (xc,yc+radius)
         coord                         = numpy.array([[xc],[yc+radius-180.0],[1]]).T
         res                           = wcs.wcs_sky2pix(coord,0)[0]
         mask_ymin                     = int(numpy.floor(res[1]))
                 
         img[:,mask_ymin:mask_ymax,:]  = img[:,mask_ymin:mask_ymax,:]*0.0
         
       else:
         if(ra_min_ > ra_max_):
           #Circular inversion (ex: ra_min_ = 340.0 and ra_max_ = 20.0)
           idx                         = (ra_min_ <= ras) | (ras <= ra_max_)
         else:
           idx                         = (ra_min_ <= ras) & (ras <= ra_max_)
         pass
         
         if(dec_min_ > dec_max_):
            #Circular inversion, as above but for the latitude range
           idx                         = ((dec_min_ <= decs) | (decs <= dec_max_)) & idx
         else:
           idx                         = ((dec_min_ <= decs) & (decs <= dec_max_)) & idx
         pass
               
         ras                           = ras[idx]
         decs                          = decs[idx]
         
         #Compute all angular distances of remaining pixels
         distances                     = getAngularDistance(xc,yc,ras,decs)
         
         #Select all pixels within the provided radius
         idx                           = (distances <= radius)
         selected_ras                  = ras[idx]
         selected_decs                 = decs[idx]
                 
         #Now transform back into pixels values
         if(isCube):
           
           coord                      = numpy.vstack([selected_ras,selected_decs,[1]*selected_ras.shape[0]]).T
                     
         else:
           
           coord                      = numpy.vstack([selected_ras,selected_decs]).T
         
         pass
         
         res                        = wcs.wcs_sky2pix(coord,0)
         
         #Now check if the range of x is not continuous (i.e., we are
         #wrapping around the borders)
         uniquex                    = numpy.unique(res[:,0])
         deltas                     = uniquex[1:]-uniquex[:-1]
         if(deltas.max()>1):
           #We are wrapping around the borders
           # |-------x1             x2-------|
           #We want to express x2 as a negative index starting
           #from the right border, and set it as xmin.
           #Then we set x1 as xmax.
           #This way the .take method below will start accumulating
           #the image from x2 to the right border |, then from the left
           #border | to x1, in this order
           #Find x2
           x2id                     = deltas.argmax()+1
           x2                       = int(uniquex[x2id])
           x1                       = int(uniquex[x2id-1])
           xmin                     = x2-shapex
           xmax                     = x1
           
           ymin,ymax                = (int(res[:,1].min()),int(res[:,1].max()))
           
         else:          
         
           xmin,xmax,ymin,ymax      = (int(res[:,0].min()),int(res[:,0].max()),int(res[:,1].min()),int(res[:,1].max()))
         
         pass        
         
         print("X range -> %s - %s" %(xmin,xmax))
         print("Y range -> %s - %s" %(ymin,ymax))
         print("Input image shape is ([z],y,x) = %s" %(str(f[0].shape)))
         
         #Using the mode='wrap' option we wrap around the edges of the image,
         #if ymin is negative
         if(isCube):
           
           img                        = f[0].data.take(range(ymin,ymax),mode='wrap', axis=1).take(range(xmin,xmax),mode='wrap',axis=2)
         
         else:
           
           img                        = f[0].data.take(range(ymin,ymax),mode='wrap', axis=0).take(range(xmin,xmax),mode='wrap',axis=1)
         
         pass
         
         #Put the origin of the projection in the right place
         #in the new image
         head['CRPIX1']               -= xmin
         head['CRPIX2']               -= ymin
         
         #Update the length of the axis
         head['NAXIS1']               = int(xmax-xmin)
         head['NAXIS2']               = int(ymax-ymin)
     
         if head.get('NAXIS1') == 0 or head.get('NAXIS2') == 0:
             raise ValueError("Map has a 0 dimension: %i,%i." % (head.get('NAXIS1'),head.get('NAXIS2')))
       pass
       
       newfile = pyfits.PrimaryHDU(data=img,header=head)
       
       newfile.writeto(outfile,clobber=clobber)
       #Append the other extension, if present
       for i in range(1,len(f)):
         pyfits.append(outfile,f[i].data,header=f[i].header)
     
     pass #Close the input file
     
     #Now re-open the output file and fix the wcs 
     #by moving the reference pixel to the 1,1 pixel
     #This guarantee that no pixel will be at a distance larger than 180 deg
     #from the reference pixel, which would confuse downstream software
     if(not pole):
       with pyfits.open(outfile,'update') as outf:
         head                         = outf[0].header
          #Get the WCS of the new image from the updated header
         newwcs                          = pywcs.WCS(head)
         
         #Find the sky coordinates of the 1,1 pixel
         if(isCube):
           #Data cube
           coord                       = numpy.array([[1],[1],[1]]).T
           sx,sy,z                     = newwcs.wcs_pix2sky(coord,1)[0]
         else:
           #Normal image
           coord                       = numpy.array([[1],[1]]).T
           sx,sy                       = newwcs.wcs_pix2sky(coord,1)[0]
         pass
                 
         head['CRPIX1']                = 1
         head['CRVAL1']                = sx
       
       
     pass
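
cutout() relies on the helpers getAngularDistance and getBoundingCoordinates,
defined elsewhere in the same module. A minimal sketch of the distance helper,
assuming a plain great-circle (haversine) formula with all angles in degrees
(the _sketch suffix marks it as illustrative):

import numpy

def getAngularDistance_sketch(lon1, lat1, lon2, lat2):
    # angular separation in degrees between (lon1,lat1) and the arrays (lon2,lat2)
    lon1, lat1 = numpy.radians(lon1), numpy.radians(lat1)
    lon2, lat2 = numpy.radians(lon2), numpy.radians(lat2)
    a = (numpy.sin((lat2 - lat1) / 2.0)**2 +
         numpy.cos(lat1) * numpy.cos(lat2) * numpy.sin((lon2 - lon1) / 2.0)**2)
    return numpy.degrees(2.0 * numpy.arcsin(numpy.sqrt(a)))

The border-wrapping selection with take(mode='wrap') can be checked on a toy
array: numpy.arange(10).take(range(-3, 2), mode='wrap') returns
array([7, 8, 9, 0, 1]), i.e. the last three elements followed by the first two.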
Exemple #45
0
    p.add_option('--outfile', action='store', type='string',
                 default='./', help='Output FITS file')
    p.add_option('--start', action='store', type='int', 
                 help='start of window')
    p.add_option('--end', action='store', type='int',help="End of window")
    p.add_option('--period', action='store', type='int',help="ripple period")

    (par, args) = p.parse_args()

    if len(sys.argv) == 1:
       print '\n'
       p.print_help()
       sys.exit(0)

    infits = pf.open(par.infile)
    spectrum = infits['SCI'].data

    outspectrum = deripple(spectrum, par.start, par.end, par.period)
    # Create a new file with the input PHU and the SCI header
    phu = infits[0]
    pf.writeto(par.outfile, None, header=infits[0].header, clobber=True)
    pf.append(par.outfile, outspectrum, header=infits['SCI'].header)

    infits.close()

    

    # pf.writeto(par.ourfile,out, header)


Exemple #46
0
def sky_subtraction(input_filename,output_filename,sky_fibers_file):
	if plot == 'plot':
		fig=plt.figure()
		plt.title('Dummy plot to start things off',fontsize=16)
		plt.plot(range(10),range(10))
		plt.ion()
		plt.show()
		sleep_time=3
	sky_fibers = np.zeros(2)
	if not os.path.isfile(sky_fibers_file):
		d = ds9()
		d.set("file "+input_filename)
		d.set("mode crosshair")
		d.set("scale mode 90")
		d.set("raise")
		d.set("raise")
		dummy = raw_input("Press enter once crosshairs are on the lower sky fiber");
		sky_fibers[0] = int(d.get("crosshair").split()[1])
		print("Lower Sky Fiber: "+str(sky_fibers[0]))
		d.set("raise")
		d.set("raise")
		dummy = raw_input("Press enter once crosshairs are on the upper sky fiber");
		sky_fibers[1] = int(d.get("crosshair").split()[1])
		print("Upper Sky Fiber: "+str(sky_fibers[1]))
		np.savetxt(sky_fibers_file, sky_fibers, fmt='%d')
	#read in the offsets file
	sky_fibers = np.genfromtxt(sky_fibers_file,dtype=None)
	print("Sky Fibers read in: "+str(sky_fibers))
	input_data = pyfits.getdata(input_filename)
	#print(input_filename)
	data_header = pyfits.getheader(input_filename, 'DATA',1)
	gain = float(data_header["GAIN1"]) #probably don't need this, but it was in my IDL, so I'm bringing it over
	cdelt = float(data_header["CDELT1"])
	crval = float(data_header["CRVAL1"])
	crpix = float(data_header["CRPIX1"])
	print("cdelt: "+str(cdelt))
	print("crval: "+str(crval))
	print("crpix: "+str(crpix))
	
	
	
	#input_data is the RSS image data
	
	wave_pix = np.zeros(2, dtype=int) #integer pixel indices, used for slicing below
	number_of_pixels = input_data[0,:].size
	number_of_fibers = input_data[:,0].size
	print('Number of pixels in wavelength axis: '+str(number_of_pixels))
	print('Number of pixels in the fiber axis: '+str(number_of_fibers))
	#find first non-zero value of the array, use the skylines because we've already checked that they're good
	wave_pix[0] = next((i for i, x in enumerate(input_data[sky_fibers[0],:]) if x), None)
	wave_pix[1] = wave_pix[0]+int(0.1*number_of_pixels)
	print("Wave pix: "+str(wave_pix))
	Nrd = np.std(input_data[sky_fibers[0]:sky_fibers[1],wave_pix[0]:wave_pix[1]])
	
	
	
	if plot == 'plot_not':
		plt.title('Sky fibers (also where Nrd is being determined)',fontsize=16)
		fiber = np.array(range(number_of_fibers))
		fiber_intensity = np.array(range(number_of_fibers))
		for index in np.array(range(number_of_fibers)):
			fiber_intensity[index] = sum(input_data[index,:])
		plt.plot(fiber,fiber_intensity)
		plt.axvline(x=sky_fibers[0])
		plt.axvline(x=sky_fibers[1])
		plt.draw()
		time.sleep(sleep_time)
		plt.clf()


	var = np.zeros((number_of_fibers,number_of_pixels))
	var.fill(Nrd)
	
	
	#CALCULATE AND SUBTRACT THE NOISE FROM OUR SIGNAL
	#Calculate the vertical offset, which is just background noise, and subtract that away.
	target_skyline_window = int(40*cdelt) #based on cdelt, pick a reasonable fitting window around the sky line (~xx pixels for VIMOS HR, ~80 pixels for VIMOS LR)
	
	#offset = linear fit to continuium
	#corrected_image = image-offset
	
	#;DETERMINE FLUX NORMALIZATION
	#;Perform a Gaussian fit on the data with the offset removed, and then come up with a normalization factor for each fiber.
	#Subtract the gaussian fit from the spectrum
	
	target_skyline_pixels = convert_to_pixels(target_skyline,cdelt,crval)
	#
	#
	#
	target_skyline_pixels = target_skyline_pixels+15 #!!!!!!!This is a cheat, need to implement the wavelength shifting code
	#
	#
	#
	original_peaks = np.zeros(number_of_fibers)
	integrated_skyline_flux = np.zeros(number_of_fibers)
	for index, spectrum in enumerate(input_data):
		original_peak = max(spectrum) #I am assuming that the sky line is the brightest pixel in the image
		original_peak_position = target_skyline_pixels
		p0 = [original_peak, original_peak_position, 1.]
		#print(original_peak_position)
		#if index == 65:
		#	print(spectrum)
		#	print(original_peak)
		#	
		try:
			coeff, gauss_matrix = curve_fit(gauss, np.array(range(number_of_pixels)), spectrum, p0=p0)
			gauss_fit = gauss(np.array(range(number_of_pixels)), *coeff)
			original_peaks[index] = coeff[1]
			peak = coeff[1] # peak
			sigma = coeff[2] #sigma
		except:
			print("Gaussian fit failed for some reason, defaulting to dumb detection of peak")
			original_peaks[index] = original_peak_position
			peak = original_peak_position
			sigma = cdelt #fall back to the pixel scale when the fit fails
		
		p0 = [1.0, 1000]
		#fit the continuum from peak-10*sigma to peak-5*sigma and from peak+5*sigma to peak+10*sigma
		if sigma > (0.1*number_of_pixels):
			print("Sigma is too high, resetting to 1/100 of wavelength range.")
			sigma = 0.01*number_of_pixels
		if sigma < cdelt/4:
			sigma = cdelt/3
		#print(sigma)
		before_peak = np.array(range(int(round((peak-10*sigma))),int(round((peak-5*sigma)))))
		after_peak = np.array(range(int(round((peak+5*sigma))),int(round((peak+10*sigma)))))
		linear_pixels = np.concatenate((before_peak, after_peak), axis=0)
		#stop()
		#if len(linear_pixels) <= 1:
		#	linear_pixels = 
		#print(linear_pixels)
		#print(len(linear_pixels))
		#coeff, linear_matrix = curve_fit(line, [0,1,2,3,4,5,6,7,8,9], [ 1100.2154541  ,1145.8223877 , 1194.49658203 , 1173.9029541 ,  1254.51281738,  1212.34179688 , 1264.14782715,  1208.92175293 , 1092.0267334  , 1016.29296875], p0=p0)
		if len(linear_pixels) > 1:
			#print(index)
			#print('Length of linear pixels:' + str(len(linear_pixels)))
			#try:
			coeff, linear_matrix = curve_fit(line, np.array(range(len(linear_pixels))), input_data[index,linear_pixels], p0=p0)
			line_fit = line(np.array(range(2*target_skyline_window)), *coeff)
			#except:
			#	coeff = [1.0, 1000]
			#	line_fit = line(range(2*target_skyline_window), *coeff)
			if plot == 'plot1':
				plt.title('Skyline',fontsize=16)
				plt.plot(np.array(range(2*target_skyline_window)),input_data[index,target_skyline_pixels-target_skyline_window:target_skyline_pixels+target_skyline_window])
				plt.plot(np.array(range(2*target_skyline_window)),line_fit)
				plt.ylim(0,5000)
				plt.draw()
				#time.sleep(sleep_time)
				plt.clf()
			offset = line(np.array(range(number_of_pixels)), *coeff)
			offset_spectrum = spectrum-offset#/2 #not sure if this 2 is required...
			if plot == 'plot1':
				plt.title('Skyline',fontsize=16)
				plt.plot(range(100), spectrum[790:890])
				plt.plot(range(100), offset_spectrum[790:890])
				plt.ylim(-1000,5000)
				plt.draw()
				plt.clf()
			p0 = [original_peak, original_peak_position, 1.]
			#print(index)
			#
			#Problem here with some fits not converging
			#
			#print(offset_spectrum)
			#fig=plt.figure()
			#plt.plot(range(number_of_pixels),offset_spectrum)
			#plt.show()
			try:
				coeff, gauss_matrix = curve_fit(gauss, np.array(range(number_of_pixels)), offset_spectrum, p0=p0)
				gauss_fit = gauss(np.array(range(number_of_pixels)), *coeff)
			except:
				print("Fit didn't work, defaulting to backup fit.")
				coeff, gauss_matrix = curve_fit(gauss, np.array(range(number_of_pixels)), spectrum, p0=p0)
				gauss_fit = gauss(np.array(range(number_of_pixels)), *coeff)
			if plot == 'plot1':
				plt.title('Skyline',fontsize=16)
				plt.plot(range(100), spectrum[790:890])
				plt.plot(range(100), gauss_fit[790:890])
				plt.ylim(-1000,5000)
				plt.draw()
				plt.clf()
			#print(coeff[0])
			#print(coeff[2])
			integrated_skyline_flux[index]=coeff[0]*coeff[2]*math.sqrt(2*math.pi) #0 is height, 2 is width
			#print(integrated_skyline_flux)
		#else:
		#	line_fit = [0,0]
	median_skyline_flux = np.median(integrated_skyline_flux)
	#print(median_skyline_flux)
	scale = integrated_skyline_flux/median_skyline_flux
	if plot == 'plot1':
		plt.title('Skyline',fontsize=16)
		plt.plot(range(number_of_fibers),scale)
		plt.draw()
		time.sleep(sleep_time)
		plt.clf()
	print('Scale is broken, do not use it yet.')
	#for index, spectrum in enumerate(input_data):
	#	#print('Scale: '+str(scale[index]))
	#	if scale[index] > 0.5:
	#		print('Before scale: ',str(input_data[index,10:20]))
	#		input_data[index,:] = input_data[index,:]/scale[index]
	#		print('After scale: ',str(input_data[index,10:20]))
	#		var[index,:] = var[index,:]/scale[index]
	#	else:
	#		#input_data[index,:] = input_data[index,:]*0
	#		var[index,:] = 99999999^2
	average_sky_vector = np.zeros((number_of_pixels))
	temp_storage = np.zeros((sky_fibers[1]-sky_fibers[0]))
	for spectrum_index in np.array(range(number_of_pixels)):
		for fiber_index in np.array(range(sky_fibers[0],sky_fibers[1])):
			#print('fiber_index: '+str(fiber_index))
			temp_storage[fiber_index-sky_fibers[0]] = input_data[fiber_index,spectrum_index]
			#print(input_data[fiber_index,spectrum_index])
		clipped_vector = sigma_clip(temp_storage, 5, 1) #5sigma clipped mean of temp_storage
		#print(temp_storage)
		average_sky_vector[spectrum_index] = np.mean(clipped_vector)
	#print(average_sky_vector)
	if plot == 'plot1':
		plt.title('Average sky spectrum to be subtracted',fontsize=16)
		plt.plot(np.array(range(number_of_pixels)),average_sky_vector)
		plt.ylim(0,1000)
		plt.draw()
		time.sleep(sleep_time)
		plt.clf()
	for index, spectrum in enumerate(input_data):
		if plot == 'plot':
				plt.title('Skyline subtraction in action',fontsize=16)
				plt.plot(np.array(range(number_of_pixels)),input_data[index,:])
				#input_data[index,:] = input_data[index,:]-average_sky_vector
				plt.plot(np.array(range(number_of_pixels)),input_data[index,:]-average_sky_vector)
				plt.plot(np.array(range(number_of_pixels)),average_sky_vector)
				plt.ylim(0,5000)
				plt.draw()
				#time.sleep(sleep_time)
				plt.clf()
		input_data[index,:] = input_data[index,:]-average_sky_vector
	#print("Saving to file: "+output_filename)
	pyfits.writeto(output_filename,input_data,data_header,clobber=True)
	pyfits.append(output_filename, var, data_header)
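
sky_subtraction() calls several small helpers (gauss, line, convert_to_pixels,
sigma_clip) that are defined elsewhere in this script. Minimal sketches of what
they could look like, assuming the parameter orderings implied by the p0 values
above (gauss: [height, centre, width]; line: [slope, intercept]); these are
illustrative, not the original definitions:

def gauss(x, a, mu, sigma):
    # Gaussian of height a, centre mu, width sigma; its integral is
    # a*sigma*sqrt(2*pi), matching the integrated_skyline_flux formula above
    return a * np.exp(-(x - mu)**2 / (2.0 * sigma**2))

def line(x, m, b):
    # straight line used for the local continuum fit
    return m * x + b

def convert_to_pixels(wavelength, cdelt, crval):
    # invert the linear dispersion wave = crval + pixel*cdelt
    # (any CRPIX offset is ignored, as in the 3-argument call above)
    return (wavelength - crval) / cdelt

def sigma_clip(data, nsigma, niter):
    # keep only values within nsigma standard deviations of the mean
    clipped = np.asarray(data)
    for _ in range(niter):
        mean, std = np.mean(clipped), np.std(clipped)
        clipped = clipped[np.abs(clipped - mean) <= nsigma * std]
    return clipped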
Exemple #47
0
col6 = pf.Column(name='subclass',    format='6A', array = subclass)
col7 = pf.Column(name='length',      format='I',  array = length)

cols = pf.ColDefs([col1, col2, col3, col4, col5, col6, col7])
tablehdu = pf.new_table(cols)
imagehdu = pf.PrimaryHDU(newflux)

hdulist = pf.HDUList([imagehdu, tablehdu])

filename = 'pcaspectra_rest_new_{0}.fits'.format(datetime.datetime.now().strftime('%y%m%d-%H%M'))

hdulist.writeto(filename, clobber=True)

#pf.writeto(filename, newflux, clobber=True)
#pf.append(filename, newwave)
#pf.append(filename, z)
#pf.append(filename, plate)
#pf.append(filename, mjd)
#pf.append(filename, fiber)
#pf.append(filename, classification)
#mytables.write(fitstable, output=filename) 

#pf.append(filename, neweigen)
pf.append(filename, magnitudes)
pf.append(filename, newwave)
pf.append(filename, newinvvar)
#pf.append(filename, andmaskarray)
#pf.append(filename, ormaskarray)
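# The three arrays appended above become extensions 2, 3 and 4 of the output
# file (after the primary image and the binary table) and can be read back by
# index, e.g. magnitudes = pf.getdata(filename, 2), newwave = pf.getdata(filename, 3).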


def get_psf_images():



    filename_lores = os.path.join(args.out_dir,'nbc.psf.lores.fits')
    filename_hires = os.path.join(args.out_dir,'nbc.psf.hires.fits')
    filename_field = os.path.join(args.out_dir,'nbc.psf.field.fits')
    
    if os.path.isfile(filename_lores): os.remove(filename_lores); log.info('removed existing %s',filename_lores)
    if os.path.isfile(filename_hires): os.remove(filename_hires); log.info('removed existing %s',filename_hires)
    if os.path.isfile(filename_field): os.remove(filename_field); log.info('removed existing %s',filename_field)

    orig_pixel_scale = config['pixel_scale']
    orig_image_size = config['cutout_size']

    # make a master PSF config copy
    config_psf = copy.deepcopy(config)

    filename_cat = 'psf_key.fits'
    config_psf['input']['catalog']['file_name'] = filename_cat

    # make a dummy galaxy to simplify PSF generation
    config_psf['gal'] = {}
    config_psf['gal']['type'] = 'Exponential'
    config_psf['gal']['half_light_radius'] = 3

    n_psfs = len(config['bins_fwhm_centers'])*len(config['bins_ell_centers'])**2
    
    iall = 0
    for ifwhm,fwhm in enumerate(config['bins_fwhm_centers']):
        for ie1,e1 in enumerate(config['bins_ell_centers']):
            for ie2,e2 in enumerate(config['bins_ell_centers']):
                                                          
                log.debug('getting single PSF at the pixel scale of a galaxy')


                config_copy1=copy.deepcopy(config_psf)
                config_copy1['psf']['fwhm'] = fwhm
                config_copy1['psf']['ellip']['g1'] = e1
                config_copy1['psf']['ellip']['g2'] = e2
                img_gal,img_psf,_,_  = galsim.config.BuildImages(config=config_copy1,image_num=0,obj_num=0,make_psf_image=True,nimages=1)   
                img_psf = img_psf[0]
                img_psf = img_psf[galsim.BoundsI(1, orig_image_size, 1, orig_image_size)]
                pyfits.append(filename_lores,img_psf.array)
                # filename_lores = 'nbc.psf.lores.%03d.fits' % iall
                # img_psf.write(filename_lores)
                                                              
                # now the hires PSF, centers in the middle
                # log.debug('getting single PSF at high resolution')                 
                # config_copy2=copy.deepcopy(config_psf)
                # n_sub = config['upsampling']
                # n_pad = config['padding']
                # n_pix_hires = (orig_image_size + n_pad) * n_sub
                # pixel_scale_hires = float(config_copy2['image']['pixel_scale']) / float(n_sub)
                # config_copy2['image']['pixel_scale'] = pixel_scale_hires
                # config_copy2['image']['size'] = n_pix_hires
                # config_copy2['psf']['fwhm'] = fwhm
                # config_copy2['psf']['ellip']['g1'] = e1
                # config_copy2['psf']['ellip']['g2'] = e2            

                # # no pixel convolution
                # config_copy2['pix'] = {}
                # config_copy2['pix']['type'] = 'Pixel'
                # config_copy2['pix']['xw'] = orig_pixel_scale

                # img_gal,img_psf,_,_ = galsim.config.BuildImages(config=config_copy2,image_num=0,obj_num=0,make_psf_image=True,nimages=1)      
                # img_psf = img_psf[0]
                # img_psf = img_psf[galsim.BoundsI(1, int(n_pix_hires), 1, int(n_pix_hires))]             
                # pyfits.append(filename_hires,img_psf.array)
                
                log.debug('getting single PSF at high resolution')                 
                config_copy2=copy.deepcopy(config_psf)
                n_sub = config['upsampling']
                n_pad = config['padding']
                n_pix_hires = (orig_image_size + n_pad) * n_sub
                pixel_scale_hires = float(config_copy2['image']['pixel_scale']) / float(n_sub)

                img_psf=galsim.ImageD(n_pix_hires,n_pix_hires)
                psf = galsim.Kolmogorov(fwhm=fwhm)
                psf.applyShear(g1=e1,g2=e2)
                pix = galsim.Pixel(scale=orig_pixel_scale)
                psfpix = galsim.Convolve([psf,pix])
                psfpix.draw(img_psf,scale=pixel_scale_hires)

                pyfits.append(filename_hires,img_psf.array)



                # img_loaded=pyfits.getdata(filename_hires,iall)
                # pl.subplot(1,3,1)
                # pl.imshow(img_psf.array,interpolation='nearest');
                # pl.subplot(1,3,2)
                # pl.imshow(img_loaded,interpolation='nearest'); 
                # pl.subplot(1,3,3)
                # pl.imshow(img_loaded-img_psf.array,interpolation='nearest'); 
                # pl.show()

                # filename_hires = 'nbc.psf.hires.%03d.fits' % iall
                # img_psf.write(filename_hires)

                # now field
                log.debug('getting low res PSF in a field')
                config_copy3=copy.deepcopy(config_psf) 
                config_copy3['image']['pixel_scale'] = orig_pixel_scale
                config_copy3['image']['stamp_size'] = orig_image_size
                config_copy3['image']['type'] = 'Tiled'
                config_copy3['image']['nx_tiles'] = 10
                config_copy3['image']['ny_tiles'] = 10
                config_copy3['image']['stamp_xsize'] = orig_image_size
                config_copy3['image']['stamp_ysize'] = orig_image_size
                config_copy3['psf']['fwhm'] = fwhm
                config_copy3['psf']['ellip']['g1'] = e1
                config_copy3['psf']['ellip']['g2'] = e2
                if 'size' in config_copy3['image']:    del(config_copy3['image']['size'])
                img_gal,img_psf,_,_ = galsim.config.BuildImage(config=config_copy3,image_num=0,obj_num=0,make_psf_image=True)    
                pyfits.append(filename_field,img_psf.array)
                # filename_field = 'nbc.psf.field.%03d.fits' % iall
                # img_psf.write(filename_field)

                log.info('generated id=%3d %3d/%d psfs fwhm=%2.5f e1=% 2.5f e2=% 2.5f ifwhm=%d ie1=%d ie2=%d ' , iall , iall+1, n_psfs , fwhm, e1, e2, ifwhm, ie1,ie2)
                iall += 1



    hdus_lores=pyfits.open(filename_lores)
    hdus_hires=pyfits.open(filename_hires)
    hdus_field=pyfits.open(filename_field)

    log.info('finished writing %s with %d hdus' , filename_lores , len(hdus_lores))
    log.info('finished writing %s with %d hdus' , filename_hires , len(hdus_hires))
    log.info('finished writing %s with %d hdus' , filename_field , len(hdus_field))
import pyfits
import numpy as np
imgname = '../data/VCC1043_k_sig.fits'
img = pyfits.getdata(imgname)
h = pyfits.getheader(imgname)
img2 = (1./img)**2
print len(img2)
hdr = h.copy()
filename = '../data/VCC1043_k_weight.fits'
pyfits.writeto(filename, img2, hdr)
pyfits.append(imgname, img2, hdr)
#pyfits.update(filename, img2, hdr, ext)
Exemple #50
0
    def split_fits(filename=None, split_dir='.', size_limit = 1024.0):
        fits_list = []
        filestat = os.stat(filename)
        filesize = filestat.st_size/(1024.0**2)
        if filesize <= size_limit:
            logger.error("This file is only %f MB, smaller than our size limit %f MB, no split."%(filesize, size_limit))
            return []
        try:
            bighdulist = pyfits.open(filename, memmap=True)
            first_row = bighdulist[0]
            header = first_row.header
        except:
            logger.error("Error encountered when trying to open FITS file %s"%filename)
            return []

        fn = filename[filename.rfind('/')+1:filename.rfind('.fits')]
        deltaf = header['DELTAF']
        fftlen = header['NAXIS1']
        fcntr = header['FCNTR']
        frange = [fcntr - fftlen*deltaf/2, fcntr + fftlen*deltaf/2]

        nfiles_min = int(math.ceil(filesize/size_limit))
        new_width_max = fftlen/nfiles_min
        new_width = int(2**math.floor(np.log2(new_width_max)))  # largest power of two that fits
        nfiles = int(math.ceil(fftlen/float(new_width)))
        new_files = []
        new_fcntrs = []
        new_filenames = []
        indices = []
        new_primary_header = copy.deepcopy(header)
        to_create = []
        for i in range(0, nfiles):
            new_filename = split_dir + '/' + fn + '_%d'%i + '.fits'
            new_filenames.append(new_filename)
            new_fcntr_tmp = frange[0] + deltaf * new_width * (i + 0.5)
            new_fcntrs.append(new_fcntr_tmp)
            new_primary_header['FCNTR'] = new_fcntr_tmp
            ind = (i*new_width, min(fftlen, (i+1)*new_width))
            indices.append(ind)
            if os.path.isfile(new_filename):
                logger.error("file %s already existed!"%new_filename)
                to_create.append(False)
                continue
            to_create.append(True)
            data = first_row.data[0][ind[0]:ind[1]]
            prihdu = pyfits.PrimaryHDU([data], header = new_primary_header)
            prihdu.writeto(new_filename)
            logger.info("Created new file: %s"%new_filename)

        for i, ohdu in enumerate(bighdulist[1:]):
            logger.debug("Dealing with row %d"%i)
            new_header = copy.deepcopy(ohdu.header)
            for j, new_filename in enumerate(new_filenames):
                if not to_create[j]:
                    continue
                new_header['FCNTR'] = new_fcntrs[j]
                ind = indices[j]
                data = ohdu.data[0][ind[0]:ind[1]]
                pyfits.append(new_filename, [data], new_header)

        for new_filename in new_filenames:
            fits_obj = FITS(new_filename)
            fits_list.append(fits_obj)

        return fits_list
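
A worked example of the splitting arithmetic above, with illustrative numbers:
a 3.2 GB file with size_limit=1024 MB gives nfiles_min = ceil(3276.8/1024) = 4;
for fftlen = 2**20 channels, new_width_max = 262144, so new_width = 2**18 and
nfiles = 4 sub-files, each recentred at frange[0] + deltaf*new_width*(i + 0.5).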
Exemple #51
0
#define a single common log-wavelength grid
initialpixel = np.log10(minwavelength)
finalpixel   = np.log10(maxwavelength)
deltapix     = 1e-4 #10.**(np.min(deredloglambda0))*(10.**1e-4 - 1.)
npix         = int((finalpixel - initialpixel)/deltapix + 1.)
newwave      = initialpixel + deltapix*np.arange(npix)

newflux      = np.zeros((len(flux), npix+1))
chisq        = np.zeros(len(flux))

#resample each spectrum onto the common grid defined above
smoothing_parameter = 3.0
spline_order = 3
number_of_knots = -1

for p in range(len(flux)):
    nonzero = np.where(wavevector[p,:] != 0.)
    fitcoeff = cspline1d(flux[p], lamb=smoothing_parameter)
    newflux[p,:] = cspline1d_eval(fitcoeff, newwave, dx=wave1[p], x0 = wavevector[p,0])
    oldfit = cspline1d_eval(fitcoeff, wavevector[p,nonzero][0], dx=wave1[p], x0 = wavevector[p,0])
    chisq[p] = np.sum(np.sqrt((oldfit - flux[p])**2.*invvar[p]))/np.shape(flux[p])[0]

filename = 'pcaspectra_rest.fits'
pf.writeto(filename, newflux, clobber=True)
pf.append(filename, newwave)
pf.append(filename, z)

t1 = time.time()
print t1-t0
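
The 1e-4 step in log10(wavelength) used above is the usual SDSS convention:
each pixel then spans a fixed velocity interval of c*ln(10)*1e-4, roughly
69 km/s:

c_kms = 299792.458
dv = c_kms * np.log(10.0) * 1e-4   # ~69.03 km/s per pixel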

Exemple #52
0
            im = nt.restore_nans(im,bp)

            #print 'NUMBER OF bp>1',np.sum(bp>1.)
            #print 'NUMBER OF NANS',np.sum(np.isnan(im))

            log.info('%s %s: xcen: %.2f ycen: %.2f' % 
                                      (outfile,
                                      ['red','blue'][ext-1],
                                      xcen,ycen))  

            # Order wcs -in place.
            nt.order_wcs(hdr)
            hdr.update("EXTNAME",'SCI', "SCIENCE extension",after='GCOUNT')
            del hdr["EXTVER"]
            hdr.update("EXTVER",ext, "SCIENCE ext version",after='EXTNAME')
            pf.append(outfile,im,hdr)
            out.flush()

        out.close()

    return 

def remove_badpix(im,log):
    """
    Aggressive bad pixel (NaN) removal.
    If 3 or more of the neighbouring pixels are NOT NaNs,
    we assume that the value for the NaN pixel is the
    mean of the non-NaNs. If not, leave it as a NaN.

    """
Exemple #53
0
def stack_galfit_images(infns, label='all', par='n', debug=False):
    #fns = random.sample(infns, min(len(infns),maxstack))
    fns = infns[:min(len(infns),maxstack)]
    #for j in range(len(fns)):
    #    fns[j] = fns[j].replace('/home/boris/gama/galapagos/galapagos_2.0.3_galfit_0.1.2.1_GAMA_9', '/Users/spb/gala9')
    bandscales = numpy.array([0.06, 0.1, 0.4])/10.0
    beta=2.0
    outsize = size
    n = len(fns)
    if n == 0:
        return None
    if maxstack > 5 and n < 5:
        return None
    print label, len(infns), n
    if n < 5:
        print fns
    nbands = len(bands)
    coadd = []
    modelcoadd = []
    for i, band in enumerate(bands):
        valueoffsets = []
        valuescales = []
        xcentres = []
        ycentres = []
        radii = []
        angles = []
        axisratios = []
        for j, fn in enumerate(fns):
            p = pyfits.open(fn)
            results = p['final_band'].data
            # the following scales to the largest image
            # not suitable when building residual images (need same shape for all stacked images)
            #outsize = max(outsize, max(p[0].shape))
            # index to scale according to the parameters in this band
            bandidx = (results.field('BAND')==band).nonzero()[0][0]
            # index to scale according to r-band parameters
            rbandidx = (results.field('BAND')=='r').nonzero()[0][0]
            valueoffsets.append(-results.field('COMP1_SKY')[bandidx])
            valuescales.append(10**(0.4*(results.field('COMP2_MAG')[rbandidx]-18)))
            xcentres.append(results.field('COMP2_YC')[rbandidx])
            ycentres.append(results.field('COMP2_XC')[rbandidx])
            if par == 're':
                radii.append(results.field('COMP2_RE')[rbandidx])
            elif par == 'n':
                radii.append(results.field('COMP2_RE')[bandidx])
            angles.append(results.field('COMP2_PA')[rbandidx])
            axisratios.append(results.field('COMP2_AR')[rbandidx])
        axisratios = 1.0  # note: overrides the per-galaxy values collected above (no axis-ratio correction)
        coaddi, stacki = stack_images(fns, outsize, 'input_%s'%band, valueoffsets, valuescales,
                                      xcentres, ycentres, scales, angles, axisratios, clip=3.0)
        coadd.append(coaddi)
        pyfits.writeto('stack_%s_%s.fits'%(label, band), coaddi, clobber=True)
        for si in stacki:
            pyfits.append('stackall_%s_%s.fits'%(label, band), si)
        # stack the models to verify
        coaddi, stacki = stack_images(fns, outsize, 'model_%s'%band, valueoffsets, valuescales,
                                      xcentres, ycentres, scales, angles, axisratios, clip=3.0)
        modelcoadd.append(coaddi)
        pyfits.writeto('stackmodel_%s_%s.fits'%(label, band), coaddi, clobber=True)
        for si in stacki:
            pyfits.append('stackmodelall_%s_%s.fits'%(label, band), si)
    coadd = numpy.array(coadd)
    for i, b in enumerate(bandscales):
        coadd[i] *= b
    colimg = RGBImage(coadd[0], coadd[1], coadd[2], beta=beta)
    colimg.save_as('stack_%s.png'%label)
    coadd = numpy.array(modelcoadd)
    for i, b in enumerate(bandscales):
        coadd[i] *= b
    colimg = RGBImage(coadd[0], coadd[1], coadd[2], beta=beta)
    colimg.save_as('stackmodel_%s.png'%label)
    return coadd
				kinematic_measurements[:,32] = OIII_5007_amplitude_array
				kinematic_measurements[:,34] = OIII_5007_dispersion_array
				kinematic_measurements[:,21] = h_beta_flux_array
				kinematic_measurements[:,23] = h_beta_wavelength_array
				kinematic_measurements[:,22] = h_beta_amplitude_array
				kinematic_measurements[:,24] = h_beta_dispersion_array
				kinematic_measurements[:,46] = NII_6584_flux_array
				kinematic_measurements[:,48] = NII_6584_wavelength_array
				kinematic_measurements[:,47] = NII_6584_amplitude_array
				kinematic_measurements[:,49] = NII_6584_dispersion_array
				kinematic_measurements[:,41] = h_alpha_flux_array
				kinematic_measurements[:,43] = h_alpha_wavelength_array
				kinematic_measurements[:,42] = h_alpha_amplitude_array
				kinematic_measurements[:,44] = h_alpha_dispersion_array
			pyfits.writeto(pink_gandalf_file, kinematic_measurements, img_header, clobber=True)
			pyfits.append(pink_gandalf_file, kinematic_measurements, img_header)
			pyfits.append(pink_gandalf_file, kinematic_measurements, img_header)
			pyfits.append(pink_gandalf_file, kinematic_measurements, img_header)
			pyfits.append(pink_gandalf_file, kinematic_measurements, img_header)
			pyfits.append(pink_gandalf_file, kinematic_measurements, img_header)
			pyfits.append(pink_gandalf_file, kinematic_measurements, img_header)
		if mode == "one":
			gandalf_table = open('/Users/jimmy/Astro/reduced/'+galaxy+'pro/all/'+sncut+'/gandalf_table.txt', "w")
			gandalf_table.write("PP04_O3N2\t"+str(LogOH_PP04_O3N2)+"\n")
			gandalf_table.write("PP04_N2\t"+str(LogOH_PP04_N2)+"\n")
			gandalf_table.write("D02\t"+str(LogOH_D02)+"\n")
			gandalf_table.write("OIII_5007_flux\t"+str(OIII_5007_flux_array[0])+"\n")
			gandalf_table.write("h_beta_flux\t"+str(h_beta_flux_array[0])+"\n")
			gandalf_table.write("NII_6584_flux\t"+str(NII_6584_flux_array[0])+"\n")
			gandalf_table.write("h_alpha_flux\t"+str(h_alpha_flux_array[0])+"\n")
			gandalf_table.close()
Exemple #55
0
def virus(input_filename, output_filename):
	#create a 400x400 pixel blank field
	#277x267
	radius = 5	#radius of the spaxels in pixels
	pixels_per_fiber = math.pi*radius*radius #analytic spaxel area in pixels (~78.5 for r=5)
	pixels_per_fiber = 81 #the discrete mask below (x*x + y*y <= 25) contains exactly 81 pixels for r=5
	starting_y, starting_x = 5, 15
	increment_y, increment_x = 16, 19
	spaxels_in_x_direction = 15
	spaxels_in_y_direction = 17
	field_size = [(radius*2*spaxels_in_y_direction)+((increment_y-(2*radius))*(spaxels_in_y_direction-1))+1,(radius*2*spaxels_in_x_direction)+((increment_x-(2*radius))*(spaxels_in_x_direction-1))+1] #x and y dimension of field
	
	
	fiber_number = 0

	input_data = pyfits.getdata(input_filename)
	var_data, hdr = pyfits.getdata(input_filename, 1, header=True)
	input_header = pyfits.getheader(input_filename)

	#In order to perserve flux, divide by the number of pixels going in to make each fiber 
	input_data = input_data/pixels_per_fiber

	number_of_fibers = input_data[:,0].size
	number_of_pixels = input_data[0,:].size
	field = np.zeros((number_of_pixels,field_size[0],field_size[1]))
	var_field = np.zeros((number_of_pixels,field_size[0],field_size[1]))
	fov_field = np.zeros((field_size[0],field_size[1]))
	
	

	for y_index in range(spaxels_in_y_direction):
		for x_index in range(spaxels_in_x_direction):
			if y_index % 2:
				shift = 10
			else:
				shift = 0
			center_y, center_x = starting_y+y_index*increment_y, starting_x-shift+(x_index*increment_x)
			y,x = np.ogrid[-center_y:field_size[0]-center_y, -center_x:field_size[1]-center_x]
			mask = x*x + y*y <= radius*radius
			#spectrum = input_data[fiber_number,:]
			if y_index % 2:
				for spectrum_index in range(number_of_pixels):
					field[spectrum_index,mask] = input_data[fiber_number,spectrum_index]
					var_field[spectrum_index,mask] = var_data[fiber_number,spectrum_index]
					fov_field[mask] = sum(input_data[fiber_number,:])
				fiber_number = fiber_number+1
			else:
				if x_index <= 13:
					for spectrum_index in range(number_of_pixels):
						field[spectrum_index,mask] = input_data[fiber_number,spectrum_index]
						var_field[spectrum_index,mask] = var_data[fiber_number,spectrum_index]
						fov_field[mask] = sum(input_data[fiber_number,:])
					fiber_number = fiber_number+1
	cdelt = float(input_header["CDELT1"])
	crval = float(input_header["CRVAL1"])
	crpix = float(input_header["CRPIX1"])
	input_header["CDELT3"] = cdelt
	input_header["CRVAL3"] = crval
	input_header["CRPIX3"] = crpix
	pyfits.writeto(output_filename,field,input_header,clobber=True)
	pyfits.append(output_filename, var_field, input_header)
	pyfits.writeto(os.path.splitext(output_filename)[0]+"_fov.fits",fov_field,input_header,clobber=True)
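
The np.ogrid construction above is a standard trick for building a circular
boolean mask without loops; a small self-contained illustration on a 7x7 grid:

import numpy as np
radius = 2
center_y, center_x = 3, 3
y, x = np.ogrid[-center_y:7-center_y, -center_x:7-center_x]
mask = x*x + y*y <= radius*radius  # True inside a radius-2 circle centred at (3,3)
print mask.sum()  # 13 pixels fall inside the discrete circle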
Exemple #56
0
def kepdiffim(infile,outfile,plotfile,imscale,colmap,filter,function,cutoff,clobber,verbose,logfile,status,cmdLine=False): 

# input arguments

    status = 0
    seterr(all="ignore") 

# log the call 

    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPDIFFIM -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'plotfile='+plotfile+' '
    call += 'imscale='+imscale+' '
    call += 'colmap='+colmap+' '
    filt = 'n'
    if (filter): filt = 'y'
    call += 'filter='+filt+ ' '
    call += 'function='+function+' '
    call += 'cutoff='+str(cutoff)+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time

    kepmsg.clock('KEPDIFFIM started at: ',logfile,verbose)

# test log file

    logfile = kepmsg.test(logfile)

# clobber output file

    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile): 
        message = 'ERROR -- KEPDIFFIM: ' + outfile + ' exists. Use --clobber'
        status = kepmsg.err(logfile,message,verbose)

# reference color map

    if colmap == 'browse':
        status = cmap_plot()

# open TPF FITS file

    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \
            kepio.readTPF(infile,'TIME',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \
            kepio.readTPF(infile,'TIMECORR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \
            kepio.readTPF(infile,'CADENCENO',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \
            kepio.readTPF(infile,'FLUX',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \
            kepio.readTPF(infile,'FLUX_ERR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, qual, status = \
            kepio.readTPF(infile,'QUALITY',logfile,verbose)

# read mask defintion data from TPF file

    if status == 0:
        maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(infile,logfile,verbose)

# print target data

    if status == 0:
        print ''
        print '      KepID:  %s' % kepid
        print ' RA (J2000):  %s' % ra
        print 'Dec (J2000):  %s' % dec
        print '     KepMag:  %s' % kepmag
        print '   SkyGroup:    %2s' % skygroup
        print '     Season:    %2s' % str(season)
        print '    Channel:    %2s' % channel
        print '     Module:    %2s' % module
        print '     Output:     %1s' % output
        print ''

# how many quality = 0 rows?

    if status == 0:
        npts = 0
        nrows = len(fluxpixels)
        for i in range(nrows):
            if qual[i] == 0 and \
                    numpy.isfinite(barytime[i]) and \
                    numpy.isfinite(fluxpixels[i,ydim*xdim/2]):
                npts += 1
        time = empty((npts))
        timecorr = empty((npts))
        cadenceno = empty((npts))
        quality = empty((npts))
        pixseries = empty((ydim*xdim,npts))
        errseries = empty((ydim*xdim,npts))

# construct output light curves

    if status == 0:
        np = 0
        for i in range(ydim*xdim):
            npts = 0
            for k in range(nrows):
                if qual[k] == 0 and \
                        numpy.isfinite(barytime[k]) and \
                        numpy.isfinite(fluxpixels[k,ydim*xdim/2]):
                    time[npts] = barytime[k]
                    timecorr[npts] = tcorr[k]
                    cadenceno[npts] = cadno[k]
                    quality[npts] = qual[k]
                    pixseries[i,npts] = fluxpixels[k,np]
                    errseries[i,npts] = errpixels[k,np]
                    npts += 1
            np += 1

# define data sampling

    if status == 0 and filter:
        tpf, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0 and filter:
        cadence, status = kepkey.cadence(tpf[1],infile,logfile,verbose)     
        tr = 1.0 / (cadence / 86400)
        timescale = 1.0 / (cutoff / tr)

# define convolution function

    if status == 0 and filter:
        if function == 'boxcar':
            filtfunc = numpy.ones(int(numpy.ceil(timescale)))
        elif function == 'gauss':
            timescale /= 2
            dx = int(numpy.ceil(timescale * 10 + 1))
            filtfunc = kepfunc.gauss()
            filtfunc = filtfunc([1.0,dx/2-1.0,timescale],linspace(0,dx-1,dx))
        elif function == 'sinc':
            dx = int(numpy.ceil(timescale * 12 + 1))
            fx = linspace(0,dx-1,dx)
            fx = fx - dx / 2 + 0.5
            fx /= timescale
            filtfunc = numpy.sinc(fx)
        filtfunc /= numpy.sum(filtfunc)

# pad time series at both ends with noise model

    if status == 0 and filter:
        for i in range(ydim*xdim):
            ave, sigma  = kepstat.stdev(pixseries[i,:len(filtfunc)])
            padded = numpy.append(kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \
                                                        numpy.ones(len(filtfunc)) * sigma), pixseries[i,:])
            ave, sigma  = kepstat.stdev(pixseries[i,-len(filtfunc):])
            padded = numpy.append(padded, kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \
                                                                numpy.ones(len(filtfunc)) * sigma))

# convolve data

            if status == 0:
                convolved = convolve(padded,filtfunc,'same')

# remove padding from the output array

            if status == 0:
                outdata = convolved[len(filtfunc):-len(filtfunc)]
                            
# subtract low frequencies

            if status == 0:
                outmedian = median(outdata)
                pixseries[i,:] = pixseries[i,:] - outdata + outmedian

# sum pixels over cadence

    if status == 0:
        np = 0
        nrows = len(fluxpixels)
        pixsum = zeros((ydim*xdim))
        errsum = zeros((ydim*xdim))
        for i in range(npts):
            if quality[i] == 0:
                pixsum += pixseries[:,i]
                errsum += errseries[:,i]**2
                np += 1
        pixsum /= np
        errsum = sqrt(errsum) / np

# calculate standard deviation pixels

    if status == 0:
        pixvar = zeros((ydim*xdim))
        for i in range(npts):
            if quality[i] == 0:
                pixvar += ((pixsum - pixseries[:,i]) / errseries[:,i])**2
        pixvar = numpy.sqrt(pixvar)

# median pixel errors

    if status == 0:
        errmed = empty((ydim*xdim))
        for i in range(ydim*xdim):
            errmed[i] = numpy.median(errseries[:,i])

# calculate chi distribution pixels

    if status == 0:
        pixdev = zeros((ydim*xdim))
        for i in range(npts):
            if quality[i] == 0:
                pixdev += ((pixsum - pixseries[:,i]) / pixsum)**2 
        pixdev = numpy.sqrt(pixdev)


#        pixdev = numpy.sqrt(pixvar) / errsum #errmed

# image scale and intensity limits

    if status == 0:
        pixsum_pl, zminsum, zmaxsum = kepplot.intScale1D(pixsum,imscale)
        pixvar_pl, zminvar, zmaxvar = kepplot.intScale1D(pixvar,imscale)
        pixdev_pl, zmindev, zmaxdev = kepplot.intScale1D(pixdev,imscale)

# construct output summed image

    if status == 0:
        imgsum = empty((ydim,xdim))
        imgvar = empty((ydim,xdim))
        imgdev = empty((ydim,xdim))
        imgsum_pl = empty((ydim,xdim))
        imgvar_pl = empty((ydim,xdim))
        imgdev_pl = empty((ydim,xdim))
        n = 0
        for i in range(ydim):
            for j in range(xdim):
                imgsum[i,j] = pixsum[n]
                imgvar[i,j] = pixvar[n]
                imgdev[i,j] = pixdev[n]
                imgsum_pl[i,j] = pixsum_pl[n]
                imgvar_pl[i,j] = pixvar_pl[n]
                imgdev_pl[i,j] = pixdev_pl[n]
                n += 1
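        # the nested loop is equivalent to a row-major reshape, e.g.
        # imgsum = pixsum.reshape((ydim, xdim))  (same pixel ordering)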

# construct output file

    if status == 0:
        instruct, status = kepio.openfits(infile,'readonly',logfile,verbose)
        status = kepkey.history(call,instruct[0],outfile,logfile,verbose)
        hdulist = HDUList(instruct[0])
        hdulist.writeto(outfile)
        status = kepkey.new('EXTNAME','FLUX','name of extension',instruct[2],outfile,logfile,verbose)
        pyfits.append(outfile,imgsum,instruct[2].header)
        status = kepkey.new('EXTNAME','CHI','name of extension',instruct[2],outfile,logfile,verbose)
        pyfits.append(outfile,imgvar,instruct[2].header)
        status = kepkey.new('EXTNAME','STDDEV','name of extension',instruct[2],outfile,logfile,verbose)
        pyfits.append(outfile,imgdev,instruct[2].header)
        status = kepkey.new('EXTNAME','APERTURE','name of extension',instruct[2],outfile,logfile,verbose)
        pyfits.append(outfile,instruct[2].data,instruct[2].header)
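        # note: kepkey.new edits instruct[2].header in place, so each append
        # above inherits the same keywords but the freshly set EXTNAME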
        status = kepio.closefits(instruct,logfile,verbose)

# pixel limits of the subimage

    if status == 0:
        ymin = row
        ymax = ymin + ydim
        xmin = column
        xmax = xmin + xdim

# plot limits for summed image

        ymin = float(ymin) - 0.5
        ymax = float(ymax) - 0.5
        xmin = float(xmin) - 0.5
        xmax = float(xmax) - 0.5

# plot style

        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': 24,
                      'font.family': 'sans-serif',
                      'font.weight': 'bold',
                      'font.size': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': 10,
                      'ytick.labelsize': 10}
            pylab.rcParams.update(params)
        except:
            print 'ERROR -- KEPDIFFIM: cannot update matplotlib plot parameters'
            status = 1

    if status == 0:
        plotimage(imgsum_pl,imgvar_pl,imgdev_pl,zminsum,zminvar,zmindev,
                  zmaxsum,zmaxvar,zmaxdev,xmin,xmax,ymin,ymax,colmap,plotfile,cmdLine)
        
# stop time

    kepmsg.clock('KEPDIFFIM ended at: ',logfile,verbose)

    return
def subtractBulge(inputFile=None,inputPPXF=None,bulgeProfile='C11E',matchR=5.):
    # routine to subtract the bulge spectrum from a reduced
    # data cube
    
    # pass in a reduced data cube (not tessellated yet) and
    # some parameters for the bulge profile (the profile to use
    # and matchR, the matching radius), and the routine returns
    # an output cube

    cubefits = pyfits.open(inputFile)
    
    cube = cubefits[0].data
    hdr = cubefits[0].header
    errors = cubefits[1].data
    quality = cubefits[2].data
    nframes = cubefits[3].data
    cubeimg = np.median(cube,axis=2)

    # trim spectra to match template lengths
    # Get the wavelength solution so we can specify range:
    w0 = hdr['CRVAL1']
    dw = hdr['CDELT1']
    wavelength = w0 + dw * np.arange(cube.shape[2], dtype=float)
    wavelength /= 1000.0   # Convert to microns
    # make the blue-end cut (no cut on red end)
    waveCut = 2.18
    idx = np.where(wavelength >= waveCut)[0]
    waveClip = wavelength[idx]
    cube = cube[:,:,idx]
    errors = errors[:,:,idx]
    quality = quality[:,:,idx]
    nframes = nframes[:,:,idx]
    # update header to match the new blue end: CRVAL1 is in nm, so use the
    # first retained wavelength rather than a hard-coded value
    hdr['CRVAL1'] = waveClip[0] * 1000.0

    # NIRC2 scale image: the DTOTOFF-aligned wide-camera mosaic, excluding
    # the 100828 data
    nirc2scalefits = pyfits.open('/Users/kel/Documents/Projects/M31/analysis_old/align/m31_2125nm_w_osiris_rot_scale_NIRC2_DTOTOFF_no100828.fits')
    
    nirc2scale = nirc2scalefits[0].data
    # convert to e-/s (wide: 5s, narrow: 60s)
    #nirc2flux = nirc2scale*4./60.
    nirc2flux = nirc2scale*4./5.
    # area of a spaxel, in spaxels (=1)
    area = 1.**2
    # in arcsec^2
    area *= (0.05**2)
    # deprojected area
    thetai = np.radians(77.5)
    dearea = area/np.cos(thetai)
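    # cos(77.5 deg) ~ 0.216, so each 0.05" spaxel covers ~ 0.0116 arcsec^2
    # once deprojected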
    # convert to SB in e-/s/arcsec^2 (deproj)
    nirc2sb = nirc2flux/dearea
    # reorient for plotting
    nirc2sb = np.rot90(nirc2sb,3)

    cubeShape = (cube.shape[0],cube.shape[1],cube.shape[2])

    # get the bulge luminosity across the field
    # first, get the corresponding semimajor axis length for every point
    # parameters from Dorman+2013
    ell_C11=0.277
    # 360 - PA of the frame - PA of the bulge (Dorman 2013) - 90 (to convert from PA of semimajor to PA of semiminor)
    pa_C11 = 360. - 56. - 6.632 - 90.
    # returns semi-major axis length in pixels
    a_all = a_ellipse(n=[cubeShape[0],cubeShape[1]],cent=[bhpos_pix[1],bhpos_pix[0]],ell=ell_C11,pa=pa_C11)
    # OSIRIS: 1 pixel = 0.05"
    aarcsec = a_all*0.05
    # M31: 1" = 3.73 pc
    apc = aarcsec*3.73
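    # a minimal sketch of what a_ellipse might compute (hypothetical helper;
    # the real routine is defined elsewhere): the semi-major axis length, in
    # pixels, of the ellipse passing through each pixel of an n[0] x n[1]
    # grid with ellipticity ell and position angle pa (degrees)
    def a_ellipse_sketch(n, cent, ell, pa):
        y, x = np.indices((n[0], n[1]), dtype=float)
        th = np.radians(pa)
        xr = (x - cent[1]) * np.cos(th) + (y - cent[0]) * np.sin(th)
        yr = -(x - cent[1]) * np.sin(th) + (y - cent[0]) * np.cos(th)
        # stretch the minor-axis coordinate by 1/(1 - ell) to recover the
        # semi-major axis of the ellipse through each pixel
        return np.sqrt(xr**2 + (yr / (1. - ell))**2)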
    # then get the bulge contribution at each semimajor axis length
    # for now, hard-coding in outSB (from C11E, matchR=5.5)
    #outSB_C11E = -4.76470713
    # area is *not* deprojected
    #outSB_C11E = -4.8263725
    # using wide camera image, matchR=10., C11E (binsbmag[matchidx]+diffmag_C11e)
    outSB_C11E = -8.61758645
    outSB_C11M = -9.02365025 #(matchR = 10)
    #outSB_C11M = -8.96452571 # (matchR = 15)
    #outSB_C11E = 16.01241355 # can't use flux calibrated, as we're in intensities, not magnitudes, here
    # returns mag, SB
    #litMu, litI = getLitProfile2D(outSB=outSB_C11E,rpc2D=apc,inprofile='C11E')
    litMu, litI = getLitProfile2D(outSB=outSB_C11M,rpc2D=apc,inprofile='C11M')
    # reorient lit maps
    litMu = np.rot90(litMu.T,3)
    litI = np.rot90(litI.T,3)
    
    # smooth the bulge and NIRC2 maps by the seeing
    PSFparams = ppxf_m31.readPSFparams('/Users/kel/Documents/Projects/M31/data/osiris_mosaics/drf/sigclip/all_NIRC2_CC_DTOTOFF_2/no100828/data/osiris_perf/sig_1.05/params.txt',twoGauss=False)
    PSFsig = PSFparams.sig1[0]
    gauss = ifu.gauss_kernel(PSFparams.sig1[0],PSFparams.amp1[0],half_box=PSFsig*5.)
    litIsm = signal.convolve2d(litI,gauss,mode='same',boundary='wrap')
    nirc2sbsm = signal.convolve2d(nirc2sb,gauss,mode='same',boundary='wrap')

    # take ratio of bulge_lit to observed to get SB ratio map
    sbratiomap = litIsm/nirc2sbsm

    #pdb.set_trace()
    # plot the SB ratio map
    py.close(2)
    py.figure(2,figsize=(8,3))
    xaxis = (np.arange(sbratiomap.shape[1], dtype=float) - bhpos_pix[0])*0.05
    yaxis = (np.arange(sbratiomap.shape[0], dtype=float) - bhpos_pix[1])*0.05
    py.imshow(sbratiomap,extent=[xaxis[0],xaxis[-1],yaxis[0],yaxis[-1]])
    py.plot([0],'kx',markeredgewidth=2)
    py.axis('image')
    cbar = py.colorbar(orientation='vertical',ticks=[.15,.3,.45])
    cbar.set_label('$\Sigma$ ratio')
    
    #pdb.set_trace()
    
    # make the model bulge spectrum
    # first read in the ppxf files to get the template weights
    pIn=ppxf_m31.PPXFresults(inputPPXF,bestfit=True)
    tw = pIn.tweights
    tw = np.nan_to_num(tw)
    # changed bulge spaxel selection to be spaxels that are above a certain bulge ratio
    #bfrac = 0.5
    #bfrac = 0.45
    bfrac = 0.42
    bulgeidx = np.where((sbratiomap >= bfrac) & (cubeimg > 0) & (tw.sum(axis=2) != 0))
    nobulgeidx = np.where(sbratiomap < bfrac)
    # scale by the luminosity contribution
    # match the bulge spectrum median flux to that of the science spectrum
    medsciflux = np.median(cube,axis=2)
    # get the cube's model templates
    # normalize so the sum of the weights in each spaxel = 1
    logWaveSpec, modSpecCubeOut = ppxf_m31.create_model_templates(tw,norm=False,rebinWave=False)#,pWeights=pIn.pweights)#,normmask=medsciflux)
    # for plotting, later
    twnorm = np.zeros(tw.shape)
    twsum = tw.sum(axis=2)
    for i in range(tw.shape[2]):
        twnorm[:,:,i] = tw[:,:,i]/twsum
    # clip the red end to match the cube (same wavelength scale and
    # blue cut off, so can drop the wavelength vector)
    #modSpecCube = modSpecCube[:,:,0:cubeShape[2]]
    waveSpec = waveClip
    # interpolate templates onto the same wavelength grid
    modSpecCubeNoCont = np.zeros([modSpecCubeOut.shape[0],modSpecCubeOut.shape[1],cube.shape[2]])
    for i in np.arange(modSpecCubeOut.shape[0]):
        for j in np.arange(modSpecCubeOut.shape[1]):
            tck = scipy.interpolate.splrep(logWaveSpec, modSpecCubeOut[i,j,:], s=0)
            modSpecCubeNoCont[i,j,:] = scipy.interpolate.splev(waveClip, tck)
    # add the ppxf continuum to the templates (should roughly match the science spaxels now, except for the LOSVD)
    modSpecCube = np.zeros([modSpecCubeOut.shape[0],modSpecCubeOut.shape[1],cube.shape[2]])
    x = np.linspace(-1, 1, len(waveClip))
    for i in range(modSpecCubeOut.shape[0]):
        for j in range(modSpecCubeOut.shape[1]):
            apoly = np.polynomial.legendre.legval(x, pIn.pweights[i,j,:])
            modSpecCube[i,j,:] = modSpecCubeNoCont[i,j,:] + apoly
    # normalize, to compare line depths
    modSpecCubeNorm = np.zeros(modSpecCube.shape)
    medMod = np.median(modSpecCube, axis=2)   # hoisted out of the loop
    for i in range(modSpecCube.shape[2]):
        modSpecCubeNorm[:,:,i] = modSpecCube[:,:,i] / medMod
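    # equivalently, with numpy broadcasting (same result):
    # modSpecCubeNorm = modSpecCube / medMod[:, :, np.newaxis]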
    #pdb.set_trace()
    modSpec = np.median(modSpecCubeNorm[bulgeidx[0],bulgeidx[1],:],axis=0)
        
    
    #pdb.set_trace()
    # set the velocity
    # have two choices - systemic velocity, or scaled w/ distance from SMBH (probably same, within errors)
    # going w/ systemic velocity for now
    # velocity in km/s
    #v = -308.
    v = -1.*ppxf_m31.vsys
    # convert to pixels - manually calc v/pixel
    vScale = cc.c*((waveSpec[401]-waveSpec[400])/waveSpec[400])
    vPix = v/vScale
    
    # set the dispersion
    # sticking with a single value for now
    # grabbing the value from the edge of the data cube (tessellated)
    # first in km/s
    disp = 110.
    #disp = 150.
    # now in pixels
    dispPix = disp/vScale

    # if using a single velocity/dispersion, can avoid for loops
    # create the bulge LOSVD kernel from the set velocity/dispersion (assuming Gaussian)
    # (partly adapted from ppxf.py)
    # create a window at least 5*sigma wide to avoid edge effects
    dx = int(np.ceil(np.max(abs(vPix)+5*dispPix)))
    nl = 2*dx+1
    x = np.linspace(-dx,dx,nl)
    # fill in the kernel with a Gaussian
    w = (x-vPix)/dispPix
    w2 = w**2
    gausstmp = np.exp(-0.5*w2)
    # normalize
    gausskern = gausstmp/gausstmp.sum()
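    # sanity checks (a sketch): the kernel is unit-normalized and its mean
    # sits at vPix, since the window spans vPix +/- 5*dispPix
    # assert np.isclose(gausskern.sum(), 1.0)
    # assert abs((x * gausskern).sum() - vPix) < 0.1 * dispPix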

    # also convolve by the diff between the OSIRIS and GNIRS resolution, before the LOSVD
    
    # convolve the model bulge spectrum by the LOSVD Gaussian
    newSpec = signal.convolve(modSpec, gausskern, mode='same')

    # flip the ratio map to match the orientation of the cube
    sbratiomap = np.rot90(sbratiomap.T,3)
    # combine the scaling factors
    totscale = medsciflux * sbratiomap
    # make a 3D array of the ratio map, with the same factor at each spectral channel
    #tmp1 = np.tile(sbratiomap,(len(waveSpec),1,1))
    tmp1 = np.tile(totscale,(len(waveSpec),1,1))
    # swap the axes around so it's the correct dimensions
    tmp2 = np.swapaxes(tmp1,0,2)
    ratiomap3d = np.swapaxes(tmp2,0,1)
    
    specScale = ratiomap3d*newSpec
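    # equivalently, numpy broadcasting avoids the tile/swapaxes shuffle
    # (same result):
    # specScale = totscale[:, :, np.newaxis] * newSpec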

    # subtract from the data cube
    newCube = cube - specScale

    # example plot for a single spaxel
    py.close(1)
    py.figure(1,figsize=(7,5))
    py.subplots_adjust(left=0.14, right=0.94, top=0.95,bottom=.15)
    py.plot(waveClip,cube[20,40,:],'b-',label='Original science spectrum')
    py.plot(waveClip,specScale[20,40,:],'g-',label='Scaled bulge spectrum')
    py.plot(waveClip,newCube[20,40,:],'r-',label='Bulge-subtracted science spectrum')
    py.xlim(waveClip[0],waveClip[-1])
    py.ylim(0,.65)
    py.xlabel('Wavelength ($\mu$m)')
    py.ylabel('Flux (DN s$^{-1}$)')
    py.legend(loc=0)
    
    #pdb.set_trace()

    mask0 = np.where(cube[:,:,500] == 0.)
    newCube[mask0[0],mask0[1],:] = 0.

    #pdb.set_trace()
    # how to do the errors?
    outFile = inputFile.replace('.fits', '_bulgesub.fits')
    pyfits.writeto(outFile, newCube, header=hdr, clobber=True,output_verify='warn')
    pyfits.append(outFile,errors)
    pyfits.append(outFile,quality)
    pyfits.append(outFile,nframes)
Exemple #58
0
				emission_line = gauss(wavelength_angstroms, *params)
				img[:,y_coord,x_coord] = img[:,y_coord,x_coord]+((emission_line))

one_bin_bins_emission.write('0\t0\t5'+str(counter)+'\t')
voronoi_2d_binning_emission.close()
voronoi_2d_binning_output_emission.close()
voronoi_2d_bins_emission.close()
one_bin_bins_emission.close()
one_bin_output_emission.close()

text_file = open(HOME+"/Astro/reduced/AGC666pro/h_alpha2.txt", "w")
text_file.write(str(np.sum(h_alpha_flux_array)))
text_file.close()
text_file = open(HOME+"/Astro/reduced/AGC666pro/h_beta2.txt", "w")
text_file.write(str(np.sum(h_beta_flux_array)))
text_file.close()

#cube_hdu[0].data = img
output_filename = HOME+"/Astro/reduced/AGC666pro/temp"+str(desired_metallicity)+".fits"
pyfits.writeto(output_filename, img, img_header, clobber=True)
pyfits.append(output_filename, var_field, img_header)
segmentation_filename = HOME+"/Astro/reduced/AGC666pro/segmentation.fits"
pyfits.writeto(segmentation_filename, segmentation_img, clobber=True)
#np.savetxt(HOME+"/Astro/reduced/AGC666pro/input_fake"+str(desired_metallicity)+".txt", np.column_stack([x_array,y_array,oiii_5007_flux_array,oiii_5007_wavelength_array,oiii_5007_amplitude_array,h_beta_flux_array,h_beta_wavelength_array,h_beta_amplitude_array,h_alpha_flux_array,h_alpha_wavelength_array,h_alpha_amplitude_array,nii_6584_flux_array,nii_6584_wavelength_array,nii_6584_amplitude_array,dispersion_array]), fmt='%8i %8i %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f', header="	x	y	oiii_5007_flux	oiii_5007_wavelength	oiii_5007_amplitude	h_beta_flux	h_beta_wavelength	h_beta_amplitude	nii_6584_flux	nii_6584_wavelength	nii_6584_amplitude	h_alpha_flux	h_alpha_wavelength	h_alpha_amplitude	dispersion")
text_file = open(HOME+"/Astro/reduced/AGC666pro/fake_metallicity.sh", "w")
text_file.write('export fake_metallicity='+str(desired_metallicity))
text_file.close()
Exemple #59
0
    
        
    if obs_num % 10000 == 0:
        pf.append('/mnt/lcdm2/True_base_point_full'+str(rank)+'.fits', array(base_point_dist))
        pf.append('/mnt/lcdm2/True_base_pdf_full'+str(rank)+'.fits', array(base_pdf_dist))        
        pf.append('/mnt/lcdm2/True_matias_point_full'+str(rank)+'.fits', array(matias_point_dist))
        pf.append('/mnt/lcdm2/True_matias_pdf_full'+str(rank)+'.fits', array(matias_pdf_dist))
        #pf.append('Bootstrap_point_full'+str(rank)+'.fits', array(bootstrap_point_dist))
        #pf.append('Bootstrap_pdf_full'+str(rank)+'.fits', array(bootstrap_pdf_dist))
        #pf.append('/mnt/lcdm/Pseudo_boot_full'+str(rank)+'.fits',array(pseudo_boot_dist))
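        # note: pf.append on an existing file adds a new image HDU each call,
        # so these files grow by one extension per 10000-observation flush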
        
        base_point_dist = []
        base_pdf_dist = []
        matias_point_dist =[]
        matias_pdf_dist =[]
        #bootstrap_point_dist = []
        #bootstrap_pdf_dist = []
        #pseudo_boot_dist=[]
    
Exemple #60
0
def main(working_dir, stamp, run, nproc):
    data_dir = '%s/data' % working_dir
    log.info('''
SDSS III Make FITS: 
Working dir: %s
Data dir   : %s
RUN        : %s
Stamp      : %s
''' % (working_dir, data_dir, run, stamp))

    # set top directory
    run2d, run1d = (run, run)
    spfilename   = 'spAll-' + run2d + '.fits'
    spfile       = data_dir + '/' + spfilename

    # find what we want to extract
    zcut = (-1e3, 0.36)
    sncut = 10
    rfilter = 2
    wavelengthcut = (3856,9186)
    spfiltered = FilterObjects(working_dir, spfile, zcut = zcut, sncut = sncut, wavelengthcut = wavelengthcut,rfilter = rfilter)
    if len(spfiltered) == 0: raise RuntimeError('no object to extract')

    gc.collect()

    ################### extract data

    log.info('Reading plate data')
    uniq_plates = np.unique(spfiltered['PLATE'])

    data = ExtractPlate(uniq_plates[0], spfiltered, rfilter, data_dir, run1d, run2d)
    for p in range(1, len(uniq_plates)):
        if p % 100 == 0: log.info('%d / %d' % (p + 1, len(uniq_plates)))
        dat = ExtractPlate(uniq_plates[p], spfiltered, rfilter, data_dir, run1d, run2d)
        for ind in range(len(data)): data[ind].extend(dat[ind])
    flux,invvar,eigenspectrum,magnitudes = data[:4]
    magnitudes = np.vstack(magnitudes)
    z,ra,dec,c0,c1,plate,mjd,fiber,sdssid,clas,subclas,s2n = [np.array(e) for e in data[4:]]
    del spfiltered, data

    ######### unredshift spectra

    nflux = len(flux)
    log.info('%d objects actually extracted' % nflux)
    if nflux == 0: raise RuntimeError('no object actually extracted')

    # spectra all have different lengths: determine the largest, zero-fill, then select only the nonzero entries for the fit
    length = np.array([np.shape(f)[0] for f in flux])
    dered_loglambda0 = c0 - np.log10(1. + z)  # c0 is a log10 zero point, so deredshift in log10

    #define a single wavelength spectrum
    init_pixel   = np.log10(wavelengthcut[0])
    final_pixel  = np.log10(wavelengthcut[1])
    delta_pixel  = 1e-4 #10.**(np.min(dered_loglambda0))*(10.**1e-4 - 1.)
    new_wave     = np.arange(init_pixel, final_pixel, delta_pixel)
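    # note: a uniform grid in log10(lambda) with 1e-4 spacing corresponds to
    # constant-velocity pixels of c*ln(10)*1e-4 ~ 69 km/s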

    log.info('Transforming spectra...')
    jobs = [(flux[i],invvar[i],dered_loglambda0[i],c1[i], new_wave)
            for i in range(nflux)]
    del flux, invvar
    gc.collect()

    results = ProcJobs(TransformSpectrum, jobs, nproc)
    newflux, newinvvar = zip(*results)
    newflux, newinvvar = np.vstack(newflux), np.vstack(newinvvar)
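    # a minimal sketch of what TransformSpectrum might do (hypothetical --
    # the real helper is defined elsewhere): resample one deredshifted
    # spectrum and its inverse variance onto the shared rest-frame
    # log10-wavelength grid
    def TransformSpectrum_sketch(flux, invvar, loglam0, dloglam, grid):
        loglam = loglam0 + dloglam * np.arange(len(flux))
        f = np.interp(grid, loglam, flux, left=0., right=0.)
        iv = np.interp(grid, loglam, invvar, left=0., right=0.)
        return f, iv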

    log.info('Assembling FITS')

    col1 = pf.Column(name='z',           format='E',  array = z)
    col2 = pf.Column(name='ra',          format='E',  array = ra)
    col3 = pf.Column(name='dec',         format='E',  array = dec)
    col4 = pf.Column(name='plate',       format='I',  array = plate)
    col5 = pf.Column(name='mjd',         format='J',  array = mjd)
    col6 = pf.Column(name='fiber',       format='I',  array = fiber)
    col7 = pf.Column(name='class',       format='6A', array = clas)
    col8 = pf.Column(name='subclass',    format='6A', array = subclas)
    col9 = pf.Column(name='length',      format='I',  array = length)
    col10 = pf.Column(name='s2n',        format='E',  array = s2n)
    col11 = pf.Column(name='sdss_id',    format='20A',array = sdssid)

    cols = pf.ColDefs([col1, col2, col3, col4, col5, col6, 
                       col7, col8, col9, col10, col11])
    tablehdu = pf.new_table(cols)
    tablehdu.header.update('initp', init_pixel)
    tablehdu.header.update('finalp', final_pixel)
    tablehdu.header.update('stamp', np.int64(stamp.replace('-','')))
    imagehdu = pf.PrimaryHDU(newflux)

    hdulist = pf.HDUList([imagehdu, tablehdu])
    filename = '%s/spectra_restframe_%s.fits' % (working_dir, stamp)
    log.info('Saving FITS to %s' % filename)
    hdulist.writeto(filename, clobber=True)
    pf.append(filename, magnitudes)
    pf.append(filename, new_wave)
    pf.append(filename, newinvvar)

    log.info('Backing up FITS')
    os.system('cp %s %s/backup/' % (filename, working_dir))