Example #1
    def makeFlat(self, combine='median', flatlist=[], output='flatima.fits'):
        '''
        Combines the flat field frames to a single flat frame.

        :note: this does not do any sigma clipping at the moment
        '''
        self.combinedFlat = output

        if len(flatlist) == 0:
            flatlist = self.flatFiles

        if self.dataWeird:
            filedata = [pf.getdata(x, ignore_missing_end=True)[0] for x in flatlist]
        else:
            filedata = [pf.getdata(x, ignore_missing_end=True) for x in flatlist]

        if len(set(x.shape for x in filedata)) > 1:
            self.log.info('FLAT images are not of same size! Program will exit..')
            sys.exit('FLAT images are not of same size! Program will exit..')

        # combine the files with the requested numpy reducer (e.g. np.median)
        self.flat = getattr(np, combine)(filedata, axis=0)

        self.log.info('FLAT frames combined with np.{0}'.format(combine))

        #write the output
        hdu = pf.PrimaryHDU(self.flat)
        hdulist = pf.HDUList([hdu])
        hdulist.writeto(self.combinedFlat)
        self.log.info('Combined flat saved to {0:>s}'.format(self.combinedFlat))

        return self.flat
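A minimal standalone sketch of the same combine step, for quick testing outside the class; it assumes astropy.io.fits stands in for the pf alias used above and that all flat frames share one shape:

import numpy as np
from astropy.io import fits  # assumed stand-in for the `pf` alias above

def combine_flats(flatlist, combine='median', output='flatima.fits'):
    # stack the frames and collapse them with the requested numpy reducer
    filedata = np.array([fits.getdata(f) for f in flatlist])
    flat = getattr(np, combine)(filedata, axis=0)
    fits.PrimaryHDU(flat).writeto(output, overwrite=True)
    return flat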
Example #2
def do_trace_align(im,fibers2,params):


    imdat, imhead = pf.getdata(im), pf.getheader(im)

    outname = im[:-5]+'_t.fits'
    #outer = pl.array( [ pl.arange(len(dat[0]))*0 for i in range(params.FIBER_WIDTH*len(fibers2)) ] )
    outer = pf.getdata(im)*0

    # DONT DO IT IF THE FILE EXISTS
    globber = gl.glob(outname)
    if len(globber)==1: return outname

    for fib in fibers2:

        fib_cnt = 1

        for f in range(len(fib.xy)):

            xy = fib.xy[f]
            rescale = fib.rescale[f]
            #rescaled_vals = dat[ xy[1] , xy[0] ] * rescale

            #            print 'xy = ',xy
            #            print 'rescale = ',rescale
            #            print 'rescaled = ',rescaled_vals

            for y in range( len(rescale) ):
                rescaled_val = imdat[ xy[1][y] ][ xy[0] ] ###* rescale[y]
                outer[(len(rescale)+1)*fib.id+y][xy[0]] = rescaled_val

        fib_cnt += 1

    pf.writeto( outname, outer, header=imhead )
    return outname
Example #3
def bfixpix(image_file, mask_file, outsuffix='_f', msksuffix='_s'):
    """
    Inputs
    ---------
    image_file : string
        input image file to fix bad pixels on

    mask_file : string
        mask file (0 == good pixels, >0 == bad pixels)

    outsuffix : string
        suffix for fixed image. default = '_f'

    msksuffix : string
        suffix for bad pixels significance mask. default = '_s'
    """
    outf = image_file.replace('.fits', outsuffix + '.fits')
    outm = image_file.replace('.fits', msksuffix + '.fits')
    
    util.rmall([outf, outm])
    print("bfixpix: {0} -> {1}".format(image_file, outf))

    # fetch the image, fetch the mask
    img, hdr = pyfits.getdata(image_file, header=True)
    msk = pyfits.getdata(mask_file)

    # median the image
    medimg = ndimage.median_filter(img, 3, mode='nearest')

    # generate the pixel files
    outf_img = np.where(msk == 0, img, medimg)
    outm_img = np.where(msk == 1, (img - medimg), 0)

    pyfits.writeto(outf, outf_img, hdr)
    pyfits.writeto(outm, outm_img, hdr)
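A usage sketch for the function above; the file names are hypothetical, and the mask is expected to follow the docstring convention (0 == good, >0 == bad):

# writes image_f.fits (fixed image) and image_s.fits (significance of the fixes)
bfixpix('image.fits', 'badpix_mask.fits')
# custom suffixes -> image_fix.fits and image_sig.fits
bfixpix('image.fits', 'badpix_mask.fits', outsuffix='_fix', msksuffix='_sig')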
Example #4
def whole_image(X,Y,imagefile,PARAMS,preset):

    centroids = zip(X,Y);
    if len(centroids) == 0:
        logging.warning("No objects in given (X,Y) lists. Finishing.");
        return False;

    # Run Sextractor over the whole image:
    _Dsex = sextractor.run_segobj(imagefile,PARAMS,preset=preset);
    segimg = pyfits.getdata(_Dsex['SEGMENTATION']);
    objimg = pyfits.getdata(_Dsex['OBJECTS']);

    objIDs = [ segimg[o_o[1],o_o[0]]  for o_o in centroids ];
    objIDs = list(set(objIDs)-set([0]));

    # Take only the objects of interest...
    # from image:
    selobjimg = imcp.copy_objects(objimg,segimg,objIDs);
    del segimg,objimg;
    # and catalog:
    cathdu = pyfits.open(_Dsex['CATALOG'])[1];
    selobjhdu = fts.select_entries(cathdu,'NUMBER',*objIDs);
    del cathdu;

    # Re-identify each object:
    for i in range(len(objIDs)):
        selobjhdu.data.field('NUMBER')[i] = i;
    
    return (selobjimg,selobjhdu);
Example #5
def get_1d_2d_spectra(IDtuple):
    """ Given a tuple of mask id, quadrant number, slit_id and object
    number, return the 1d extracted flux, error, the 2d image of the
    slit in which the object falls, and the y-coordinate of the object
    in the slit.

    Note each slit can have several objects (up to 9?), each one has a
    different object number. The object y centres and edges are in
    object_?, start_?  and end_?, where '?' is the object number in
    the slit.

    Returns
    -------
    wa, fl, er, sky, image, (ystart, yobj, yend)
    """
    quad, iext, slit_id, obj = IDtuple
    image = pyfits.getdata('mos_science_flux_extracted.fits', iext)
    fluxes = pyfits.getdata('mos_science_flux_reduced.fits', iext)
    skies = pyfits.getdata('mos_sci_sky_reduced.fits', iext)
    errors = pyfits.getdata('mos_sci_error_flux_reduced.fits', iext)
    hd = pyfits.getheader('mos_science_flux_extracted.fits', iext)

    wa = hd['CRVAL1'] + np.arange(hd['NAXIS1']) * hd['CD1_1']

    i0, i1, pos, ind = get_pos_ind(IDtuple)

    fl = fluxes[ind]
    er = errors[ind]
    sky = skies[ind]

    return wa, fl, er, sky, image[i0:i1, :], pos-i0
Example #6
def SavePreviewRGC(config,filename_rgc,n_gals_preview=10):
    """
    Function for eyeballing the contents of the created mock RGC catalogs.
    Arguments
    ---------
    config              config dict 
    filename_rgc        filename of the newly created real galaxy catalog fits 
    n_gals_preview      how many plots to produce (default=10)
    """

    # open the RGC
    table = pyfits.open(filename_rgc)[1].data

    # get the image and PSF filenames
    fits_gal = table[0]['GAL_FILENAME']
    fits_psf = table[0]['PSF_FILENAME']
    import pylab

    # loop over galaxies and save plots
    for n in range(n_gals_preview):

        img_gal = pyfits.getdata(fits_gal,ext=n)
        img_psf = pyfits.getdata(fits_psf,ext=n)

        pylab.subplot(1,2,1)
        pylab.imshow(img_gal,interpolation='nearest')
        pylab.title('galaxy')
        
        pylab.subplot(1,2,2)
        pylab.imshow(img_psf,interpolation='nearest')
        pylab.title('PSF')
        
        filename_fig = 'fig.previewRGC.%s.%d.png' % (config['args'].filename_config,n)

        pylab.savefig(filename_fig)
Example #7
def combine_bins(targetSN, bins=None, outfile="a.fits"):
    """ Combine spectra of given bins. """
    data = pf.getdata("binned_sn{0}.fits".format(targetSN), 0)
    error = pf.getdata("binned_sn{0}.fits".format(targetSN), 1)
    binimg = pf.getdata("voronoi_sn{0}.fits".format(targetSN)).flatten()
    for i,bin in enumerate(bins):
        N = np.where(binimg==bin)[0].size
        spec = data[bin-1,:]
        specerr = error[bin-1,:]
        if i == 0:
            combined = N * spec
            comberr = N * specerr
            Ntot = N
        else:
            combined += N * spec
            Ntot += N
            comberr += N * specerr
    h = pf.getheader("binned_sn{0}.fits".format(targetSN))
    h["NAXIS"] = 1
    del h["NAXIS2"]
    hdu0 = pf.PrimaryHDU(combined, h)
    hdu1 = pf.ImageHDU(comberr, h)
    hdulist = pf.HDUList([hdu0, hdu1])
    hdulist.writeto(outfile, clobber=True)
    return
Example #8
def main():
    '''Input a set of R G B images and return a color image'''

    # Setup command line options
    parser = argparse.ArgumentParser(description='Coadd R,G,B fits images and return color png')
    parser.add_argument("rFile", help='Input R image', default='r.fits')
    parser.add_argument("gFile", help='Input G image', default='g.fits')
    parser.add_argument("bFile", help='Input B image', default='b.fits')
    parser.add_argument('--addNoise', dest="noise", nargs='+', type=float, help='Gaussian noise to add to each image', default=[0,0,0])
    parser.add_argument('--outputFile', dest="outputFile", help='Output PNG file name', default='rgb.png')
    args = parser.parse_args()

    images = []
    images.append(pyfits.getdata(args.rFile,0))
    images.append(pyfits.getdata(args.gFile,0))
    images.append(pyfits.getdata(args.bFile,0))

    # add noise
    if (args.noise != [0,0,0]):
        addNoise(images,args.noise)

    # scale image
    scaledImages = scaleAsinh(images, scales =[1.,1.,1.])

    # create RGB
    mode='RGB'
    pngImage = Image.new(mode, scaledImages[0].shape)
    pngImage.paste(createRGB(scaledImages),(0,0))

    pngImage.save(args.outputFile)
Example #9
def spt_mapping_images_to_jpeg():

    spti = pyfits.getdata("swarp.SPT-CLJ0307-5042.2x2.i.fits")
    sptv = pyfits.getdata("swarp.SPT-CLJ0307-5042.2x2.r.fits")
    sptb = pyfits.getdata("swarp.SPT-CLJ0307-5042.2x2.g.fits")

    # sptb=sptb[5066-587:5066+587,5262-587:5262+587]
    ##sptv=sptv[5441-587:5441+587,5372-587:5372+587]
    # sptv=sptv[5066-587:5066+587,5262-587:5262+587]
    # spti=spti[5066-587:5066+587,5262-587:5262+587]

    spti = spti[5262 - 587 : 5262 + 587, 5066 - 587 : 5066 + 587]
    sptv = sptv[5372 - 587 : 5372 + 587, 5441 - 587 : 5441 + 587]
    sptb = sptb[5262 - 587 : 5262 + 587, 5066 - 587 : 5066 + 587]

    skypi, sigpi = plotim(spti)
    skypv, sigpv = plotim(sptv)
    skypb, sigpb = plotim(sptb)

    print skypi, sigpi, skypv, sigpv, skypb, sigpb

    sptii = spti - skypi
    sptii = sptii / sigpi
    sptiv = sptv - skypv
    sptiv = sptiv / sigpv
    sptib = sptb - skypb
    sptib = sptib / sigpb

    acut = 0.3
    icut = 50.0 * acut
    vcut = 60.0 * acut
    bcut = 30.0 * acut

    sptii = cut_out_tails(sptii, 0, icut)
    sptiv = cut_out_tails(sptiv, 0, vcut)
    sptib = cut_out_tails(sptib, 0, bcut)

    sptii = (sptii * 255 / icut).astype(int)
    sptiv = (sptiv * 255 / vcut).astype(int)
    sptib = (sptib * 255 / bcut).astype(int)

    im = np.zeros((3, 1174, 1174), dtype=np.uint8)

    im[0, :, :] = sptii
    im[1, :, :] = sptiv
    im[2, :, :] = sptib

    # figure()
    # contour(sptii)
    # figure()
    # contour(sptiv)
    # figure()
    # contour(sptib)
    # show()

    im = np.uint8(im)

    scipy.misc.imsave("spt_comp.jpg", im)

    return 0
Example #10
def twilightFlatMaker(flatImagesPath,flatDarkImagesPath,masterFlatSavePath,plots=False):
    '''
    Make a master flat using a series of images taken at twilight
    by fitting the individual pixel intensities over time using least-squares
    and use the intercept as the normalizing factor in the master flat.
    
    INPUTS: flatImagesPath - List of paths to the flat field exposures
    
            flatDarkImagesPath - List of paths to the flat field darks
            
            masterFlatSavePath - Where to save the master flat that is created
            
            plots - Plot the master flat on completion when plots=True
    '''
    ## Create zero array with the dimensions of the first image for the flat field
    [dim1, dim2] = np.shape(pyfits.getdata(flatImagesPath[0]))
    flatSum = np.zeros([dim1, dim2])

    ## Create N-dimensional array for N dark frames, where the first 
    ##    two dimensions are the dimensions of the first image
    darks = np.zeros([len(flatDarkImagesPath),dim1,dim2])

    ## Take mean of all darks
    for i in range(0,len(flatDarkImagesPath)):
        darks[i,:,:] = pyfits.getdata(flatDarkImagesPath[i])
    dark = np.mean(darks,axis=0)

    ## Create N-dimensional array for N flat frames, where the first 
    ##    two dimensions are the dimensions of the first image
    flats = np.zeros([len(flatImagesPath),dim1,dim2])

    ## Assemble data cube of flats
    for i in range(0,len(flatImagesPath)):
        flats[i,:,:] = pyfits.getdata(flatImagesPath[i]) - dark

    def linearFitIntercept(x,y):
        '''Use least-squares to find the best-fit y-intercept '''
        return np.linalg.lstsq(np.vstack([x,np.ones(len(x))]).T,y)[0][1] ## Returns intercept

    flat = np.zeros([dim1,dim2])
    for i in range(0,dim1):
        print 'Master flat computing step:',i+1,'of',dim1
        for j in range(0,dim2):
            flat[i,j] = linearFitIntercept(range(len(flats[:,i,j])),flats[:,i,j])

    masterFlat = flat/np.mean(flat)

    if plots:
        ## If plots == True, plot the resulting master flat
        fig = plt.figure()
        a = plt.imshow(masterFlat,interpolation='nearest')
        a.set_cmap('gray')
        plt.title('Normalized Master Flat Field')
        fig.colorbar(a)
        fig.canvas.set_window_title('oscaar2.0 - Master Flat') 
        plt.show()

    ## Write out both a Numpy pickle (.NPY) and a FITS file
    np.save(masterFlatSavePath+'.npy',masterFlat)
    pyfits.writeto(masterFlatSavePath+'.fits',masterFlat)
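The per-pixel fit above boils down to taking the intercept of a straight-line least-squares fit; a minimal sketch of that step on synthetic data, assuming only numpy:

import numpy as np

x = np.arange(10, dtype=float)
y = 3.0 * x + 7.0                       # synthetic ramp with intercept 7
A = np.vstack([x, np.ones(len(x))]).T   # same design matrix as linearFitIntercept
slope, intercept = np.linalg.lstsq(A, y)[0]
print(intercept)                        # ~7.0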
Example #11
	def __init__(self, nfiles, mode='obs'):
		if mode=='obs': self.files = glob.glob("Buzzard*.fit")
		self.nfil = nfiles
		self.cat = pyfits.getdata(self.files[0])
		for i in range(1,nfiles): self.cat = np.concatenate((self.cat,pyfits.getdata(self.files[i]))) 
		self.ngal = len(self.cat)
		print 'Got %2.2f M galaxies.' %(self.ngal/1.0e6)
Example #12
def shiftRGB(redF,greenF,blueF,blueshiftr=0,blueshiftc=0,greenshiftr=0,greenshiftc=0,redshiftr=0,redshiftc=0,ext=None):
    """
    this code shifts the pixels of three r, g, b images, using the g image as reference to shift the other two. It returns the shifted r, g, b images.
    each row goes along ra direction
    each col goes along dec direction
    CRVAL1: ra direction
    CRVAL2: dec direction
    """
    blueHdr = pf.getheader(blueF,ext)
    greenHdr = pf.getheader(greenF,ext)
    redHdr = pf.getheader(redF,ext)
    bluerow = blueHdr['crval1']*3600./0.27
    bluecol = blueHdr['crval2']*3600./0.27
    greenrow = greenHdr['crval1']*3600./0.27
    greencol = greenHdr['crval2']*3600./0.27
    redrow = redHdr['crval1']*3600./0.27
    redcol = redHdr['crval2']*3600./0.27
    """
    col0=int(blueHdr['datasec'].split('[')[1].split(']')[0].split(',')[0].split(':')[0])-1
    col1=int(blueHdr['datasec'].split('[')[1].split(']')[0].split(',')[0].split(':')[1]) 
    row0=int(blueHdr['datasec'].split('[')[1].split(']')[0].split(',')[1].split(':')[0])-1
    row1=int(blueHdr['datasec'].split('[')[1].split(']')[0].split(',')[1].split(':')[1]) 
    """
    blue = pf.getdata(blueF,ext)
    green = pf.getdata(greenF,ext)
    red = pf.getdata(redF,ext)

    ctgreenrow = (bluerow+greenrow+redrow)/3.
    ctgreencol = (bluecol+greencol+redcol)/3.
    blue = nd.shift(blue,[bluerow - ctgreenrow+blueshiftr,bluecol-ctgreencol+blueshiftc],mode='nearest',order=1)
    green = nd.shift(green,[greenrow - ctgreenrow+greenshiftr,greencol-ctgreencol+greenshiftc],mode='nearest',order=1)
    red = nd.shift(red,[redrow - ctgreenrow+redshiftr, redcol-ctgreencol+redshiftc],mode='nearest',order=1)
    return red,green,blue
Example #13
def calc_sky_from_seg(infile,segfile):
   """
   Description: Calculates the sky level in an image using only 
      those regions in which SExtractor's segmentation file has
      a value of 0.

   Inputs:
    infile:   input fits file
    segfile:  SExtractor segmentation file associated with infile.
   """

   """ Load data """
   indat = pf.getdata(infile)
   segdat = pf.getdata(segfile)

   """ Set mask region and select associated regions of indat """
   mask = segdat == 0  
   sky = indat[mask]  
   # NB: These preceding 2 lines could have been combined as
   #   sky = indat[segdat==0]

   """ Calculate statistics """
   print "Statistics of sky outside masked regions"
   print "----------------------------------------"
   print "  N_pix  = %d" % sky.size
   print "  Median = %f" % n.median(sky)
   print "  Mean   = %f" % n.mean(sky)

   return
Example #14
def flatfield(flatF,dataF,outdir=None):
    flat = pyfits.getdata(flatF)
    data = pyfits.getdata(dataF)

    if get_HWP(flatF) != get_HWP(dataF):
        exit('HWP positions of flat and data do not match!')
        
    newData = data / flat

    hdr = pyfits.getheader(dataF)
    hdr['FLATFILE'] = (flatF,'Flat applied')
    hdr['FLATPROC'] = (True, 'Flat-fielded')
    HWP = get_HWP(dataF)
    hdr['FILTER'] = HWP
    hdr['HWP'] = HWP
        

    if outdir is None:
        newName = dataF
    else:
        newName = os.path.join(outdir,os.path.basename(dataF))

    print 'Writing to %s' % newName
    pyfits.writeto(newName,newData,header=hdr,clobber=True)
    return newName
Example #15
def collapse_cube(w1, w2):
    """ Collapse a MUSE data cube.

    Arguments

    w1, w2 : Wavelength limits used in the slice and output file names
        (slice_w{w1}_{w2}.fits); the slice file contains both data and
        stat extensions.

    """
    fits = "slice_w{0}_{1}.fits".format(w1, w2)
    outfits = "collapsed_w{0}_{1}.fits".format(w1, w2)
    data = pf.getdata(fits, 0)
    error = pf.getdata(fits, 1)
    h = pf.getheader(fits, 0)
    h2 = pf.getheader(fits, 1)
    h["NAXIS"] = 2
    del h["NAXIS3"]
    h2["NAXIS"] = 2
    del h2["NAXIS3"]
    print "Starting collapsing process..."
    start = time.time()
    w = wavelength_array(fits)
    # newdata = np.trapz(data, dx=np.diff(w)[0], axis=0)
    # newdata = np.nansum(data, axis=0) * np.diff(w)[0]
    newdata = np.nanmedian(data, axis=0)
    noise = 1.482602 / np.sqrt(6.) * np.nanmedian(np.abs(2.* data - \
           np.roll(data, 2, axis=0) - np.roll(data, -2, axis=0)), \
           axis=0)
    end = time.time()
    print "Collapsing lasted {0} minutes.".format((end - start)/60.)
    hdu = pf.PrimaryHDU(newdata, h)
    hdu2 = pf.ImageHDU(noise, h2)
    hdulist = pf.HDUList([hdu, hdu2])
    hdulist.writeto(outfits, clobber=True)
    return
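The noise line above appears to follow a DER_SNR-style median estimator (1.482602/sqrt(6) ≈ 0.6053); a standalone sketch of the same statistic for a single 1-D spectrum, assuming only numpy:

import numpy as np

def der_snr_noise(flux):
    # median absolute second-difference estimate of the per-pixel noise
    flux = np.asarray(flux, dtype=float)
    return 1.482602 / np.sqrt(6.) * np.nanmedian(
        np.abs(2. * flux - np.roll(flux, 2) - np.roll(flux, -2)))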
Example #16
def do_nods(filelist):
    """
    """

    headers = [pyfits.getheader(fn) for fn in filelist]

    for ii,fn in (enumerate(filelist)):
        if ii==len(filelist)-1:
            break

        try:
            if headers[ii]['BOREOFFX'] == 0 and headers[ii+1]['BOREOFFX'] == -5.9220000000000E-03:
                fitsfile = pyfits.open(fn)
                fitsfile[0].data -= pyfits.getdata(filelist[ii+1])

                matches = difflib.SequenceMatcher(None,fn,filelist[ii+1]).get_matching_blocks()
                outfilename = fn[matches[0].a:matches[0].size] + fn[matches[0].size:matches[1].a] + "-" + filelist[ii+1][matches[0].size:matches[1].b] + fn[matches[1].a:matches[1].size+matches[1].a]
                fitsfile.writeto(outfilename)
                print matches, outfilename

            elif headers[ii+1]['BOREOFFX'] == 0 and headers[ii]['BOREOFFX'] == -5.9220000000000E-03:
                fitsfile = pyfits.open(fn)
                fitsfile[0].data = pyfits.getdata(filelist[ii+1]) - fitsfile[0].data

                matches = difflib.SequenceMatcher(None,fn,filelist[ii+1]).get_matching_blocks()
                outfilename = fn[matches[0].a:matches[0].size] + filelist[ii+1][matches[0].size:matches[1].b] + "-" + fn[matches[0].size:matches[1].a] + fn[matches[1].a:matches[1].size+matches[1].a]
                fitsfile.writeto(outfilename)
                print matches, outfilename
        except IOError:
            pass
Example #17
def imWeightedAve( image1, image2, weight1, weight2, outfile, clobber=False, verbose=False):
    """
     construct a weighted average of image1 and image2:

     (weight1*image1 + weight2*image2) / (weight1+weight2)

     Mean image is written to outfile.
    """
    import os
    import pyfits
    from numpy import ndarray, nan_to_num
    import exceptions

    if os.path.isfile(outfile)  : 
        if clobber : 
            os.unlink( outfile )
        else : 
            print( "%s exists. Not clobbering."%outfile )
            return( outfile )
        
    # read in the sci and wht images 
    im1hdr = pyfits.getheader( image1 )
    im1 = pyfits.getdata( image1 )
    im2 = pyfits.getdata( image2 )
    wht1 = pyfits.getdata( weight1 )
    wht2 = pyfits.getdata( weight2 )

    meanim = nan_to_num( (wht1*im1 + wht2*im2)/(wht1+wht2) )
    
    # TODO : make a useful header
    outdir = os.path.dirname( outfile )
    if not os.path.isdir(outdir): 
        os.makedirs( outdir )
    pyfits.writeto( outfile, meanim, header=im1hdr )
    return( outfile )
Example #18
def irstack(myfiles):
    data = []
    tempfiles = glob("templist")+glob("*b.fits")+glob("*r.fits")+glob("flat.fits")
    for myfile in tempfiles:
        os.remove(myfile)
    for myfile in myfiles:
        data.append(pyfits.getdata(myfile))
    #sky subtract and replace with median
    mediansky = mean([median(data[i]) for i in range(len(data))])
    for i in range(len(myfiles)):
        fred = pyfits.open(myfiles[i])
        im = fred[0].data
        im2 = (im.transpose() - median(im,axis=0)).transpose() + mediansky
        fred[0].data = im2
        fred.writeto("%ibb.fits"%i,'ignore',True)
    iraf.imcomb("*bb.fits","flat",combine="median",reject="sigclip",lsigma=3,hsigma=2)
    flat = pyfits.getdata('flat.fits')
    flat /= median(flat)
    pyfits.writeto('flat.fits', flat, clobber=True)  # save the normalized flat
    for i in range(len(myfiles)):
        fred = pyfits.open(myfiles[i])
        im = fred[0].data
        im2 = ((im/flat).transpose() - median(im,axis=0)).transpose()
        fred[0].data = im2
        fred.writeto("%irb.fits"%i,'ignore',True)
    iraf.files("*rb.fits",Stdout="templist")
    iraf.stack("templist","output",1,'none')
Example #19
def make_color_ao_image():
    h_img = pyfits.getdata(workdir + 'mag05jullgs_h_rot.fits')
    h_imgScl = img_scale.sqrt(h_img, scale_min=50, scale_max=5500)
    kp_img = pyfits.getdata(workdir + 'mag05jullgs_kp_rot.fits')
    kp_imgScl = img_scale.sqrt(kp_img, scale_min=10, scale_max=17000)
    lp_img = pyfits.getdata(workdir + 'mag05jullgs_lp_rot.fits')
    lp_imgScl = img_scale.sqrt(lp_img, scale_min=-100, scale_max=60000)

    sgra = np.array([540, 410])
    scale = 0.00995
    h_xextent = np.array([0, h_img.shape[0]])
    h_yextent = np.array([0, h_img.shape[0]])
    h_xextent = (h_xextent - sgra[0]) * -scale
    h_yextent = (h_yextent - sgra[1]) * scale
    h_extent = [h_xextent[0], h_xextent[-1], h_yextent[0], h_yextent[-1]]

    img = np.zeros((h_img.shape[0], h_img.shape[1], 3), dtype=float)
    img[:,:,0] = lp_imgScl
    img[:,:,1] = kp_imgScl
    img[:,:,2] = h_imgScl

    py.figure(4, figsize=(10, 10))
    py.clf()
    py.subplots_adjust(left=0, right=1, bottom=0, top=1)
    py.imshow(img, extent=h_extent)
    
    ax = py.gca()
    py.setp(ax.get_xticklabels(), visible=False)
    py.setp(ax.get_yticklabels(), visible=False)

    py.xlim(5, -5)
    py.ylim(-4, 6)

    py.savefig(workdir + 'img_lgsao_color.png')
Example #20
def reduceData(data):
    for filter in data:
        for file in data[filter]:
            fh = pyfits.open(file)
            images = []
            for ext in [1,2,3,4]:
                bias = pyfits.getdata('BIAS%i.fits' % ext, ext=0)
                flat = pyfits.getdata('FLAT_%s_%i.fits' % (filter, ext), ext=0)

                image = fh[ext].data
                hdr = fh[ext].header

                biassec = hdr['BIASSEC'].strip().replace('[', '').replace(']','').replace(',',':').split(':')

                overscan = numpy.median(image[int(biassec[2])+1:int(biassec[3])-1,
                                              int(biassec[0])+1:int(biassec[1])-1].copy().ravel())

                print 'subtracting bias of about ', numpy.mean(bias)

                if overscan > 5000:
                    img = (1.*image) - bias
                else:
                    img = (1.*image) - (bias/overscan) - overscan

                img /= flat
                images.append(img)

            fh.close()

            fh = pyfits.open(file)
            for ext in [1,2,3,4]:
                fh[ext].data = images[ext-1]

            fh.writeto('RED%s' % (file))
Example #21
def plot_image(fits,ax, rms=np.nan, F=np.nan, cont=None, contcol='r', stretch='sqrt'):
  ax.set_frame_color('k')
  ax.set_tick_color('k')
  ax.tick_labels.set_font(size='xx-small')
  ax.tick_labels.set_xformat('ddd.dd')
  ax.tick_labels.set_yformat('ddd.dd')
  
  
  head = pf.getheader(fits)
  try:
    image = pf.getdata(fits)
  except:
    print 'problem with image: ',fits
    return
  ximgsize = head.get('NAXIS1')
  yimgsize = head.get('NAXIS2')
  pixscale = head.get('CDELT1')   
   
    
  if np.isnan(rms):
    rms = np.std(image)
    av = np.median(image)
    n_one = np.ones(image.shape)
    for i in range(5):
      #print rms, av
      within1sigma = np.where((image - av*n_one) < 3.*rms*n_one)
      av = np.median(image[within1sigma])
      rms = np.std(image[within1sigma])
  if np.isnan(F):
    F = image.max()
  
  
  dat = pf.getdata(fits)
  mean = np.median(dat)
  sig = np.std(dat)
  for i in range(10):
    dat = np.ma.masked_where(abs(dat - mean) > 5.*sig,dat).compressed()
    mean = np.median(dat)
    sig = np.std(dat)
  #, vmid = mean+20*sig
  ax.show_grayscale(vmin = mean-0.5*sig, vmax = mean+15*sig, stretch=stretch, invert=True)
  

  
  if cont!=None:
    image = pf.getdata(cont)
    rms = np.std(image)
    av = np.median(image)
    n_one = np.ones(image.shape)
    for i in range(5):
      within1sigma = np.where((image - av*n_one) < 3.*rms*n_one)
      av = np.median(image[within1sigma])
      rms = np.std(image[within1sigma])
    cont_levels = 3.*rms*np.array([-2.**(1 * j ) for j in range(0, 2 )] + [ 2.**(1. * j ) for j in range(0, 10 ) ] )
    ax.show_contour(cont, hdu=0, layer='contour', levels=cont_levels, filled=False, cmap=None, colors=contcol, returnlevels=False, convention=None, slices=[0,1], smooth=None, kernel='gauss')
  #else:
    #cont_levels = 3.*rms*np.array([-2.**(1 * j ) for j in range(0, 2 )] + [ 2.**(1. * j ) for j in range(0, 10 ) ] )
    #ax.show_contour(fits, hdu=0, layer='contour', levels=cont_levels, filled=False, cmap=None, colors=contcol, returnlevels=False, convention=None, slices=[0,1], smooth=None, kernel='gauss')
    
  return
Example #22
def run(fields, targetSN, bins=None, Nsim=30, run_parallel=False):
    """ Interface to calculate the errors of the Lick indices. """
    for field in fields:
        os.chdir(os.path.join(data_dir, "combined_{0}".format(field)))
        specs = "binned_sn{0}.fits".format(targetSN)
        data = pf.getdata(specs)
        w = wavelength_array(specs, axis=1, extension=0)
        ssps_file = os.path.join(templates_dir, 'miles_FWHM_2.51.fits')
        ssps = pf.getdata(ssps_file, 0)
        wssps = np.power(np.e, pf.getdata(ssps_file, 1))
        databins = wavelength_array(specs, axis=2, extension=0)
        os.chdir(os.path.join(data_dir, "combined_{0}".format(field),
                              "logs_sn{0}".format(targetSN)))
        ######################################################################
        # If bins are not specified, then look for all available bins
        if bins is None:
            bins = databins
        ######################################################################
        if run_parallel:
            pool = mp.Pool()
            for bin in bins:
                spec = data[bin - 1]
                pool.apply_async(run_mc,
                                 args=(field, bin, w, spec, wssps, ssps, Nsim))
            pool.close()
            pool.join()
        else:
            for bin in bins:
                spec = data[bin - 1]
                run_mc(field, bin, w, spec, wssps, ssps, Nsim)
Example #23
def unionmask( imfile1, imfile2, outfile, clobber=False, verbose=False):
    """
    construct the union of image1 and image2 badpix masks
    Returns the name of the output union bad pixel mask file.
    """
    import os
    import pyfits
    import exceptions
    from numpy import array, uint8

    # read in the images
    im1head = pyfits.getheader( imfile1 )
    imdat1 = pyfits.getdata( imfile1 )
    imdat2 = pyfits.getdata( imfile2 )

    if os.path.exists( outfile ) :
        if not clobber :
            print( "%s exists. Not clobbering."%outfile)
            return( outfile )
        else :
            os.remove( outfile )

    uniondat = array(imdat1,dtype=uint8) | array(imdat2,dtype=uint8)

    # TODO : make a useful header
    im1head.update("SRCIM1",imfile1,"First source image  for badpixunion")
    im1head.update("SRCIM2",imfile2,"Second source image for badpixunion")
    outdir = os.path.split( outfile )[0]
    if outdir :
        if not os.path.isdir(outdir):
            os.makedirs( outdir )
    pyfits.writeto( outfile, uniondat, header=im1head,clobber=clobber,
                    output_verify='fix')
    return( outfile )
Example #24
def OpenLight():
    #Open if they are the light frames
    global lightimages
    lightimages = askopenfilenames()
    
    #This next part is to display the choices made to allow for checking
    lightbox = Frame(master)
    lightbox.grid(row=2, rowspan=3)
    
    scrolllight = Scrollbar(lightbox, orient=HORIZONTAL)
    fileslist = Listbox(lightbox, xscrollcommand=scrolllight.set)
    scrolllight.config(command=fileslist.xview)
    for item in lightimages:
        fileslist.insert(END, item)
    
    fileslist.pack()
    scrolllight.pack(fill=X)
    
    numlight = len(lightimages)
    tempfile = PF.getdata(lightimages[0], header=False)
    ny, nx = tempfile.shape
    
    global lightframes
    lightframes = N.zeros((numlight, ny, nx), dtype=float)
    
    #Let's actually read in the lights now
    for i in N.arange(numlight):
        lightframes[i] = PF.getdata(lightimages[i], header=False)
Example #25
def make_voronoi_intens(targetSN, w1, w2):
    """ Make image"""
    image = "collapsed_w{0}_{1}.fits".format(w1, w2)
    intens = pf.getdata(image)
    extent = calc_extent(image)
    vordata = pf.getdata("voronoi_sn{0}_w{1}_{2}.fits".format(targetSN, w1,
                                                              w2))
    vordata = np.ma.array(vordata, mask=np.isnan(vordata))
    bins = np.unique(vordata)[:-1]
    combined = np.zeros_like(intens)
    combined[:] = np.nan
    for j, bin in enumerate(bins):
        idx, idy = np.where(vordata == bin)
        flux = intens[idx,idy]
        combined[idx,idy] = np.nanmean(flux)
    vmax = np.nanmedian(intens) + 4 * np.nanstd(intens)
    fig = plt.figure(1)
    plt.minorticks_on()
    make_contours()
    plt.imshow(combined, cmap="cubehelix_r", origin="bottom", vmax=vmax,
                    extent=extent, vmin=0)
    plt.xlabel("X [kpc]")
    plt.ylabel("Y [kpc]")
    cbar = plt.colorbar()
    cbar.set_label("Flux [$10^{-20}$ erg s$^{-1}$ cm$^{-2}$]")
    plt.savefig("figs/intens_sn{0}.png".format(targetSN), dpi=300)
    pf.writeto("figs/intens_sn{0}.fits".format(targetSN), combined,
               clobber=True)
    return
Example #26
    def __init__(self):
        '''
        Run oscaar.parseRegionsFile() to get the initial guesses for the
        initial centroids of the stars from the DS9 regions file, create
        dictionaries in which to store all of the data collected
        for each star. Allocate the memory for these arrays wherever possible.
        Parse the init.par file to grab the paths and initial parameters for 
        the run.
        INPUTS: None.
        '''
        self.parseInit() ## parse init.par using the parseInit() method
        self.parseObservatory()
        assert len(self.imagesPaths) > 1, 'Must have more than one data image'

        if self.flatPath != 'None':
            self.masterFlat = pyfits.getdata(self.flatPath)
            self.masterFlatPath = self.flatPath
        elif self.flatPath == 'None':
            print 'Using an isotropic ("placebo") master-flat (array of ones)'
            dim1,dim2 = np.shape(pyfits.getdata(self.imagesPaths[0]))
            self.masterFlat = np.ones([dim1,dim2])
        self.allStarsDict = {}
        init_x_list,init_y_list = parseRegionsFile(self.regsPath)        
        zeroArray = np.zeros_like(self.imagesPaths,dtype=np.float32)
        self.times = np.zeros_like(self.imagesPaths,dtype=np.float64)
        self.keys = []
        self.targetKey = '000'
        for i in range(0,len(init_x_list)):
            self.allStarsDict[paddedStr(i,3)] = {'x-pos':np.copy(zeroArray), 'y-pos':np.copy(zeroArray),\
                            'rawFlux':np.copy(zeroArray), 'rawError':np.copy(zeroArray),'flag':False,\
                            'scaledFlux':np.copy(zeroArray), 'scaledError':np.copy(zeroArray), 'chisq':0}
            self.allStarsDict[paddedStr(i,3)]['x-pos'][0] = init_x_list[i]
            self.allStarsDict[paddedStr(i,3)]['y-pos'][0] = init_y_list[i]
            self.keys.append(paddedStr(i,3))
Example #27
def plot_fits_reg_vs_out(fits_dir, regular, outliers, objids2name):
    reg_fits = [os.path.join(fits_dir, objids2name[p]) for p in regular]
    outl_fits = [os.path.join(fits_dir, objids2name[p]) for p in outliers]
    reg_fits_data = [pyfits.getdata(fit) for fit in reg_fits]
    outl_fits_data = [pyfits.getdata(fit) for fit in outl_fits]
    plot_fits_by_size(reg_fits_data, name='-reg')
    plot_fits_by_size(outl_fits_data, name='-outl')
Example #28
def get_model(t,g,z):
	if os.access(get_oriname(t,g,z),os.F_OK):
		if library == 'P':
			mf = pyfits.getdata(get_oriname(t,g,z))[Iwav]
			mhd = pyfits.getheader(get_oriname(t,g,z))
			if 'PHXXI_L' in mhd.keys():
				mict = float(mhd['PHXXI_L'])
			else:
				mict = 2.0
		elif library == 'C':
			mf = pyfits.getdata(get_oriname(t,g,z))[0][Iwav]
			if g>=2.75:
				mict = 1.
			elif g >=1.25 and g<=2.75:
				mict = 1.8
			elif g < 1.25:
				mict = 2.5
		elif library == 'R':
			mf = pyfits.getdata(get_oriname(t,g,z))[Iwav]
			if g>=2.75:
				mict = 1.
			elif g >=1.25 and g<=2.75:
				mict = 1.8
			elif g < 1.25:
				mict = 2.5
	
	else:
		if linear_interpolation:
			mf,mict = get_linear_interpol(t,g,z)
		else:
			mf,mict = get_interpolated(t,g,z)
	return mf,mict
Example #29
def mk_image(galaxy):
    base = './../../images_v5/GS_2.5as_matched/gs_all_'

    i_img = pyf.getdata(base+str(galaxy)+'_I.fits')
    j_img = pyf.getdata(base+str(galaxy)+'_J.fits')
    h_img = pyf.getdata(base+str(galaxy)+'_H.fits')

    #include 99% of pixels
    x = pyl.hstack(i_img)
    i_lim = scoreatpercentile(x,99)
    x = pyl.hstack(j_img)
    j_lim = scoreatpercentile(x,99)
    x = pyl.hstack(h_img)
    h_lim = scoreatpercentile(x,99)

    print galaxy, i_lim, j_lim, h_lim

    img = pyl.zeros((h_img.shape[0], h_img.shape[1], 3), dtype=float)
    img[:,:,0] = img_scale.asinh(h_img, scale_min=-0.1*h_lim, scale_max=h_lim,
            non_linear=0.5)
    img[:,:,1] = img_scale.asinh(j_img, scale_min=-0.1*j_lim, scale_max=j_lim,
            non_linear=0.5)
    img[:,:,2] = img_scale.asinh(i_img, scale_min=-0.1*i_lim, scale_max=i_lim,
            non_linear=0.5)

    return img
Example #30
def fitsread(imgname, header = False):
    """
    Read CSUSB telescope FITS image cube.
    
    Parameters
    ----------
    imgname : string
        FITS image name
        
    header : boolean
        Return FITS image header?
        
    Returns
    -------
    img_data : numpy array
        2D or 3D numpy array
    """
    try:
        if header:
            img_data, header = pyfits.getdata(imgname, ignore_missing_end = True, header = True)
            return img_data, header
        else:
            img_data = pyfits.getdata(imgname, ignore_missing_end = True)
            return img_data
    except IOError:
        print "FITSREAD: Unable to open FITS image %s" %imgname
    
    return
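A usage sketch for fitsread; the file name is hypothetical:

img = fitsread('cube_0001.fits')                     # data only
img, hdr = fitsread('cube_0001.fits', header=True)   # data plus header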
Example #31
def mk_galaxy_struc():
    from mk_galaxy_struc_fortable import galaxy
    galaxies = []

    # Add Sample
    data = pyf.getdata('../samples/sample_1.5_3.5_gs_all.fits')
    for i in range(len(data)):
        galaxies.append(
            galaxy(data['ID'][i], data['RA'][i], data['DEC'][i],
                   data['Imag'][i], data['Zmag'][i], data['Jmag'][i],
                   data['Hmag'][i], data['z'][i]))

    # Add ICD info
    data = pyf.getdata('../results/icd_IH.fits')
    #data = pyf.getdata('../results/IH_icd_20140806.fits')
    for i in range(len(data)):
        look = data['ID'][i]
        for galaxy in galaxies:
            if look == galaxy.ID:
                galaxy.ICD_IH = data['ICD_IH'][i]
                galaxy.ICD_IH_ERR = data['ICD_IH_ERR'][i]
                galaxy.ston_I = data['ston_I'][i]

    data = pyf.getdata('../results/icd_JH.fits')
    #data = pyf.getdata('../results/JH_icd_20140806.fits')
    for i in range(len(data)):
        look = data['ID'][i]
        for galaxy in galaxies:
            if look == galaxy.ID:
                galaxy.ICD_JH = data['ICD_JH'][i]
                galaxy.ICD_JH_ERR = data['ICD_JH_ERR'][i]
                galaxy.ston_J = data['ston_J'][i]

    data = pyf.getdata('../results/icd_VJ.fits')
    #data = pyf.getdata('../results/VJ_icd_20140806.fits')
    for i in range(len(data)):
        look = data['ID'][i]
        for galaxy in galaxies:
            if look == galaxy.ID:
                galaxy.ICD_VJ = data['ICD_VJ'][i]
                galaxy.ICD_VJ_ERR = data['ICD_VJ_ERR'][i]
                galaxy.ston_V = data['ston_V'][i]

    # Add FAST results
    data = pyf.getdata('./data/FAST_result_GS_1.5_3.5.fits')
    for i in range(len(data)):
        look = data['id'][i]  # note the difference
        for galaxy in galaxies:
            if look == galaxy.ID:
                galaxy.Mass = data['lmass'][i]
                galaxy.AV = data['Av'][i]
                galaxy.lsfr = data['lsfr'][i]
                galaxy.lssfr = data['lssfr'][i]

    # Add EAZY results
    u = np.loadtxt('./data/photz.153.rf')
    v = np.loadtxt('./data/photz.155.rf')
    j = np.loadtxt('./data/photz.161.rf')
    for ID, u1, v1, j1 in zip(u[:, 0], u[:, 5], v[:, 5], j[:, 5]):
        look = int(ID)
        for galaxy in galaxies:
            if look == galaxy.ID:
                galaxy.Uflux_rest = u1
                galaxy.Vflux_rest = v1
                galaxy.Jflux_rest = j1

    # Add GalFit data
    data = pyf.getdata('./data/gs_all_candels_ers_udf_f160w_v0.5_galfit.fits')
    for i in range(len(data)):
        look = data['id'][i]
        for galaxy in galaxies:
            if look == galaxy.ID:
                galaxy.sersic = data['n'][i]
                galaxy.axis_ratio = data['q'][i]
                galaxy.halflight = data['re'][i]

    # Add Color Gradient for GSD
    hdulist =\
    pyf.open('./data/GOODS-S_May2013_colourGradients_diff_all_no-pegs_2013-06-20.fits')
    tbdata = hdulist[1].data
    for i in range(len(tbdata.field('ID'))):
        look = tbdata.field('ID')[i]
        for galaxy in galaxies:
            if look == galaxy.ID:
                galaxy.Color_grad = tbdata.field('grad_i-H_obs')[i]
                #galaxy.Color_grad = tbdata.field('grad_z-H_obs')[i]

    # MIPS
    hdulist =\
    pyf.open('./data/Sample_MIPS_matched.fits')
    tbdata = hdulist[1].data
    for i in range(len(tbdata.field('ID_GSD'))):
        look = tbdata.field('ID_GSD')[i]
        for galaxy in galaxies:
            if look == galaxy.ID:
                galaxy.Mips = tbdata.field('F24')[i]

    # Add the morphology data.
    data1 = np.genfromtxt('./gini_m20/gdss-match.txt', names=True)
    for i in range(len(data1)):
        look = data1[i]['ID_1']  # ID
        for galaxy in galaxies:
            if look == galaxy.ID:
                G = galaxy.Gini = data1[i]['G']
                M = galaxy.M20 = data1[i]['M20']
                if G <= (-0.14 * M + 0.33) and G > (0.14 * M +
                                                    0.80):  # E/S0/Sa
                    galaxy.Elliptical = True
                elif G <= (-0.14 * M + 0.33) and G <= (0.14 * M +
                                                       0.80):  #Sb-Ir
                    galaxy.Spiral = True
                elif G > (-0.14 * M + 0.33):  # Mergers
                    galaxy.Merger = True

    # Add extra stuff
    data = pyf.getdata('./data/gs_all_tf_h_130511b_multi.fits')
    for galaxy in galaxies:
        galaxy.stellarity = data['CLASS_STAR'][galaxy.ID - 1]
        galaxy.halflight = data['FLUX_RADIUS_2'][galaxy.ID - 1]

    data = np.genfromtxt('./data/centers_removed.dat', names=True)
    for i in range(len(data)):
        look = data['ID'][i]
        for galaxy in galaxies:
            if look == galaxy.ID:
                galaxy.ICD_IH_cored = data['ICD'][i]

    data = np.genfromtxt('./data/clumpylist_4steven.dat')
    for i in range(len(data)):
        look = data[i][0]
        for galaxy in galaxies:
            if look == galaxy.ID:
                # Number of blobs with UV Luminosity L_blob/L_galaxy>0.08
                #galaxy.clumps = data[i][5]
                # Number of blobs with UV Luminosity L_blob/L_galaxy>0.05
                galaxy.clumps = data[i][4]
                # Number of blobs with UV Luminosity L_blob/L_galaxy>0.01
                galaxy.clumps = data[i][3]

    data = np.genfromtxt('./data/sfrs.txt', names=True)
    for i in data:
        look = i['UserID']
        for galaxy in galaxies:
            if look == galaxy.ID:
                if i['SFR_Wuyts'] < 0.0:
                    a2800 = 1.79289 * i['FAST_ma05_Av']
                    #a2800 = 1.79289 * galaxy.AV
                    correction = 10.0**(a2800 / 2.5)
                    galaxy.sfr2800 = i['SFR_2800']
                    galaxy.sfrtotal = i['SFR_2800'] * correction
                else:
                    a2800 = 1.79289 * i['FAST_ma05_Av']
                    #a2800 = 1.79289 * galaxy.AV
                    correction = 10.0**(a2800 / 2.5)
                    galaxy.sfr2800 = i['SFR_2800']
                    galaxy.sfrir = (i['SFR_Wuyts'] +
                                    i['SFR_2800']) / 10**0.2178
                    galaxy.sfrtotal = (i['SFR_Wuyts'] +
                                       i['SFR_2800']) / 10**0.2178

                #galaxy.Mass = i['FAST_ma05_lmass']
                galaxy.ssfr = galaxy.sfrtotal / 10**galaxy.Mass
                galaxy.mips24 = i['mips24_cryo']

    data = np.genfromtxt('./data/masses_from_CANDELS.txt', names=True)
    for i in data:
        look = i['UserID']
        for galaxy in galaxies:
            if look == galaxy.ID:
                galaxy.mass_candels = i['FAST_ma05_lmass']

    x = [galaxy.ID for galaxy in galaxies if galaxy.stellarity > 0.78]

    # Bad galaxy removal
    bad = [
        1073.0, 3736.0, 8030.0, 10832.0, 15769.0, 949.0, 1961.0, 3608.0,
        4956.0, 10426.0, 18801.0
    ] + x
    galaxies = filter(lambda galaxy: galaxy.ID not in bad, galaxies)

    pickle.dump(galaxies, open('galaxies_fortable.pickle', 'wb'))
Example #32
def compareLabelLists(labelFile1, labelFile2, magCut=18):
    t = 2006.580

    ## Read in star labels
    tab1 = asciidata.open(labelFile1)
    name1 = [tab1[0][ss].strip() for ss in range(tab1.nrows)]
    mag1 = tab1[1].tonumpy()
    x01 = tab1[2].tonumpy()
    y01 = tab1[3].tonumpy()
    vx1 = tab1[6].tonumpy()
    vy1 = tab1[7].tonumpy()
    t01 = tab1[10].tonumpy()
    x1 = x01 + vx1 * (t - t01) / 10**3
    y1 = y01 + vy1 * (t - t01) / 10**3

    tab2 = asciidata.open(labelFile2)
    name2 = [tab2[0][ss].strip() for ss in range(tab2.nrows)]
    mag2 = tab2[1].tonumpy()
    x02 = tab2[2].tonumpy()
    y02 = tab2[3].tonumpy()
    vx2 = tab2[6].tonumpy()
    vy2 = tab2[7].tonumpy()
    t02 = tab2[10].tonumpy()
    x2 = x02 + vx2 * (t - t02) / 10**3
    y2 = y02 + vy2 * (t - t02) / 10**3

    # Image
    im = pyfits.getdata(
        '/u/ghezgroup/data/gc/06maylgs1/combo/mag06maylgs1_dp_msc_kp.fits')
    imgsize = (im.shape)[0]

    # pixel position (0,0) is at upper left
    xpix = np.arange(0, im.shape[0], dtype=float)
    ypix = np.arange(0, im.shape[1], dtype=float)

    sgra = [1422.6, 1543.8]
    scale_jpg = 0.00995
    xim = (xpix - sgra[0]) * scale_jpg * -1.0
    yim = (ypix - sgra[1]) * scale_jpg

    py.clf()
    py.grid(True)
    py.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)
    py.imshow(np.log10(im),
              extent=[xim[0], xim[-1], yim[0], yim[-1]],
              aspect='equal',
              vmin=1.9,
              vmax=6.0,
              cmap=py.cm.gray)
    py.xlabel('X Offset from Sgr A* (arcsec)')
    py.ylabel('Y Offset from Sgr A* (arcsec)')
    py.title('UCLA/Keck Galactic Center Group', fontsize=20, fontweight='bold')
    thePlot = py.gca()

    py.axis([15, -15, -15, 15])

    idx2 = np.where(mag2 < magCut)[0]
    py.plot(x2[idx2], y2[idx2], 'ro', color='cyan', mfc='none', mec='cyan')
    for ii in idx2:
        py.text(x2[ii], y2[ii], name2[ii], color='cyan', fontsize=10)

    idx1 = np.where(mag1 < magCut)[0]
    py.plot(x1[idx1], y1[idx1], 'ro', color='orange', mfc='none', mec='orange')
    for ii in idx1:
        py.text(x1[ii], y1[ii], name1[ii], color='orange', fontsize=10)
Example #33
def plotStarfinderList(
        starList,
        hasErrors=True,
        magCut=18,
        cooStarList='16C',
        cooStarLabels='irs16C',
        scaleList=0.00995,
        scaleImg=0.00995,
        image='/u/ghezgroup/data/gc/06maylgs1/combo/mag06maylgs1_dp_msc_kp.fits'
):
    """
    Plot the specified image and overlay the photo_calib.dat sources on top.
    Coordinates are converted from pixels to arcsec using the coo star and
    assuming that the angle of the image is 0.

    This code assumes coordinates are NIRC2 narrow pixel coordinates. You can modify this by
    changing the scale. But +x must be to the west in the starlist.
    """

    # Load up the photometric calibraters table.
    lis = starTables.StarfinderList(starList, hasErrors=hasErrors)
    labels = starTables.Labels()

    # Find the coo star in the starlist and in the labels
    ii1 = np.where(lis.name == cooStarList)[0]
    ii2 = np.where(labels.name == cooStarLabels)[0]

    dt = lis.epoch[0] - labels.t0
    labels.x += (labels.vx / 10**3) * dt
    labels.y += (labels.vy / 10**3) * dt

    # Convert the pixels in the starlist into arcsec
    x = ((lis.x - lis.x[ii1]) * -scaleList) + labels.x[ii2]
    y = ((lis.y - lis.y[ii1]) * scaleList) + labels.y[ii2]

    im = pyfits.getdata(image)
    imgsize = (im.shape)[0]

    # pixel position (0,0) is at upper left
    xpix = np.arange(0, im.shape[0], dtype=float)
    ypix = np.arange(0, im.shape[1], dtype=float)

    # Read in the image coo file
    # Coo star pixel coordinates
    _coo = open(image.replace('.fits', '.coo'), 'r')
    coordsTmp = _coo.readline().split()
    coords = [float(coo) for coo in coordsTmp]
    print 'Coordinates for %s:' % cooStarLabels
    print '  [%10.4f, %10.4f] pixels' % (coords[0], coords[1])
    print '  [%10.4f, %10.4f] arcsec' % (labels.x[ii2], labels.y[ii2])

    sgrax = coords[0] - (labels.x[ii2] / -scaleImg)
    sgray = coords[1] - (labels.y[ii2] / scaleImg)
    sgra = [sgrax, sgray]

    # Image coordinates (in arcsec)
    xim = (xpix - sgra[0]) * -scaleImg
    yim = (ypix - sgra[1]) * scaleImg

    py.clf()
    py.close(2)
    py.figure(2, figsize=(6, 4.5))
    py.grid(True)
    py.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)
    py.imshow(np.log10(im),
              extent=[xim[0], xim[-1], yim[0], yim[-1]],
              aspect='equal',
              vmin=1.9,
              vmax=6.0,
              cmap=py.cm.gray)
    py.xlabel('X Offset from Sgr A* (arcsec)')
    py.ylabel('Y Offset from Sgr A* (arcsec)')
    py.title('UCLA/Keck Galactic Center Group', fontsize=20, fontweight='bold')
    thePlot = py.gca()

    py.axis([15, -15, -15, 15])

    idx = (np.where((lis.mag < magCut) & (x > xim.min()) & (x < xim.max())
                    & (y > yim.min()) & (y < yim.max())))[0]

    py.plot(x[idx], y[idx], 'r+', color='orange')
    for ii in idx:
        py.text(x[ii], y[ii], lis.name[ii], color='orange', fontsize=10)
Example #34
def plotPhotoCalib(
        image,
        cooStar,
        photoCalib='/u/ghezgroup/data/gc/source_list/photo_calib.dat'):
    """
    Plot the specified image and overlay the photo_calib.dat sources on top.
    Coordinates are converted from pixels to arcsec using the coo star and
    assuming that the angle of the image is 0.
    """
    # Load up the photometric calibraters table.
    _tab = asciidata.open(photoCalib)

    name = _tab[0].tonumpy()
    x = _tab[1].tonumpy()
    y = _tab[2].tonumpy()

    # Load up the image
    imageRoot = image.replace('.fits', '')
    im = pyfits.getdata(imageRoot + '.fits')

    # Coo star pixel coordinates
    _coo = open(imageRoot + '.coo', 'r')
    tmp = _coo.readline().split()
    cooPixel = [float(tmp[0]), float(tmp[1])]

    imgsize = (im.shape)[0]
    xpix = np.arange(0, im.shape[0], dtype=float)
    ypix = np.arange(0, im.shape[1], dtype=float)

    cooIdx = np.where(name == cooStar)[0]
    if len(cooIdx) == 0:
        print 'Failed to find the coo star %s in %s' % (cooStar, photoCalib)

    cooArcsec = [x[cooIdx[0]], y[cooIdx[0]]]

    scale = 0.00994
    xim = ((xpix - cooPixel[0]) * scale * -1.0) + cooArcsec[0]
    yim = ((ypix - cooPixel[1]) * scale) + cooArcsec[1]

    py.figure(1)
    py.clf()
    py.grid(True)
    py.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)
    py.imshow(np.log10(im),
              extent=[xim[0], xim[-1], yim[0], yim[-1]],
              aspect='equal',
              vmin=1.9,
              vmax=6.0,
              cmap=py.cm.gray)
    py.xlabel('X Offset from Sgr A* (arcsec)')
    py.ylabel('Y Offset from Sgr A* (arcsec)')
    py.title(imageRoot)

    thePlot = py.gca()

    idx = (np.where((x > xim.min()) & (x < xim.max()) & (y > yim.min())
                    & (y < yim.max())))[0]

    py.plot(x[idx], y[idx], 'r+', color='orange')
    for ii in idx:
        py.text(x[ii], y[ii], name[ii], color='orange', fontsize=12)