def get_thresh(filename):
    '''
    Get the initial thresholds for marking images

    Input:
        filename = The name (and path) of the fits file to get the values from

    Returns:
        threshdict - dictionary with 2 keys, dark_current and stdev. Each
            maps to a list of values for amplifiers a, b, c, d: the median
            dark current and its standard deviation, respectively.
    '''

    # Scale raw counts to dark-current units (gain 1.5, 900 s exposure)
    chip2 = fits.getdata(filename, 1) * 1.5 / 900.0
    chip1 = fits.getdata(filename, 4) * 1.5 / 900.0

    # Amplifier quadrants: a, b on chip1; c, d on chip2
    a = chip1[19:2070, 25:2072]
    b = chip1[19:2070, 2130:4178]
    c = chip2[0:2051, 25:2072]
    d = chip2[0:2051, 2130:4178]

    meda = np.median(a)
    astdev = np.std(a)
    medb = np.median(b)
    bstdev = np.std(b)
    medc = np.median(c)
    cstdev = np.std(c)
    medd = np.median(d)
    dstdev = np.std(d)

    threshdict = {'dark_current': [meda, medb, medc, medd],
                  'stdev': [astdev, bstdev, cstdev, dstdev]}

    return threshdict
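
A minimal usage sketch, assuming the same numpy/astropy imports as the snippet; the file name is a placeholder, not taken from the source.

import numpy as np
from astropy.io import fits

thresh = get_thresh('dark_stack.fits')  # hypothetical file name
for amp, med, sig in zip('abcd', thresh['dark_current'], thresh['stdev']):
    print('amp {}: median dark {:.4f}, stdev {:.4f}'.format(amp, med, sig))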
Example #2
def load_uvot_images(galaxy, band, imdir=''):
    """Load the UVOT count-rate, exposure, and sensitivity images for a
    galaxy in a given band, along with the region masks.

    :returns hdr:
        FITS header of the count-rate image.

    :returns images:
        List of images [cps, exposure, sensitivity]

    :returns masks:
        Dictionary of mask images, keyed by strings:
        ['bkg', 'reg', 'reg1', 'reg5', 'reg20']
    """
    imroot = os.path.join(imdir, galaxy, galaxy)
    info = {'root': imroot, 'band': band}
    
    names = ['{root}_t{band}.fits', '{root}_t{band}_ex.fits', '{root}_t{band}_lss.fits']
    #print(names[0].format(**info))
    try:
        hdr = pyfits.getheader(names[0].format(**info), 1)
    except Exception:
        # fall back to the primary header if extension 1 is absent
        hdr = pyfits.getheader(names[0].format(**info), 0)
    images = [pyfits.getdata(n.format(**info)) for n in names]

    masknames = ['bkg', 'reg', 'reg1', 'reg5', 'reg20']
    masks = [pyfits.getdata('{root}_{mask}.fits'.format(root=info['root'], mask=n))
            for n in masknames]

    return hdr, images, dict(zip(masknames, masks))
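
A short, hedged usage sketch; the galaxy name, band, and directory are placeholders.

hdr, images, masks = load_uvot_images('NGC0024', 'w1', imdir='data')  # hypothetical inputs
cps, exposure, sensitivity = images
print(sorted(masks))  # ['bkg', 'reg', 'reg1', 'reg20', 'reg5']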
Example #3
def dark_subtract(imlist, MasterDark, SaveSteps=False):
    '''Dark subtract the images in imlist using the previously
       combined Master Dark frame. Scale the Master Dark up to 
       the exptime of each image before subtraction.

       Set SaveSteps = True to write each dark-subtracted image 
       to a new file named [input_image].ds.fits. This option is 
       good for checking the reduction at each step. 
       Default is to overwrite input image files.
    '''
    # read in Master Dark frame
    Dark = fits.getdata(MasterDark)

    # get the date and time
    now = time.strftime('%c')
    for image in imlist:
        im,hdr = fits.getdata(image, header=True)
        # write a keyword to header 
        darkstr = 'Dark subtracted: %s' % now
        hdr['DARKSUB'] = darkstr

        # output file name
        if SaveSteps:
            output = ''.join([os.path.splitext(image)[0], '.ds.fits'])
        else:            
            output = image

        # scale Dark up to exptime of image
        exptime = hdr['EXPTIME']
        ScaledDark = Dark * exptime
        new = im - ScaledDark
        fits.writeto(output, new, header=hdr, overwrite=True)
    
    return
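
A minimal usage sketch with placeholder file names.

science_frames = ['obj_001.fits', 'obj_002.fits']  # hypothetical inputs
dark_subtract(science_frames, 'MasterDark.fits', SaveSteps=True)
# SaveSteps=True writes obj_001.ds.fits and obj_002.ds.fits rather than
# overwriting the input files.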
Example #4
def _scale_maps(ch, subtract_mean=True):
    f = _config.freq[ch]
    if subtract_mean:
        fhd_map = fits.open(
            _config.maps_dir + 'fhd_0.000h_{:.3f}MHz.fits'.format(f),
            mode='update'
        )
        fhd_map[0].data -= np.nanmean(fhd_map[0].data)
        fhd_map.flush()
        fhd_map.close()
    fhd_map, hdr = fits.getdata(
        _config.maps_dir + 'fhd_0.000h_{:.3f}MHz.fits'.format(f), header=True
    )
    gamp, gmean, gstd = np.genfromtxt(
        _config.psf_dir + 'psf_fit_{:.3f}MHz.csv'.format(f), delimiter=',',
        unpack=True
    )
    garea = 2 * np.pi * (gstd / hdr['CDELT2']) ** 2
    scaled_map = fhd_map / garea
    scaled_map_hdu = fits.PrimaryHDU(data=scaled_map, header=hdr)
    scaled_map_hdu.writeto(
        _config.maps_dir + 'fhd_scaled_0.000h_{:.3f}MHz.fits'.format(f),
        overwrite=True
    )
    gauss_map = fits.getdata(
        _config.maps_dir + 'gauss_0.000h_{:.3f}MHz.fits'.format(f)
    )
    res_map = scaled_map - gauss_map
    res_map_hdu = fits.PrimaryHDU(data=res_map, header=hdr)
    res_map_hdu.writeto(
        _config.maps_dir + 'res_0.000h_{:.3f}MHz.fits'.format(f),
        overwrite=True
    )
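
The garea factor is the solid angle of the Gaussian beam in pixels: a unit-amplitude 2-D Gaussian integrates to 2*pi*sigma^2, with sigma converted from map units to pixels via CDELT2. A quick numerical check of that identity:

import numpy as np

sigma = 3.0                          # test beam sigma in pixels
y, x = np.mgrid[-50:51, -50:51]      # grid much larger than the beam
beam = np.exp(-(x**2 + y**2) / (2 * sigma**2))
print(beam.sum(), 2 * np.pi * sigma**2)   # both ~56.55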
Example #5
def read_zhu():
    MgII = fits.getdata(prefix + '/MgII/Expanded_SDSS_DR7_107.fits')
    qso0 = fits.getdata(prefix + '/MgII/QSObased_Expanded_SDSS_DR7_107.fits')

    #MgII = fits.getdata(prefix + '/MgII/JHU-SDSS/Expanded_SDSS_DR7_107.fits')
    #qso0 = fits.getdata(prefix + '/MgII/JHU-SDSS/QSObased_Expanded_SDSS_DR7_107.fits')

    # find the min & max redshift for MgII search path
    qso_zmin, qso_zmax = get_MgII_zsearch_lim(qso0['ZQSO'])

    # remove qsos with tiny z search paths
    cond = (qso_zmax - qso_zmin) > 0.05

    cond &= qso_zmin < 0.9
    
    qso = qso0[cond]
    qso_zmin = qso_zmin[cond]
    qso_zmax = qso_zmax[cond]

    # add in DR9 too later? (not as many as DR7). Need to check there
    # is no overlap in QSOs first.
    arr = [qso['RA'], qso['DEC'], qso['ZQSO'], qso_zmin, qso_zmax,
           qso['INDEX_QSO']]
    qso1 = np.rec.fromarrays(arr, names='ra,dec,z,zmin_mg2,zmax_mg2,qid')
    arr = [MgII.ZABS, MgII.REW_MGII_2796, MgII.INDEX_QSO, np.arange(len(MgII))]
    MgII1 = np.rec.fromarrays(arr, names='z,Wr,qid,abid')
    MgII1.sort(order='qid')

    iMgII_from_id = {ind:i for i,ind in enumerate(MgII1['abid'])}
    iqso_from_id = {ind:i for i,ind in enumerate(qso1['qid'])}

    return dict(MgII=MgII1, qso=qso1), iqso_from_id, iMgII_from_id
Example #6
    def test_dd(self):
        """Test both single core and dual core running"""
        cmd = """{executable} {specter_dir}/bin/specter \
          -i {sky} \
          -p {monospot_file} \
          -t {throughput_file} \
          -w 7500,7620 \
          --specmin 0 --nspec 2 --exptime 1500 --trimxy""".format(
              executable=self.executable,
              specter_dir=self.specter_dir,
              sky=self.sky_file,
              monospot_file=self.monospot_file,
              throughput_file=self.throughput_file)

        if os.path.exists(imgfile1):
            os.remove(imgfile1)
        if os.path.exists(imgfile2):
            os.remove(imgfile2)

        err = os.system(cmd + " --numcores 1 -o " + imgfile1)
        self.assertEqual(err, 0, 'Error code {} != 0'.format(err))
        self.assertTrue(os.path.exists(imgfile1))

        err = os.system(cmd + " --numcores 2 -o " + imgfile2)
        self.assertEqual(err, 0, 'Error code {} != 0'.format(err))
        self.assertTrue(os.path.exists(imgfile2))

        img1 = fits.getdata(imgfile1)
        img2 = fits.getdata(imgfile2)

        self.assertTrue(np.allclose(img1, img2))
Example #7
def main():
    fitslst = glob.glob('*skysub.fits')
    fitslst.sort()

    ref = pf.getdata(fitslst[0])
    xref,yref = np.loadtxt(fitslst[0]+'.coo')
    refmask = makeCircularMask(ref.shape,15,xref,yref) & ~makeCircularMask(ref.shape,8,xref,yref)  # annulus between r=8 and r=15
    refmed = np.median(ref[refmask])
    pf.writeto('aligned_'+fitslst[0], ref)

    data = [ref]
    for fn in fitslst[1:]:  # 'fn' avoids shadowing the fits module name
        targ = pf.getdata(fn)
        xtarg,ytarg = np.loadtxt(fn+'.coo')

        shim = shift(targ,(yref-ytarg,xref-xtarg),order=1)
        shval = alignIm(shim,ref,refmask) # first-pass alignment against the reference
        shim = shift(shim,shval,order=1)
        targmed = np.median(shim[refmask])
        pf.writeto('aligned_'+fn, refmed*shim/targmed)
        data.append(shim)
    pf.writeto('Combo_%s.fits'%(fitslst[0].split('_')[0]),np.median(np.array(data),axis=0))
    if not os.path.exists('../Aligned'): os.mkdir('../Aligned')
    os.system('mv aligned*fits ../Aligned/')
    os.system('mv Combo_*fits ../Aligned/')
Example #8
def find_sky(args):

    for im_name in args.input:
        # Separate path and file
        imdir, imname = os.path.split(im_name)

        # Read images
        im = fits.open(im_name, mode='update')
        hdr = im[0].header
        data = fits.getdata(im_name).astype(numpy.float64)

        # If mask exist read it, otherwise build it with all values unmasked
        try:
            maskname = hdr[args.mask_key]
            mask = fits.getdata(maskname)
        except KeyError:
            mask = numpy.ma.make_mask_none(data.shape)

        # Make a copy of the array, but only with the unmasked pixels
        whr2 = numpy.where( (mask == 0) & (numpy.isfinite(data)))
        data2 = data[whr2]
        median = numpy.median(data2)
        MAD = numpy.median( numpy.abs( data2 - median))


        # Histogram with limits of +-7.5 MAD; sigma ~ 1.4826*MAD for a normal distribution, so this is roughly +-5 sigma
        minx, maxx = median-7.5*MAD, median+7.5*MAD
        nbins = 15
        n, bins = numpy.histogram(data2, bins=nbins, range=[minx, maxx])
        bincenters = 0.5 * (bins[1:]+bins[:-1])

        # Find max position and value
        maxpos = n.argmax()
        maxvalue = bincenters[maxpos]

        # First guess for a fit
        p0 = [n[maxpos], maxvalue, 1.5 * MAD, numpy.min(n)]
        coeff, varmatrix = curve_fit(gauss, bincenters,n,p0=p0)

        # Evaluate the fitted Gaussian (centre and standard deviation) on the bin centres
        hist_fit = gauss(bincenters, *coeff)

        # Plot the histogram and the resulting fit
        if args.plot:
            pyplot.plot(bincenters, n, 'o')
            pyplot.plot(bincenters, hist_fit)
            pyplot.draw()
            pyplot.show()

        # Including (or updating) sky values in the header of the image
        hdr.add_history("- Added sky value and std dev estimated from histogram of" +\
                        " image. See sky and sky_std keywords.")
        hdr["sky"] = (str(coeff[1]), "Sky value")
        hdr["sky_std"] = (str(coeff[2]), "Standard deviation of sky")
        im.flush()
        im.close()
            
    return coeff
Example #9
    def __init__(self, tag='repro', clobber=False):
        """dir should contain the cubes already as produced by prepare()"""
        self.tag = tag
        self.clobber = clobber
        # Use total component as reference
        self.ref_file = filename(tag=tag)
        self.fitsimage = FITSimage(self.ref_file)
        # Construct vectors of glon, glat, energy e.g. for plotting
        ac = iu.axis_coordinates(self.fitsimage)
        self.glon = ac['GLON']
        self.glat = ac['GLAT']
        self.energy = 10 ** ac['PHOTON ENERGY']
        # Read mask if there is one, else don't use a mask
        try:
            self.mask = fits.getdata('mask.fits')
            logging.info('Loaded mask.fits')
        except IOError:
            self.mask = 1
            logging.info('mask.fits not found')
        try:
            self.area = fits.getdata('area.fits')
            logging.info('Loaded area.fits')
        except IOError:
            self.area = iu.area(self.fitsimage, deg=False)
            logging.info('area.fits not found')
Example #10
    def get_primaryID(self):
        """Identifies and ranks duplicate detections.

        Returns
        -------
        matchdata : tuple (sourceID, matchinfo)

        Notes
        -----
        The ranking criteria are
          1. source w/ best filter coverage wins (3 > 2 > 1); 
             if no winner, then
          2. source w/ smallest 'errBits' error flag wins; 
             if no winner, then
          3. source w/ best seeing wins (if seeing better by >20%);
             if no winner, then
          4. source closest to the optical axis wins.

        This function is optimised for speed at the expense of readability :-(
        """

        # Open the file with the sources crossmatched across fields
        try:
            crossmatch = fits.getdata(self.crossmatch_file, 1, memmap=True)
        except OSError as e:  # Anticipate a "No such file" error
            log.warning('Failed to open {0}: {1}'.format(self.crossmatch_file,
                                                         e))
            log.warning('Will try again in 5 seconds.')
            time.sleep(5)
            crossmatch = fits.getdata(self.crossmatch_file, 1)
Example #11
def bfixpix(image_file, mask_file, outsuffix='_f', msksuffix='_s'):
    """
    Inputs
    ---------
    image_file : string
        input image file to fix bad pixels on

    mask_file : string
        mask file (0 == good pixels, >0 == bad pixels)

    outsuffix : string
        suffix for fixed image. default = '_f'

    msksuffix : string
        suffix for bad pixels significance mask. default = '_s'
    """
    outf = image_file.replace('.fits', outsuffix + '.fits')
    outm = image_file.replace('.fits', msksuffix + '.fits')
    
    util.rmall([outf, outm])
    print("bfixpix: {0} -> {1}".format(image_file, outf))

    # fetch the image, fetch the mask
    img, hdr = fits.getdata(image_file, header=True)
    msk = fits.getdata(mask_file)

    # median the image
    medimg = ndimage.median_filter(img, 3, mode='nearest')

    # generate the pixel files
    outf_img = np.where(msk == 0, img, medimg)
    outm_img = np.where(msk > 0, (img - medimg), 0)

    fits.writeto(outf, outf_img, hdr)
    fits.writeto(outm, outm_img, hdr)
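
A minimal usage sketch; both file names are placeholders.

bfixpix('n0042.fits', 'badpix_mask.fits')  # hypothetical inputs
# writes n0042_f.fits (bad pixels replaced by the 3x3 median) and
# n0042_s.fits (image-minus-median at the flagged pixels)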
Example #12
def load_and_reduce(filename, moment_folder="moments/"):
    '''
    Load the cube in and derive the property arrays.
    '''

    file_dict = {}

    file_labels = ["_moment0", "_centroid", "_linewidth", "_intint"]
    labels = ["moment0", "centroid", "linewidth", "integrated_intensity"]

    # load the cube in
    file_dict['cube'] = list(getdata(filename, header=True))

    prefix_direc = "/".join(filename.split("/")[:-1])
    if len(prefix_direc) != 0:
        prefix_direc = prefix_direc + "/"
    sim_name = os.path.splitext(os.path.basename(filename))[0]

    for dic_lab, file_lab in zip(labels, file_labels):
        file_dict[dic_lab] = \
            list(getdata(os.path.join(prefix_direc, moment_folder,
                                      sim_name + file_lab + ".fits"),
                         0, header=True))

        # And the errors
        file_dict[dic_lab + "_error"] = \
            list(getdata(os.path.join(prefix_direc, moment_folder,
                                      sim_name + file_lab + ".fits"),
                         1, header=True))

    return file_dict
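
A short usage sketch, assuming the moment maps live in a moments/ folder next to the cube; the path is a placeholder.

files = load_and_reduce('sims/run1.fits')  # hypothetical path
cube_data, cube_hdr = files['cube']
# other keys: 'moment0', 'centroid', 'linewidth', 'integrated_intensity',
# each with a matching '<key>_error' entry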
Example #13
def determine_ratio_baseline_sigma(plot=False):
    data = pyfits.getdata(fits_path+'local_counts_baseline.fits')
    el = data[:,:,0].astype(float)
    sp = data[:,:,1].astype(float)
    mask_el = el < 1
    mask_sp = sp < 1
    mask_all = np.logical_and(mask_el, mask_sp)
    mask = np.logical_or(mask_el, mask_sp)
    ratio = pyfits.getdata(fits_path+'local_ratio_baseline.fits')
    count_sum = (el + sp).astype(float)
    count_product = (el * sp).astype(float)
    np.putmask(count_product, mask, 1.0)
    np.putmask(count_sum, mask, 1.0)
    sigma = (np.log10(np.e))**2 * count_sum / count_product
    sigma = np.sqrt(sigma)
    np.putmask(sigma, mask, unknown_ratio)

    sigma_masked = ma.masked_less_equal(sigma, unknown_ratio)

    if plot:
        fig = plt.figure(3)
        fig.clf()
        ax = fig.add_subplot(111)
        im = ax.imshow(sigma_masked, interpolation='nearest', origin='lower')
        cb = plt.colorbar(im)
        ax.set_aspect('auto')
        ax.set_xlabel(r'$M_R [mag]$',fontsize=22)
        ax.set_ylabel(r'$R_{50} [kpc]$',fontsize=22)

    pyfits.writeto(fits_path+'local_ratio_baseline_sigma.fits',
                   sigma, overwrite=True)
Example #14
def getMasks(filename):
  #Delete the old masks, if any exist.
  iraf.imdelete(filename+'_Handmask.fits')
  iraf.imdelete(filename+'_HaHandmask.fits')
  
  #Extract the mask from the masked R image, using a handmasked image if one exists otherwise using the masked image
  maskValue = 0.0
  try:
    maskedRImage = fits.getdata(filename+'_Handmasked.fits')
  except IOError:
    maskedRImage = fits.getdata(filename+'Masked.fits')
  manyZeros = np.zeros_like(maskedRImage)
  manyOnes = np.ones_like(maskedRImage)
  
  print('Writing R mask')
  RmaskPixels = np.where((maskedRImage!=maskValue),manyZeros,manyOnes)
  
  hdu=fits.PrimaryHDU(RmaskPixels)
  hdulist = fits.HDUList([hdu])
  hdulist.writeto(filename+'_Handmask.fits')
  
  #Do the same for the Ha image
  print('Writing Ha mask')
  try:
    maskedHaImage = fits.getdata(filename+'_HaHandmasked.fits')
  except IOError:
    maskedHaImage = fits.getdata(filename+'_Ha.fits')
  HaMaskPixels = np.where((maskedHaImage!=maskValue),manyZeros,manyOnes)
  
  hdu=fits.PrimaryHDU(HaMaskPixels)
  hdulist = fits.HDUList([hdu])
  hdulist.writeto(filename+'_HaHandmask.fits')
Example #15
def load_poisson_stats_image(extra_info=False, return_filenames=False):
    """Load Poisson statistics counts image of a Gaussian source on flat background.

    See poisson_stats_image/README.md for further info.
    TODO: add better description (extract from README?)

    Parameters
    ----------
    extra_info : bool
        If true, a dict of images is returned.
    return_filenames : bool
        If true, return filenames instead of images

    Returns
    -------
    data : numpy array or dict of arrays or filenames
        Depending on the ``extra_info`` and ``return_filenames`` options.
    """
    if extra_info:
        out = dict()
        for name in ['counts', 'model', 'source', 'background']:
            filename = get_path('poisson_stats_image/{0}.fits.gz'.format(name))
            if return_filenames:
                out[name] = filename
            else:
                data = fits.getdata(filename)
                out[name] = data
    else:
        filename = get_path('poisson_stats_image/counts.fits.gz')
        if return_filenames:
            out = filename
        else:
            out = fits.getdata(filename)

    return out
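
A short sketch of the two calling modes (actual paths resolve through get_path).

counts = load_poisson_stats_image()                 # single counts array
parts = load_poisson_stats_image(extra_info=True)   # dict of arrays
print(sorted(parts))  # ['background', 'counts', 'model', 'source']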
Example #16
def two_line_ratio(line1, line2, save_fits=False, filename=''):
    """
    line1: str
        Filename of numerator line

    line2: str
        Filename of denominator line

    filename: str
        The name of the fits file to be saved 

    Warning: works only if line1 and line2 have the same shape!

    Example:
        mp.two_line_ratio('Ha_moment0.fits', 'Hb_moment0.fits', save_fits=True, filename='Ha_Hb.fits')
    
    """

    
    l1, l2 = fits.getdata(line1), fits.getdata(line2)

    hd = fits.getheader(line1)
    ratio = l1/l2

    if save_fits:
        ff = fits.PrimaryHDU(data=ratio, header=hd)
        ff.writeto(filename, overwrite=True)

    return ratio
Example #17
def GetImage(image, mask):
    Passed = 0
    try:
        imageData = fits.getdata(reducedpath+date+'.{:0>3}.reduced.fits'.format(image))
        hdr = fits.getheader(reducedpath+date+'.{:0>3}.reduced.fits'.format(image))
        w = WCS(reducedpath+date+'.{:0>3}.reduced.fits'.format(image))
        Passed = 1
    except IOError:
        print('Trying a different file name.')
        Passed = 0
    if Passed == 0:
        try:
            imageData = fits.getdata(reducedpath+date+'.f{:0>3}.reduced.fits'.format(image))
            hdr = fits.getheader(reducedpath+date+'.f{:0>3}.reduced.fits'.format(image))
            w = WCS(reducedpath+date+'.f{:0>3}.reduced.fits'.format(image))
        except IOError:
            raise OSError('We do not know that filename: %s'%(reducedpath+date+'.f{:0>3}.reduced.fits'.format(image)))

    # Parse the header to get the object name
    ObjName = hdr['OBJECT'].split(',')[0].split(' ')[0]
    band    = hdr['FILTER2']

    # Create the masked Image Data
    imageDataM = np.ma.array(imageData, mask=mask)

    # Computed the background levels
    mean, median, std = stats.sigma_clipped_stats(imageData, mask=mask, sigma=3.0)
    print('mean', 'median', 'std', 'BACKGROUND')
    print(mean, median, std)

    # Remove the background
    imageDataRed = imageDataM - median
    return imageDataRed, median, ObjName, band, std, hdr, w
Example #18
def sdss_flux(filelist, scale=1, interp='cubic'):
    nfiles = len(filelist)
    spectra, header = fits.getdata(filelist[0], header=True)
    flux = np.zeros((nfiles, np.shape(spectra[1,:])[0]*scale))
    wavelength = np.zeros((nfiles, np.shape(spectra[0,:])[0]*scale))
    for i in np.arange(nfiles):
        spectra, header = fits.getdata(filelist[i], header=True)
        wave= spectra[0,:]
        f   = spectra[1,:]
        err = spectra[2,:]
        n      = header['NAXIS1']
        plate  = header['PLATEID']
        mjd    = header['MJD']
        fiberID= header['FIBERID']
        ra     = header['PLUG_RA']
        dec    = header['PLUG_DEC']
        sna    = header['SPEC1_R']
        vac_wavelength=10.**wave
        #vac to air
        air_wavelength=vactoair(vac_wavelength)
        #plot(vac_wavelength,spectra)
        #interpolating
        new_wavelength=np.linspace(air_wavelength[0],air_wavelength[-1],np.shape(air_wavelength)[0]*scale)
        func = interpolate.interp1d(air_wavelength,f,kind=interp)
        new_spectra=func(new_wavelength)
        flux[i,:]=new_spectra
        wavelength[i,:]=new_wavelength
    return wavelength,flux
Example #19
def measure_spire_flux(imagenames, reg):
    """Measure fluxes in background subtracted Herschel SPIRE imaging, given as
    a list of filenames.
    """
    spirebands = ['spire250', 'spire350', 'spire500']
    # set up output
    fluxes, uncertainties, bands = [], [], []
    # loop over images
    for imname in imagenames:
        # Read the image data and header
        im = pyfits.getdata(imname)
        hdr = pyfits.getheader(imname)
        # Read the uncertainty image.  We assume this is matched pixel by pixel
        # with the flux image.
        uncname = imname.replace('scan.fits', 'scan.unc.fits')
        unc = pyfits.getdata(uncname)
        # Get image flux in the ds9 region
        flux, ps, units = photometer(im, hdr, [reg])
        # Get image variance in the ds9 region
        var, _, _ = photometer(unc**2, hdr, [reg])
        # Get flux and unc in Jy, assuming image data in MJy/sr
        flux = (flux * 1e6) * (ps.prod() * 2.35e-11)
        unc = (np.sqrt(var) * 1e6) * (ps.prod() * 2.35e-11)
        fluxes.append(flux)
        uncertainties.append(unc)
        # set the band flag for this image
        bands.append([b for b in spirebands if b in imname][0])

    return fluxes, uncertainties, bands
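
The 2.35e-11 factor above is the number of steradians in one square arcsecond (1 arcsec = pi/180/3600 rad), which converts a MJy/sr surface brightness times a pixel solid angle into Jy. A one-line check:

import numpy as np
print((np.pi / 180.0 / 3600.0) ** 2)   # ~2.3504e-11 sr per arcsec^2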
Example #20
def get_data_path(W, filepath, header=False):
    """
    Takes either S, M or L as a string representing the dataset wavelength.
    Returns the cut (i.e. centred) data and a boolean matrix that
    represents a threshold on the coverage matrix.
    """

    # Load the data
    fits_data_uncut, fits_header = fits.getdata(filepath, "image", header=True)

    fits_coverage_uncut = fits.getdata(filepath, "coverage")
    # Define cuts
    ycenter = int(len(fits_data_uncut)/2)
    xcenter = int(len(fits_data_uncut[0])/2)
    cut = int(ycenter/2)
    # Cut array to focus on center
    fits_data = fits_data_uncut[xcenter-cut:xcenter+cut, ycenter-cut:ycenter+cut]
    fits_coverage = fits_coverage_uncut[xcenter-cut:xcenter+cut, ycenter-cut:ycenter+cut]

    # Create masked array from fits_data
    m_array = create_marray(fits_data)
    # Create masked array from fits_data, masking with coverage
    mask_coverage = m_array.mask * fits_coverage
    above = fits_coverage > 8
    ## build this outside
    # image = (above * fits_data)
    if header:
        return above, fits_data, fits_header
    else:
        return above, fits_data
Example #21
def retrieve_SFHs(filelist, i, massnorm='mformed', nsubpersfh=None, nsfhperfile=None):
    '''
    fetch a time array & SFH from a series of FITS archives,
        each with `nsfhperfile` SFHs & `nsubpersfh` Z/tau/mu realizations for
        each SFH
    '''

    if nsfhperfile is None:
        nsfhperfile = fits.getval(filelist[0], ext=0, keyword='NSFHPER')
    if nsubpersfh is None:
        nsubpersfh = fits.getval(filelist[0], ext=0, keyword='NSUBPER')

    fnum, fi, fii, fiii = find_sfh_ixs(i, nsfhperfile, nsubpersfh)

    '''
    print('trainer {0}: spectral:file-spec {1}-{2}; SFH:file-rec-subsample {1}-{3}-{4}'.format(
          i, fnum, fi, fii, fiii))
    '''
    fname = filelist[fnum]

    allts = fits.getdata(fname, extname='allts')
    allsfhs = np.repeat(fits.getdata(fname, extname='allsfhs'),
                        nsubpersfh, axis=0)

    # normalize mass either by total mass formed or current stellar mass
    mtot = fits.getdata(fname, massnorm)[:, None]

    return allts, allsfhs / mtot, fi
Example #22
def sum_test():
    work_dir = '/Users/Jake/Research/PHAT/sfhmaps/__old__analysis/map'
    brick_list = [2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
                  14, 15, 16, 17, 18, 19, 20, 21, 22, 23]

    data_nat, data_rep = [], []  # brick native, brick reprojected
    for brick in brick_list:
        # Native, unprojected image
        filename = 'b{:02d}_mod_fuv_attenuated.fits'.format(brick)
        filename = os.path.join(work_dir, 'b{:02d}'.format(brick), filename)
        data = fits.getdata(filename)

        hdr = fits.getheader(filename)
        hwcs = wcs.WCS(hdr)
        #area = calc_area1(data, hwcs)
        area = calc_area3(hdr, ref='central')
        data_nat.append(data * area)

        # Reprojected image
        filename = 'hdu0_b{:02d}_mod_fuv_attenuated.fits'.format(brick)
        filename = os.path.join(work_dir, '_mod_fuv_attenuated', 'reproject', filename)
        data = fits.getdata(filename)

        filename = 'hdu0_b{:02d}_mod_fuv_attenuated_area.fits'.format(brick)
        filename = os.path.join(work_dir, '_mod_fuv_attenuated', 'reproject', filename)
        area_rep = fits.getdata(filename) * (180/np.pi*3600)**2  # arcsec2
        data_rep.append(data * area_rep)

    sum_nat = np.array([np.nansum(arr) for arr in data_nat])
    sum_rep = np.array([np.nansum(arr) for arr in data_rep])

    fractionaldiff = (sum_rep - sum_nat) / sum_nat

    print_summary(fractionaldiff)
Example #23
def load_data():

    mgs = fits.getdata('{0}/matched/gz2_main.fits'.format(decals_path),1)
    s82 = fits.getdata('{0}/matched/gz2_s82_coadd1.fits'.format(decals_path),1)
    decals = fits.getdata('{0}/matched/decals_dr1.fits'.format(decals_path),1)

    return mgs,s82,decals
Example #24
def get_simstds(simspecfile, spectroid=0):
    """
    Extract the true standard star flux from the input simspec files.
    
    Args:
        simspecfile : path to the input simspec FITS file
        spectroid : optional spectrograph ID [default 0]
        
    Returns tuple of:
        stdfiber : 1D array of fiberid [0-499] of standard stars
        wave : 1D array of wavelength sampling
        flux : 2D array [nstd, nwave] flux in ergs/s/cm^2/A
    """
    #- Read inputs
    ii = slice(500*spectroid, 500*(spectroid+1))
    hdr = fits.getheader(simspecfile, 0)
    flux = fits.getdata(simspecfile, 'FLUX')[ii]
    meta = fits.getdata(simspecfile, 'METADATA')[ii]
    wave = hdr['CRVAL1'] + hdr['CDELT1']*np.arange(hdr['NAXIS1'])

    #- Which ones are standard stars?
    stdfiber = np.where(meta['OBJTYPE'] == 'STD')[0]

    return stdfiber, wave, flux[stdfiber]
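
The wave array follows the linear FITS convention wave[k] = CRVAL1 + CDELT1*k (CRPIX1 = 1 assumed). A tiny standalone illustration with made-up header values:

import numpy as np

hdr = {'CRVAL1': 3500.0, 'CDELT1': 0.5, 'NAXIS1': 6}   # made-up values
wave = hdr['CRVAL1'] + hdr['CDELT1'] * np.arange(hdr['NAXIS1'])
print(wave)   # [3500.  3500.5 3501.  3501.5 3502.  3502.5]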
Example #25
def _cal_stats(i):
    f = _config.freq[i]
    z = _config.redshift[i]
    xi = _config.ion_frac[i]
    fhd_map = fits.getdata(
        _config.maps_dir + 'fhd_0.000h_{:.3f}MHz.fits'.format(f)
    )
    mask = ~np.isnan(fhd_map)
    gauss_map = fits.getdata(
        _config.maps_dir + 'gauss_0.000h_{:.3f}MHz.fits'.format(f)
    )
    scaled_map = fits.getdata(
        _config.maps_dir + 'fhd_scaled_0.000h_{:.3f}MHz.fits'.format(f)
    )
    model_map = fits.getdata(
        _config.maps_dir + 'model_0.000h_{:.3f}MHz.fits'.format(f)
    )
    res_map = fits.getdata(
        _config.maps_dir + 'res_0.000h_{:.3f}MHz.fits'.format(f)
    )
    # Calculate stats
    _stats_arr[0, :, i] = _stats(model_map, mask=mask)
    _stats_arr[1, :, i] = _stats(fhd_map, mask=mask)
    _stats_arr[2, :, i] = _stats(gauss_map, mask=mask)
    _stats_arr[3, :, i] = _stats(scaled_map, mask=mask)
    _stats_arr[4, :, i] = _stats(res_map, mask=mask)
Example #26
def test_kernels_are_similar():

	psfi = fits.getdata(dir_obj+'psf-i.fits')
	psfr = fits.getdata(dir_obj+'psf-r.fits')
	
	# avgdiff = 0.0180033021648
	assert not two_kernels_are_similar(psfi, psfr)
Example #27
def query_phatcat(objname, phottable='data/f2_apcanfinal_6phot_v2.fits',
                  crosstable=None,
                  filtcols=['275','336','475','814','110','160'],
                  **extras):
    
    """
    Read LCJ's catalog for a certain object and return the magnitudes
    and their uncertainties. Can take either AP numbers (starting with
    'AP') or ALTIDs.
    """
    print(phottable, crosstable, objname)
    ap = pyfits.getdata(phottable, 1)
    if objname[0:2].upper() == 'AP':
        objname = int(objname[2:])
        ind = (ap['id'] == objname)
    else:
        if crosstable is None:
            crosstable = phottable.replace('canfinal_6phot_v2', 'match_known')
        cross = pyfits.getdata(crosstable)
        ind = (cross['altid'] == objname)
        ind = (ap['id'] == cross[ind]['id'][0])
    
    dat = ap[ind][0]
    mags = np.array([dat['MAG'+f] for f in filtcols]).flatten()
    mags_unc = np.array([dat['SIG'+f] for f in filtcols]).flatten()
    flags = ap[ind]['id'] #return the ap number
    
    return mags, mags_unc, flags
Example #28
def buildcat(pawprints, primaries, pawprintNum):
	# builds the matched pawprint catalogue from the matchID file
	#
	#
	
	# generate match ID file path string
	tile = pawprints[0]['tile']
	P2matchFile = constants.matchDir+'P2match_'+tile+'_pp'+str(pawprintNum)+'_IDs.fits'
	
	# load match ID table
	IDtable = getdata(P2matchFile, 1)
	
	# TODO: remove rows with too few detections here?
	# dont forget to reorder the master_ID column if necessary
	
	# generate extension array
	extArray = np.zeros(len(IDtable['master_ID']), dtype=int)
	for primary in primaries:
		ob = primary['ob']
		extCol = 'ext_'+ob+'.'
		extArray = np.maximum(np.array(extArray), np.array(IDtable[extCol]))
	
	# identify an overall primary epoch (P1)
	# we'll just use the P2 epoch with the best seeing for now
	# 	but could use the P2 epoch with the most detected sources instead?
	best_OB_ID = np.argmin(primaries['seeing_median'])
	best_OB = primaries[best_OB_ID]['ob']
	
	# lets open a template array to put the data into
	dataTemplate = np.empty(len(IDtable['master_ID']),
		dtype={
		'names':['RA', 'Dec', 'X', 'Y', 'e_X', 'e_Y', 'mag', 'e_mag', 'ext', 'class', 'ell', 'mjdobs'],
		'formats':['f8','f8','f8','f8','f8','f8','f4','f4','i2','i1','f4','f8']
		})
	dataTemplate[:] = (np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 0, 0, np.nan, np.nan)
	
	# generate final data array
	outputData = []
	for epoch in pawprints:
		outputData.append(np.array(dataTemplate))
	
	# generate list of secondary epoch files and OBs
	secondaryEpochIDs = [num for num, item in enumerate(pawprints) if item['ob'] not in [best_OB]]
	secondaryEpochs = pawprints[secondaryEpochIDs]
	obs = [best_OB] + list(secondaryEpochs['ob'])
	files = [primaries[best_OB_ID]['filename']] + list(secondaryEpochs['filename'])
	mjdobs = np.array([primaries[best_OB_ID]['mjdobs']] + list(secondaryEpochs['mjdobs']))
	
	# load each catalogue into the output data array
	for epoch, file_ob in enumerate(zip(files, obs)):
		# load the catalogue file
		filepath = constants.dataDir+file_ob[0]+'_cat.fits'
		catdata = getdata(filepath, 1)
		
		# copy to output data array
		IDtablecol = 'ID_'+file_ob[1]
		indices = IDtable[IDtablecol]
		outputData[epoch][indices>0] = catdata[indices[indices>0]-1]
	
	return outputData, extArray, mjdobs
Example #29
def test_match_psf_fits():

	bandfrom = b0
	bandto = b1

	fp_img = dir_obj+'stamp-{}.fits'.format(bandfrom)
	fp_psf = dir_obj+'psf-{}.fits'.format(bandfrom)
	fp_psfto = dir_obj+'psf-{}.fits'.format(bandto)

	fp_img_out = dir_obj+'stamp-{}_psfmt-{}.fits'.format(bandfrom, bandto)
	fp_psf_out = dir_obj+'psf-{}_psfmt-{}.fits'.format(bandfrom, bandto)
	fp_psk_out = dir_obj+'psk-{}_psfmt-{}.fits'.format(bandfrom, bandto)

	matchpsf.match_psf_fits(fp_img, fp_psf, fp_psfto, fp_img_out, fp_psf_out, fp_psk_out, overwrite=True, towrite_psk=True)	

	# check that file exists
	for fp in [fp_img_out,fp_psf_out,fp_psk_out,]:
		assert os.path.isfile(fp)


	img = fits.getdata(fp_img)
	psf = fits.getdata(fp_psf)
	psfto = fits.getdata(fp_psfto)

	# check that the final image content is correct
	img_out, psf_out, psk_out = matchpsf.match_psf(img, psf, psfto)

	assert np.all(img_out == fits.getdata(fp_img_out))
	assert np.all(psf_out == fits.getdata(fp_psf_out))
Example #30
def get_data(W):
    """
    Takes either S, M or L as a string representing the dataset wavelength.
    Returns the cut (i.e. centred) data and a boolean matrix that
    represents a threshold on the coverage matrix.
    
    """
    
    path = "/home/abeelen/Herschel/DDT_mustdo_5/PLCK_SZ_G004.5-19.6-1/"
    filename = "OD1271_0x50012da8L_SpirePhotoLargeScan_PLCK_SZ_G004.5-19.6-1_destriped_P"\
    +W+"W.fits"
    # Load the data
    fits_data_uncut = fits.getdata(path+filename,"image")
    
    fits_coverage_uncut = fits.getdata(path+filename,"coverage")
    # Define cuts
    ycenter = int(len(fits_data_uncut)/2)
    xcenter = int(len(fits_data_uncut[0])/2)
    cut = int(ycenter/2)
    print("centers ", float(xcenter)/ycenter)
    # Cut array to focus on center
    fits_data = fits_data_uncut[xcenter-cut:xcenter+cut,ycenter-cut:ycenter+cut]
    fits_coverage = fits_coverage_uncut[xcenter-cut:xcenter+cut,ycenter-cut:ycenter+cut]

    # Create masked array from fits_data
    m_array = create_marray(fits_data)
    # Create masked array from fits_data maskin with coverage
    mask_coverage = m_array.mask * fits_coverage
    above = fits_coverage > 8

    image = (above * fits_data)
    
    return above, fits_data
Example #31
    def get(self, Teff, logg, FeH, resolution=None, interp=True):
        """
        Retrieve the wavelength, flux, and effective radius
        for the spectrum of the given parameters

        Parameters
        ----------
        Teff: int
            The effective temperature (K)
        logg: float
            The logarithm of the surface gravity (dex)
        FeH: float
            The logarithm of the ratio of the metallicity
            and solar metallicity (dex)
        resolution: int (optional)
            The desired wavelength resolution (lambda/d_lambda)
        interp: bool
            Interpolate the model if possible

        Returns
        -------
        dict
            A dictionary of arrays of the wavelength, flux, and
            mu values and the effective radius for the given model

        """
        # See if the model with the desired parameters is within the grid
        in_grid = all([
            (Teff >= min(self.Teff_vals)) & (Teff <= max(self.Teff_vals)) &
            (logg >= min(self.logg_vals)) & (logg <= max(self.logg_vals)) &
            (FeH >= min(self.FeH_vals)) & (FeH <= max(self.FeH_vals))
        ])

        if in_grid:

            # See if the model with the desired parameters is a true grid point
            on_grid = self.data[[
                (self.data['Teff'] == Teff) & (self.data['logg'] == logg) &
                (self.data['FeH'] == FeH)
            ]] in self.data

            # Grab the data if the point is on the grid
            if on_grid:

                # Get the row index and filepath
                row, = np.where((self.data['Teff'] == Teff)
                                & (self.data['logg'] == logg)
                                & (self.data['FeH'] == FeH))[0]

                filepath = self.path + str(self.data[row]['filename'])

                # Get the flux, mu, and abundance arrays
                raw_flux = fits.getdata(filepath, 0)
                mu = fits.getdata(filepath, 1)
                # abund = fits.getdata(filepath, 2)

                # Construct full wavelength scale and convert to microns
                if self.CRVAL1 == '-':
                    # Try to get data from WAVELENGTH extension...
                    dat = fits.getdata(filepath, ext=-1)
                    raw_wave = np.array(dat).squeeze()
                else:
                    # ...or try to generate it
                    b = self.CDELT1 * np.arange(len(raw_flux[0]))
                    raw_wave = np.array(self.CRVAL1 + b).squeeze()

                # Convert from A to desired units
                raw_wave *= self.const

                # Janky unit nullification
                def toQ(val):
                    return val if hasattr(val,
                                          'unit') else val * self.wave_units

                # Trim the wavelength and flux arrays
                idx, = np.where(
                    np.logical_and(
                        raw_wave * self.wave_units >= toQ(self.wave_rng[0]),
                        raw_wave * self.wave_units <= toQ(self.wave_rng[1])))
                flux = raw_flux[:, idx]
                wave = raw_wave[idx]

                # Bin the spectrum if necessary
                if resolution is not None or self.resolution is not None:

                    # Calculate zoom
                    z = utils.calc_zoom(resolution or self.resolution, wave)
                    wave = zoom(wave, z)
                    flux = zoom(flux, (1, z))

                # Make a dictionary of parameters
                # This should really be a core.Spectrum() object!
                row_data = self.data[row].as_void()
                spec_dict = dict(zip(self.data.colnames, row_data))
                spec_dict['wave'] = wave
                spec_dict['flux'] = flux
                spec_dict['mu'] = mu

            # If not on the grid, interpolate to it
            else:
                # Call grid_interp method
                if interp:
                    spec_dict = self.grid_interp(Teff, logg, FeH)
                else:
                    return

            return spec_dict

        else:
            print('Teff: ', Teff, ' logg: ', logg, ' FeH: ', FeH,
                  ' model not in grid.')
            return
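
A hedged usage sketch; 'grid' stands in for a model-grid instance constructed elsewhere, and the parameter values are placeholders.

spec = grid.get(3500, 5.0, 0.0, resolution=500)  # hypothetical call
if spec is not None:
    print(spec['wave'].shape, spec['flux'].shape, spec['mu'].shape)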
Example #32
# Initialize arrays
hdu_slic = [None] * len(dofields)
hdu_cube = [None] * len(dofields)
var_slic = [None] * len(dofields)
var_cube = [None] * len(dofields)

# Main loop over spectral lines to be processed
for line in sline:

    # Loop over fields to produce individual pbcor and variance cubes
    for i, field in enumerate(dofields):

        # --- Regrid the noise-flattened cube and calculate rms
        flatfile = prefix + field + basename + '.image.fits'
        flatimg, hdr = fits.getdata(flatfile, header=True)
        newhdr = hdr.copy()
        if velregrid:
            newhdr['crpix3'] = 1.
            newhdr['cdelt3'] = delv*1000
            newhdr['crval3'] = vstart*1000
            newhdr['naxis3'] = naxis3
            flatimg = regrid_cube(flatimg, hdr, newhdr)
        noisech = np.r_[0:5, flatimg.shape[0]-5:flatimg.shape[0]]
        rms = mad_std(flatimg[noisech,:,:], axis=None, ignore_nan=True)

        # --- Regrid the pb cube and apply pb correction
        if imtype != 'TP':
            pbfile = prefix + field + baseint + '.flux.pbcoverage.fits'
            pbimage, pbhd = fits.getdata(pbfile, header=True)
            pbimage[pbimage<pbcut] = np.nan
Example #33
def make_defocus_phase_map(rms_wfe, circular_pupil=False):
    # We will use some files from AIROPA
    dir_airopa = environ['AIROPA_DATA_PATH']

    # Read in the default NIRC2 pupil.
    pupil_file = path.join(dir_airopa, 'phase_maps', 'defocus', 'pupil.fits')
    pupil = fits.getdata(pupil_file)

    clear_idx = np.where(pupil == 1)

    # Calculate the pupil plane coordinates (normalized over the clear aperture).
    pupil_u0 = np.median(clear_idx[0])
    pupil_v0 = np.median(clear_idx[1])
    pupil_u = np.arange(
        pupil.shape[0],
        dtype=float) - pupil_u0  # note this isn't quite right; but
    pupil_v = np.arange(
        pupil.shape[1], dtype=float
    ) - pupil_v0  # it appears to be the convention Gunther used.
    pupil_u_2d, pupil_v_2d = np.meshgrid(pupil_u, pupil_v, indexing='ij')
    pupil_rho_2d = np.hypot(1.0 * pupil_u_2d, pupil_v_2d)
    rho_max_clear = pupil_rho_2d[clear_idx].max()

    if circular_pupil:
        pupil[:, :] = 0
        idx = np.where(pupil_rho_2d <= rho_max_clear)
        pupil[idx] = 1

    pupil_u /= rho_max_clear
    pupil_v /= rho_max_clear
    pupil_u_2d /= rho_max_clear
    pupil_v_2d /= rho_max_clear
    pupil_rho_2d /= rho_max_clear
    clear_idx = np.where(pupil == 1)

    # Plot the clear pupil.
    plt.figure(1, figsize=(8, 6))
    plt.clf()
    plt.subplots_adjust(right=0.9)
    plt.imshow(pupil,
               extent=[pupil_u[0], pupil_u[-1], pupil_v[0], pupil_v[-1]])
    plt.colorbar()
    plt.axis('equal')
    plt.xlabel('Pupil u')
    plt.ylabel('Pupil v')
    plt.title('Pupil')

    # Add a defocus term
    phase_map = rms_wfe * math.sqrt(3.0) * (2.0 * pupil_rho_2d**2 - 1.0)
    print('RMS WFE before pupil applied: {0:.1f} nm'.format(
        phase_map[clear_idx].std()))
    phase_map *= pupil
    print('RMS WFE after pupil applied: {0:.1f} nm'.format(
        phase_map[clear_idx].std()))
    rms = np.sqrt((phase_map**2).sum()) / np.size(phase_map[clear_idx])**0.5
    print('RMS WFE after pupil applied: {0:.1f} nm'.format(rms))

    # Plot the phase map
    plt.figure(2, figsize=(8, 6))
    plt.clf()
    plt.subplots_adjust(right=0.9)
    plt.imshow(phase_map,
               extent=[pupil_u[0], pupil_u[-1], pupil_v[0], pupil_v[-1]])
    plt.colorbar(label='RMS WFE (nm)')
    plt.axis('equal')
    plt.xlabel('Pupil u')
    plt.ylabel('Pupil v')
    plt.title('Defocus: {0:.0f} nm'.format(rms_wfe))

    out_file = test_dir + 'phase_map_defocus_{0:0.0f}'.format(rms_wfe)
    if circular_pupil:
        out_file += '_circ'
    out_file += '.fits'
    hdu_phase = fits.PrimaryHDU(phase_map)
    hdu_amp = fits.ImageHDU(pupil)
    hdu_u_2d = fits.ImageHDU(pupil_u_2d)
    hdu_v_2d = fits.ImageHDU(pupil_v_2d)

    hdu_list = fits.HDUList([hdu_phase, hdu_amp, hdu_u_2d, hdu_v_2d])

    hdu_list.writeto(out_file, overwrite=True)

    return
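
The sqrt(3)*(2*rho^2 - 1) term is the unit-RMS Zernike defocus polynomial over the unit disk, so scaling it by rms_wfe yields a map whose RMS over a filled circular aperture equals rms_wfe. A quick Monte Carlo check of that unit normalization:

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(-1, 1, 200000)
y = rng.uniform(-1, 1, 200000)
rho2 = x**2 + y**2
z4 = np.sqrt(3.0) * (2.0 * rho2[rho2 <= 1.0] - 1.0)  # keep points inside the unit disk
print(np.sqrt(np.mean(z4**2)))   # ~1.0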
Example #34
        number = 0
        dnaf = ndimage.gaussian_filter(image, 2)
        T = threshold  #MAKE SAME AS DAVE!
        # find connected components
        labeled, nr_objects = ndimage.label(
            dnaf > T)  # `dna[:,:,0]>T` for red-dot case
        #print "Number of objects is %d " % nr_objects
        #REMOVE SMALL/WRONG ONES....
        labeled = np.where(labeled > 0, 1, 0)
        labeled2 = labeled.copy()
        return labeled2


#------soapy------
path = '/home/tanagnos/Desktop/Ph.D/sim_dicer/slit_analysis/'
file = fits.getdata(path + 'slit_400ms.fits')
file = np.transpose(file, (0, 2, 1))
file = file[:, 88:-88, 140:-155]

wm = np.zeros((file.shape[0], file.shape[2]))
com = np.zeros((file.shape[0], file.shape[2]))

for i in range(file.shape[0]):
    print(i)
    a = file[i].copy()
    f_an = gen_analysis()
    labeled = f_an.identify_objects(a, np.mean(a))
    slit = np.where(labeled == 1, a, 0)
    #   check if slit file makes sense!!!!
    wm[i], com[i] = f_an.gauss_fit(slit)
Example #35
import matplotlib.pyplot as plt

#input
argv = sys.argv

if len(argv) < 2:
    print('Usage: python waveform_wave.py <fitsfile> <adcchannel> <start_time>(%Y/%m/%d %H:%M:%S:%f(5-digit)) <duration(sec)>')
    quit()

filename = argv[1]
adcchannel=int(argv[2])
start_time = datetime.strptime(argv[3], '%Y/%m/%d %H:%M:%S:%f').timestamp()
duration = float(argv[4])

#read
evt = fits.getdata(filename, 1, header=True)
data = pd.DataFrame(np.array(evt[0]).byteswap().newbyteorder())
data = data[(data.boardIndexAndChannel.astype('int') == adcchannel) & (data.unixTime > start_time-0.02) & (data.unixTime < start_time+duration)].reset_index(drop=True)
header = evt[1]
data.to_csv('nae.csv')

#array
unix_time, pha_min = array( 'd' ), array('d')
for i in range(len(data.index)):
    unix_time.append(data.unixTime[i]-start_time)
    pha_min.append(data.phaMin[i]+2**15)
    
f1 = ROOT.TF1("func1","[0]-[1]*exp(-x/[2])",0.05,0.2)
f1.SetParameter(0, 2050)
f1.SetParameter(1, 200)
f1.SetParameter(2, 0.05)
Example #36
        open(folder + 'sim_kwargs.pkl', 'rb'))
    lens_model_list, lens_light_model_list, source_model_list, point_source_list = model_lists
    lens_model_list[0] = 'PEMD'
    z_l, z_s, TD_distance, TD_true, TD_obs, TD_err_l = lens_info
    kwargs_lens_list, kwargs_lens_light_list, kwargs_source_list, kwargs_ps = para_s
    solver_type = 'PROFILE_SHEAR'

    kwargs_constraints = {
        'joint_source_with_point_source': [[0, 0]],
        'num_point_source_list': [len(kwargs_ps['ra_image'])],
        'solver_type':
        solver_type,  # 'PROFILE', 'PROFILE_SHEAR', 'ELLIPSE', 'CENTER'
        'Ddt_sampling': True
    }
    if glob.glob(folder + savename) == []:
        lens_data = pyfits.getdata(folder + 'Drz_QSO_image.fits')
        # len_std = pyfits.getdata(folder+'noise_map.fits')
        lens_mask = cr_mask(lens_data, 'normal_mask.reg')
        framesize = 155
        ct = int((len(lens_data) - framesize) / 2)
        lens_data = lens_data[ct:-ct, ct:-ct]
        # len_std = len_std[ct:-ct,ct:-ct]
        lens_mask = (1 - lens_mask)[ct:-ct, ct:-ct]
        plt.imshow(lens_data * lens_mask,
                   origin='lower',
                   cmap='gist_heat',
                   norm=LogNorm())
        plt.colorbar()
        exp_time = 599. * 2 * 8
        stdd = 0.0004  #Measurement from an empty region, 0.016*0.08**2/0.13**2/np.sqrt(8)
        # vgrad = np.gradient(lens_data)
Example #37
def align_stars(indir,outdir,fluxtable=None,fflux=None,ncpu=6,keepfrac=0.7,
                maxhalfsize=np.inf,minhalfsize=0,flipx=False,fitmode='neg'):
    '''align the stars and cut the maximum possible square around them'''
    from scipy.ndimage.interpolation import shift
    if outdir is None:
        outdir = indir
    dic_fflux = {0:'sat',1:'flux'}
    print('\n****Cutting out {}-frames****\n'.format(dic_fflux[fflux]))
    pxhalf = 30 #px right and left of image. final size 2*pxhalf+1, planet at ~242px
    filetable = ascii.read(os.path.join(indir,'filetable_bkgrnd.csv'),delimiter=',')
    filetable = filetable[filetable['flux'] ==fflux]
    #only continue if there is at least one file
    if len(filetable) == 0:
        print('No files found for type {}'.format(dic_fflux[fflux]))
    else:
        filetable['PA'] = np.nan
        nimages = len(filetable)
        imdim = fits.getdata(filetable['fndewarped'][0]).shape[-1]
        #read in the images
        images = []
        full_images = []
        remove_ims = [] #which are too close to corner
        for ii,fn in enumerate(filetable['fndewarped']):
            data,head = fits.getdata(fn,header=True)
            full_images.append(data)
            if not len(data.shape) == 2:
                raise ValueError('Unknown data format at file %s'%fn)
            starx = int(filetable[ii]['roughx'])
            stary = int(filetable[ii]['roughy'])
    #        nims = data.shape[0]
            datacut = np.full([2*pxhalf+1,2*pxhalf+1],np.nan)
            data = data[max(0, stary-pxhalf) : min(stary+pxhalf+1, data.shape[0]),
                        max(0, starx-pxhalf) : min(starx+pxhalf+1, data.shape[1]),
                    ]
            filetable['PA'][ii] = get_pa(head,verbose=False)
            try:
                datacut[0:datacut.shape[0],0:datacut.shape[1]] = data
            except:
                print('Star too close to the border. Ignoring image %s'%fn)
                remove_ims.append(ii)
                continue
            images.append(datacut)
        full_images = np.array(full_images)
        filetable.remove_rows(remove_ims)
    #    filetable.write('filetable_removed_close_borders.csv',delimiter=',',overwrite=True)
        print('Ignored {} images due to wrongly placed star.'.format(len(remove_ims)))
        nimages = len(filetable)
        
        #/////////////////////////////////////////////////////////
        #median combine and first xreg. Keep orig images and shift
        #them only once at the end!
        #/////////////////////////////////////////////////////////
    
        print('register number getting medianed: ',len(images))
        print(images[0].size, images[0].shape, images[0].dtype)
    
        first_median=np.median(images, axis=0)
    
        pool=Pool(ncpu)
        get_shifts=subreg(first_median)
        first_shifts=pool.map(get_shifts,images)
        pool.close()
        shifted_images = []
        for hh in range(len(images)): 
            shifted_images.append(shift(images[hh], first_shifts[hh]))
    
     
        #/////////////////////////////////////////////////////////
        #keep only the best of images
        #/////////////////////////////////////////////////////////
        cross_reg=[]
        for im in shifted_images:
            cross_reg.append(np.sum((im-first_median)**2.))
            
        sorted_cross_reg=np.argsort(cross_reg)
        selected_cross_reg=sorted(sorted_cross_reg[0:int(keepfrac*len(images))])
        n_selected=len(selected_cross_reg)
        
        #/////////////////////////////////////////////////////////
        #median combine and second xreg
        #/////////////////////////////////////////////////////////
    
        images=np.array(images)[selected_cross_reg,:,:]
        shifted_images = np.array(shifted_images)[selected_cross_reg,:,:]
        second_median=np.median(shifted_images,axis=0)
    
        print('second subreg')
        pool=Pool(ncpu)
        get_shifts=subreg(second_median)
        #second shifts is the shift to get all at the same center. need to absolutely
        #center them later
        second_shifts=pool.map(get_shifts,images)
        pool.close()
        
        shifted_images =[]
        for hh in range(n_selected): 
            shifted_images.append(shift(images[hh], second_shifts[hh]))
        shifted_images = np.array(shifted_images)
        
        #get center for images. Move to pxhalf
        print('Finding the absolute center')
        yxoff = []
        smallsizehalf = 20
        fitsizehalf = 6
        nfitworked = 0
        for hh in range(n_selected):
    #        xycen.append( get_center(images[h,:,:]) )
            small_im = shifted_images[hh,pxhalf-smallsizehalf:pxhalf+smallsizehalf+1,
                                         pxhalf-smallsizehalf:pxhalf+smallsizehalf+1]
            yxroughcen = np.array(get_rough_center(small_im,n=3,neg = fitmode))
            yxroughcen += pxhalf -smallsizehalf
            fit_im = shifted_images[hh,yxroughcen[0]-fitsizehalf: 
                                       yxroughcen[0]+fitsizehalf+1,
                                       yxroughcen[1]-fitsizehalf:
                                       yxroughcen[1]+fitsizehalf+1]
            if fitmode == 'double':
                xx = range(fit_im.shape[1])
                yy = range(fit_im.shape[0])
                xx, yy = np.meshgrid(xx,yy)
                amp = np.max(fit_im) - np.min(fit_im)
                initial_guess = (fitsizehalf,fitsizehalf,#center all in x,y
                                 fitsizehalf,fitsizehalf,#sigpos
                                 fitsizehalf/2.,fitsizehalf/2.,#signeg
                                 amp, -amp/2.,0.,) #amppos,ampneg,offset
                try:
                    popt, pcov = opt.curve_fit(shared_center, (xx,yy), fit_im.flatten(),
                                               p0=initial_guess)
                    yxcen = popt[0:2][::-1]
                    nfitworked +=1
                    yxoff.append(yxroughcen + (yxcen-fitsizehalf) - pxhalf)
                except:
                    continue
                    
            elif fitmode in ['neg','pos']:
                yxcen = np.array(fit_gaussian(fit_im,fitsizehalf,fitsizehalf,
                                              neg=fitmode))
            else:
                raise ValueError('Fitmode {} unknown!'.format(fitmode))
            if fitmode != 'double':
                yxoff.append(yxroughcen + (yxcen -fitsizehalf) - pxhalf)
#        del shifted_images
#        del images
        if fitmode == 'double': 
            print('Fitting precise double gaussian used for absolute centering \
worked {} of {} times'.format(nfitworked,n_selected))
        yxcenshift = np.median(-np.array(yxoff),axis=0)
        print('General offset of images: %s' %yxcenshift)
        all_shifts  =second_shifts + yxcenshift
        
        #store the precise centers
        filetable['precisey'] = np.nan
        filetable['precisex'] = np.nan
        filetable['pxtoborder']=0
        for istar,tstar,thisshift in zip(selected_cross_reg,
                                     filetable[selected_cross_reg],
                                     all_shifts):
            filetable['precisey'][istar]  = tstar['roughy'] - thisshift[0]
            filetable['precisex'][istar]  = tstar['roughx'] - thisshift[1]
            filetable['pxtoborder'][istar]= int(np.floor(np.min(\
                                            (filetable['precisex'][istar],
                                             filetable['precisey'][istar],
                                             imdim - filetable['precisex'][istar],
                                             imdim - filetable['precisey'][istar]))-0.5))
        #////////////////////////////////////////////////////////
        #after we got the precise centers cut the maximum area
        #////////////////////////////////////////////////////////
        #determine the minimum distance to any border
        min_dist = np.min((filetable['pxtoborder'][selected_cross_reg]))
        if min_dist >= minhalfsize:
            if min_dist <= maxhalfsize:
                maxsize = min_dist
                print('Cutting images to size {}, which is the maximum without discarding \
images'.format(2*maxsize+1))
            else:
                maxsize = maxhalfsize
                print('Cutting images to selected size {}'.format(2*maxsize+1))
            selected_final = selected_cross_reg
            n_final = n_selected
        else:
            maxsize = np.min(filetable['pxtoborder'][filetable['pxtoborder'] >=minhalfsize])
            selected_border = np.where(filetable['pxtoborder'] > maxsize)[0]
            selected_final = [ii for ii in selected_cross_reg if ii in selected_border]
            n_final = len(selected_final)
            print('Cutting images to chosen size {}, which means {} more images are \
being discarded.'.format(2*maxsize+1,n_selected-n_final))
        final_ims = np.full((n_final,2*maxsize+1,2*maxsize+1),np.nan)
    
        #1. round the center 2.shift the image by residuum 3.cut floor of min dist
        for ii,tstar in zip(range(n_final),filetable[selected_final]):
            inty = int(np.round(tstar['precisey']))
            intx = int(np.round(tstar['precisex']))
            tempim = shift(full_images[selected_final[ii],:,:],
                           (inty - tstar['precisey'],intx-tstar['precisex']))
            final_ims[ii,:,:]=tempim[inty -maxsize: inty +maxsize+1,
                                     intx -maxsize: intx +maxsize+1]
        #/////////////////////////////////////////////////////////
        #save
        #/////////////////////////////////////////////////////////
        filetable['selected_final'] = 0
        filetable['selected_final'][selected_final] = 1
        if flipx:
            final_ims = final_ims[:,:,::-1]
            filetable['PA'] = -filetable['PA']
        if fflux == 1:
            fits.writeto(os.path.join(outdir,'median_unsat.fits'),np.median(final_ims,axis=0),
                         overwrite=True)
            fits.writeto(os.path.join(outdir,'cube_unsat.fits'),final_ims,overwrite=True)
            print('Saved median images to {}'.format(outdir))
            return filetable
        else:
            if fluxtable is not None:
                filetable = vstack([fluxtable,filetable])
    
            fits.writeto(os.path.join(outdir,'center_im.fits'),final_ims,overwrite=True)
            fits.writeto(os.path.join(outdir,'rotnth.fits'),filetable['PA'][selected_final],
                         overwrite=True)
            ascii.write(filetable,output=os.path.join(indir,'filetable_stars.csv'),delimiter=',',
                        overwrite=True)
    
        print('Cut stars. Saved centered images and angles in {}. \
Their shape is {}'.format(outdir, final_ims.shape))
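
# The recentering above follows a common pattern: round the precise center to
# an integer pixel, shift the image by the subpixel residual, then cut a fixed
# window around the rounded center. A self-contained sketch of that pattern
# (hypothetical helper, not part of the pipeline; uses the same
# scipy.ndimage.shift as above):
import numpy as np
from scipy.ndimage import shift

def recenter_and_crop(image, ycen, xcen, halfsize):
    """Shift image so (ycen, xcen) lands on an integer pixel, then crop."""
    inty, intx = int(round(ycen)), int(round(xcen))
    # shift by the residual so the center sits exactly on (inty, intx)
    shifted = shift(image, (inty - ycen, intx - xcen))
    return shifted[inty - halfsize:inty + halfsize + 1,
                   intx - halfsize:intx + halfsize + 1]
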
def reduce_frame(raw, out_dir, flatCacher=None):
    """

    Arguments:
        raw: RawDataSet object
        out_dir: Data product root directory

    """

    # initialize per-object logger and check output directory
    if raw.isPair:
        init(raw.baseNames['AB'], out_dir)
    else:
        init(raw.baseNames['A'], out_dir)

    # if no flats in raw data set then fail
    if (len(raw.flatFns) < 1):
        logger.error("no flats for {}".format(raw.baseName))
        raise DrpException.DrpException('no flats')

    # create reduced data set
    reduced = ReducedDataSet.ReducedDataSet(raw)

    # read raw object image data into reduced data set object
    reduced.objImg['A'] = fits.getdata(raw.objAFn, ignore_missing_end=True)

    if raw.isPair:
        reduced.objImg['B'] = fits.getdata(raw.objBFn, ignore_missing_end=True)

    # put object summary info into per-object log
    log_start_summary(reduced)

    # Get fully processed flat in the form of a Flat object
    reduced.Flat = getFlat(raw, flatCacher)
    logger.info('using flat {}'.format(reduced.Flat.getBaseName()))

    # clean cosmic ray hits on object frame(s)
    if config.params['no_cosmic']:
        logger.info(
            "cosmic ray rejection on object frame inhibited by command line flag")

    else:
        logger.info('cosmic ray cleaning object frame A')
        reduced.objImg['A'], cosmicMethod = image_lib.cosmic_clean(
            reduced.objImg['A'])
        logger.debug('cosmic ray cleaning object frame A complete')
        if reduced.isPair:
            logger.info('cosmic ray cleaning object frame B')
            reduced.objImg['B'], _ = image_lib.cosmic_clean(reduced.objImg['B'])
            logger.debug('cosmic ray cleaning object frame B complete')
        reduced.cosmicCleaned = True
        logger.info(cosmicMethod)

    # if darks are available, combine them if there are more than one
    # and subtract from object frame(s) and flat
    process_darks(raw, reduced)

    # if AB pair then subtract B from A
    if reduced.isPair:
        reduced.objImg['AB'] = np.subtract(
            reduced.objImg['A'], reduced.objImg['B'])

    # reduce orders
    try:
        reduce_orders(reduced)
    except IOError as e:
        # might want to do something else here
        raise

    # find and apply wavelength solution
    import importlib
    importlib.reload(wavelength_utils)
    if find_global_wavelength_soln(reduced) is True:
        apply_wavelength_soln(reduced)
    else:
        logger.info('not applying wavelength solution')
        for order in reduced.orders:
            order.waveScale = order.flatOrder.gratingEqWaveScale
            order.calMethod = 'grating equation'

    return reduced
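
# image_lib.cosmic_clean is not shown in this example. A minimal stand-in,
# assuming an astroscrappy (L.A.Cosmic) backend, might look like this sketch:
def cosmic_clean_sketch(img, sigclip=4.5):
    """Return a cleaned copy of img and a short description of the method."""
    import astroscrappy
    # detect_cosmics returns (crmask, cleaned_array)
    crmask, cleaned = astroscrappy.detect_cosmics(img, sigclip=sigclip)
    return cleaned, 'astroscrappy detect_cosmics, sigclip={}'.format(sigclip)
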
#step 1.a: read in the observed data

targetfile = file_target.split(".fits")
find_obsdate_target = targetfile[0].split("_")
obsdate_target = find_obsdate_target[1]
filenumber_target = find_obsdate_target[2]

specfile_target = targetfile[0] + ".spec.fits"
snrfile_target = targetfile[0] + ".sn.fits"
vegafile = targetfile[0] + ".spec_a0v.fits"

specpath_target = obsdir + obsdate_target + '/' + specfile_target
snrpath_target = obsdir + obsdate_target + '/' + snrfile_target
vegapath_target = obsdir + obsdate_target + '/' + vegafile

spec_target = pyfits.getdata(specpath_target)
wlsol_target = pyfits.getdata(specpath_target, 1)
snr_target = pyfits.getdata(snrpath_target)
vega = pyfits.getdata(vegapath_target, 4)

filename_out = obsdir_out + targetfile[
    0] + "." + base_filename_out + ".spec_a0v.fits"  #spectra
filename_out_txt = obsdir_out + targetfile[
    0] + "." + base_filename_out + ".spec_a0v.txt"  #text file out on info
f = open(filename_out_txt, 'w')
f.write('Performing a telluric correction \n')
print('Performing a telluric correction')
print(specfile_target)

dataheader_target = pyfits.getheader(specpath_target)
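
# The correction itself happens further on; schematically, an A0V telluric
# correction divides the target spectrum by the observed A0V standard and
# multiplies a Vega model back in to restore the intrinsic continuum. A hedged
# sketch with hypothetical, already-resampled arrays:
#
#     corrected = spec_target / spec_a0v_observed * vega_model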
Example #40
def get_sed_interpolated_cube(teff, met, logg):
    """
    Returns an interpolated sed model:

    Args:
        teff: effective temperature
        met: metallicity
        logg: surface gravity

    Return:
        wave, flux - tuple with numpy array with the interpolated sed

    Examples:
        >>> wave, flux = get_sed_interpolated_cube(5777, 0, 4.44)
    """

    data_grid = fits.getdata(DIR_SED + "ck04models/catalog.fits")
    teffv = []
    metv = []
    loggv = []
    for line in data_grid:
        teffi, meti, loggi = line['INDEX'].split(',')
        teffv.append(float(teffi))
        metv.append(float(meti))
        loggv.append(float(loggi))
    teff_u = np.unique(teffv)
    met_u = np.unique(metv)
    logg_u = np.unique(loggv)

    # Getting the points of the cube to interpolate
    teff_l = teff_u[np.where(teff_u < teff)[0][-1]]
    teff_h = teff_u[np.where(teff_u >= teff)[0][0]]
    met_l = met_u[np.where(met_u < met)[0][-1]]
    met_h = met_u[np.where(met_u >= met)[0][0]]
    logg_l = logg_u[np.where(logg_u < logg)[0][-1]]
    logg_h = logg_u[np.where(logg_u >= logg)[0][0]]
    print(teff_l, teff, teff_h)
    print(met_l, met, met_h)
    print(logg_l, logg, logg_h)

    list_cube = [(teff_l, met_l, logg_l), (teff_l, met_l, logg_h),
                 (teff_l, met_h, logg_l), (teff_l, met_h, logg_h),
                 (teff_h, met_l, logg_l), (teff_h, met_l, logg_h),
                 (teff_h, met_h, logg_l), (teff_h, met_h, logg_h)]

    # Reading the seds in the cube
    list_sed = []
    for t, m, l in list_cube:
        if m >= 0:
            sm = "p%02d" % int(abs(m) * 10.)
        else:
            sm = "m%02d" % int(abs(m) * 10.)
        sl = "%2d" % int(l * 10.)
        if t > 9999:
            st = "%5d" % int(t)
        else:
            st = "%4d" % int(t)
        file_name = DIR_SED + "ck04models/ck" + sm + "/ck" + sm + "_" + st + ".fits[g" + sl + "]"
        wave, flux = read_ck04models_numbers(file_name)
        if np.all(flux == 0):
            print("Problem with sed: ", file_name)
            raise ValueError('Sed in interpolation cube with zero flux values')
        list_sed.append((wave, flux))


    # Interpolating the sed
    wave_i = []
    flux_i = []
    t = np.linspace(teff_l, teff_h, 2)
    m = np.linspace(met_l, met_h, 2)
    l = np.linspace(logg_l, logg_h, 2)
    V = np.zeros((2, 2, 2))
    pt = (teff, met, logg)

    for i in range(NWAVE):
        V[0, 0, 0] = list_sed[0][1][i]
        V[0, 0, 1] = list_sed[1][1][i]
        V[0, 1, 0] = list_sed[2][1][i]
        V[0, 1, 1] = list_sed[3][1][i]
        V[1, 0, 0] = list_sed[4][1][i]
        V[1, 0, 1] = list_sed[5][1][i]
        V[1, 1, 0] = list_sed[6][1][i]
        V[1, 1, 1] = list_sed[7][1][i]
        fn = RegularGridInterpolator((t, m, l), V)
        flux_i.append(fn(pt))
        wave_i.append(list_sed[0][0][i])
    wave_i = np.array(wave_i)
    flux_i = np.array(flux_i)

    return wave_i, flux_i
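
# The per-wavelength loop above builds one interpolator per flux sample.
# RegularGridInterpolator also accepts trailing value dimensions, so -- under
# that assumption -- the whole cube can be interpolated in a single call; a
# sketch reusing the variable names from the function:
#
#     from itertools import product
#     V = np.empty((2, 2, 2, NWAVE))
#     for k, (i, j, n) in enumerate(product(range(2), repeat=3)):
#         V[i, j, n] = list_sed[k][1]          # same ordering as list_cube
#     fn = RegularGridInterpolator((t, m, l), V)
#     flux_i = fn([(teff, met, logg)])[0]      # shape (NWAVE,)
#     wave_i = list_sed[0][0]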
Example #41
import matplotlib
#get_ipython().magic(u'matplotlib inline')
from matplotlib import pyplot
from glob import glob

import numpy as np
from astropy.io import fits

#Pull all the blank files into a list

files = glob('Sub*.fits')
#Sub_SgrA_I_1160_Continuum_Slice20.fits.fits

#Determine the shape of each file in order to create a blank cube.

shape = len(files), 10, 2000, 2000

#Make a blank datacube to stack all the data into it.

data = np.empty(shape=shape, dtype=np.float32)

#Stack all the data into a single cube

for i, f in enumerate(files):
    data[i, :, :, :] = fits.getdata(f)

#Take the median of the data files
median = np.nanmedian(data, axis=0)

#Save the new cube
cube = fits.open(files[0])
cube[0].data = median
cube.writeto('median_nosignal.fits')
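
# Rather than hard-coding the cube shape, it can be derived from the first
# file; a small sketch (assumes every input shares the first file's shape,
# which the stacking requires anyway):
#
#     first = fits.getdata(files[0])
#     shape = (len(files),) + first.shape
#     data = np.empty(shape=shape, dtype=np.float32)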
Example #42
import numpy as np
import astropy.io.fits as pyfits
import os
infos = open('info.list', 'r').readlines()

for line in infos:
    info = line.strip().split(' ')
    satellites = [int(i) for i in info[3].split(';')]
    path = info[0]
    cube = pyfits.getdata(path)
    for i in range(len(cube)):
        if i + 1 not in satellites:
            # build '<name>.<plane>.fits'; os.path.splitext is safer than
            # splitting on the substring 'fits'
            name = ('/h/ninou/trainset/detrended_trainset/images/'
                    + os.path.splitext(path)[0] + '.' + str(i) + '.fits')
            pyfits.writeto(name, cube[i], overwrite=True)
Example #43
def make_stamps_t80share(names, coords, sizes, outdir=None, redo=False,
                     img_types=None, bands=None, tiles_dir=None):
    """  Produces stamps of objects in S-PLUS from a table of names,
    coordinates.

    Parameters
    ----------
    names: np.array
        Array containing the name/id of the objects.

    coords: astropy.coordinates.SkyCoord
        Coordinates of the objects.

    sizes: np.array
        Size of each stamp (in pixels).

    outdir: str
        Path to the output directory. If not given, stamps are saved in the
        current directory.

    redo: bool
        Option to rewrite stamp in case it already exists.

    img_types: list
        List containing the image types to be used in stamps. Default is
        ["swp", "swpweight"] to save both the images and the weight images
        with uncertainties.

    bands: list
        List of bands for the stamps. Defaults produces stamps for all
        filters in S-PLUS. Options are 'U', 'F378', 'F395', 'F410', 'F430', 'G',
        'F515', 'R', 'F660', 'I', 'F861', and 'Z'.


    """
    names = np.atleast_1d(names)
    sizes = np.atleast_1d(sizes)
    if len(sizes) == 1:
        sizes = np.full(len(names), sizes[0])
    sizes = sizes.astype(int)
    img_types = ["swp", "swpweight"] if img_types is None else img_types
    outdir = os.getcwd() if outdir is None else outdir
    tiles_dir = "/storage/share/all_coadded/" if tiles_dir is None else \
        tiles_dir
    header_keys = ["OBJECT", "FILTER", "EXPTIME", "GAIN", "TELESCOP",
                   "INSTRUME", "AIRMASS"]
    bands = context.bands if bands is None else bands
    ############################################################################
    # Selecting tiles from S-PLUS footprint
    fields = Table.read(os.path.join(context._path, "data",
                                     "all_tiles_final.csv"))
    field_coords = SkyCoord(fields["RA"], fields["DEC"],
                            unit=(u.hourangle, u.degree))
    idx, d2d, d3d = field_coords.match_to_catalog_sky(coords)
    idx = np.where(d2d < 2 * u.degree)
    fields = fields[idx]
    ############################################################################
    # Producing stamps
    for field in tqdm(fields, desc="Fields"):
        field_coords = SkyCoord(field["RA"], field["DEC"],
                                unit=(u.hourangle, u.degree))
        field_name = field["NAME"]
        tile_dir = os.path.join(tiles_dir, field["NAME"])
        d2d = coords.separation(field_coords)
        idx = np.where(d2d < 2 * u.degree)[0]
        fnames = names[idx]
        fcoords = coords[idx]
        fsizes = sizes[idx]
        for img_type in tqdm(img_types, desc="Data types", leave=False,
                             position=1):
            for band in tqdm(bands, desc="Bands", leave=False, position=2):

                fitsfile = os.path.join(tile_dir, "{}_{}_{}.fits".format(
                                         field["NAME"], band, img_type))
                fzfile = fitsfile.replace(".fits", ".fz")
                if os.path.exists(fitsfile):
                    header = fits.getheader(fitsfile)
                    data = fits.getdata(fitsfile)
                elif os.path.exists(fzfile):
                    f = fits.open(fzfile)[1]
                    header = f.header
                    data = f.data
                else:
                    continue
                wcs = WCS(header)
                xys = wcs.all_world2pix(fcoords.ra, fcoords.dec, 1)
                for i, (name, size) in enumerate(tqdm(zip(fnames, fsizes),
                                     desc="Galaxies", leave=False, position=3)):
                    galdir = os.path.join(outdir, name)
                    output = os.path.join(galdir,
                             "{0}_{1}_{2}_{3}x{3}_{4}.fits".format(
                              name, field_name, band, size, img_type))
                    if os.path.exists(output) and not redo:
                        continue
                    try:
                        cutout = Cutout2D(data, position=fcoords[i],
                                  size=size * u.pixel, wcs=wcs)
                    except ValueError:
                        continue
                    if np.all(cutout.data == 0):
                        continue
                    hdu = fits.ImageHDU(cutout.data)
                    for key in header_keys:
                        if key in header:
                            hdu.header[key] = header[key]
                    hdu.header["TILE"] = hdu.header["OBJECT"]
                    hdu.header["OBJECT"] = name
                    if "HIERARCH OAJ PRO FWHMMEAN" in header:
                        hdu.header["PSFFWHM"] = header["HIERARCH OAJ " \
                                                       "PRO FWHMMEAN"]
                    hdu.header["X0TILE"] = (xys[0][i], "Location in tile")
                    hdu.header["Y0TILE"] = (xys[1][i], "Location in tile")
                    hdu.header.update(cutout.wcs.to_header())
                    hdulist = fits.HDUList([fits.PrimaryHDU(), hdu])
                    if not os.path.exists(galdir):
                        os.mkdir(galdir)
                    hdulist.writeto(output, overwrite=True)
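
# A hypothetical call, with made-up object IDs and coordinates (np, u and
# SkyCoord as imported for the function above):
#
#     names = np.array(["NGC0001", "NGC0002"])
#     coords = SkyCoord(ra=[10.5, 11.2], dec=[-35.1, -36.0], unit=u.degree)
#     make_stamps_t80share(names, coords, sizes=256, outdir="stamps")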
Example #44
def get_wavelength(wave_file):
    """
    Load the wavelenth array from file
    """
    return fits.getdata(wave_file)
Example #45
    # get the R50 and Mr bin for each galaxy in the sample
    R50_bins = np.digitize(R50, bins=R50_bin_edges).clip(1, n_R50_bins)
    Mr_bins = np.digitize(Mr, bins=Mr_bin_edges).clip(1, n_Mr_bins)

    # convert R50 and Mr bin indices to indices of bins
    # in the combined rectangular grid
    rect_bins = (Mr_bins - 1) + n_Mr_bins * (R50_bins - 1)

    # get the voronoi bin for each galaxy in the sample
    rect_bin_vbins = rect_vbins_table['vbin']
    voronoi_bins = rect_bin_vbins[rect_bins]
    
    return voronoi_bins # Gives each galaxy a voronoi bin.
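
# np.digitize returns 1-based bin indices (0 for values below the first edge),
# so clip(1, n_bins) pins out-of-range galaxies into the edge bins. A tiny
# worked example with made-up edges and two bins:
#
#     >>> edges = np.array([0.0, 1.0, 2.0])
#     >>> np.digitize([-0.5, 0.5, 2.5], bins=edges).clip(1, 2)
#     array([1, 1, 2])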

# Load the required data:
all_data = fits.getdata(source_directory + full_sample, 1)
all_data = Table(all_data)
#spiral_data = select_data_arm_number(all_data) # keep spirals for binning.

R50, Mr = [all_data[c] for c in ['PETROR50_R_KPC','PETROMAG_MR']]
R50 = np.log10(R50) # binning performed in log10(R50)

rect_bins_table, vbins_table, rect_vbins_table = voronoi_binning(R50, Mr)
voronoi_bins = voronoi_assignment(all_data, rect_bins_table, rect_vbins_table)

# Now make some plots to show the voronoi bins:

# First check if the directory exists:
if not os.path.isdir('figures/voronoi_binning/'):
    os.mkdir('figures/voronoi_binning/')
  
# Bin count histogram:
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from itertools import product

# fname = 'ap_cutoff3475_rap12_ran3'
fname = 'final_scan_cutoff3490_v3'
fitsname = 'A1_pic.fits'
bins_number = 25

data = np.loadtxt(fname, skiprows=1)  # Data structure 0)ypos 1)xpos 2)radius
Magerror = 2.000E-02
Magzpt = 2.530E+01

image_data = fits.getdata(fitsname, ext=0)
shapes = image_data.shape


def fit_galaxy(ypos, xpos, r_in, r_out=4):
    """ fit the galaxy to a circle of radius r_in and use the outer ring to calculate the local background """
    count_out = []
    count_in = []
    for j, i in product(
            np.arange(ypos - (r_out + r_in), ypos + r_out + r_in + 1),
            np.arange(xpos - (r_out + r_in),
                      xpos + 1 + r_out + r_in)):  # Create square
        if (j - ypos)**2 + (i - xpos)**2 <= r_in**2 and 0 <= j <= shapes[
                0] - 1 and 0 <= i <= shapes[
                    1] - 1:  # make sure points are in a circle
            j, i = [int(j), int(i)]
            if 3419 < image_data[j, i]:
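
# (The example is cut off above.) A minimal sketch of the same idea -- an
# aperture sum with a local-background annulus -- using the image_data and
# shapes defined earlier; the radii are illustrative:
def aperture_counts_sketch(ypos, xpos, r_in, r_out=4):
    """Background-subtracted counts inside a circle of radius r_in."""
    yy, xx = np.ogrid[:shapes[0], :shapes[1]]
    r2 = (yy - ypos) ** 2 + (xx - xpos) ** 2
    inner = r2 <= r_in ** 2
    annulus = (r2 > r_in ** 2) & (r2 <= (r_in + r_out) ** 2)
    background = np.median(image_data[annulus])   # local sky level
    return image_data[inner].sum() - background * inner.sum()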
Example #47
    def create(self):
        """MAIN FUNCTION"""
        self.logger = logging.getLogger('mirage.wfss_simulator')
        self.logger.info('\n\nRunning wfss_simulator....\n')
        self.logger.info('using parameter files: ')
        for pfile in self.paramfiles:
            self.logger.info('{}'.format(pfile))

        # Loop over the yaml files and create
        # a direct seed image for each
        imseeds = []
        ptsrc_seeds = []
        galaxy_seeds = []
        extended_seeds = []
        for pfile in self.paramfiles:
            self.logger.info('Running catalog_seed_image for {}'.format(pfile))
            cat = catalog_seed_image.Catalog_seed(offline=self.offline)
            cat.paramfile = pfile
            cat.make_seed()
            imseeds.append(cat.seed_file)
            ptsrc_seeds.append(cat.ptsrc_seed_filename)
            galaxy_seeds.append(cat.galaxy_seed_filename)
            extended_seeds.append(cat.extended_seed_filename)

            # If Mirage is going to produce an hdf5 file of spectra,
            # then we only need a single direct seed image. Note that
            # find_param_info() has reordered the list such that the
            # wfss mode yaml file will be examined first.
            if self.create_continuum_seds:
                break

        # Create hdf5 file with spectra of all sources if requested.
        if self.create_continuum_seds:
            det_name = cat.params['Readout']['array_name'].split('_')[0]
            self.SED_file = spectra_from_catalog.make_all_spectra(self.catalog_files, input_spectra=self.SED_dict,
                                                                  input_spectra_file=self.SED_file,
                                                                  extrapolate_SED=self.extrapolate_SED,
                                                                  output_filename=self.final_SED_file,
                                                                  normalizing_mag_column=self.SED_normalizing_catalog_column,
                                                                  module=self.module, detector=det_name)

        # Location of the configuration files needed for dispersion
        loc = os.path.join(self.datadir, "{}/GRISM_{}/".format(self.instrument,
                                                               self.instrument.upper()))

        # Determine the name of the background file to use, as well as the
        # orders to disperse.
        if self.instrument == 'nircam':
            dmode = 'mod{}_{}'.format(self.module, self.dispersion_direction)
            if self.params['simSignals']['use_dateobs_for_background']:
                self.logger.info("Generating background spectrum for observation date: {}".format(self.params['Output']['date_obs']))
                back_wave, back_sig = backgrounds.day_of_year_background_spectrum(self.params['Telescope']['ra'],
                                                                                  self.params['Telescope']['dec'],
                                                                                  self.params['Output']['date_obs'])
            else:
                if isinstance(self.params['simSignals']['bkgdrate'], str):
                    if self.params['simSignals']['bkgdrate'].lower() in ['low', 'medium', 'high']:
                        self.logger.info("Generating background spectrum based on requested level of: {}".format(self.params['simSignals']['bkgdrate']))
                        back_wave, back_sig = backgrounds.low_med_high_background_spectrum(self.params, self.detector,
                                                                                           self.module)
                    else:
                        raise ValueError("ERROR: Unrecognized background rate. Must be one of 'low', 'medium', 'high'")
                else:
                    raise ValueError(("ERROR: WFSS background rates must be one of 'low', 'medium', 'high', "
                                      "or use_dateobs_for_background must be True "))

        elif self.instrument == 'niriss':
            dmode = 'GR150{}'.format(self.dispersion_direction)
            background_file = "{}_{}_medium_background.fits".format(self.crossing_filter.lower(),
                                                                    dmode.lower())

            if isinstance(self.params['simSignals']['bkgdrate'], str):
                if self.params['simSignals']['bkgdrate'].lower() in ['low', 'medium', 'high']:
                    siaf_instance = pysiaf.Siaf('niriss')[self.params['Readout']['array_name']]
                    vegazp, photflam, photfnu, pivot_wavelength = fluxcal_info(self.params['Reffiles']['flux_cal'], self.instrument,
                                                                               self.params['Readout']['filter'], self.params['Readout']['pupil'],
                                                                               self.detector, self.module)

                    if os.path.split(self.params['Reffiles']['filter_throughput'])[1] == 'placeholder.txt' or self.params['Reffiles']['filter_throughput'] == 'config':
                        filter_file = get_filter_throughput_file(self.instrument, 'CLEAR', self.params['Readout']['pupil'])
                    else:
                        filter_file = self.params['Reffiles']['filter_throughput']

                    scaling_factor = backgrounds.calculate_background(self.params['Telescope']['ra'],
                                                                      self.params['Telescope']['dec'],
                                                                      filter_file,
                                                                      self.params['simSignals']['use_dateobs_for_background'],
                                                                      MEAN_GAIN_VALUES['niriss'], siaf_instance,
                                                                      level=self.params['simSignals']['bkgdrate'])

                    # Having the grism in the beam reduces the throughput by 20%.
                    # Multiply that into the scaling factor
                    scaling_factor *= NIRISS_GRISM_THROUGHPUT_FACTOR

                    # Translate from ADU/sec/pix to e-/sec/pix since that is
                    # what the disperser works with
                    scaling_factor *= MEAN_GAIN_VALUES['niriss']

                else:
                    raise ValueError("ERROR: Unrecognized background rate. String value must be one of 'low', 'medium', 'high'")
            elif np.isreal(self.params['simSignals']['bkgdrate']):
                # The bkgdrate entry in the input yaml file is described as
                # the desired signal in ADU/sec/pixel IN A DIRECT IMAGE
                # Since we want e-/sec/pixel here for the disperser, multiply
                # by the gain as well as the throughput factor for the grism.
                scaling_factor = self.params['simSignals']['bkgdrate'] * MEAN_GAIN_VALUES['niriss'] * NIRISS_GRISM_THROUGHPUT_FACTOR

        # Default to extracting all orders
        orders = None

        # Call the disperser separately for each type of object: point sources,
        # galaxies, and extended objects
        disp_seed = np.zeros((cat.ffsize, cat.ffsize))
        background_done = False
        for seed_files in [ptsrc_seeds, galaxy_seeds, extended_seeds]:
            if seed_files[0] is not None:
                dispersed_objtype_seed = Grism_seed(seed_files, self.crossing_filter,
                                                    dmode, config_path=loc, instrument=self.instrument.upper(),
                                                    extrapolate_SED=self.extrapolate_SED, SED_file=self.SED_file,
                                                    SBE_save=self.source_stamps_file)
                dispersed_objtype_seed.observation(orders=orders)
                dispersed_objtype_seed.disperse(orders=orders)
                # Only include the background in one of the object type seed images
                if not background_done:
                    if self.instrument == 'nircam':
                        background_image = dispersed_objtype_seed.disperse_background_1D([back_wave, back_sig])
                        dispersed_objtype_seed.finalize(Back=background_image, BackLevel=None)
                    else:
                        # BackLevel is used as such: background / max(background) * BackLevel
                        # So we need to either set BackLevel equal to the requested level
                        # NOT THE RATIO OF THAT TO MEDIUM, or we need to open the background
                        # file and multiply it by the ratio of the requested level to medium.
                        # The former isn't quite correct because it'll be scaling the maximum
                        # value in the image to "low" or "high", rather than the median
                        full_background_file = os.path.join(loc, background_file)
                        background_image = fits.getdata(full_background_file)

                        # Before scaling the background image by the scaling_factor
                        # we need to normalize by the sigma-clipped mean value. This is
                        # because the background files were produced and scaled to the
                        # ETC "medium" level at some arbirtrary pointing, but the
                        # "medium" level is pointing-dependent. Current background files
                        # are scaled such that the "medium" value from the ETC is the
                        # sigma-clipped mean value.
                        clip, lo, hi = sigmaclip(background_image, low=3, high=3)
                        background_mean = np.mean(clip)
                        background_image = background_image / background_mean * scaling_factor
                        dispersed_objtype_seed.finalize(Back=background_image, BackLevel=None)

                    background_done = True

                    # Save the background image to a fits file
                    hprime = fits.PrimaryHDU()
                    himg = fits.ImageHDU(background_image)
                    himg.header['EXTNAME'] = 'BACKGRND'
                    himg.header['UNITS'] = 'e/s'
                    hlist = fits.HDUList([hprime, himg])
                    hlist.writeto(self.background_image_filename, overwrite=True)
                else:
                    dispersed_objtype_seed.finalize()
                disp_seed += dispersed_objtype_seed.final

        # Disperser output is always full frame. Remove the signal from
        # the reference pixels now since we know exactly where they are
        disp_seed[0:4, :] = 0.
        disp_seed[2044:, :] = 0.
        disp_seed[:, 0:4] = 0.
        disp_seed[:, 2044:] = 0.

        # Crop to the requested subarray if necessary
        if cat.params['Readout']['array_name'] not in self.fullframe_apertures:
            self.logger.info("Subarray bounds: {}".format(cat.subarray_bounds))
            self.logger.info("Dispersed seed image size: {}".format(disp_seed.shape))
            disp_seed = self.crop_to_subarray(disp_seed, cat.subarray_bounds)

        # Save the dispersed seed image if requested
        # Save in units of e/s, under the idea that this should be a
        # "perfect" noiseless view of the scene that does not depend on
        # detector effects, such as gain.
        if self.save_dispersed_seed:
            self.save_dispersed_seed_image(disp_seed)

        # Convert seed image to ADU/sec to be consistent
        # with other simulator outputs
        if self.instrument == 'niriss':
            gain = MEAN_GAIN_VALUES['niriss']
        elif self.instrument == 'nircam':
            gain = MEAN_GAIN_VALUES['nircam']['lw{}'.format(self.module.lower())]

        disp_seed /= gain

        # Update seed image header to reflect the
        # division by the gain
        cat.seedinfo['units'] = 'ADU/sec'

        # Prepare dark current exposure if
        # needed.
        if self.override_dark is None:
            d = dark_prep.DarkPrep(offline=self.offline)
            d.paramfile = self.wfss_yaml
            d.prepare()

            if len(d.dark_files) == 1:
                obslindark = d.prepDark
            else:
                obslindark = d.dark_files
        else:
            self.logger.info('\n\noverride_dark has been set. Skipping dark_prep.')
            if isinstance(self.override_dark, str):
                self.read_dark_product()
                obslindark = self.prepDark
            elif isinstance(self.override_dark, list):
                obslindark = self.override_dark

        # Combine into final observation
        obs = obs_generator.Observation(offline=self.offline)
        obs.linDark = obslindark
        obs.seed = disp_seed
        obs.segmap = cat.seed_segmap
        obs.seedheader = cat.seedinfo
        #obs.paramfile = y.outname
        obs.paramfile = self.wfss_yaml
        obs.create()
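
# The background scaling above normalizes by a sigma-clipped mean; the same
# pattern in isolation (scipy.stats.sigmaclip returns the surviving values and
# the clip bounds), with hypothetical inputs:
#
#     from scipy.stats import sigmaclip
#     clipped, lo, hi = sigmaclip(background_image, low=3, high=3)
#     scaled = background_image / np.mean(clipped) * scaling_factor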
Example #48
    plt.savefig(filepath)
    plt.clf()


def save_fits(data_frame, filepath):
    print('Saving', filepath)
    hdu = fits.PrimaryHDU(data_frame)
    hdul = fits.HDUList([hdu])
    hdul.writeto(filepath, overwrite=True)


#100ms

#Load Dark Frames
darkframes = [
    fits.getdata(f'raw_data/Albireo_V_Dark_100ms_{i:03}.FITS')
    for i in range(1, 6)
]
for g in darkframes:
    print(g.shape)
med_dark_frame = np.median(darkframes, axis=0)
print(med_dark_frame.shape)
save_image(med_dark_frame, 'dark_fields/100ms.png')

#V Band
for i in range(1, 11):
    df = fits.getdata(f'raw_data/Albireo_V_100ms_{i:03}.FITS')
    save_image(df, f'data_fields/Albireo_V_100ms_{i:03}.png')
    dark_sub = df - med_dark_frame
    save_image(dark_sub, f'subtracted_fields/Albireo_V_100ms_{i:03}.png')
    save_fits(dark_sub, f'subtracted_fields/Albireo_V_100ms_{i:03}.FITS')
def flattest(step_input_filename,
             dflatref_path=None,
             sfile_path=None,
             fflat_path=None,
             msa_shutter_conf=None,
             writefile=False,
             show_figs=True,
             save_figs=False,
             plot_name=None,
             threshold_diff=1.0e-14,
             debug=False):
    """
    This function calculates the flat field from the D-, S-, and F-flat reference
    files and compares it with the flat field produced by the pipeline.

    Args:
        step_input_filename: str, name of the output fits file from the 2d_extract step (with full path)
        dflatref_path: str, path to the D-flat reference fits files
        sfile_path: str, path to the S-flat reference fits files
        fflat_path: str, path to the F-flat reference fits files
        msa_shutter_conf: str, full path and name of the MSA configuration fits file
        writefile: boolean, if True writes the fits files of the calculated flat and difference images
        show_figs: boolean, whether to show plots or not
        save_figs: boolean, save the plots (the 3 plots can be saved or not independently with the function call)
        plot_name: string, desired name (if name is not given, the plot function will name the plot by
                    default)
        threshold_diff: float, threshold difference between pipeline output and ESA file
        debug: boolean, if true a series of print statements will show on-screen

    Returns:
        - 1 plot, if told to save and/or show.
        - median_diff: Boolean, True if the median difference is smaller than or equal to threshold_diff

    """

    # get info from the rate file header
    det = fits.getval(step_input_filename, "DETECTOR", 0)
    print('step_input_filename=', step_input_filename)
    lamp = fits.getval(step_input_filename, "LAMP", 0)
    exptype = fits.getval(step_input_filename, "EXP_TYPE", 0)
    grat = fits.getval(step_input_filename, "GRATING", 0)
    filt = fits.getval(step_input_filename, "FILTER", 0)
    print("rate_file  -->     Grating:", grat, "   Filter:", filt, "   Lamp:",
          lamp)

    # read in the on-the-fly flat image
    flatfile = step_input_filename.replace("flat_field.fits", "intflat.fits")
    pipeflat = fits.getdata(flatfile, 1)

    # get the reference files
    # D-Flat
    dflat_ending = "f_01.03.fits"
    dfile = "_".join((dflatref_path, "nrs1", dflat_ending))
    if det == "NRS2":
        dfile = dfile.replace("nrs1", "nrs2")
    #print(" ***** path for d-flat: ", os.path.isfile(dfile))
    dfim = fits.getdata(dfile, 1)
    dfimdq = fits.getdata(dfile, 4)
    # need to flip/rotate the image into science orientation
    ns = np.shape(dfim)
    # keep in mind that 0,1,2 = z,y,x in Python, whereas 0,1,2 = x,y,z in IDL
    dfim = np.transpose(dfim, (0, 2, 1))
    dfimdq = np.transpose(dfimdq)
    if det == "NRS2":
        dfim = reverse_cols(dfim)
        dfim = dfim[::-1]
        dfimdq = reverse_cols(dfimdq)
        dfimdq = dfimdq[::-1]
    naxis3 = fits.getval(dfile, "NAXIS3", 1)

    # get the wavelength values
    dfwave = np.array([])
    for i in range(naxis3):
        keyword = "_".join(("PFLAT", str(i + 1)))
        dfwave = np.append(dfwave, fits.getval(dfile, keyword, 1))
    dfrqe = fits.getdata(dfile, 2)

    # S-flat
    tsp = exptype.split("_")
    mode = tsp[1]
    if filt == "F070LP":
        flat = "FLAT4"
    elif filt == "F100LP":
        flat = "FLAT1"
    elif filt == "F170LP":
        flat = "FLAT2"
    elif filt == "F290LP":
        flat = "FLAT3"
    elif filt == "CLEAR":
        flat = "FLAT5"
    else:
        print("No filter correspondence. Exiting the program.")
        # This is the key argument for the assert pytest function
        msg = "Test skipped because there is no flat correspondence for the filter in the data: {}".format(
            filt)
        median_diff = "skip"
        return median_diff, msg

    sflat_ending = "f_01.01.fits"
    sfile = "_".join((sfile_path, grat, "OPAQUE", flat, "nrs1", sflat_ending))
    #print(" ***** path for s-flat: ", os.path.isfile(sfile))

    if debug:
        print("grat = ", grat)
        print("flat = ", flat)
        print("sfile used = ", sfile)

    if det == "NRS2":
        sfile = sfile.replace("nrs1", "nrs2")
    sfim = fits.getdata(sfile, 1)
    sfimdq = fits.getdata(sfile, 3)

    # need to flip/rotate image into science orientation
    sfim = np.transpose(sfim, (0, 2, 1))
    sfimdq = np.transpose(sfimdq, (0, 2, 1))
    if det == "NRS2":
        sfim = reverse_cols(sfim)
        sfim = sfim[::-1]
        sfimdq = reverse_cols(sfimdq)
        sfimdq = sfimdq[::-1]

    # get the wavelength values for sflat cube
    sfimwave = np.array([])
    naxis3 = fits.getval(sfile, "NAXIS3", 1)
    for i in range(0, naxis3):
        keyword = "FLAT_{:02d}".format(i + 1)
        #print ("S-flat -> using ", keyword)
        try:
            sfimwave = np.append(sfimwave, fits.getval(sfile, keyword, 1))
        except KeyError:
            pass
    sfv = fits.getdata(sfile, 5)

    # F-Flat
    fflat_ending = "_01.01.fits"
    ffile = fflat_path + "_" + filt + fflat_ending
    naxis3 = fits.getval(ffile, "NAXIS3", 1)
    #print(" ***** path for f-flat: ", os.path.isfile(ffile))
    # all four MSA quadrants use the same extensions of this F-flat file
    # (1 = data, 2 = error, 3 = dq, 4 = fast vector), so read them once
    ffs_data = fits.getdata(ffile, 1)
    ffs_wave = np.array([fits.getval(ffile, "FLAT_{:02d}".format(i), 1)
                         for i in range(naxis3)])
    ffs_err = fits.getdata(ffile, 2)
    ffs_dq = fits.getdata(ffile, 3)
    ffv = fits.getdata(ffile, 4)

    # go through each pixel in the test data
    wc_file_name = step_input_filename.replace("_flat_field.fits",
                                               "_world_coordinates.fits")
    wc_hdulist = fits.open(wc_file_name)

    if writefile:
        # create the fits list to hold the calculated flat values for each slit
        hdu0 = fits.PrimaryHDU()
        outfile = fits.HDUList()
        outfile.append(hdu0)

        # create the fits list to hold the image of pipeline - calculated difference values
        hdu0 = fits.PrimaryHDU()
        complfile = fits.HDUList()
        complfile.append(hdu0)

    # loop over the 2D subwindows and read in the WCS values
    for i, _ in enumerate(wc_hdulist):
        ext = i + 1
        if ext >= len(wc_hdulist):
            break
        slit_id = fits.getval(wc_file_name, "SLIT", ext)
        wc_data = fits.getdata(wc_file_name, ext)
        # get the wavelength
        wave = wc_data[0, :, :]

        # get the subwindow origin
        px0 = int(fits.getval(wc_file_name, "CRVAL1", ext)) - 1
        py0 = int(fits.getval(wc_file_name, "CRVAL2", ext)) - 1
        n_p = np.shape(wave)
        nw = n_p[0] * n_p[1]
        if debug:
            print("subwindow origin:   px0=", px0, "   py0=", py0)
            #print ("nw = ", nw)
        delf = np.zeros([nw]) + 999.0
        flatcor = np.zeros([nw]) + 999.0

        # get the slitlet info, needed for the F-Flat
        ext_shutter_info = "SHUTTER_INFO"  # this is extension 2 of the msa file, that has the shutter info
        slitlet_info = fits.getdata(msa_shutter_conf, ext_shutter_info)
        sltid = slitlet_info.field("SLITLET_ID")
        for j, s in enumerate(sltid):
            if s == int(slit_id):
                im = j
                # get the shutter with the source in it
                if slitlet_info.field("BACKGROUND")[im] == "N":
                    isrc = j
        quad = slitlet_info.field("SHUTTER_QUADRANT")[im]
        row = slitlet_info.field("SHUTTER_ROW")[im]
        col = slitlet_info.field("SHUTTER_COLUMN")[im]
        slitlet_id = repr(row) + "_" + repr(col)
        print('sltid=', sltid, "   quad=", quad, "   row=", row, "   col=",
              col, "   slitlet_id=", slitlet_id)

        # get the relevant F-flat reference data (read once above; identical
        # for all four quadrants in this file)
        ffsall = ffs_data
        ffsallwave = ffs_wave
        ffsalldq = ffs_dq

        # loop through the pixels
        print("looping through the pixels, this may take a little time ... ")
        flat_wave = wave.flatten()
        wave_shape = np.shape(wave)
        for j in range(0, nw):
            if np.isfinite(flat_wave[j]):  # skip if wavelength is NaN
                # get the pixel indices
                jwav = flat_wave[j]
                t = np.where(wave == jwav)
                pind = [t[0][0] + py0, t[1][0] + px0]
                if debug:
                    print('j, jwav, px0, py0 : ', j, jwav, px0, py0)
                    print('pind = ', pind)

                # get the pixel bandwidth **this needs to be modified for prism, since the dispersion is not linear!**
                delw = 0.0
                if (j != 0) and (int(
                    (j - 1) / n_p[1]) == int(j / n_p[1])) and (int(
                        (j + 1) / n_p[1]) == int(j / n_p[1])) and np.isfinite(
                            flat_wave[j + 1]) and np.isfinite(
                                flat_wave[j - 1]):
                    delw = 0.5 * (flat_wave[j + 1] - flat_wave[j - 1])
                if (j == 0) or not np.isfinite(flat_wave[j - 1]) or (int(
                    (j - 1) / n_p[1]) != int(j / n_p[1])):
                    delw = flat_wave[j + 1] - flat_wave[j]
                if (j == nw - 1) or not np.isfinite(flat_wave[j + 1]) or (int(
                    (j + 1) / n_p[1]) != int(j / n_p[1])):
                    delw = flat_wave[j] - flat_wave[j - 1]

                if debug:
                    #print ("(j, (j-1), n_p[1], (j-1)/n_p[1], (j+1), (j+1)/n_p[1])", j, (j-1), n_p[1], int((j-1)/n_p[1]), (j+1), int((j+1)/n_p[1]))
                    #print ("np.isfinite(flat_wave[j+1]), np.isfinite(flat_wave[j-1])", np.isfinite(flat_wave[j+1]), np.isfinite(flat_wave[j-1]))
                    #print ("flat_wave[j+1], flat_wave[j-1] : ", np.isfinite(flat_wave[j+1]), flat_wave[j+1], flat_wave[j-1])
                    print("delw = ", delw)

                # integrate over dflat fast vector
                dfrqe_wav = dfrqe.field("WAVELENGTH")
                dfrqe_rqe = dfrqe.field("RQE")
                iw = np.where((dfrqe_wav >= flat_wave[j] - delw / 2.0)
                              & (dfrqe_wav <= flat_wave[j] + delw / 2.0))
                int_tab = auxfunc.idl_tabulate(dfrqe_wav[iw[0]],
                                               dfrqe_rqe[iw[0]])
                first_dfrqe_wav, last_dfrqe_wav = dfrqe_wav[
                    iw[0]][0], dfrqe_wav[iw[0]][-1]
                dff = int_tab / (last_dfrqe_wav - first_dfrqe_wav)

                if debug:
                    #print ("np.shape(dfrqe_wav) : ", np.shape(dfrqe_wav))
                    #print ("np.shape(dfrqe_rqe) : ", np.shape(dfrqe_rqe))
                    #print ("dfimdq[pind[0]][pind[1]] : ", dfimdq[pind[0]][pind[1]])
                    #print ("np.shape(iw) =", np.shape(iw))
                    #print ("np.shape(dfrqe_wav[iw[0]]) = ", np.shape(dfrqe_wav[iw[0]]))
                    #print ("np.shape(dfrqe_rqe[iw[0]]) = ", np.shape(dfrqe_rqe[iw[0]]))
                    #print ("int_tab=", int_tab)
                    print("dff = ", dff)

                # interpolate over dflat cube
                iloc = auxfunc.idl_valuelocate(dfwave, flat_wave[j])[0]
                if dfwave[iloc] > flat_wave[j]:
                    iloc -= 1
                ibr = [iloc]
                if iloc != len(dfwave) - 1:
                    ibr.append(iloc + 1)
                # get the values in the z-array at indices ibr, and x=pind[1] and y=pind[0]
                zz = dfim[:, pind[0], pind[1]][ibr]
                # now determine the length of the array with only the finite numbers
                zzwherenonan = np.where(np.isfinite(zz))
                kk = np.size(zzwherenonan)
                dfs = 1.0
                if (flat_wave[j] <= max(dfwave)) and (
                        flat_wave[j] >= min(dfwave)) and (kk == 2):
                    dfs = np.interp(flat_wave[j], dfwave[ibr],
                                    zz[zzwherenonan])
                # check DQ flags
                if dfimdq[pind[0]][pind[1]] != 0:
                    dfs = 1.0

                # integrate over S-flat fast vector
                sfv_wav = sfv.field("WAVELENGTH")
                sfv_dat = sfv.field("DATA")
                iw = np.where((sfv_wav >= flat_wave[j] - delw / 2.0)
                              & (sfv_wav <= flat_wave[j] + delw / 2.0))
                sff = 1.0
                if np.size(iw) != 0:
                    int_tab = auxfunc.idl_tabulate(sfv_wav[iw], sfv_dat[iw])
                    first_sfv_wav, last_sfv_wav = sfv_wav[iw[0]][0], sfv_wav[
                        iw[0]][-1]
                    sff = int_tab / (last_sfv_wav - first_sfv_wav)

                # interpolate s-flat cube
                iloc = auxfunc.idl_valuelocate(sfimwave, flat_wave[j])[0]
                ibr = [iloc]
                if iloc != len(sfimwave) - 1:
                    ibr.append(iloc + 1)
                # get the values in the z-array at indices ibr, and x=pind[1] and y=pind[0]
                zz = sfim[:, pind[0], pind[1]][ibr]
                # now determine the length of the array with only the finite numbers
                zzwherenonan = np.where(np.isfinite(zz))
                kk = np.size(zzwherenonan)
                sfs = 1.0
                if (flat_wave[j] <= max(sfimwave)) and (
                        flat_wave[j] >= min(sfimwave)) and (kk == 2):
                    sfs = np.interp(flat_wave[j], sfimwave[ibr],
                                    zz[zzwherenonan])
                # check DQ flags
                kk = np.where(sfimdq[:, pind[0], pind[1]][ibr] == 0)
                if np.size(kk) != 2:
                    sfs = 1.0

                # integrate over f-flat fast vector
                # reference file wavelength range is from 0.6 to 5.206 microns, so need to force
                # solution to 1 for wavelengths outside that range
                ffv_wav = ffv.field("WAVELENGTH")
                ffv_dat = ffv.field("DATA")
                fff = 1.0
                if (flat_wave[j] - delw / 2.0 >=
                        0.6) and (flat_wave[j] + delw / 2.0 <= 5.206):
                    iw = np.where((ffv_wav >= flat_wave[j] - delw / 2.0)
                                  & (ffv_wav <= flat_wave[j] + delw / 2.0))
                    if np.size(iw) > 1:
                        int_tab = auxfunc.idl_tabulate(ffv_wav[iw],
                                                       ffv_dat[iw])
                        first_ffv_wav, last_ffv_wav = ffv_wav[
                            iw[0]][0], ffv_wav[iw[0]][-1]
                        fff = int_tab / (last_ffv_wav - first_ffv_wav)

                # interpolate over f-flat cube
                ffs = np.interp(flat_wave[j], ffsallwave, ffsall[:, col - 1,
                                                                 row - 1])
                flatcor[j] = dff * dfs * sff * sfs * fff * ffs

                if (pind[1] - px0 + 1 == 9999) and (pind[0] - py0 + 1 == 9999):
                    print("pind = ", pind)
                    print("flat_wave[j] = ", flat_wave[j])
                    print("dfs, dff = ", dfs, dff)
                    print("sfs, sff = ", sfs, sff)
                    # make plot
                    font = {  #'family' : 'normal',
                        'weight': 'normal',
                        'size': 16
                    }
                    matplotlib.rc('font', **font)
                    fig = plt.figure(1, figsize=(12, 10))
                    plt.subplots_adjust(hspace=.4)
                    ax = plt.subplot(111)
                    xmin = flat_wave[j] - 0.01
                    xmax = flat_wave[j] + 0.01
                    plt.xlim(xmin, xmax)
                    #plt.ylim(min(dfim[:, pind[0], pind[1]])*0.9, max(dfim[:, pind[0], pind[1]])*1.1)
                    plt.plot(dfwave,
                             dfim[:, pind[0], pind[1]],
                             linewidth=7,
                             marker='D',
                             color='k',
                             label="dflat_im")
                    plt.plot(flat_wave[j],
                             dfs,
                             linewidth=7,
                             marker='D',
                             color='r')
                    plt.plot(dfrqe_wav,
                             dfrqe_rqe,
                             linewidth=7,
                             marker='D',
                             c='k',
                             label="dflat_vec")
                    plt.plot(flat_wave[j],
                             dff,
                             linewidth=7,
                             marker='D',
                             color='r')
                    plt.plot(sfimwave,
                             sfim[:, pind[0], pind[1]],
                             linewidth=7,
                             marker='D',
                             color='k',
                             label="sflat_im")
                    plt.plot(flat_wave[j],
                             sfs,
                             linewidth=7,
                             marker='D',
                             color='r')
                    plt.plot(sfv_wav,
                             sfv_dat,
                             linewidth=7,
                             marker='D',
                             color='k',
                             label="sflat_vec")
                    plt.plot(flat_wave[j],
                             sff,
                             linewidth=7,
                             marker='D',
                             color='r')
                    # add legend
                    box = ax.get_position()
                    ax.set_position(
                        [box.x0, box.y0, box.width * 1.0, box.height])
                    ax.legend(loc='upper right', bbox_to_anchor=(1, 1))
                    plt.minorticks_on()
                    plt.tick_params(axis='both',
                                    which='both',
                                    bottom='on',
                                    top='on',
                                    right='on',
                                    direction='in',
                                    labelbottom='on')
                    plt.show()
                    print(
                        "Exiting the program. Unable to determine mean and std_dev. Test set to be skipped."
                    )
                    plt.close()
                    msg = "Unable to determine mean and std_dev. Test set to be skipped."
                    median_diff = "skip"
                    return median_diff, msg

                if debug:
                    print("dfs = ", dfs)
                    print("sff = ", sff)
                    print("sfs = ", sfs)
                    print("ffs = ", ffs)

                # Difference between pipeline and calculated values
                if ((pind[0] - py0) < np.shape(pipeflat)[0]) and (
                    (pind[1] - px0) < np.shape(pipeflat)[1]):
                    delf[j] = pipeflat[pind[0] - py0,
                                       pind[1] - px0] - flatcor[j]
                else:
                    break
                #print("pind[0], py0, pind[1], px0, flatcor[j] : ", pind[0], py0, pind[1], px0, flatcor[j])
                #print("(pind[0]-py0, pind[1]-px0) : ", pind[0]-py0, pind[1]-px0 )
                #print("(pind[0]-py0, pind[1]-px0) - flatcor[j] : ", (pind[0]-py0, pind[1]-px0) - flatcor[j])
                #print("np.shape(pipeflat) = ", np.shape(pipeflat))
                #print("delf[j] : ", delf[j])

                # Remove all pixels with values=1 (mainly inter-slit pixels) for statistics
                if pipeflat[pind[0] - py0, pind[1] - px0] == 1:
                    delf[j] = 999.0
            else:
                flatcor[j] = 1.0  # no correction if no wavelength


        delfg = delf[np.where((delf != 999.0)
                              & (delf >= -0.1))]  # ignore outliers
        delfg_median, delfg_std = np.median(delfg), np.std(delfg)
        print("median, stdev in flat value differences: ", delfg_median,
              delfg_std)

        # This is the key argument for the assert pytest function
        median_diff = False
        if abs(delfg_median) <= float(threshold_diff):
            median_diff = True
        if median_diff:
            test_result = "PASSED"
        else:
            test_result = "FAILED"
        print(" *** Result of the test: ", test_result)

        # make histogram
        font = {  #'family' : 'normal',
            'weight': 'normal',
            'size': 16
        }
        matplotlib.rc('font', **font)
        alpha = 0.2
        fontsize = 15
        fig = plt.figure(1, figsize=(8, 6))
        plt.subplots_adjust(hspace=.4)
        ax = plt.subplot(111)
        t = (filt, grat, "   SLIT", slit_id)
        plt.title(" ".join(t))
        plt.xlabel("flat$_{pipe}$ - flat$_{calc}$")
        plt.ylabel("N")
        xmin = delfg_median - delfg_std * 5
        xmax = delfg_median + delfg_std * 5
        plt.xlim(xmin, xmax)
        x_median = "median = {:0.3}".format(delfg_median)
        x_stddev = "stddev = {:0.3}".format(delfg_std)
        ax.text(0.7, 0.9, x_median, transform=ax.transAxes, fontsize=fontsize)
        ax.text(0.7, 0.83, x_stddev, transform=ax.transAxes, fontsize=fontsize)
        plt.tick_params(axis='both',
                        which='both',
                        bottom='on',
                        top='on',
                        right='on',
                        direction='in',
                        labelbottom='on')
        binwidth = (xmax - xmin) / 40.
        _, _, _ = ax.hist(delfg,
                          bins=np.arange(xmin, xmax + binwidth, binwidth),
                          histtype='bar',
                          ec='k',
                          facecolor="red",
                          alpha=alpha)

        if save_figs:
            #if plot_name is None:
            file_basename = step_input_filename.replace(".fits", "")
            plot_name = file_basename + "_" + slitlet_id + "_MOS_flattest_histogram.pdf"
            plt.savefig(plot_name)
            print('\n Plot saved: ', plot_name)
        if show_figs:
            plt.show()
        plt.close()

        # create fits file to hold the calculated flat for each slit
        if writefile:
            # this is the image of the calculated flat values for this slit
            outfile_ext = fits.ImageHDU(flatcor.reshape(wave_shape),
                                        name=slitlet_id)
            outfile.append(outfile_ext)

            # this is the image of pipeline - calculated difference values
            complfile_ext = fits.ImageHDU(delf.reshape(wave_shape),
                                          name=slitlet_id)
            complfile.append(complfile_ext)

    wc_hdulist.close()

    if writefile:
        outfile_name = step_input_filename.replace("2d_flat_field.fits",
                                                   det + "_flat_calc.fits")
        complfile_name = step_input_filename.replace("2d_flat_field.fits",
                                                     det + "_flat_comp.fits")

        # this is the file with the calculated flat values of each slice
        outfile.writeto(outfile_name, overwrite=True)

        # this is the file to hold the image of pipeline-calculated difference values
        complfile.writeto(complfile_name, overwrite=True)

        print("Fits file with flat values of each slice saved as: ")
        print(outfile_name)

        print("Fits file with image of pipeline - calculated saved as: ")
        print(complfile_name)

    print("Done.")
    msg = ""
    return median_diff, msg
Example #50
fig, ax = plt.subplots(figsize=(11, 8))
labels = []
for filt_i in [0, 2]:
    filt_l = ['F150W', 'F200W', 'F356W', 'F444W']
    filt = filt_l[filt_i]
    labels.append(filt)
    x_p = [1, 1, 2, 2]
    zp = zp_dic[filt]
    bias_list = []
    for seed in range(seed_range[0], seed_range[1]):
        folder = folder_suf + 'sim' + '_ID' + repr(
            ID) + '_' + filt + '_seed' + repr(seed)
        f = open(folder + "/sim_info.txt", "r")
        string = f.read()
        lines = string.split('\n')  # split into lines
        true_host = pyfits.getdata(folder + '/Drz_HOSTclean_image.fits')
        result, framesize = pickle.load(
            open(folder + '/{0}.pkl'.format(save_name), 'rb'))
        half_r = int(framesize / 2)
        peak = np.where(true_host == true_host.max())
        peak = [peak[0][0], peak[1][0]]
        true_host = true_host[peak[0] - half_r:peak[0] + half_r + 1,
                              peak[1] - half_r:peak[1] + half_r + 1]
        true_host_flux = true_host.sum()
        true_host_mag = -2.5 * np.log10(true_host_flux) + zp
        # print(true_host_mag)
        true_host_ratio = [
            lines[i] for i in range(len(lines))
            if 'host_flux_ratio' in lines[i]
        ][0].split('\t')[1]
        true_total_flux = true_host_flux / float(true_host_ratio)

def compare_pixscale():

    # fnames = [x for x in useful.fnames if "irac" not in x]
    fnames = []
    for instr in useful.instr_used_list[:-1]:
        for filt in useful.filters[instr][:1]:
            fnames.append("%s_%s" % (instr, filt))

    zorder = np.linspace(1, 10, len(fnames))[::-1]

    fig, ax = plt.subplots(1, 1, figsize=(10, 10), dpi=75)
    fig.subplots_adjust(left=0.1, right=0.98, top=0.98, bottom=0.07)
    divider = make_axes_locatable(ax)
    dax = divider.append_axes("bottom", size="40%", pad=0.15)

    for fname, zo in zip(fnames, zorder):

        print(fname)
        instr, filt = fname.split("_")
        fcolor = useful.fcolor_dict[instr][filt]
        inp_name = orig_fnames[instr][filt]["img"]
        pixscale = proj_plane_pixel_scales(WCS(
            fitsio.getheader(inp_name)))[0] * 3600.

        cat_orig = fitsio.getdata("catalog_rms2_orig_%s.matched.fits" % fname)
        cat_swrp = fitsio.getdata("catalog_rms2_swrp_%s.matched.fits" % fname)

        orig_zp = useful.orig_zp[instr][
            filt] if "supcam" not in fname else useful.orig_zp[instr][filt][1]
        cat_orig["FLUX_APER"] = cat_orig["FLUX_APER"] * calc_fscale(orig_zp)
        cat_orig["FLUXERR_APER"] = cat_orig["FLUXERR_APER"] * calc_fscale(
            orig_zp)

        cond = (cat_orig["FLUXERR_APER"][:, 2] >
                0) & (cat_swrp["FLUXERR_APER"][:, 2] > 0)
        cat_orig = cat_orig[cond]
        cat_swrp = cat_swrp[cond]

        sn_orig = cat_orig["FLUX_APER"][:, 2] / cat_orig["FLUXERR_APER"][:, 2]
        sn_swrp = cat_swrp["FLUX_APER"][:, 2] / cat_swrp["FLUXERR_APER"][:, 2]

        cond = (sn_orig >= 25.) & (sn_swrp >= 25.)
        sn_orig, sn_swrp = sn_orig[cond], sn_swrp[cond]

        ferr_orig = cat_orig["FLUXERR_APER"][:, 2][cond]
        ferr_swrp = cat_swrp["FLUXERR_APER"][:, 2][cond]

        ferr_med = np.nanmedian(ferr_swrp / ferr_orig)
        ferr_CIs = np.nanpercentile(ferr_swrp / ferr_orig, [16, 84])
        ferr_err = np.abs(ferr_CIs - ferr_med)[:, np.newaxis]

        sn_med = np.nanmedian(sn_swrp / sn_orig)
        sn_CIs = np.nanpercentile(sn_swrp / sn_orig, [16, 84])
        sn_err = np.abs(sn_CIs - sn_med)[:, np.newaxis]

        ax.scatter(pixscale,
                   ferr_med,
                   marker='o',
                   s=75,
                   facecolor=fcolor,
                   edgecolor='none',
                   label=fname.replace("_", ":"),
                   zorder=zo)
        ax.errorbar(pixscale,
                    ferr_med,
                    yerr=ferr_err,
                    marker='',
                    markersize=0,
                    color=fcolor,
                    capsize=5,
                    zorder=zo)

        dax.scatter(pixscale,
                    sn_med,
                    marker='o',
                    s=75,
                    facecolor=fcolor,
                    edgecolor='none',
                    zorder=zo)
        dax.errorbar(pixscale,
                     sn_med,
                     yerr=sn_err,
                     marker='',
                     markersize=0,
                     color=fcolor,
                     capsize=5,
                     zorder=zo)

        print(fname, ferr_med, ferr_CIs, sn_med, sn_CIs)

    ax.axhline(1, c='k', lw=1.2, ls='--')
    dax.axhline(1, c='k', lw=1.2, ls='--')
    ax.axvline(0.15, c='k', lw=1.2)
    dax.axvline(0.15, c='k', lw=1.2, label='SWARP\'d Pixscale')

    xx = np.arange(0.1, 0.3, 0.01)
    yy = xx / 0.15
    ax.plot(xx, 1. / yy, c='k', ls='-')
    dax.plot(xx, yy, c='k', ls='-')
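    # The reference curves above assume a simple linear scaling with the ratio
    # of the original pixel scale to the SWARP'd 0.15"/px scale: S/N ~ x/0.15
    # and flux-error ratio ~ 0.15/x.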

    ax.set_ylim(0.6, 1.6)
    ax.set_xlim(0.11, 0.23)
    dax.set_ylim(0.75, 1.55)
    dax.set_xlim(0.11, 0.23)

    ax.set_ylabel("$\sigma_{f,SWARP} / \sigma_{f,Orig}$", fontsize=20)
    dax.set_ylabel("$SN_{SWARP} / SN_{Orig}$", fontsize=20)
    dax.set_xlabel("Original Pixscale [\"/px]", fontsize=20)

    _ = [label.set_visible(False) for label in ax.get_xticklabels()]
    _ = [
        label.set_fontsize(16) for label in ax.get_xticklabels() +
        ax.get_yticklabels() + dax.get_xticklabels() + dax.get_yticklabels()
    ]

    dax.legend(loc=2, fontsize=16, fancybox=True, frameon=False)
    leg = ax.legend(loc="upper center",
                    fontsize=14,
                    ncol=4,
                    fancybox=True,
                    frameon=False)
    for txt, hndl in zip(leg.get_texts(), leg.legendHandles):
        txt.set_color(hndl.get_facecolor()[0])
        txt.set_fontproperties(FontProperties(size=14, weight=600))
        hndl.set_visible(False)

    fig.savefig("errors_swarp_rms2.png")
def pathtest(step_input_filename,
             reffile,
             comparison_filename,
             writefile=True,
             show_figs=False,
             save_figs=True,
             threshold_diff=1.0e-7,
             debug=False):
    """
    This function calculates the difference between the pipeline and
    calculated pathloss values.
    Args:
        step_input_filename: str, full path name of sourcetype output fits file
        reffile: str, path to the pathloss FS reference fits file
        comparison_filename: str, path to pipeline-generated pathloss fits file
        writefile: boolean, if True writes the fits files of
                   calculated flat and difference images
        show_figs: boolean, whether to show plots or not
        save_figs: boolean, whether to save the plots or not
        threshold_diff: float, threshold difference between
                        pipeline output and comparison file
        debug: boolean, if true print statements will show on-screen
    Returns:
        - 1 plot per slit, if told to save and/or show it.
        - FINAL_TEST_RESULT: boolean, True if the median difference is smaller
          than or equal to the threshold for every slit tested
        - result_msg: str, summary of the overall test outcome
        - log_msgs: list, all print statements are captured in this variable
    """

    log_msgs = []

    # start the timer
    pathtest_start_time = time.time()

    # get info from the input file header
    det = fits.getval(step_input_filename, "DETECTOR", 0)
    msg = 'step_input_filename=' + step_input_filename
    print(msg)
    log_msgs.append(msg)
    exptype = fits.getval(step_input_filename, "EXP_TYPE", 0)
    grat = fits.getval(step_input_filename, "GRATING", 0)
    filt = fits.getval(step_input_filename, "FILTER", 0)

    msg = "pathloss file:  Grating:" + grat + " Filter:" + filt + " EXP_TYPE:" + exptype
    print(msg)
    log_msgs.append(msg)

    is_point_source = False

    # get the datamodel from the assign_wcs output file
    extract2d_wcs_file = step_input_filename.replace("_srctype.fits",
                                                     "_extract_2d.fits")
    model = datamodels.MultiSlitModel(extract2d_wcs_file)

    if writefile:
        # create the fits list to hold the calculated pathloss values for each slit
        hdu0 = fits.PrimaryHDU()
        outfile = fits.HDUList()
        outfile.append(hdu0)

        # create fits list to hold pipeline-calculated difference values
        hdu0 = fits.PrimaryHDU()
        compfile = fits.HDUList()
        compfile.append(hdu0)

    # list to determine if pytest is passed or not
    total_test_result = []

    print('Checking files exist & obtaining datamodels, takes a few mins...')
    if os.path.isfile(comparison_filename):
        if debug:
            print('Comparison file does exist.')
    else:
        result_msg = 'Comparison file does NOT exist. Skipping pathloss test.'
        log_msgs.append(result_msg)
        result = 'skip'
        return result, result_msg, log_msgs

    # get the comparison data model
    pathloss_pipe = datamodels.open(comparison_filename)
    # For the moment the pipeline is using the wrong reference file for slit 400A1,
    # so read the file that was re-processed with the right reference file and
    # open the corresponding data model
    pathloss_pipe_400a1 = None
    if os.path.isfile(
            step_input_filename.replace("srctype.fits",
                                        "pathloss_400A1.fits")):
        pathloss_400a1 = step_input_filename.replace("srctype.fits",
                                                     "pathloss_400A1.fits")
        pathloss_pipe_400a1 = datamodels.open(pathloss_400a1)
    if debug:
        print('got comparison datamodel!')

    if os.path.isfile(step_input_filename):
        if debug:
            print('Input file does exist.')
    else:
        result_msg = 'Input file does NOT exist. Skipping pathloss test.'
        log_msgs.append(result_msg)
        result = 'skip'
        return result, result_msg, log_msgs

    # get the input data model
    pl = datamodels.open(step_input_filename)
    if debug:
        print('got input datamodel!')

    msg = "Now looping through the slits. This may take a while... "
    print(msg)
    log_msgs.append(msg)

    sltname_list = ["S200A1", "S200A2", "S400A1", "S1600A1"]
    if det == "NRS2":
        sltname_list.append("S200B1")

    # but check if data is BOTS
    if exptype == "NRS_BRIGHTOBJ":
        sltname_list = ["S1600A1"]

    # get all the science extensions
    ps_uni_ext_list = get_ps_uni_extensions(reffile, is_point_source)

    slit_val = 0
    for slit, pipe_slit in zip(pl.slits, pathloss_pipe.slits):
        slit_val = slit_val + 1
        slit_id = slit.name
        #if slit_id == 'S400A1':
        #    continue
        continue_pathloss_test = False
        if exptype == "NRS_BRIGHTOBJ":
            slit = model
            continue_pathloss_test = True
        else:
            for slit_in_MultiSlitModel in model.slits:
                if slit_in_MultiSlitModel.name == slit_id:
                    slit = slit_in_MultiSlitModel
                    continue_pathloss_test = True
                    break

        if not continue_pathloss_test:
            continue
        else:
            try:
                if is_point_source is True:
                    ext = ps_uni_ext_list[0][slit_id]
                    print("Retrieved point source extension")
                elif is_point_source is False:
                    ext = ps_uni_ext_list[1][slit_id]
                    print("Retrieved extended source extension for {}".format(
                        slit_val))
            except KeyError:
                # fall back to the index associated with the slit name
                ext = sltname_list.index(slit_id)
                print("Unable to retrieve extension from dictionary; using slit index instead.")

        wcs_obj = slit.meta.wcs

        # get the wavelength
        x, y = wcstools.grid_from_bounding_box(wcs_obj.bounding_box,
                                               step=(1, 1),
                                               center=True)
        ra, dec, wave = wcs_obj(x, y)
        wave_sci = wave * 10**(-6)  # microns --> meters

        # adjustments for S400A1
        if slit_id == "S400A1":
            if is_point_source:
                ext = 1
            else:
                ext = 3
                print(
                    "Got uniform source extension from extra reference file")
            reffile2use = "jwst-nirspec-a400.plrf.fits"
        else:
            reffile2use = reffile

        print("Using reference file {}".format(reffile2use))
        plcor_ref_ext = fits.getdata(reffile2use, ext)
        hdul = fits.open(reffile2use)

        plcor_ref = hdul[1].data
        w = wcs.WCS(hdul[1].header)

        w1, y1, x1 = np.mgrid[:plcor_ref.shape[0],
                              :plcor_ref.shape[1],
                              :plcor_ref.shape[2]]
        slitx_ref, slity_ref, wave_ref = w.all_pix2world(x1, y1, w1, 0)

        previous_sci = slit.data
        if slit_id == 'S400A1':
            if pathloss_pipe_400a1 is not None:
                for pipe_slit_400a1 in pathloss_pipe_400a1.slits:
                    if pipe_slit_400a1.name == "S400A1":
                        comp_sci = pipe_slit_400a1.data
                        pipe_correction = pipe_slit_400a1.pathloss
                        break
                    else:
                        continue
        else:
            comp_sci = pipe_slit.data
            pipe_correction = pipe_slit.pathloss
        if len(pipe_correction) == 0:
            print(
                "Pipeline pathloss correction in datamodel is empty. Skipping testing this slit."
            )
            continue

        # set up generals for all the plots
        font = {'weight': 'normal', 'size': 7}
        matplotlib.rc('font', **font)

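        # Interpolate the reference pathloss onto the science wavelength grid;
        # the correction is applied by dividing the science data by it.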
        corr_vals = np.interp(wave_sci, wave_ref[:, 0, 0], plcor_ref_ext)
        corrected_array = previous_sci / corr_vals

        # Plots:
        step_input_filepath = step_input_filename.replace(".fits", "")
        # my correction values
        fig = plt.figure()
        plt.subplot(321)
        norm = ImageNormalize(corr_vals)
        plt.imshow(corr_vals,
                   norm=norm,
                   aspect=10.0,
                   origin='lower',
                   cmap='viridis')
        plt.xlabel('dispersion in pixels')
        plt.ylabel('y in pixels')
        plt.title('Calculated Correction')
        plt.colorbar()
        # pipe correction
        plt.subplot(322)
        norm = ImageNormalize(pipe_correction)
        plt.imshow(pipe_correction,
                   norm=norm,
                   aspect=10.0,
                   origin='lower',
                   cmap='viridis')
        plt.xlabel('dispersion in pixels')
        plt.ylabel('y in pixels')
        plt.title('Pipeline Correction')
        plt.colorbar()
        # residuals (pipe correction - my correction)
        corr_residuals = pipe_correction - corr_vals
        plt.subplot(323)
        norm = ImageNormalize(corr_residuals)
        plt.imshow(corr_residuals,
                   norm=norm,
                   aspect=10.0,
                   origin='lower',
                   cmap='viridis')
        plt.xlabel('dispersion in pixels')
        plt.ylabel('y in pixels')
        plt.title('Correction residuals')
        plt.colorbar()
        # pipe science data before
        plt.subplot(324)
        norm = ImageNormalize(previous_sci)
        plt.imshow(previous_sci,
                   norm=norm,
                   aspect=10.0,
                   origin='lower',
                   cmap='viridis')
        plt.xlabel('dispersion in pixels')
        plt.ylabel('y in pixels')
        plt.title('Normalized pipeline science data before pathloss')
        plt.colorbar()
        # pipe science data after
        plt.subplot(325)
        norm = ImageNormalize(comp_sci)
        plt.imshow(comp_sci,
                   norm=norm,
                   aspect=10.0,
                   origin='lower',
                   cmap='viridis')
        plt.xlabel('dispersion in pixels')
        plt.ylabel('y in pixels')
        plt.title('Normalized pipeline science data after pathloss')
        plt.colorbar()
        # my science data after pathloss
        plt.subplot(326)
        norm = ImageNormalize(corrected_array)
        plt.imshow(corrected_array,
                   norm=norm,
                   aspect=10.0,
                   origin='lower',
                   cmap='viridis')
        plt.title('My science data after pathloss')
        plt.xlabel('dispersion in pixels')
        plt.ylabel('y in pixels')
        plt.colorbar()
        fig.suptitle(
            "FS UNI Pathloss Correction Testing for {}".format(slit_id))

        # add space between the subplots
        fig.subplots_adjust(wspace=0.9)

        # Show and/or save figures
        if show_figs:
            plt.show()
        if save_figs:
            plt_name = step_input_filepath + "_Pathloss_test_slit_" + str(
                slit_id) + "_FS_extended.png"
            plt.savefig(plt_name)
            print('Figure saved as: ', plt_name)
        elif not save_figs and not show_figs:
            msg = "Not making plots because both show_figs and save_figs were set to False."
            if debug:
                print(msg)
            log_msgs.append(msg)
        elif not save_figs:
            msg = "Not saving plots because save_figs was set to False."
            if debug:
                print(msg)
            log_msgs.append(msg)
        plt.clf()

        # create fits file to hold the calculated pathloss for each slit
        if writefile:
            msg = "Saving the fits files with the calculated pathloss for each slit..."
            print(msg)
            log_msgs.append(msg)

            # extension holding the calculated pathloss for this slit
            outfile_ext = fits.ImageHDU(corr_vals, name=slit_id)
            outfile.append(outfile_ext)

            # extension holding the (pipeline - calculated) residuals for this slit
            compfile_ext = fits.ImageHDU(corr_residuals, name=slit_id)
            compfile.append(compfile_ext)

        # Histogram
        ax = plt.subplot(212)
        plt.hist(corr_residuals[~np.isnan(corr_residuals)],
                 bins=100,
                 range=(-0.00000013, 0.00000013))
        plt.title('Residuals Histogram')
        plt.xlabel("Correction Value")
        plt.ylabel("Number of Occurences")
        nanind = np.isnan(corr_residuals)  # get all the nan indexes
        notnan = ~nanind  # get all the not-nan indexes
        arr_mean = np.mean(corr_residuals[notnan])
        arr_median = np.median(corr_residuals[notnan])
        arr_stddev = np.std(corr_residuals[notnan])
        plt.axvline(arr_mean, label="mean = %0.3e" % (arr_mean), color="g")
        plt.axvline(arr_median,
                    label="median = %0.3e" % (arr_median),
                    linestyle="-.",
                    color="b")
        str_arr_stddev = "stddev = {:0.3e}".format(arr_stddev)
        ax.text(0.73, 0.67, str_arr_stddev, transform=ax.transAxes, fontsize=7)
        plt.legend()
        plt.minorticks_on()

        # Show and/or save figures
        if save_figs:
            plt_name = step_input_filename.replace(
                ".fits", "") + "_Pathlosstest_slitlet_" + slit_id + ".png"
            plt.savefig(plt_name)
            print('Figure saved as: ', plt_name)
        if show_figs:
            plt.show()
        elif not save_figs and not show_figs:
            msg = "Not making plots because both show_figs and save_figs were set to False."
            if debug:
                print(msg)
            log_msgs.append(msg)
        elif not save_figs:
            msg = "Not saving plots because save_figs was set to False."
            if debug:
                print(msg)
            log_msgs.append(msg)

        plt.close()

        if corr_residuals[~np.isnan(corr_residuals)].size == 0:
            msg1 = " * Unable to calculate statistics because difference array has all values as NaN. " \
                   "Test will be set to FAILED."
            print(msg1)
            log_msgs.append(msg1)
            test_result = "FAILED"
        else:
            msg = "Calculating statistics... "
            print(msg)
            log_msgs.append(msg)
            corr_residuals = corr_residuals[
                np.where((corr_residuals != 999.0) & (corr_residuals < 0.1)
                         & (corr_residuals > -0.1))]  # ignore outliers
            if corr_residuals.size == 0:
                msg1 = " * Unable to calculate statistics because difference array has all outlier values. Test " \
                       "will be set to FAILED."
                if debug:
                    print(msg1)
                log_msgs.append(msg1)
                test_result = "FAILED"
            else:
                stats_and_strings = auxfunc.print_stats(corr_residuals,
                                                        "Difference",
                                                        float(threshold_diff),
                                                        absolute=True)
                stats, stats_print_strings = stats_and_strings
                corr_residuals_mean, corr_residuals_median, corr_residuals_std = stats
                for msg in stats_print_strings:
                    log_msgs.append(msg)

                # This is the key argument for the assert pytest function
                median_diff = False
                if abs(corr_residuals_median) <= float(threshold_diff):
                    median_diff = True
                if median_diff:
                    test_result = "PASSED"
                else:
                    test_result = "FAILED"

                msg = " *** Result of the test: " + test_result + "\n"
                if debug:
                    print(msg)
                log_msgs.append(msg)
                total_test_result.append(test_result)

    if writefile:
        outfile_name = step_input_filename.replace("srctype",
                                                   det + "_calculated_pathloss")
        compfile_name = step_input_filename.replace(
            "srctype", det + "_comparison_pathloss")

        # write the file with the calculated pathloss values for each slit
        outfile.writeto(outfile_name, overwrite=True)

        # write the file with the (pipeline - calculated) residual images
        compfile.writeto(compfile_name, overwrite=True)

        msg = "\nFits file with calculated pathloss values of each slit saved as: "
        print(msg)
        log_msgs.append(msg)
        print(outfile_name)
        log_msgs.append(outfile_name)

        msg = "Fits file with comparison (pipeline pathloss - calculated pathloss) saved as: "
        print(msg)
        log_msgs.append(msg)
        print(compfile_name)
        log_msgs.append(compfile_name)

    # If all tests passed then pytest will be marked as PASSED, else it will be FAILED
    FINAL_TEST_RESULT = False
    for t in total_test_result:
        if t == "FAILED":
            FINAL_TEST_RESULT = False
            break
        else:
            FINAL_TEST_RESULT = True
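    # Note: if no slit was actually tested, total_test_result stays empty and
    # the final result remains FAILED.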

    if FINAL_TEST_RESULT:
        msg = "\n *** Final result for path_loss test will be reported as PASSED *** \n"
        print(msg)
        log_msgs.append(msg)
        result_msg = "All slits PASSED path_loss test."
    else:
        msg = "\n *** Final result for path_loss test will be reported as FAILED *** \n"
        print(msg)
        log_msgs.append(msg)
        result_msg = "One or more slits FAILED path_loss test."

    # end the timer
    pathloss_end_time = time.time() - pathtest_start_time
    if pathloss_end_time > 60.0:
        pathloss_end_time = pathloss_end_time / 60.0  # in minutes
        pathloss_tot_time = "* Script FS_UNI.py took ", repr(
            pathloss_end_time) + " minutes to finish."
        if pathloss_end_time > 60.0:
            pathloss_end_time = pathloss_end_time / 60.  # in hours
            pathloss_tot_time = "* Script FS_UNI.py took ", repr(
                pathloss_end_time) + " hours to finish."
    else:
        pathloss_tot_time = "* Script FS_UNI.py took ", repr(
            pathloss_end_time) + " seconds to finish."
    print(pathloss_tot_time)
    log_msgs.append(pathloss_tot_time)

    return FINAL_TEST_RESULT, result_msg, log_msgs
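
# A minimal usage sketch (the file names below are hypothetical placeholders,
# not files from the source): run the test without writing FITS products or
# figures and inspect the overall boolean result.
# result, result_msg, log_msgs = pathtest(
#     "caldet1_NRS1_srctype.fits",
#     reffile="jwst-nirspec-fs.plrf.fits",
#     comparison_filename="caldet1_NRS1_pathloss.fits",
#     writefile=False, show_figs=False, save_figs=False)
# print(result, result_msg)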
Example #53
# imports needed by this snippet
import numpy as np
import matplotlib.pyplot as plt
from numpy import histogram, interp
from astropy.io import fits


def img_normalization(img):
    '''
    Normalize an image to the 0-255 range and cast to uint8.
    '''
    img = img.astype(np.float32)
    img[np.isnan(img)] = 0
    if np.amax(img) == 0:
        return img.astype(np.uint8)
    else:
        img -= np.amin(img)
        img = img / (np.amax(img) - np.amin(img))
        img *= 255
        return img.astype(np.uint8)


image_data = fits.getdata('./target/fpC-001035-g2-0011.fit')
# plt.imshow(image_data, cmap='gray')
# plt.colorbar()
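
# Usage sketch: byte-scale the FITS image loaded above for display.
norm_data = img_normalization(image_data)
# plt.imshow(norm_data, cmap='gray')
# plt.colorbar()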


def histeq(img, nbr_bins=65536):
    """ Histogram equalization of a grayscale image. """
    # get the histogram p(r)
    imhist, bins = histogram(img.flatten(), nbr_bins, density=True)

    # get the transform T(r)
    cdf = imhist.cumsum()  # cumulative distribution function
    cdf = 65535 * cdf / cdf[-1]

    # map each original gray value through T(r) to get s
    result = interp(img.flatten(), bins[:-1], cdf)
    return result.reshape(img.shape), cdf
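
# Usage sketch: equalize the image loaded above; histeq returns the equalized
# image and the cumulative distribution function used for the mapping.
eq_data, cdf = histeq(image_data)
# plt.imshow(eq_data, cmap='gray')
# plt.show()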
Example #54
def dark_sub(darkFrame, frame, scale):
    darkData = fits.getdata(darkFrame)
    fData = fits.getdata(frame)
    dataOut = fData - (darkData / scale)  # subtract the scaled master dark from the frame
    return dataOut
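
# Usage sketch (hypothetical file names): a master dark taken at 3x the
# science exposure time is scaled down with scale=3 before subtraction.
# reduced = dark_sub("master_dark_90s.fits", "science_30s.fits", 3.0)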
Example #55
# import autolens_utils.autolens_tracer_utils as autolens_tracer_utils

lens_redshift = 0.5
source_redshift = 2.0

n_pixels = 100
pixel_scale = 0.05

n_channels = 32

frequencies = casa_utils.generate_frequencies(
    central_frequency=260.0 * units.GHz,
    n_channels=n_channels,
    bandwidth=2.0 * units.GHz)

uv = fits.getdata("./uv.fits")

uv_wavelengths = casa_utils.convert_uv_coords_from_meters_to_wavelengths(
    uv=uv, frequencies=frequencies)

# NOTE: For this tutorial we channel-average the uv_wavelengths.
uv_wavelengths = np.average(a=uv_wavelengths, axis=0)

antennas = fits.getdata("./antennas.fits")
if not (uv_wavelengths.shape[0] == antennas.shape[0]):
    raise ValueError("...")

antennas_unique = np.unique(antennas)

if os.path.isfile("./phase_errors.fits"):
    phase_errors = fits.getdata(filename="./phase_errors.fits")
Example #56
def get_all_TESS_data(object_name,
                      radius=".02 deg",
                      get_PDC=True,
                      get_all=False):
    """ 
    Given a planet name, this function returns a dictionary of times, fluxes and 
    errors on fluxes in a juliet-friendly format for usage. The function does an 
    astroquery to MAST using a default radius of .02 deg around the target name. If get_PDC is True, 
    this function returns PDC fluxes. False returns SAP fluxes. If get_all is true, this function 
    returns a dictionary that in addition to the times, fluxes and errors, returns other 
    metadata.
    """
    if not has_astroquery:
        print(
            "Error on using juliet function `get_all_TESS_data`: astroquery.mast not found."
        )
        return None
    obs_table = Observations.query_object(object_name, radius=radius)
    out_dict = {}
    times = {}
    fluxes = {}
    fluxes_errors = {}
    for i in range(len(obs_table['dataURL'])):
        if 's_lc.fits' in obs_table['dataURL'][i]:
            fname = obs_table['dataURL'][i].split('/')[-1]
            metadata = fname.split('-')
            if len(metadata) == 5:
                # Extract metadata:
                sector = int(metadata[1].split('s')[-1])
                ticid = int(metadata[2])
                # Download files:
                data_products = Observations.get_product_list(obs_table[i])
                manifest = Observations.download_products(data_products)
                # Read lightcurve file:
                d, h = fits.getdata('mastDownload/TESS/' + fname[:-8] + '/' +
                                    fname,
                                    header=True)
                t,fs,fserr,f,ferr = d['TIME']+h['BJDREFI'],d['SAP_FLUX'],d['SAP_FLUX_ERR'],\
                                    d['PDCSAP_FLUX'],d['PDCSAP_FLUX_ERR']
                idx_goodpdc = np.where((f != 0.) & (~np.isnan(f)))[0]
                idx_goodsap = np.where((fs != 0.) & (~np.isnan(fs)))[0]
                # Save to output dictionary:
                if 'TIC' not in out_dict.keys():
                    out_dict['TIC'] = ticid
                out_dict[sector] = {}
                out_dict[sector]['TIME_PDCSAP_FLUX'] = t[idx_goodpdc]
                out_dict[sector]['PDCSAP_FLUX'] = f[idx_goodpdc]
                out_dict[sector]['PDCSAP_FLUX_ERR'] = ferr[idx_goodpdc]
                out_dict[sector]['TIME_SAP_FLUX'] = t[idx_goodsap]
                out_dict[sector]['SAP_FLUX'] = fs[idx_goodsap]
                out_dict[sector]['SAP_FLUX_ERR'] = fserr[idx_goodsap]
                if get_PDC:
                    times['TESS' + str(sector)] = t[idx_goodpdc]
                    med = np.median(f[idx_goodpdc])
                    fluxes['TESS' + str(sector)] = f[idx_goodpdc] / med
                    fluxes_errors['TESS' +
                                  str(sector)] = ferr[idx_goodpdc] / med
                else:
                    times['TESS' + str(sector)] = t[idx_goodsap]
                    med = np.median(fs[idx_goodsap])
                    fluxes['TESS' + str(sector)] = fs[idx_goodsap] / med
                    fluxes_errors['TESS' +
                                  str(sector)] = fserr[idx_goodsap] / med
                # Remove downloaded folder:
                os.system('rm -r mastDownload')
    if get_all:
        return out_dict, times, fluxes, fluxes_errors
    else:
        return times, fluxes, fluxes_errors
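
# Usage sketch ("WASP-19" is an illustrative target name, not from the source;
# this performs a live MAST query and download):
# times, fluxes, fluxes_errors = get_all_TESS_data("WASP-19")
# for instrument in times:
#     print(instrument, times[instrument].size)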
Example #57
user = '******'
mike_hacking_wave_soln = False
overwrite_extension_table = True

files = input_locations.Files(user=user, mode=mode, cam=cam)
# instantiate the ghostsim arm
ghost = polyfit.ghost.GhostArm(cam, mode=mode)

if model == 'W':
    #arclinefile = lookups_path + '/' + lookups.line_list
    arclinefile = files.arclinefile

    # Define the files in use (NB xmod.txt and wavemod.txt should be
    # correct)
    arc_file = files.arc_image_file
    arc_data = pyfits.getdata(arc_file)
    if len(arc_data) == 0:
        arc_data = pyfits.getdata(arc_file, 1)
    thar_spec = files.thar_spectrum(arclinefile)

flat_file = files.flat_image_file
print(flat_file)  #DEBUG

# Define the files in use (NB xmod.txt and wavemod.txt should be correct)
flat_data = pyfits.getdata(flat_file)
if len(flat_data) == 0:
    flat_data = pyfits.getdata(flat_file, 1)

# Load all the parameter files, even if they are dummy
try:
Example #58
def main_mpi(args, comm=None):

    log = get_logger()

    psf_file = args.psf
    input_file = args.input

    # these parameters are interpreted as the *global* spec range,
    # to be divided among processes.
    specmin = args.specmin
    nspec = args.nspec

    #- Load input files and broadcast

    # FIXME: after we have fixed the serialization
    # of the PSF, read and broadcast here, to reduce
    # disk contention.

    img = None
    if comm is None:
        img = io.read_image(input_file)
    else:
        if comm.rank == 0:
            img = io.read_image(input_file)
        img = comm.bcast(img, root=0)

    psf = load_psf(psf_file)

    # get spectral range

    if nspec is None:
        nspec = psf.nspec
    specmax = specmin + nspec

    camera = img.meta['CAMERA'].lower()     #- b0, r1, .. z9
    spectrograph = int(camera[1])
    fibermin = spectrograph * psf.nspec + specmin

    if args.fibermap is not None:
        fibermap = io.read_fibermap(args.fibermap)
        fibermap = fibermap[fibermin:fibermin+nspec]
        fibers = fibermap['FIBER']
    else:
        fibermap = None
        fibers = np.arange(fibermin, fibermin+nspec, dtype='i4')

    #- Get wavelength grid from options

    if args.wavelength is not None:
        wstart, wstop, dw = [float(tmp) for tmp in args.wavelength.split(',')]
    else:
        wstart = np.ceil(psf.wmin_all)
        wstop = np.floor(psf.wmax_all)
        dw = 0.5

    wave = np.arange(wstart, wstop+dw/2.0, dw)
    nwave = len(wave)
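    # e.g. wstart=3600.0, wstop=5800.0, dw=0.5 yields nwave = 4401 samples;
    # the +dw/2 padding makes np.arange include wstop itself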

    #- Confirm that this PSF covers these wavelengths for these spectra

    psf_wavemin = np.max(psf.wavelength(list(range(specmin, specmax)), y=0))
    psf_wavemax = np.min(psf.wavelength(list(range(specmin, specmax)), y=psf.npix_y-1))
    if psf_wavemin > wstart:
        raise ValueError('Start wavelength {:.2f} < min wavelength {:.2f} for these fibers'.format(wstart, psf_wavemin))
    if psf_wavemax < wstop:
        raise ValueError('Stop wavelength {:.2f} > max wavelength {:.2f} for these fibers'.format(wstop, psf_wavemax))

    # Now we divide our spectra into bundles

    bundlesize = args.bundlesize
    checkbundles = set()
    checkbundles.update(np.floor_divide(np.arange(specmin, specmax), bundlesize*np.ones(nspec)).astype(int))
    bundles = sorted(checkbundles)
    nbundle = len(bundles)

    bspecmin = {}
    bnspec = {}
    for b in bundles:
        if specmin > b * bundlesize:
            bspecmin[b] = specmin
        else:
            bspecmin[b] = b * bundlesize
        if (b+1) * bundlesize > specmax:
            bnspec[b] = specmax - bspecmin[b]
        else:
            bnspec[b] = bundlesize

    # Now we assign bundles to processes

    nproc = 1
    rank = 0
    if comm is not None:
        nproc = comm.size
        rank = comm.rank

    mynbundle = int(nbundle // nproc)
    myfirstbundle = 0
    leftover = nbundle % nproc
    if rank < leftover:
        mynbundle += 1
        myfirstbundle = rank * mynbundle
    else:
        myfirstbundle = ((mynbundle + 1) * leftover) + (mynbundle * (rank - leftover))
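
    # Worked example of the division above: nbundle=10 with nproc=4 gives
    # leftover=2, so ranks 0-1 take 3 bundles each (0-2 and 3-5) while
    # ranks 2-3 take 2 each (6-7 and 8-9).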

    if rank == 0:
        #- Print parameters
        log.info("extract:  input = {}".format(input_file))
        log.info("extract:  psf = {}".format(psf_file))
        log.info("extract:  specmin = {}".format(specmin))
        log.info("extract:  nspec = {}".format(nspec))
        log.info("extract:  wavelength = {},{},{}".format(wstart, wstop, dw))
        log.info("extract:  nwavestep = {}".format(args.nwavestep))
        log.info("extract:  regularize = {}".format(args.regularize))

    # get the root output file

    outpat = re.compile(r'(.*)\.fits')
    outmat = outpat.match(args.output)
    if outmat is None:
        raise RuntimeError("extraction output file should have .fits extension")
    outroot = outmat.group(1)

    outdir = os.path.normpath(os.path.dirname(outroot))
    if rank == 0:
        if not os.path.isdir(outdir):
            os.makedirs(outdir)

    if comm is not None:
        comm.barrier()

    failcount = 0

    for b in range(myfirstbundle, myfirstbundle+mynbundle):
        outbundle = "{}_{:02d}.fits".format(outroot, b)
        outmodel = "{}_model_{:02d}.fits".format(outroot, b)

        log.info('extract:  Rank {} starting {} spectra {}:{} at {}'.format(
            rank, os.path.basename(input_file),
            bspecmin[b], bspecmin[b]+bnspec[b], time.asctime(),
            ) )
        sys.stdout.flush()

        #- The actual extraction
        try:
            results = ex2d(img.pix, img.ivar*(img.mask==0), psf, bspecmin[b],
                bnspec[b], wave, regularize=args.regularize, ndecorr=True,
                bundlesize=bundlesize, wavesize=args.nwavestep, verbose=args.verbose,
                full_output=True)

            flux = results['flux']
            ivar = results['ivar']
            Rdata = results['resolution_data']
            chi2pix = results['chi2pix']

            mask = np.zeros(flux.shape, dtype=np.uint32)
            mask[results['pixmask_fraction']>0.5] |= specmask.SOMEBADPIX
            mask[results['pixmask_fraction']==1.0] |= specmask.ALLBADPIX
            mask[chi2pix>100.0] |= specmask.BAD2DFIT

            #- Augment input image header for output
            img.meta['NSPEC']   = (nspec, 'Number of spectra')
            img.meta['WAVEMIN'] = (wstart, 'First wavelength [Angstroms]')
            img.meta['WAVEMAX'] = (wstop, 'Last wavelength [Angstroms]')
            img.meta['WAVESTEP']= (dw, 'Wavelength step size [Angstroms]')
            img.meta['SPECTER'] = (specter.__version__, 'https://github.com/desihub/specter')
            img.meta['IN_PSF']  = (_trim(psf_file), 'Input spectral PSF')
            img.meta['IN_IMG']  = (_trim(input_file), 'Input image')

            if fibermap is not None:
                bfibermap = fibermap[bspecmin[b]-specmin:bspecmin[b]+bnspec[b]-specmin]
            else:
                bfibermap = None

            bfibers = fibers[bspecmin[b]-specmin:bspecmin[b]+bnspec[b]-specmin]

            frame = Frame(wave, flux, ivar, mask=mask, resolution_data=Rdata,
                        fibers=bfibers, meta=img.meta, fibermap=bfibermap,
                        chi2pix=chi2pix)

            #- Write output
            io.write_frame(outbundle, frame, units='photon/bin')

            if args.model is not None:
                from astropy.io import fits
                fits.writeto(outmodel, results['modelimage'], header=frame.meta)

            log.info('extract:  Done {} spectra {}:{} at {}'.format(os.path.basename(input_file),
                bspecmin[b], bspecmin[b]+bnspec[b], time.asctime()))
            sys.stdout.flush()
        except Exception:
            # Log the error and increment the number of failures
            log.error("extract:  FAILED bundle {}, spectrum range {}:{}".format(b, bspecmin[b], bspecmin[b]+bnspec[b]))
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            log.error(''.join(lines))
            failcount += 1
            sys.stdout.flush()

    if comm is not None:
        failcount = comm.allreduce(failcount)

    if failcount > 0:
        # all processes throw
        raise RuntimeError("some extraction bundles failed")

    if rank == 0:
        mergeopts = [
            '--output', args.output,
            '--force',
            '--delete'
        ]
        mergeopts.extend([ "{}_{:02d}.fits".format(outroot, b) for b in bundles ])
        mergeargs = mergebundles.parse(mergeopts)
        mergebundles.main(mergeargs)

        if args.model is not None:
            model = None
            for b in bundles:
                outmodel = "{}_model_{:02d}.fits".format(outroot, b)
                if model is None:
                    model = fits.getdata(outmodel)
                else:
                    #- TODO: test and warn if models overlap for pixels with
                    #- non-zero values
                    model += fits.getdata(outmodel)

                os.remove(outmodel)

            fits.writeto(args.model, model)
Example #59
    batman_curves.append(model['curve ' + str(i)])

data = glob.glob(
    '/common/contrib/classroom/ast520/tess_batman/data/TESS/Sector2/*.fits')

test = []
sector = []
TIC = []
batman_indices = []

for i in data:

    fits_file = str(i)
    fits.info(fits_file)
    try:
        fits.getdata(fits_file, ext=1).columns
        with fits.open(fits_file, mode="readonly") as hdulist:
            hdr = hdulist[0].header
            sector.append(hdr[20])
            #identifier
            TIC.append(hdr[21])
            tess_bjds = hdulist[1].data['TIME']

            pdcsap_fluxes = hdulist[1].data['PDCSAP_FLUX']
    except Exception as e:
        print(e, "file:", fits_file)
        continue  # skip files that could not be read
    pdcsap_fluxes[np.isnan(pdcsap_fluxes)] = 0
    tess_bjds[np.isnan(tess_bjds)] = 0

    #image = fits.open(str(i))
    #Read the exposure time from the header
Example #60
import matplotlib.pyplot as plt

from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits

image_file = get_pkg_data_filename('tutorials/FITS-images/HorseHead.fits')

##############################################################################
# Use `astropy.io.fits.info()` to display the structure of the file:

fits.info(image_file)

##############################################################################
# Generally the image information is located in the Primary HDU, also known
# as extension 0. Here, we use `astropy.io.fits.getdata()` to read the image
# data from this first extension using the keyword argument ``ext=0``:

image_data = fits.getdata(image_file, ext=0)

##############################################################################
# The data is now stored as a 2D numpy array. Print the dimensions using the
# shape attribute:

print(image_data.shape)

##############################################################################
# Display the image data:

plt.figure()
plt.imshow(image_data, cmap='gray')
plt.colorbar()
plt.show()