Example #1
 def test_append_filehandle(self, tmpdir, mode):
     """
     Test fits.append with a file handle argument.
     """
     append_file = tmpdir.join('append.fits')
     with append_file.open(mode) as handle:
         fits.append(filename=handle, data=np.ones((4, 4)))
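fits.append also accepts a plain filename and creates the file on the first call, so no explicit writeto is needed. A minimal standalone sketch, assuming only numpy and astropy (the file name is illustrative):

import numpy as np
from astropy.io import fits

# The first call creates the file with a primary HDU;
# each later call adds an image extension.
fits.append('example.fits', np.zeros((4, 4)))
fits.append('example.fits', np.ones((2, 2)))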
Example #2
 def export_to_fits(self, fitsfile='ModelSceneCube.fits'):
     """
     Write model cube to a FITS file
     """
     header = self.grid.wcs_info()
     fits.writeto(fitsfile, np.rollaxis(self.int, 2), header, clobber=True)
     fits.append(fitsfile, self.wave)
Example #3
 def export_to_fits(self, fitsfile='ModelDetectorCube'):
     header = self.wcs_info()
     slice_index = 0
     for flux_plus_bg in self.flux_plus_bg_list:
         fitsfile_slice = fitsfile + str(slice_index).strip() + '.fits'
         fits.writeto(fitsfile_slice, np.rollaxis(flux_plus_bg, 2), header, clobber=True)
         fits.append(fitsfile_slice, self.wave)
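Both exporters above use the same two-step pattern: fits.writeto creates (or overwrites) the file with the cube as the primary HDU, and fits.append attaches the wavelength array as a second HDU. A condensed sketch of that layout, assuming numpy and astropy (overwrite= is the current spelling of the deprecated clobber=):

import numpy as np
from astropy.io import fits

cube = np.zeros((10, 4, 4))       # placeholder (nwave, ny, nx) data
wave = np.linspace(1.0, 2.0, 10)  # placeholder wavelength axis

fits.writeto('scene.fits', cube, overwrite=True)  # primary HDU
fits.append('scene.fits', wave)                   # wavelength extension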
Example #4
def save_reduced_data(input_file, sp, unc, wave=None):
    print('Saving extraction as %s' % (os.path.splitext(input_file)[0] + '_reduced.fits'))
    # load up file to grab header to append reduction information to
    hdulist = loadfits(input_file)
    head = hdulist[0].header
    # append header fields: number of orders and reduction timestamp
    head['ORDERS'] = (sp.shape[1], 'Number of orders reduced')
    head['REDUTIME'] = (strftime("%c"), 'When reduced')
    head['comment'] = 'data saved as (ext,unc,wave) for each order'

    if wave is None:
        wave = np.arange(sp.shape[0])
    for i in np.arange(sp.shape[1]):
        data = np.vstack((sp[:, i], unc[:, i], wave[i, :][::-1]))
        head['ORDER'] = ((i + 1), 'Order number')
        if i == 0:
            pyfits.writeto(os.path.splitext(input_file)[0] + '_reduced.fits', data, head, clobber=True)
        else:
            pyfits.append(os.path.splitext(input_file)[0] + '_reduced.fits', data, head)
    # save file as original file + _reduced,
    # i.e. aug008811.fits becomes aug008811_reduced.fits
    hdulist.close()
Example #5
File: fits.py Project: ddefrere/VIP
def append_extension(filename, array):
    """Appends an extension to a FITS file.
    """
    fits.append(filename, array)
    print("\nFits extension appended")
Example #6
File: fits.py Project: jaytmiller/crds
 def add_checksum(self):
     """Add checksums to `filepath`."""
     output = "crds-" + str(uuid.uuid4()) + ".fits"
     with fits_open(self.filepath, do_not_scale_image_data=True) as hdus:
         for hdu in hdus:
             fits.append(output, hdu.data, hdu.header, checksum=True)
     os.remove(self.filepath)
     os.rename(output, self.filepath)
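add_checksum copies every HDU into a scratch file with checksum=True and then swaps the scratch file into place. A hedged sketch of the same rewrite loop using plain astropy (paths are illustrative, and os.replace stands in for the remove/rename pair):

import os
import uuid
from astropy.io import fits

src = 'input.fits'
tmp = 'crds-' + str(uuid.uuid4()) + '.fits'
with fits.open(src, do_not_scale_image_data=True) as hdus:
    for hdu in hdus:
        # checksum=True makes astropy write CHECKSUM/DATASUM cards
        # for each HDU as it is appended to the scratch file.
        fits.append(tmp, hdu.data, hdu.header, checksum=True)
os.replace(tmp, src)  # atomic swap on the same filesystem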
Example #7
 def export_fits_prisim(self,fitsfile,pol_list,freq_list,scheme='RING',nside_out=None):
     '''
     Export a FITS file for the given polarizations and frequencies.
     Args:
     fitsfile, str, name of file to save .fits to
     pol_list, list of labels of polarizations to write
     freq_list, list of frequencies to write
     '''
     if nside_out is None:
         nside_out=self.nside
     pol_list=n.array(pol_list)
     freq_list=n.array(freq_list)
     pol_inds=[]
     freq_inds=[]
     for pol in pol_list:
         assert pol in self.pols
         pol_inds.append(n.where(n.array(self.pols)==pol)[0][0])
     for freq in freq_list:
         assert freq in self.fAxis
         freq_inds.append(n.where(n.array(self.fAxis)==freq)[0][0])
     data=self.data[:,freq_inds,:].reshape(-1,1)
     theta_out,phi_out=hp.pix2ang(nside_out,n.arange(hp.nside2npix(nside_out)))
     #freq_col=[fits.Column(name='Frequency [MHz]',format='D',array=n.array(freq_list))]
     #freq_columns=fits.ColDefs(freq_col,ascii=False)
     #freq_tbhdu = fits.BinTableHDU.from_columns(freq_col)
     #freq_tbhdu = fits.BinTableHDU.from_columns(n.array(freq_list))
     
     hduprimary=fits.PrimaryHDU()
     hduprimary.header.set('EXTNAME','PRIMARY')
     hduprimary.header.set('NEXTEN',2)
     hduprimary.header.set('FITSTYPE','IMAGE')
     hduprimary.header['NSIDE']=(nside_out,'NSIDE')
     hduprimary.header['PIXAREA']=(hp.nside2pixarea(nside_out),'pixel solid angle (steradians)')
     hduprimary.header['NEXTEN']=(2,'Number of extensions')
     hduprimary.header['NPOL'] = (len(pol_inds), 'Number of polarizations')
     hduprimary.header['SOURCE'] = ('HERA-CST', 'Source of data')
     hdulist=[hduprimary]
     fits.HDUList(hdulist).writeto(fitsfile,clobber=True)
     for pol in pol_list:
         #freq_tbhdu.header.set('EXTNAME','FREQS_{0}'.format(pol))
         freq_tbhdu=fits.ImageHDU(freq_list,name='FREQS_{0}'.format(pol))
         fits.append(fitsfile,freq_tbhdu.data,freq_tbhdu.header,verify=False)
     data_interp=n.zeros((hp.nside2npix(nside_out),len(freq_inds)))
     for polind,pol in zip(pol_inds,pol_list):
         for fi,freqind in enumerate(freq_inds):
             data=self.data[polind,freqind,:].flatten()
             data_interp[:,fi]=hp.get_interp_val(data,theta_out,phi_out)
             #if DEBUG:
             #    hp.mollview(data_interp[:,fi])
             #    plt.show()
         imghdu = fits.ImageHDU(data_interp, name='BEAM_{0}'.format(pol))
         imghdu.header['PIXTYPE'] = ('HEALPIX', 'Type of pixelization')
         imghdu.header['ORDERING'] = (scheme, 'Pixel ordering scheme, either RING or NESTED')
         imghdu.header['NSIDE'] = (nside_out, 'NSIDE parameter of HEALPIX')
         imghdu.header['NPIX'] = (hp.nside2npix(nside_out), 'Number of HEALPIX pixels')
         imghdu.header['FIRSTPIX'] = (0, 'First pixel # (0 based)')
         imghdu.header['LASTPIX'] = (len(data_interp)-1, 'Last pixel # (0 based)')
         fits.append(fitsfile,imghdu.data,imghdu.header,verify=False)
Example #8
File: fits.py Project: jaytmiller/crds
 def remove_checksum(self):
     """Remove checksums from `filepath`."""
     output = "crds-" + str(uuid.uuid4()) + ".fits"
     with fits_open(self.filepath, checksum=False, do_not_scale_image_data=True) as hdus:
         for hdu in hdus:
             hdu.header.pop("CHECKSUM",None)
             hdu.header.pop("DATASUM", None)
             fits.append(output, hdu.data, hdu.header, checksum=False)
     os.remove(self.filepath)
     os.rename(output, self.filepath)
Example #9
    def writeFits(self, filename):
        """Write to FITS file

        Parameters
        ----------
        filename : `str`
            Name of file to which to write.
        """
        fits = astropy.io.fits.HDUList()
        fits.append(astropy.io.fits.ImageHDU(self.vector))
        with open(filename, "w") as fd:
            fits.writeto(fd)
Example #10
File: phot.py Project: petigura/k2phot
def write_hduL(fitsfn,hduL):
    # If file doesn't exist, add the primary header
    hdu_primary = hduL[0]
    if not os.path.exists(fitsfn):
        fits.append(fitsfn, hdu_primary.data, header=hdu_primary.header)

    hduL = hduL[1:]
    for hdu in hduL:
        data = hdu.data
        extname = hdu.header['EXTNAME']
        header = hdu.header
        try:
            fits.update(fitsfn, data, extname, header=header)
        except KeyError:
            fits.append(fitsfn, data, header=header)
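The update-or-append idiom in write_hduL works because fits.update raises KeyError when the named extension is not present yet. A small sketch of the same round trip (file and extension names are illustrative):

import numpy as np
from astropy.io import fits

fn = 'cache.fits'
fits.append(fn, np.arange(4))  # creates the file if it does not exist
hdu = fits.ImageHDU(np.arange(4) * 2, name='CAL')
try:
    fits.update(fn, hdu.data, 'CAL', header=hdu.header)
except KeyError:  # extension not in the file yet
    fits.append(fn, hdu.data, header=hdu.header)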
Example #11
    def _writeImpl(self, fits):
        """Implementation for writing to FITS file

        Parameters
        ----------
        fits : `astropy.io.fits.HDUList`
            List of FITS HDUs. This has a Primary HDU already, the header of
            which may be supplemented with additional keywords.
        """
        from astropy.io.fits import BinTableHDU, Column
        fits.append(BinTableHDU.from_columns([
            Column("wavelength", "D", array=self.wavelength),
            Column("flux", "D", array=self.flux),
            Column("mask", "K", array=self.mask),
        ], header=astropyHeaderFromDict(self.flags.toFitsHeader()), name="FLUXTBL"))
        self.target.toFits(fits)
Example #12
    def writeFits(self, filename):
        """Write to FITS file

        This API is intended for use by the LSST data butler, which handles
        translating the desired identity into a filename.

        Parameters
        ----------
        filename : `str`
            Filename of FITS file.
        """
        from astropy.io.fits import HDUList, PrimaryHDU
        fits = HDUList()
        fits.append(PrimaryHDU())
        self._writeImpl(fits)
        with open(filename, "w") as fd:
            fits.writeto(fd)
Example #13
def removerows(gal):
    d = "/Data/vimos/cubes/"
    f = glob.glob(d + gal + ".cube.combined.corr.fits")
    p = pyfits.open(f[0])
    data = p[0].data
    header = p[0].header
    data_new = data[:,0:40,0:40]

    pyfits.writeto(d+gal+".cube.combined.corr.fits", data_new, header=header, clobber=True)

    for ex in range(1, len(p)):
        data_ex = p[ex].data
        header_ex = p[ex].header
        data_ex_new = data_ex[:,0:40,0:40]
        pyfits.append(d+gal+".cube.combined.corr.fits",data_ex_new,header=header_ex)

    p.close()
Example #14
def do_analysis(i=0,n_ps_groupi=0,psloop1=False,pscomball=False,psloop2=False,minuit_new=False):
    global emin,emax, eachps_dict, eachps_En_center
    if method=='polychord':
        polychord=True
    else:
        polychord=False
    b.run_tag = run_tag_energy
    tri = fa.make_triangle(b,run_tag_energy,edep=True,polychord=polychord,minuit_new=minuit_new)
    tri.make_triangle()

    if not minuit_new:
        sle = fa.save_log_evidence(b,edep=True,polychord=polychord)

    spect_dir = work_dir + 'data/spect/'
    if not os.path.exists(spect_dir):
        os.mkdir(spect_dir)
    cs = fa.compute_spectra(b,run_tag_energy,f.CTB_en_bins[0],f.CTB_en_bins[-1],edep=True,plane_mask=plot_plane_mask,band_mask_range = [-plot_pmval,plot_pmval],lcut=plot_lcut,lmin=plot_lmin,lmax=plot_lmax,bcut=plot_bcut,bmin=plot_bmin,bmax=plot_bmax,mask_ring=plot_mask_ring,outer=plot_outer,inner=plot_inner,minuit_new = minuit_new,input_mask=force_ps_mask,the_input_mask=b.mask_total) # spectra over the whole energy range
    cs.mask_total_dict['bubs']=np.logical_not(b.templates_dict_nested['bubs']['summed_templates_not_compressed'])
    eachps_En_center = cs.En_center
    if psloop1 or pscomball:
        cs.make_norm_dict()
    if psloop2 or pscomball:
        cs.make_spectra_dict()
    if pscomball:
        cs.save_spectra_dict(spect_dir + save_spect_label,emin,emax,over_write= False)
        cs.save_norm_dict(spect_dir + save_norm_label,emin,emax,over_write= False)

    if psloop1:
        # Create pslooptemp and psalltemp 
        pslooptempar = np.zeros(hp.nside2npix(nside))
        if os.path.exists(ps_temp_dir + pslooptemp):
            pslooptempar += fits.open(ps_temp_dir + pslooptemp)[0].data
        for j in range(n_ps_groupi):
            pstempj = b.templates_dict['ps_' + str(i*n_ps_run+j+1) + '-0'][0]
            psnormj = cs.norm_dict['ps_' + str(i*n_ps_run+j+1) + '-0']
            pswritej = pstempj*psnormj
            pslooptempar += pswritej
            fits.append(ps_temp_dir + psalltemp, pswritej)
        fits.writeto(ps_temp_dir + pslooptemp,pslooptempar,clobber=True)

    # Add norms to eachps_dict dictionary
    if psloop2:
        eachps_dict['ps_' + str(i+1)] = cs.spectra_dict['ps_' + str(i+1) + '-0']
Example #15
def createVoronoiOutput(inputFile=datadir+cuberoot+'.fits',inputVoronoiFile=datadir+'voronoi_2d_binning_output.txt'):
    cubefits = pyfits.open(inputFile)
    
    cube = cubefits[0].data
    hdr = cubefits[0].header
    errors = cubefits[1].data
    quality = cubefits[2].data
    nframes = cubefits[3].data

    cubeShape = (cube.shape[0],cube.shape[1],cube.shape[2])

    yy, xx, binnum = np.loadtxt(inputVoronoiFile,unpack=True)
    xx = xx.astype(int)
    yy = yy.astype(int)
    binnum = binnum.astype(int)

    newCube = np.zeros(cubeShape, dtype=float)
    newErr = np.zeros(cubeShape, dtype=float)

    for nb in range(binnum.max()+1):
        idx = np.where(binnum == nb)
        nx = xx[idx]
        ny = yy[idx]
        nbins = len(idx[0])
        tmpCube = np.sum(cube[nx,ny,:],axis=0)/nbins
        tmpErr = np.sqrt(np.sum(errors[nx,ny,:]**2,axis=0))/nbins
        newCube[nx,ny,:] = tmpCube
        newErr[nx,ny,:] = tmpErr

    #pdb.set_trace()
    outfile = inputFile.replace('.fits','_vorcube.fits')
    pyfits.writeto(outfile,newCube,header=hdr)
    pyfits.append(outfile,newErr)
    pyfits.append(outfile,quality)
    pyfits.append(outfile,nframes)
Example #16
def gen_superdark(inlist, fname='./Superdark.fits'):
    '''
    Generate a median image from a list of input image files.
    
    The purpose of this script is to generate a "superdark";
    a median dark multi-frame image to mimic the noise present
    in NIRSpec-read images.
    
    The median (average) calculation is done across the 3-frame
    images expected from NIRSpec, and is more accurate as more
    images are used.
    
    Keyword arguments:
    inlist -- The list of image files to be used for generating
              the median image.
    fname  -- The name of the generated median image. Defaults
              to 'Superdark'.
    '''
    
    stacked_darks = 0
    
    # Read in the multiframe images and create a stack
    for ii in range(len(inlist)):
        fileBase = path.basename(inlist[ii])
        print('(gen_superdark): Reading/processing {} ...'.format(fileBase))

        stacked_darks = stacked_darks + fits.getdata(inlist[ii], 0)

    # Average the stacked frames to form the superdark
    superdark_frame = stacked_darks / len(inlist)
    
    # Save the above information to a FITS file for later use
    hdu = fits.PrimaryHDU()
    print('(gen_superdark): Writing {} to file ...'.format(fname))
    hdu.header.append(('FILENAME', fname, ''))
    hdu.writeto(fname, clobber=True)

    fits.append(fname, superdark_frame)
        
    return superdark_frame
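Header.append expects a single card, i.e. a Card object or a (keyword, value, comment) tuple, which is why the call above wraps all three items in one tuple. A short sketch of the two equivalent spellings (values are illustrative):

from astropy.io import fits

hdu = fits.PrimaryHDU()
hdu.header.append(('FILENAME', 'Superdark.fits', ''))  # card as one tuple
hdu.header['FILENAME'] = ('Superdark.fits', '')        # same card via assignment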
Example #17
 def save_filter(self, filename, clobber=False):
     """Save filter to file"""
     hdu = fits.PrimaryHDU(self.filter, self.header)
     hdu.writeto(filename, clobber=clobber)
     fits.append(filename, self.approx, self.header)
     fits.append(filename, self.filter + self.approx, self.header)
     fits.append(filename, self.max_scale_image(), self.header)
Example #18
def write_imgspec(flux, wave, loglam, objtype):
    hdr = dict(FLUXUNIT='erg/s/cm^2/A')
    if wave is None:
        if loglam:
            hdr['LOGLAM'] = loglam
            hdr['CRVAL1'] = np.log10(wave1D[0])
            hdr['CDELT1'] = np.log10(1+dwave/wave1D[0])
        else:
            if loglam is not None:
                hdr['LOGLAM'] = loglam
            hdr['CRVAL1'] = wave1D[0]
            hdr['CDELT1'] = dwave

    if isinstance(objtype, str):
        hdr['OBJTYPE'] = objtype
    
    filename = get_next_filename()
    fits.writeto(filename, flux, header=hdr, clobber=True)
    if wave is not None:
        if loglam:
            fits.append(filename, wave, extname='LOGLAM')
        else:
            fits.append(filename, wave, extname='WAVELENGTH')
            
    if not isinstance(objtype, str):
        fits.append(filename, objtype, extname='TARGETINFO')

    return filename
Example #19
    def writeFits(self, filename):
        """Write to FITS file

        This API is intended for use by the LSST data butler, which handles
        translating the desired identity into a filename.

        Parameters
        ----------
        filename : `str`
            Filename of FITS file.
        """
        self.validate()
        import astropy.io.fits
        fits = astropy.io.fits.HDUList()
        header = self.metadata.copy()
        header.update(self.flags.toFitsHeader())
        fits.append(astropy.io.fits.PrimaryHDU(header=astropyHeaderFromDict(header)))
        for attr in ("fiberId", "wavelength", "flux", "mask", "sky", "covar"):
            hduName = attr.upper()
            data = getattr(self, attr)
            fits.append(astropy.io.fits.ImageHDU(data, name=hduName))
        with open(filename, "w") as fd:
            fits.writeto(fd)
Example #20
    def _writeImpl(self, fits):
        """Implementation for writing to FITS file

        Parameters
        ----------
        fits : `astropy.io.fits.HDUList`
            List of FITS HDUs. This has a Primary HDU already, the header of
            which may be supplemented with additional keywords.
        """
        from astropy.io.fits import ImageHDU
        super()._writeImpl(fits)
        fits.append(ImageHDU(self.sky, name="SKY"))
        self.observations.toFits(fits)
        fits.append(ImageHDU(self.covar, name="COVAR"))
        fits.append(ImageHDU(self.covar2, name="COVAR2"))
Example #21
def write_flux_calibration(outfile, fluxcalib, header=None):
    """Writes  flux calibration.
    """
    hdr = fitsheader(header)
    hdr['EXTNAME'] = ('FLUXCALIB', 'CHECK UNIT')
    fits.writeto(outfile,fluxcalib.calib,header=hdr, clobber=True)

    hdr['EXTNAME'] = ('IVAR', 'CHECK UNIT')
    hdu = fits.ImageHDU(fluxcalib.ivar, header=hdr)
    fits.append(outfile, hdu.data, header=hdu.header)

    hdr['EXTNAME'] = ('MASK', 'no dimension')
    hdu = fits.ImageHDU(fluxcalib.mask, header=hdr)
    fits.append(outfile, hdu.data, header=hdu.header)

    hdr['EXTNAME'] = ('WAVELENGTH', '[Angstroms]')
    hdu = fits.ImageHDU(fluxcalib.wave, header=hdr)
    fits.append(outfile, hdu.data, header=hdu.header)
Example #22
    def test_append_filename(self):
        """
        Test fits.append with a filename argument.
        """
        data = np.arange(6)
        testfile = self.temp('test_append_1.fits')

        # Test case 1: creation of file
        fits.append(testfile, data=data, checksum=True)

        # Test case 2: append to existing file, with verify=True
        # Also test that additional keyword can be passed to fitsopen
        fits.append(testfile, data=data * 2, checksum=True, ignore_blank=True)

        # Test case 3: append to existing file, with verify=False
        fits.append(testfile, data=data * 3, checksum=True, verify=False)

        with fits.open(testfile, checksum=True) as hdu1:
            np.testing.assert_array_equal(hdu1[0].data, data)
            np.testing.assert_array_equal(hdu1[1].data, data * 2)
            np.testing.assert_array_equal(hdu1[2].data, data * 3)
Example #23
File: sky.py Project: profxj/desispec
def write_sky(outfile, skymodel, header=None):
    """Write sky model.

    Args:
        outfile : filename or (night, expid, camera) tuple
        skymodel : SkyModel object, with the following attributes
            wave : 1D wavelength in vacuum Angstroms
            flux : 2D[nspec, nwave] sky flux
            ivar : 2D inverse variance of sky flux
            mask : 2D mask for sky flux
        header : optional fits header data (fits.Header, dict, or list)
    """
    outfile = makepath(outfile, 'sky')

    #- Convert header to fits.Header if needed
    if header is not None:
        hdr = fitsheader(header)
    else:
        hdr = fitsheader(skymodel.header)

    hdr['EXTNAME'] = ('SKY', 'no dimension')
    fits.writeto(outfile, skymodel.flux,header=hdr, clobber=True)

    hdr['EXTNAME'] = ('IVAR', 'no dimension')
    hdu = fits.ImageHDU(skymodel.ivar, header=hdr)
    fits.append(outfile, hdu.data, header=hdu.header)

    hdr['EXTNAME'] = ('MASK', 'no dimension')
    hdu = fits.ImageHDU(skymodel.mask, header=hdr)
    fits.append(outfile, hdu.data, header=hdu.header)

    hdr['EXTNAME'] = ('WAVELENGTH', '[Angstroms]')
    hdu = fits.ImageHDU(skymodel.wave, header=hdr)
    fits.append(outfile, hdu.data, header=hdu.header)

    return outfile
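write_sky and write_flux_calibration above build their multi-extension files by reusing a single header and changing EXTNAME before each append. A condensed sketch of the resulting layout, with placeholder arrays:

import numpy as np
from astropy.io import fits

hdr = fits.Header()
hdr['EXTNAME'] = 'SKY'
fits.writeto('sky.fits', np.zeros((2, 5)), header=hdr, overwrite=True)

for name, arr in [('IVAR', np.ones((2, 5))),
                  ('MASK', np.zeros((2, 5), dtype=np.int32)),
                  ('WAVELENGTH', np.arange(5.0))]:
    hdr['EXTNAME'] = name
    fits.append('sky.fits', arr, header=hdr)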
Example #24
### STITCH ALL THE ORDERS TOGETHER
wls, fluxes, sns = stitch(wlsol, spec, snr, cutoverlap='yes')

#step 4: save to a fits file
#write the primary data, and fill out the header
hdu = pyfits.PrimaryHDU()
hdu.writeto(filename_out, clobber=True)
#copy the target header
header = dataheader
header["EXTNAME"] = 'SPEC_STITCHED'
header.add_comment("This spectrum was created by combining the orders")
pyfits.update(filename_out, fluxes, header)

#add the rest of the extensions
header["EXTNAME"] = "WAVELENGTH"
pyfits.append(filename_out, wls, header)
###This one is new!
header["EXTNAME"] = "SNR"
pyfits.append(filename_out, sns, header)

print('*** File out ***')
print('The divided spectrum is being saved to', filename_out)

#step 5: plot
uncs = sns**-1
uncs_units = uncs * fluxes
plt.errorbar(wls, fluxes, yerr=uncs_units, marker='s')
plt.xlabel(r'Wavelength $\AA$')
plt.ylabel('Flux')
plt.ylim([-1.e6, 1.e6])
plt.xlim([19000, 25000])
Example #25
File: ex2d.py Project: baileyji/specter
def ex2d_patch(image, ivar, psf, specmin, nspec, wavelengths, xyrange=None,
         full_output=False, regularize=0.0, ndecorr=False):
    """
    2D PSF extraction of flux from image patch given pixel inverse variance.
    
    Inputs:
        image : 2D array of pixels
        ivar  : 2D array of inverse variance for the image
        psf   : PSF object
        specmin : index of first spectrum to extract
        nspec : number of spectra to extract
        wavelengths : 1D array of wavelengths to extract
        
    Optional Inputs:
        xyrange = (xmin, xmax, ymin, ymax): treat image as a subimage
            cutout of this region from the full image
        full_output : if True, return a dictionary of outputs including
            intermediate outputs such as the projection matrix.
        ndecorr : if True, decorrelate the noise between fibers, at the
            cost of residual signal correlations between fibers.
        
    Returns (flux, ivar, R):
        flux[nspec, nwave] = extracted resolution convolved flux
        ivar[nspec, nwave] = inverse variance of flux
        R : 2D resolution matrix to convert
    """

    #- Range of image to consider
    waverange = (wavelengths[0], wavelengths[-1])
    specrange = (specmin, specmin+nspec)
    
    if xyrange is None:
        xmin, xmax, ymin, ymax = xyrange = psf.xyrange(specrange, waverange)
        image = image[ymin:ymax, xmin:xmax]
        ivar = ivar[ymin:ymax, xmin:xmax]
    else:
        xmin, xmax, ymin, ymax = xyrange

    nx, ny = xmax-xmin, ymax-ymin
    npix = nx*ny
    
    nspec = specrange[1] - specrange[0]
    nwave = len(wavelengths)
    
    #- Solve AT W pix = (AT W A) flux
    
    #- Projection matrix and inverse covariance
    A = psf.projection_matrix(specrange, wavelengths, xyrange)

    #- Pixel weights matrix
    w = ivar.ravel()
    W = spdiags(ivar.ravel(), 0, npix, npix)

    #-----
    #- Extend A with an optional regularization term to limit ringing.
    #- If any flux bins don't contribute to these pixels,
    #- also use this term to constrain those flux bins to 0.
    
    #- Original: exclude flux bins with 0 pixels contributing
    # ibad = (A.sum(axis=0).A == 0)[0]
    
    #- Identify fluxes with very low weights of pixels contributing            
    fluxweight = W.dot(A).sum(axis=0).A[0]
    minweight = 0.01*np.max(fluxweight)
    ibad = fluxweight < minweight
    
    #- Original version; doesn't work on older versions of scipy
    # I = regularize*scipy.sparse.identity(nspec*nwave)
    # I.data[0,ibad] = minweight - fluxweight[ibad]
    
    #- Add regularization of low weight fluxes
    Idiag = regularize*np.ones(nspec*nwave)
    Idiag[ibad] = minweight - fluxweight[ibad]
    I = scipy.sparse.identity(nspec*nwave)
    I.setdiag(Idiag)

    #- Only need to extend A if regularization is non-zero
    if np.any(I.diagonal()):
        pix = np.concatenate( (image.ravel(), np.zeros(nspec*nwave)) )
        Ax = scipy.sparse.vstack( (A, I) )
        wx = np.concatenate( (w, np.ones(nspec*nwave)) )
    else:
        pix = image.ravel()
        Ax = A
        wx = w

    #- Inverse covariance
    Wx = spdiags(wx, 0, len(wx), len(wx))
    iCov = Ax.T.dot(Wx.dot(Ax))

    #- Solve (image = A flux) weighted by Wx:
    #-     A^T W image = (A^T W A) flux = iCov flux    
    y = Ax.T.dot(Wx.dot(pix))
    
    xflux = spsolve(iCov, y).reshape((nspec, nwave))

    #- Solve for Resolution matrix
    try:
        if ndecorr:
            R, fluxivar = resolution_from_icov(iCov)
        else:
            R, fluxivar = resolution_from_icov(iCov, decorr=[nwave for x in range(nspec)])
    except np.linalg.LinAlgError as err:
        outfile = 'LinAlgError_{}-{}_{}-{}.fits'.format(specrange[0], specrange[1], waverange[0], waverange[1])
        print("ERROR: Linear Algebra didn't converge")
        print("Dumping {} for debugging".format(outfile))
        from astropy.io import fits
        fits.writeto(outfile, image, clobber=True)
        fits.append(outfile, ivar, name='IVAR')
        fits.append(outfile, A.data, name='ADATA') 
        fits.append(outfile, A.indices, name='AINDICES')
        fits.append(outfile, A.indptr, name='AINDPTR')
        fits.append(outfile, iCov.toarray(), name='ICOV')
        raise err
Example #26
# Now, we construct an object that describes the data region and structure we
# want
cube = pf.h.covering_grid(int(level), # The level we are willing to extract to; higher
                          # levels than this will not contribute to the data!
                          # Now we set our spatial extent...
                          left_edge=[0.0, 0.0, 0.0],
                          right_edge=[1.0, 1.0, 1.0],
                          # How many dimensions along each axis
                          dims=DIMS,
                          # And any fields to preload (this is optional!)
                          fields=["Density"])

FlatFileName = tempdir+'/%s_flatrho_%04i.fits' %(name, num)

pyfits.writeto(FlatFileName, cube["Density"])
pyfits.append(FlatFileName, cube["x-velocity"])
pyfits.append(FlatFileName, cube["y-velocity"])
pyfits.append(FlatFileName, cube["z-velocity"])

#arg = 'caleb_flatrho_0025.fits/1/10'
#


subprocess.call('cp '+ppdir+'/dustkappa_silicate.inp '+tempdir, shell=True)
subprocess.call('cp '+ppdir+'/molecule_13co.inp '+tempdir, shell=True)
subprocess.call('cp '+ppdir+'/camera_wavelength_micron.inp '+tempdir, shell=True)

Problem_setup(FlatFileName, face = face, dust_temp = 10.0)
command = ppdir+'radmc3d image npix '+str(DIMS[0])+' iline 1 widthkms 10 linenlam 500 loadlambda fluxcons inclline linelist nostar writepop doppcatch sizepc 10'
subprocess.call(command, shell=True)
makefits()
Example #27
#------------------------------------------------------------------------
# Open the input event file
hdu = fits.open(args.infile)
# HDU[0] is dummy, 1-4 are quadrants

# Create output file. Exit if it already exists
fits.writeto(args.outfile, hdu[0].data, hdu[0].header)
if args.verbose:
    print("Wrote HDU 0")

for quad in np.arange(noe):
    # Note that the output file needs quadrants from 0-3, but HDU extensions are 1-4
    # Also process extension 5, which has veto spectrum
    if args.verbose:
        print("Processing quadrant {quad}/4 ".format(quad=quad + 1)),
    data = hdu[quad + 1].data
    if hdu[quad + 1].header['naxis2'] > 0:
        timestamps = data['Time']
        tmin = max([np.floor(min(timestamps)), args.tmin])
        tmax = min([np.floor(max(timestamps)), args.tmax])
        if args.verbose: print(tmin, tmax, args.tmin, args.tmax)
        if args.verbose: sys.stdout.flush()
        select = np.where((hdu[quad + 1].data['Time'] >= tmin)
                          & (hdu[quad + 1].data['Time'] <= tmax))
        data = hdu[quad + 1].data[select]
    fits.append(args.outfile, data,
                header=hdu[quad + 1].header)  #, verify=False)

#for ext in range(6, len(hdu)):
#    fits.append(args.outfile, hdu[ext].data, hdu[ext].header)
Example #28
def process_image(imfn,
                  ivarfn,
                  dqfn,
                  outfn=None,
                  clobber=False,
                  outdir=None,
                  verbose=False,
                  nproc=numpy.inf,
                  resume=False,
                  outmodelfn=None,
                  profile=False):
    if profile:
        import cProfile
        import pstats
        pr = cProfile.Profile()
        pr.enable()

    with fits.open(imfn) as hdulist:
        extnames = [hdu.name for hdu in hdulist]
    if 'PRIMARY' not in extnames:
        raise ValueError('No PRIMARY header in file')
    prihdr = fits.getheader(imfn, extname='PRIMARY')
    if 'CENTRA' in prihdr:
        bstarfn = os.path.join(os.environ['DECAM_DIR'], 'data',
                               'tyc2brighttrim.fits')
        brightstars = fits.getdata(bstarfn)
        from astropy.coordinates.angle_utilities import angular_separation
        sep = angular_separation(numpy.radians(brightstars['ra']),
                                 numpy.radians(brightstars['dec']),
                                 numpy.radians(prihdr['CENTRA']),
                                 numpy.radians(prihdr['CENTDEC']))
        sep = numpy.degrees(sep)
        m = sep < 3
        brightstars = brightstars[m]
        dmjd = prihdr['MJD-OBS'] - 51544.5  # J2000 MJD.
        cosd = numpy.cos(
            numpy.radians(numpy.clip(brightstars['dec'], -89.9999, 89.9999)))
        brightstars['ra'] += dmjd * brightstars[
            'pmra'] / 365.25 / cosd / 1000 / 60 / 60
        brightstars[
            'dec'] += dmjd * brightstars['pmde'] / 365.25 / 1000 / 60 / 60
    else:
        brightstars = None
    filt = prihdr['filter']
    if outfn is None or len(outfn) == 0:
        outfn = os.path.splitext(os.path.basename(imfn))[0]
        if outfn[-5:] == '.fits':
            outfn = outfn[:-5]
        outfn = outfn + '.cat.fits'
    if outdir is not None:
        outfn = os.path.join(outdir, outfn)
    if not resume or not os.path.exists(outfn):
        fits.writeto(outfn, None, prihdr, clobber=clobber)
        extnamesdone = None
    else:
        hdulist = fits.open(outfn)
        extnamesdone = []
        for hdu in hdulist:
            if hdu.name == 'PRIMARY':
                continue
            ext, exttype = hdu.name.split('_')
            if exttype != 'CAT':
                continue
            extnamesdone.append(ext)
        hdulist.close()
    if outmodelfn and (not resume or not os.path.exists(outmodelfn)):
        fits.writeto(outmodelfn, None, prihdr, clobber=clobber)
    count = 0
    fwhms = []
    for name in extnames:
        if name == 'PRIMARY':
            continue
        hdr = fits.getheader(imfn, extname=name)
        if 'FWHM' in hdr:
            fwhms.append(hdr['FWHM'])
    fwhms = numpy.array(fwhms)
    fwhms = fwhms[fwhms > 0]
    for name in extnames:
        if name == 'PRIMARY':
            continue
        if extnamesdone is not None and name in extnamesdone:
            print('Skipping %s, extension %s; already done.' % (imfn, name))
            continue
        if verbose:
            print('Fitting %s, extension %s.' % (imfn, name))
            sys.stdout.flush()
        im, wt, dq = read_data(imfn, ivarfn, dqfn, name)
        hdr = fits.getheader(imfn, extname=name)
        fwhm = hdr.get('FWHM', numpy.median(fwhms))
        if fwhm <= 0.:
            fwhm = 4.
        fwhmmn, fwhmsd = numpy.mean(fwhms), numpy.std(fwhms)
        if fwhmsd > 0.4:
            fwhm = fwhmmn
        psf = decam_psf(filt[0], fwhm)
        wcs0 = wcs.WCS(hdr)
        if brightstars is not None:
            sep = angular_separation(numpy.radians(brightstars['ra']),
                                     numpy.radians(brightstars['dec']),
                                     numpy.radians(hdr['CENRA1']),
                                     numpy.radians(hdr['CENDEC1']))
            sep = numpy.degrees(sep)
            m = sep < 0.2
            # CCD is 4094 pix wide => everything is at most 0.15 deg
            # from center
            if numpy.any(m):
                yb, xb = wcs0.all_world2pix(brightstars['ra'][m],
                                            brightstars['dec'][m], 0)
                vmag = brightstars['vtmag'][m]
                # WCS module and I order x and y differently...
                m = ((xb > 0) & (xb < im.shape[0] + 1) & (yb > 0) &
                     (yb < im.shape[1] + 1))
                if numpy.any(m):
                    xb, yb = xb[m], yb[m]
                    vmag = vmag[m]
                    blist = [xb, yb, vmag]
                else:
                    blist = None
            else:
                blist = None
        else:
            blist = None

        if blist is not None:
            # we did not enable this for first DECaPS v1
            # dq = mask_very_bright_stars(dq, blist)
            pass

        # the actual fit
        res = mosaic.fit_sections(im,
                                  psf,
                                  4,
                                  2,
                                  weight=wt,
                                  dq=dq,
                                  psfderiv=True,
                                  refit_psf=True,
                                  verbose=verbose,
                                  blist=blist)

        cat, modelim, skyim, psfs = res
        if len(cat) > 0:
            ra, dec = wcs0.all_pix2world(cat['y'], cat['x'], 0.)
        else:
            ra = numpy.zeros(0, dtype='f8')
            dec = numpy.zeros(0, dtype='f8')
        from matplotlib.mlab import rec_append_fields
        decapsid = numpy.zeros(len(cat), dtype='i8')
        decapsid[:] = (prihdr['EXPNUM'] * 2**32 * 2**7 +
                       hdr['CCDNUM'] * 2**32 +
                       numpy.arange(len(cat), dtype='i8'))
        if verbose:
            print('Writing %s %s, found %d sources.' % (outfn, name, len(cat)))
            sys.stdout.flush()
        hdr['EXTNAME'] = hdr['EXTNAME'] + '_HDR'
        if numpy.any(wt > 0):
            hdr['GAINCRWD'] = numpy.nanmedian((im * wt**2.)[wt > 0])
            hdr['SKYCRWD'] = numpy.nanmedian(skyim[wt > 0])
        else:
            hdr['GAINCRWD'] = 4
            hdr['SKYCRWD'] = 0
        if len(cat) > 0:
            hdr['FWHMCRWD'] = numpy.nanmedian(cat['fwhm'])
        else:
            hdr['FWHMCRWD'] = 0.0
        gain = hdr['GAINCRWD'] * numpy.ones(len(cat), dtype='f4')
        cat = rec_append_fields(cat, ['ra', 'dec', 'decapsid', 'gain'],
                                [ra, dec, decapsid, gain])
        fits.append(outfn, numpy.zeros(0), hdr)
        outpsfs = numpy.concatenate(
            [tpsf.serialize(stampsz=19) for tpsf in psfs])
        hdupsfs = fits.BinTableHDU(outpsfs)
        hdupsfs.name = hdr['EXTNAME'][:-4] + '_PSF'
        hducat = fits.BinTableHDU(cat)
        hducat.name = hdr['EXTNAME'][:-4] + '_CAT'
        hdulist = fits.open(outfn, mode='append')
        hdulist.append(hdupsfs)
        hdulist.append(hducat)
        hdulist.close(closed=True)
        if outmodelfn:
            modhdulist = fits.open(outmodelfn, mode='append')
            hdr['EXTNAME'] = hdr['EXTNAME'][:-4] + '_MOD'
            # RICE should be significantly better here and supported in
            # mrdfits?, but compression_type=RICE_1 seems to cause
            # quantize_level to be ignored.
            compkw = {
                'compression_type': 'GZIP_1',
                'quantize_method': 1,
                'quantize_level': -4,
                'tile_size': modelim.shape
            }
            modhdulist.append(fits.CompImageHDU(modelim, hdr, **compkw))
            hdr['EXTNAME'] = hdr['EXTNAME'][:-4] + '_SKY'
            modhdulist.append(fits.CompImageHDU(skyim, hdr, **compkw))
            modhdulist.close(closed=True)
        count += 1
        if count > nproc:
            break
    if profile:
        pr.disable()
        pstats.Stats(pr).sort_stats('cumulative').print_stats(60)
Example #29
def create_master_img(imglist,
                      imgtype='',
                      RON=0.,
                      gain=1.,
                      clip=5.,
                      with_errors=True,
                      asint=False,
                      savefiles=True,
                      scalable=False,
                      remove_outliers=True,
                      diffimg=False,
                      noneg=False,
                      timit=False):
    """
    
    NOT CURRENTLY IN USE, BUT SNIPPETS FROM IT ARE USED IN VARIOUS CALIBRATION ROUTINES!!!!!
    
    This routine co-adds spectra from a given input list. It can also remove cosmics etc. by replacing outlier pixels that deviate by more than a certain
    number of sigmas from the median across all images with the mean pixel value of the remaining pixels.
    NOTE: raw images are in units of ADU, but the processed master images are in units of electrons!!!
    NOTE: if 'with_errors' is set to TRUE, then the error array is stored in the same FITS file as a second HDU
    
    INPUT:
    'imglist'           : list of filenames of images to co-add
    'imgtype'           : ['bias' / 'dark' / 'white' / 'stellar'] are valid options
    'RON'               : read-out noise in electrons per pixel
    'gain'              : camera amplifier (inverse) gain in electrons per ADU
    'clip'              : threshold for outlier identification (in sigmas above/below median)
    'with_errors'       : boolean - do you want to save/return the estimated error-array as well?
    'asint'             : boolean - do you want the master image to be rounded to the nearest integer?
    'savefiles'         : boolean - do you want to save the master image as a FITS file?
    'scalable'          : boolean - do you want to make the master image scalable (ie normalize to t_exp = 1s)
    'remove_outliers'   : boolean - do you want to remove outlier pixels (eg cosmics) with the median of the remaining images?
    'diffimg'           : boolean - do you want to save the difference image to a fits file? only works if 'remove_outliers' is also set to TRUE
    'noneg'             : boolean - do you want to allow negative pixel values?
    'timit'             : boolean - measure time taken for completion of function?
    
    OUTPUT:
    'master'            : the master image
    """

    if timit:
        start_time = time.time()

    print('Creating master image from: ' + str(len(imglist)) + ' ' +
          imgtype.upper() + ' images')

    if savefiles or diffimg:
        intstring = ''
        outie_string = ''
        noneg_string = ''
        normstring = ''

    while imgtype.lower() not in [
            'white', 'w', 'dark', 'd', 'bias', 'b', 'stellar', 's'
    ]:
        imgtype = input(
            "WARNING: Image type not specified! What kind of images are they? ['(b)ias' / '(d)ark' / '(w)hite' / '(s)tellar']"
        )

    if imgtype.lower() in ['b', 'bias']:
        RON = 0.

    #proceed if list is not empty
    if len(imglist) > 0:

        if with_errors:
            allerr = []
        if remove_outliers:
            allimg = []
            outie_string = '_outliers_removed'

        for n, file in enumerate(imglist):
            #img = gain * pyfits.getdata(file).T
            img = gain * pyfits.getdata(file)
            if with_errors:
                err_img = np.sqrt(gain * img.astype(float) + RON * RON)
                allerr.append(err_img)
            if remove_outliers:
                allimg.append(img)
            if n == 0:
                h = pyfits.getheader(file)
                master = img.copy().astype(float)
                if remove_outliers:
                    ny, nx = img.shape
                    master_outie_mask = np.zeros((ny, nx), dtype='int')
                    if diffimg:
                        diff = np.zeros((ny, nx), dtype='float')
            else:
                master += img

        #add individual-image errors in quadrature
        if with_errors:
            err_master = np.sqrt(np.sum((np.array(allerr)**2), axis=0))

        if remove_outliers:
            medimg = np.median(np.array(allimg), axis=0)
            #for bias and dark frames just use the median image; for whites use something more sophisticated
            if imgtype[0] in ['b', 'd']:
                #now set master image equal to median image
                master = medimg
                #estimate of the corresponding error array (estimate only!!!)
                if with_errors:
                    err_master = err_master / len(imglist)
            #do THIS for whites
            else:
                #make sure we do not have any negative pixels for the sqrt
                medimgpos = medimg.copy()
                medimgpos[medimg < 0] = 0.
                med_sig_arr = np.sqrt(
                    RON * RON + gain * medimgpos
                )  #expected STDEV for the median image (from LB Eq 2.1)
                #             rms_arr = np.std(np.array(allimg),axis=0)             #but in the very low SNR regime, this takes over, as med_sig_arr will just be RON, and flag a whole bunch of otherwise good pixels
                #             mydev = np.maximum(med_sig_arr,rms_arr)
                #             #check that the median pixel value does not deviate significantly from the minimum pixel value (unlikely!!!)
                #             minimg = np.amin(allimg,axis=0)
                #             min_sig_arr = np.sqrt(RON*RON + gain*minimg)       #expected STDEV for the minimum image (from LB Eq 2.1)
                #             fu_mask = medimg - minimg > clip*min_sig_arr
                for n, img in enumerate(allimg):
                    #outie_mask = np.abs(img - medimg) > clip*med_sig_arr
                    outie_mask = img - medimg > clip * med_sig_arr  #do we only want HIGH outliers, ie cosmics?
                    #save info about which image contributes the outlier pixel using unique binary numbers technique
                    master_outie_mask += (outie_mask * 2**n).astype(int)
                #see which image(s) produced the outlier(s) and replace outies by mean of pixel value from remaining images
                n_outie = np.sum(master_outie_mask > 0)
                print('Correcting ' + str(n_outie) + ' outliers...')
                #loop over all outliers
                for i, j in zip(
                        np.nonzero(master_outie_mask)[0],
                        np.nonzero(master_outie_mask)[1]):
                    #access binary numbers and retrieve component(s)
                    outnum = binary_indices(
                        master_outie_mask[i, j]
                    )  #these are the indices (within allimg) of the images that contain outliers
                    dumix = np.arange(len(imglist))
                    #remove the images containing the outliers in order to compute mean from the remaining images
                    useix = np.delete(dumix, outnum)
                    if diffimg:
                        diff[i, j] = master[i, j] - (
                            len(outnum) *
                            np.mean(np.array([allimg[q][i, j]
                                              for q in useix])) +
                            np.sum(np.array([allimg[q][i, j] for q in useix])))
                    #now replace value in master image by the sum of all pixel values in the unaffected pixels
                    #plus the number of affected images times the mean of the pixel values in the unaffected images
                    master[i, j] = len(outnum) * np.mean(
                        np.array([allimg[q][i, j] for q in useix])) + np.sum(
                            np.array([allimg[q][i, j] for q in useix]))
                #once we have finished correcting the outliers, we want to "normalize" (ie divide by number of frames) the master image
                master = master / len(imglist)
                if with_errors:
                    err_master = err_master / len(imglist)

        #if not remove outliers, still need to "normalize" (ie divide by number of frames)
        else:
            master = master / len(imglist)
            if with_errors:
                err_master = err_master / len(imglist)

        if scalable:
            texp = pyfits.getval(imglist[0], 'exptime')
            master = master / texp
            if with_errors:
                err_master = err_master / texp
            normstring = normstring + '_scalable'

        if noneg:
            master[master < 0] = 0.
            noneg_string = '_noneg'

        if asint:
            master = np.round(master).astype(int)
            if with_errors:
                err_master = np.round(err_master).astype(int)
            intstring = '_int'

        if diffimg:
            hdiff = h.copy()
            dum = file.split('/')
            path = file[:-len(dum[-1])]
            hdiff['HISTORY'] = '   DIFFERENCE IMAGE - created ' + \
                time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + ' (GMT)'
            hdiff['COMMENT'] = 'Results are in units of ELECTRONS'
            pyfits.writeto(path + 'master_' + imgtype.lower() + normstring +
                           intstring + '_diffimg.fits',
                           diff,
                           hdiff,
                           clobber=True)

        if savefiles:
            dum = file.split('/')
            path = file[:-len(dum[-1])]
            #             while imgtype.lower() not in ['white','dark','bias']:
            #                 imgtype = raw_input("WARNING: Image type not specified! What kind of images are they ['white' / 'dark' / 'bias']: ")
            h['HISTORY'] = '   MASTER ' + imgtype.upper() + ' - created ' + \
                time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + ' (GMT)'
            h['COMMENT'] = 'Results are in units of ELECTRONS'
            pyfits.writeto(path + 'master_' + imgtype.lower() + normstring +
                           outie_string + intstring + noneg_string + '.fits',
                           master,
                           h,
                           clobber=True)
            #save error array in second HDU
            if with_errors:
                h_err = h.copy()
                h_err['LEGEND'] = 'estimated uncertainty in MASTER ' + imgtype.upper()
                pyfits.append(path + 'master_' + imgtype.lower() + normstring +
                              outie_string + intstring + noneg_string +
                              '.fits',
                              err_master,
                              h_err)

    else:
        print('WARNING: empty input list')
        return

    print('DONE!!!')

    if timit:
        print('Elapsed time: ' + str(np.round(time.time() - start_time, 2)) +
              ' seconds')

    if with_errors:
        return master, err_master
    else:
        return master
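The outlier logic in create_master_img reduces to: flag pixels that sit more than clip sigma above the per-pixel median of the stack, then rebuild those pixels from the unaffected frames. A toy numpy sketch of just the flagging step (RON, gain and clip values are illustrative):

import numpy as np

RON, gain, clip = 4.0, 1.0, 5.0
stack = np.random.poisson(100.0, size=(5, 8, 8)).astype(float)  # five frames
med = np.median(stack, axis=0)
sigma = np.sqrt(RON * RON + gain * np.clip(med, 0, None))  # expected scatter (LB Eq 2.1)
outliers = stack - med > clip * sigma  # high outliers (e.g. cosmics), per frame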
Example #30
def lc_from_bulk_download(fits_path, target_list, fname_out, fname_targets,
                          fname_notes, path, fname_flagged,
                          custom_mask=[], apply_nan_mask=False):
    '''This function opens each _lc.fits file in fits_path, masks flagged data
    points from QUALITY and saves interpolated PDCSAP_FLUX, TIME and TICID to
    fits file fname_out.
    Parameters:
        * fits_path : directory containing all light curve fits files 
                      (ending with '/')
        * target_list : returned by lc_by_camera_ccd()
        * fname_out : name of fits file to save time, flux and ticids into
        * fname_targets : saves ticid of every target saved 
        * fname_notes : saves the ticid of any target it fails on
        * path : directory to save nan mask plots in
        * fname_flagged : name of fits file to save flagged light curves
    Returns:
        * confirmation : boolean, returns False if failure
    '''
    import fnmatch
    import gc
    
    # >> get list of all light curve fits files
    fnames_all = os.listdir(fits_path)
    fnames = fnmatch.filter(fnames_all, '*fits*')
    
    # >> find all light curves in a group
    fnames_group = []
    target_list = target_list[:,0]
    for target in target_list:
        try:
            fname = list(filter(lambda x: str(int(target)) in x, fnames))[0]
            fnames_group.append(fname)
        except IndexError:  # no file matched this TIC ID
            print('Missing ' + str(int(target)))
            with open(fname_notes, 'a') as f:
                f.write(str(int(target)) + '\n')
        
    fnames = fnames_group
    count = 0
    
    ticid_list = []
    intensity = []
    for n in range(len(fnames)):
        
        file = fnames[n]
        print(count)
        
        # >> open file
        with fits.open(fits_path + file, memmap=False) as hdul:
            hdu_data = hdul[1].data
            
            # >> get time array (only for the first light curve)
            if n == 0: 
                time = hdu_data['TIME']
                
            # >> get flux array
            i = hdu_data['PDCSAP_FLUX']
            intensity.append(i)
            
            # >> get quality mask
            quality = hdu_data['QUALITY']
            flagged_inds = np.nonzero(quality)
            i[flagged_inds] = np.nan
            
            # >> get ticid
            ticid = hdul[1].header['TICID']
            ticid_list.append(ticid)
            with open(fname_targets, 'a') as f:
                f.write(str(int(ticid)) + '\n')
            
            # >> clear memory
            del hdu_data
            del hdul[1].data
            del hdul[0].data
            gc.collect()
            
        count += 1
    
    # >> interpolate and NaN mask
    print('Interpolating...')
    intensity = np.array(intensity)
    ticid_list = np.array(ticid_list)
    intensity_interp, time, ticid_interp, flagged, ticid_flagged = \
        interpolate_all(intensity, time, ticid_list, custom_mask=custom_mask,
                        apply_nan_mask=apply_nan_mask)
    
    # >> save time array, intensity array and ticids to fits file
    print('Saving to fits file...')
    
    hdr = fits.Header()
    hdu = fits.PrimaryHDU(time, header=hdr)
    hdu.writeto(fname_out)
    fits.append(fname_out, intensity_interp)
    fits.append(fname_out, ticid_interp)
    # >> actually i'm going to save the raw intensities just in case
    fits.append(fname_out, intensity)
    
    # >> save flagged
    if np.shape(flagged)[0] != 0:
        hdr = fits.Header()
        hdu = fits.PrimaryHDU(flagged, header=hdr)
        hdu.writeto(fname_flagged)
        fits.append(fname_flagged, ticid_flagged)
    
    print("lc_from_bulk_download has finished running")
    return time, intensity_interp, ticid_interp, flagged, ticid_flagged
Example #31
def write_fits(filename, data, header, extension, extname, comm):
    """
    Collectively write local arrays into a single FITS file.

    Parameters
    ----------
    filename : str
        The FITS file name.
    data : ndarray
        The array to be written.
    header : pyfits.Header
        The data FITS header. None can be set, in which case a minimal FITS
        header will be inferred from the data.
    extension : boolean
        If True, the data will be written as an extension to an already
        existing FITS file.
    extname : str
        The FITS extension name. Use None to write the primary HDU.
    comm : mpi4py.Comm
        The MPI communicator of the local arrays. Use MPI.COMM_SELF if the data
        are not meant to be combined into a global array. Make sure that the
        MPI processes are not executing this routine with the same file name.

    """
    # check if the file name is the same for all MPI jobs
    files = comm.allgather(filename + str(extname))
    all_equal = all(f == files[0] for f in files)
    if comm.size > 1 and not all_equal:
        raise ValueError('The file name is not the same for all MPI jobs.')
    ndims = comm.allgather(data.ndim)
    if any(n != ndims[0] for n in ndims):
        raise ValueError("The arrays have an incompatible number of dimensions"
                         ": '{0}'.".format(', '.join(str(n) for n in ndims)))
    ndim = ndims[0]
    shapes = comm.allgather(data.shape)
    if any(s[1:] != shapes[0][1:] for s in shapes):
        raise ValueError("The arrays have incompatible shapes: '{0}'.".format(
            strshape(shapes)))

    # get header
    if header is None:
        header = create_fitsheader_for(data, extname=extname)
    else:
        header = header.copy()
    if extname is not None:
        header['extname'] = extname

    # we remove the file first to avoid an annoying pyfits informative message
    if not extension:
        if comm.rank == 0:
            try:
                os.remove(filename)
            except OSError:
                pass

    # case without MPI communication
    if comm.size == 1:
        if not extension:
            hdu = pyfits.PrimaryHDU(data, header)
            hdu.writeto(filename, overwrite=True)
        else:
            pyfits.append(filename, data, header)
        return

    # get global/local parameters
    nglobal = sum(s[0] for s in shapes)
    s = split(nglobal, comm.size, comm.rank)
    nlocal = s.stop - s.start
    if data.shape[0] != nlocal:
        raise ValueError(
            "On rank {}, the local array shape '{}' is invalid. T"
            "he first dimension does not match the expected local"
            " number '{}' given the global number '{}'.{}".format(
                comm.rank, data.shape, nlocal, nglobal,
                '' if comm.rank > 0 else ' Shapes are: {}.'.format(shapes)))

    # write FITS header
    if comm.rank == 0:
        header['NAXIS' + str(ndim)] = nglobal
        shdu = pyfits.StreamingHDU(filename, header)
        data_loc = shdu._datLoc
        shdu.close()
    else:
        data_loc = None
    data_loc = comm.bcast(data_loc)

    # get a communicator excluding the processes which have no work to do
    # (Create_subarray does not allow 0-sized subarrays)
    chunk = product(data.shape[1:])
    rank_nowork = min(comm.size, nglobal)
    group = comm.Get_group().Incl(list(range(rank_nowork)))
    newcomm = comm.Create(group)

    # collectively write data
    if comm.rank < rank_nowork:
        # mpi4py 1.2.2: pb with viewing data as big endian KeyError '>d'
        if sys.byteorder == 'little' and data.dtype.byteorder == '=' or \
           data.dtype.byteorder == '<':
            data = data.byteswap()
        data = data.newbyteorder('=')
        mtype = DTYPE_MAP[data.dtype]
        ftype = mtype.Create_subarray([nglobal * chunk], [nlocal * chunk],
                                      [s.start * chunk])
        ftype.Commit()
        f = MPI.File.Open(newcomm,
                          filename,
                          amode=MPI.MODE_APPEND | MPI.MODE_WRONLY
                          | MPI.MODE_CREATE)
        f.Set_view(data_loc, mtype, ftype, 'native', MPI.INFO_NULL)
        f.Write_all(data)
        f.Close()
        ftype.Free()
    newcomm.Free()

    # pad FITS file with zeros
    if comm.rank == 0:
        datasize = nglobal * chunk * data.dtype.itemsize
        BLOCK_SIZE = 2880
        padding = BLOCK_SIZE - (datasize % BLOCK_SIZE)
        with open(filename, 'ab') as f:
            if f.tell() - data_loc != datasize:
                raise RuntimeError('Unexpected file size.')
            f.write(padding * b'\0')

    comm.Barrier()
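The first consistency check in write_fits (every rank must pass the same file name) is a useful pattern on its own. A minimal sketch, assuming mpi4py is available and the script runs under mpiexec:

from mpi4py import MPI

comm = MPI.COMM_WORLD
filename = 'out.fits'  # illustrative; must agree across ranks
files = comm.allgather(filename)
if comm.size > 1 and any(f != files[0] for f in files):
    raise ValueError('The file name is not the same for all MPI jobs.')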
Example #32
                           basedir,
                           uncompressed=args.uncompressed)
    else:
        print('No bright star catalog, not marking bright stars.')

    res = process(im,
                  sqivar,
                  flag,
                  psf,
                  refit_psf=args.refit_psf,
                  verbose=args.verbose,
                  nx=4,
                  ny=4,
                  derivcentroids=True)
    outfn = args.outfn[0]

    x = (res[0])['x']
    y = (res[0])['y']

    wcs = wcs.WCS(hdr)
    ra, dec = wcs.all_pix2world(y, x, 0)

    import numpy.lib.recfunctions as rfn
    cat = rfn.append_fields(res[0], ['ra', 'dec'], [ra, dec])

    fits.writeto(outfn, cat)
    fits.append(outfn, res[1])
    fits.append(outfn, res[2])
    psfext = numpy.array([tpsf(0, 0, stampsz=19) for tpsf in res[3]])
    fits.append(outfn, psfext)
Example #33
def fits_db(fits_address, model_db, ext_name='', header=None):

    line_labels = model_db['inputs']['line_list']
    params_traces = model_db['outputs']

    sec_label = 'synthetic_fluxes' if ext_name == '' else f'{ext_name}_synthetic_fluxes'

    # ---------------------------------- Input data

    # Data
    list_columns = []
    for data_label, data_format in FITS_INPUTS_EXTENSION.items():
        data_array = model_db['inputs'][data_label]
        data_col = fits.Column(name=data_label, format=data_format, array=data_array)
        list_columns.append(data_col)

    # Header
    hdr_dict = {}
    for i_line, lineLabel in enumerate(line_labels):
        hdr_dict[f'hierarch {lineLabel}'] = model_db['inputs']['line_fluxes'][i_line]
        hdr_dict[f'hierarch {lineLabel}_err'] = model_db['inputs']['line_err'][i_line]

    # User values:
    for key, value in header.items():
        if key not in ['logP_values', 'r_hat']:
            hdr_dict[f'hierarch {key}'] = value

    # Inputs extension
    cols = fits.ColDefs(list_columns)
    sec_label = 'inputs' if ext_name == '' else f'{ext_name}_inputs'
    hdu_inputs = fits.BinTableHDU.from_columns(cols, name=sec_label, header=fits.Header(hdr_dict))

    # ---------------------------------- Output data
    params_list = model_db['inputs']['parameter_list']
    param_matrix = np.array([params_traces[param] for param in params_list])
    param_col = fits.Column(name='parameters_list', format=FITS_OUTPUTS_EXTENSION['parameter_list'], array=params_list)
    param_val = fits.Column(name='parameters_fit', format='E', array=param_matrix.mean(axis=1))
    param_err = fits.Column(name='parameters_err', format='E', array=param_matrix.std(axis=1))
    list_columns = [param_col, param_val, param_err]

    # Header
    hdr_dict = {}
    for i, param in enumerate(params_list):
        param_trace = params_traces[param]
        hdr_dict[f'hierarch {param}'] = np.mean(param_trace)
        hdr_dict[f'hierarch {param}_err'] = np.std(param_trace)

    for lineLabel in line_labels:
        param_trace = params_traces[lineLabel]
        hdr_dict[f'hierarch {lineLabel}'] = np.mean(param_trace)
        hdr_dict[f'hierarch {lineLabel}_err'] = np.std(param_trace)

    # # Data
    # param_array = np.array(list(params_traces.keys()))
    # paramMatrix = np.array([params_traces[param] for param in param_array])
    #
    # list_columns.append(fits.Column(name='parameter', format='20A', array=param_array))
    # list_columns.append(fits.Column(name='mean', format='E', array=np.mean(paramMatrix, axis=0)))
    # list_columns.append(fits.Column(name='std', format='E', array=np.std(paramMatrix, axis=0)))
    # list_columns.append(fits.Column(name='median', format='E', array=np.median(paramMatrix, axis=0)))
    # list_columns.append(fits.Column(name='p16th', format='E', array=np.percentile(paramMatrix, 16, axis=0)))
    # list_columns.append(fits.Column(name='p84th', format='E', array=np.percentile(paramMatrix, 84, axis=0)))

    cols = fits.ColDefs(list_columns)
    sec_label = 'outputs' if ext_name == '' else f'{ext_name}_outputs'
    hdu_outputs = fits.BinTableHDU.from_columns(cols, name=sec_label, header=fits.Header(hdr_dict))

    # ---------------------------------- traces data
    list_columns = []

    # Data
    for param, trace_array in params_traces.items():
        col_trace = fits.Column(name=param, format='E', array=params_traces[param])
        list_columns.append(col_trace)

    cols = fits.ColDefs(list_columns)

    # Header fitting properties
    hdr_dict = {}
    for stats_dict in ['logP_values', 'r_hat']:
        for key, value in header[stats_dict].items():
            hdr_dict[f'hierarch {key}_{stats_dict}'] = value

    sec_label = 'traces' if ext_name == '' else f'{ext_name}_traces'
    hdu_traces = fits.BinTableHDU.from_columns(cols, name=sec_label, header=fits.Header(hdr_dict))

    # ---------------------------------- Save fits files
    hdu_list = [hdu_inputs, hdu_outputs, hdu_traces]

    if fits_address.is_file():
        for hdu in hdu_list:
            try:
                fits.update(fits_address, data=hdu.data, header=hdu.header, extname=hdu.name, verify=True)
            except KeyError:
                fits.append(fits_address, data=hdu.data, header=hdu.header, extname=hdu.name)
    else:
        hdul = fits.HDUList([fits.PrimaryHDU()] + hdu_list)
        hdul.writeto(fits_address, overwrite=True, output_verify='fix')

    return
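fits_db relies on an update-or-append idiom: fits.update replaces an extension selected by extname when it exists, and the KeyError raised for a missing extension falls through to fits.append. A minimal standalone sketch of the same pattern (all names are illustrative):

from astropy.io import fits

def upsert_extension(path, data, header, extname):
    """Replace the named extension if present, otherwise append it."""
    try:
        fits.update(path, data=data, header=header, extname=extname)
    except KeyError:
        fits.append(path, data=data, header=header, extname=extname)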
Example #34
def keppixseries(infile, outfile, plotfile=None, plottype='global',
                 filterlc=False, function='boxcar', cutoff=1.0, overwrite=False,
                 verbose=False, logfile='keppixseries.log'):
    """
    keppixseries -- individual time series photometry for all pixels within a
    target mask

    keppixseries plots a light curve for each individual pixel in a target
    mask. Light curves are extracted from a target pixel file obtained from the
    Kepler data archive at MAST. If required, the data can be fed through a
    boxcar, gaussian or sinc function high bandpass filter in order to remove
    low frequency signal from the data. keppixseries is a diagnostic tool for
    identifying source contaminants in the background or foreground of the
    target. It can be employed to identify pixels for inclusion or exclusion
    when re-extracting a Kepler light curve from target pixel files.

    Parameters
    ----------
    infile : str
        The name of a MAST standard format FITS file containing Kepler Target
        Pixel data within the first data extension.
    outfile : str
        The name of the output FITS file. This file has two data extensions.
        The first called 'PIXELSERIES' contains a table with columns of
        barycenter-corrected time, barycenter time correction, cadence number,
        cadence quality flag and a series of photometric light curves, one for
        each pixel within the target mask. Each pixel is labeled COLx_ROWy,
        where :math:`x` is the pixel column number and :math:`y` is the pixel
        row number on the CCD module/output. The second extension contains the
        mask definition map copied directly from the input target pixel file.
    plotfile : str
        Name of an optional diagnostic output plot file containing the results
        of keppixseries. An example is provided in Figure 1. Typically this is
        a PNG format file. If no diagnostic file is required, plotfile can be
        'None'. The plot will be generated regardless of the value of this
        field, but the plot will not be saved to a file if ``plotfile='None'``.
    plottype : str
        keppixseries can plot light curves of three types.
        The choice is made using this argument. The options are:

        * local - All individual pixel light curves are scaled separately to
          provide the most dynamic range for each pixel.

        * global - All pixel light curves are scaled between zero and the
          maximum flux attained by the brightest pixel in the mask. This option
          provides the relative contribution to the archived light curve by each
          pixel.

        * full - All pixel light curves are scaled between zero and the
          maximum flux attained by that pixel. This provides the fraction of
          variability within each individual pixel.
    filterlc : bool
        If True, the light curve for each pixel will be treated by a high
        band-pass filter to remove long-term trends from e.g. differential
        velocity aberration.
    function : str
        The functional form of the high pass-band filter:

        * boxcar

        * gauss

        * sinc
    cutoff : float
        The frequency of the high pass-band cutoff in units of :math:`days^{-1}`.
    overwrite : bool
        Overwrite the output file?
    verbose : bool
        Print informative messages and warnings to the shell and logfile?
    logfile : str
        Name of the logfile containing error and warning messages.

    Examples
    --------
    .. code-block :: bash

        $ keppixseries kplr008256049-2010174085026_lpd-targ.fits.gz keppixseries.fits

    .. image:: ../_static/images/api/keppixseries.png
        :align: center
    """

    # log the call
    hashline = '--------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = ('KEPPIXSERIES -- '
            + ' infile={}'.format(infile)
            + ' outfile={}'.format(outfile)
            + ' plotfile={}'.format(plotfile)
            + ' plottype={}'.format(plottype)
            + ' filterlc={}'.format(filterlc)
            + ' function={}'.format(function)
            + ' cutoff={}'.format(cutoff)
            + ' overwrite={}'.format(overwrite)
            + ' verbose={}'.format(verbose)
            + ' logfile={}'.format(logfile))
    kepmsg.log(logfile, call+'\n', verbose)

    # start time
    kepmsg.clock('KEPPIXSERIES started at', logfile, verbose)

    # overwrite output file
    if overwrite:
        kepio.overwrite(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        errmsg = ('ERROR -- KEPPIXSERIES: {} exists. Use --overwrite'
                  .format(outfile))
        kepmsg.err(logfile, errmsg, verbose)

    # open TPF FITS file
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, barytime = \
        kepio.readTPF(infile, 'TIME', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, tcorr = \
        kepio.readTPF(infile, 'TIMECORR', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, cadno = \
        kepio.readTPF(infile, 'CADENCENO', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, fluxpixels = \
        kepio.readTPF(infile, 'FLUX', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, errpixels = \
        kepio.readTPF(infile, 'FLUX_ERR', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, qual = \
        kepio.readTPF(infile, 'QUALITY', logfile, verbose)

    # read mask definition data from TPF file
    maskimg, pixcoord1, pixcoord2 = kepio.readMaskDefinition(infile, logfile,
                                                             verbose)
    # print target data
    print('')
    print('      KepID:  {}'.format(kepid))
    print(' RA (J2000):  {}'.format(ra))
    print('Dec (J2000): {}'.format(dec))
    print('     KepMag:  {}'.format(kepmag))
    print('   SkyGroup:    {}'.format(skygroup))
    print('     Season:    {}'.format(season))
    print('    Channel:    {}'.format(channel))
    print('     Module:    {}'.format(module))
    print('     Output:     {}'.format(output))
    print('')
    # how many quality = 0 rows?
    npts = 0
    nrows = len(fluxpixels)
    for i in range(nrows):
        if (qual[i] == 0 and np.isfinite(barytime[i])
            and np.isfinite(fluxpixels[i, ydim * xdim // 2])):
            npts += 1
    time = np.empty((npts))
    timecorr = np.empty((npts))
    cadenceno = np.empty((npts))
    quality = np.empty((npts))
    pixseries = np.empty((ydim, xdim, npts))
    errseries = np.empty((ydim, xdim, npts))

    # construct output light curves
    nptsx = 0
    for i in tqdm(range(ydim)):
        for j in range(xdim):
            npts = 0
            for k in range(nrows):
                if (qual[k] == 0 and np.isfinite(barytime[k])
                    and np.isfinite(fluxpixels[k, int(ydim*xdim/2)])):
                    time[npts] = barytime[k]
                    timecorr[npts] = tcorr[k]
                    cadenceno[npts] = cadno[k]
                    quality[npts] = qual[k]
                    pixseries[i, j, npts] = fluxpixels[k, nptsx]
                    errseries[i, j, npts] = errpixels[k, nptsx]
                    npts += 1
            nptsx += 1
    # define data sampling
    if filterlc:
        tpf = pyfits.open(infile)
        cadence = kepkey.cadence(tpf[1], infile, logfile, verbose)
        tr = 1.0 / (cadence / 86400)
        timescale = 1.0 / (cutoff / tr)

        # define convolution function
        if function == 'boxcar':
            filtfunc = np.ones(int(np.ceil(timescale)))
        elif function == 'gauss':
            timescale /= 2
            dx = np.ceil(timescale * 10 + 1)
            filtfunc = kepfunc.gauss()
            filtfunc = filtfunc([1.0, dx / 2 - 1.0, timescale],
                                np.linspace(0, dx - 1, int(dx)))
        elif function == 'sinc':
            dx = np.ceil(timescale * 12 + 1)
            fx = np.linspace(0, dx - 1, int(dx))
            fx = fx - dx / 2 + 0.5
            fx /= timescale
            filtfunc = np.sinc(fx)
        filtfunc /= np.sum(filtfunc)

        # pad time series at both ends with noise model
        for i in range(ydim):
            for j in range(xdim):
                ave, sigma = (np.mean(pixseries[i, j, :len(filtfunc)]),
                              np.std(pixseries[i, j, :len(filtfunc)]))
                padded = np.append(kepstat.randarray(np.ones(len(filtfunc)) * ave,
                                   np.ones(len(filtfunc)) * sigma), pixseries[i, j, :])
                ave, sigma = (np.mean(pixseries[i, j, -len(filtfunc):]),
                              np.std(pixseries[i, j, -len(filtfunc):]))
                padded = np.append(padded,
                                   kepstat.randarray(np.ones(len(filtfunc)) * ave,
                                   np.ones(len(filtfunc)) * sigma))
                # convolve data
                convolved = np.convolve(padded, filtfunc, 'same')
                # remove padding from the output array
                outdata = convolved[len(filtfunc): -len(filtfunc)]
                # subtract low frequencies
                outmedian = np.median(outdata)
                pixseries[i, j, :] = pixseries[i, j, :] - outdata + outmedian

    # construct output file
    if ydim * xdim < 1000:
        instruct = pyfits.open(infile, 'readonly')
        kepkey.history(call, instruct[0], outfile, logfile, verbose)
        hdulist = pyfits.HDUList(instruct[0])
        cols = []
        cols.append(pyfits.Column(name='TIME', format='D',
                                  unit='BJD - 2454833', disp='D12.7',
                                  array=time))
        cols.append(pyfits.Column(name='TIMECORR', format='E', unit='d',
                                  disp='E13.6', array=timecorr))
        cols.append(pyfits.Column(name='CADENCENO', format='J', disp='I10',
                                  array=cadenceno))
        cols.append(pyfits.Column(name='QUALITY', format='J', array=quality))
        for i in range(ydim):
            for j in range(xdim):
                colname = 'COL{}_ROW{}'.format(i + column, j + row)
                cols.append(pyfits.Column(name=colname, format='E',
                                          disp='E13.6',
                                          array=pixseries[i, j, :]))
        hdu1 = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
        hdu1.header['INHERIT'] = (True, 'inherit the primary header')
        hdu1.header['EXTNAME'] = ('PIXELSERIES', 'name of extension')
        # copy the relevant keywords from the input TPF extension header,
        # skipping any keyword that is absent from the input file
        keymap = [
            ('EXTVER', 'extension version number (not format version)'),
            ('TELESCOP', 'telescope'),
            ('INSTRUME', 'detector type'),
            ('OBJECT', 'string version of KEPLERID'),
            ('KEPLERID', 'unique Kepler target identifier'),
            ('RADESYS', 'reference frame of celestial coordinates'),
            ('RA_OBJ', '[deg] right ascension from KIC'),
            ('DEC_OBJ', '[deg] declination from KIC'),
            ('EQUINOX', 'equinox of celestial coordinate system'),
            ('TIMEREF', 'barycentric correction applied to times'),
            ('TASSIGN', 'where time is assigned'),
            ('TIMESYS', 'time system is barycentric JD'),
            ('BJDREFI', 'integer part of BJD reference date'),
            ('BJDREFF', 'fraction of the day in BJD reference date'),
            ('TIMEUNIT', 'time unit for TIME, TSTART and TSTOP'),
            ('TSTART', 'observation start time in BJD-BJDREF'),
            ('TSTOP', 'observation stop time in BJD-BJDREF'),
            ('LC_START', 'mid point of first cadence in MJD'),
            ('LC_END', 'mid point of last cadence in MJD'),
            ('TELAPSE', '[d] TSTOP - TSTART'),
            ('LIVETIME', '[d] TELAPSE multiplied by DEADC'),
            ('EXPOSURE', '[d] time on source'),
            ('DEADC', 'deadtime correction'),
            ('TIMEPIXR', 'bin time beginning=0 middle=0.5 end=1'),
            ('TIERRELA', '[d] relative time error'),
            ('TIERABSO', '[d] absolute time error'),
            ('INT_TIME', '[s] photon accumulation time per frame'),
            ('READTIME', '[s] readout time per frame'),
            ('FRAMETIM', '[s] frame time (INT_TIME + READTIME)'),
            ('NUM_FRM', 'number of frames per time stamp'),
            ('TIMEDEL', '[d] time resolution of data'),
            ('DATE-OBS', 'TSTART as UTC calendar date'),
            ('DATE-END', 'TSTOP as UTC calendar date'),
            ('BACKAPP', 'background is subtracted'),
            ('DEADAPP', 'deadtime applied'),
            ('VIGNAPP', 'vignetting or collimator correction applied'),
            ('GAIN', '[electrons/count] channel gain'),
            ('READNOIS', '[electrons] read noise'),
            ('NREADOUT', 'number of reads per cadence'),
            ('TIMSLICE', 'time-slice readout sequence section'),
            ('MEANBLCK', '[count] FSW mean black level'),
        ]
        for keyword, comment in keymap:
            try:
                hdu1.header[keyword] = (instruct[1].header[keyword], comment)
            except KeyError:
                pass
        hdulist.append(hdu1)
        hdulist.writeto(outfile)
        kepkey.new('EXTNAME', 'APERTURE', 'name of extension', instruct[2],
                   outfile, logfile, verbose)
        pyfits.append(outfile, instruct[2].data, instruct[2].header)
        instruct.close()
    else:
        warnmsg = ('WARNING -- KEPPIXSERIES: output FITS file requires > 999 '
                   'columns. Non-compliant with FITS convention.')
        kepmsg.warn(logfile, warnmsg)

    # plot pixel array
    fmin = 1.0e33
    fmax = -1.0e33
    plt.figure()
    plt.clf()
    dx = 0.93 / xdim
    dy = 0.94 / ydim
    ax = plt.axes([0.06, 0.05, 0.93, 0.94])
    plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
    plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
    plt.gca().xaxis.set_major_locator(plt.MaxNLocator(integer=True))
    plt.gca().yaxis.set_major_locator(plt.MaxNLocator(integer=True))
    plt.xlim(np.min(pixcoord1) - 0.5, np.max(pixcoord1) + 0.5)
    plt.ylim(np.min(pixcoord2) - 0.5, np.max(pixcoord2) + 0.5)
    plt.xlabel('time', {'color' : 'k'})
    plt.ylabel('arbitrary flux', {'color' : 'k'})
    for i in range(ydim):
        for j in range(xdim):
            tmin = np.amin(time)
            tmax = np.amax(time)
            try:
                fmin = np.nanmin(pixseries[i, j, :])
                fmax = np.nanmax(pixseries[i, j, :])
            except ValueError:
                pass  # all-NaN pixel: keep the previous limits
            xmin = tmin - (tmax - tmin) / 40
            xmax = tmax + (tmax - tmin) / 40
            ymin = fmin - (fmax - fmin) / 20
            ymax = fmax + (fmax - fmin) / 20
            if kepstat.bitInBitmap(maskimg[i, j], 2):
                plt.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy],
                         facecolor='lightslategray')
            elif maskimg[i, j] == 0:
                plt.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy],
                         facecolor='black')
            else:
                plt.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy])
            # suppress per-panel tick labels; the shared axes carry the scale
            plt.setp(plt.gca(), xticklabels=[], yticklabels=[])
            ptime = time * 1.0
            ptime = np.insert(ptime, [0], ptime[0])
            ptime = np.append(ptime, ptime[-1])
            pflux = pixseries[i, j, :] * 1.0
            pflux = np.insert(pflux, [0], -1000.0)
            pflux = np.append(pflux, -1000.0)
            plt.plot(time, pixseries[i, j, :], color='#0000ff', linestyle='-',
                     linewidth=0.5)
            if not kepstat.bitInBitmap(maskimg[i, j], 2):
                plt.fill(ptime, pflux, fc='lightslategray', linewidth=0.0,
                         alpha=1.0)
            plt.fill(ptime, pflux, fc='#FFF380', linewidth=0.0, alpha=1.0)
            if 'loc' in plottype:
                plt.xlim(xmin, xmax)
                plt.ylim(ymin, ymax)
            if 'glob' in plottype:
                plt.xlim(xmin, xmax)
                plt.ylim(1.0e-10, np.nanmax(pixseries) * 1.05)
            if 'full' in plottype:
                plt.xlim(xmin, xmax)
                plt.ylim(1.0e-10, ymax * 1.05)

    # render plot and optionally save it to file
    if plotfile is not None and str(plotfile).lower() != 'none':
        plt.savefig(plotfile)
    plt.show()

    # stop time
    kepmsg.clock('KEPPIXSERIES ended at', logfile, verbose)
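The filtering branch above builds a high-pass filter from a low-pass kernel: each pixel series is convolved with a boxcar, Gaussian, or sinc kernel and the smoothed curve is subtracted back off. A self-contained sketch of the boxcar case on a synthetic light curve (all names illustrative):

import numpy as np

def boxcar_highpass(flux, width):
    """Subtract a boxcar-smoothed copy of flux, preserving its median level."""
    kernel = np.ones(int(width)) / float(width)
    smooth = np.convolve(flux, kernel, mode='same')
    return flux - smooth + np.median(smooth)

t = np.linspace(0.0, 10.0, 500)
flux = 1.0 + 0.05 * t + 0.01 * np.sin(40.0 * t)  # slow trend plus fast signal
detrended = boxcar_highpass(flux, width=50)      # slow trend largely removed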
Example #35
    def split_fits(filename=None, split_dir='.', size_limit = 1024.0):
        fits_list = []
        filestat = os.stat(filename)
        filesize = filestat.st_size/(1024.0**2)
        if filesize <= size_limit:
            logger.info("This file is only %f MB. It is smaller than our size limit %f MB, no split needed."%(filesize, size_limit))
            return []
        try:
            bighdulist = pyfits.open(filename, memmap=True)
            first_row = bighdulist[0]
            header = first_row.header
        except Exception:
            raise IOError("Error encountered when trying to open FITS file %s"%filename)

        fn = filename[filename.rfind('/')+1:filename.rfind('.fits')]
        deltaf = header['DELTAF']
        fftlen = header['NAXIS1']
        fcntr = header['FCNTR']
        frange = [fcntr - fftlen*deltaf/2, fcntr + fftlen*deltaf/2]

        nfiles_min = int(math.ceil(filesize / size_limit))
        new_width_max = fftlen // nfiles_min
        new_width = 2 ** int(math.floor(np.log2(new_width_max)))  # keep widths a power of two
        nfiles = int(math.ceil(fftlen / float(new_width)))
        new_files = []
        new_fcntrs = []
        new_filenames = []
        indices = []
        new_primary_header = copy.deepcopy(header)
        to_create = []
        for i in range(0, nfiles):
            new_filename = split_dir + '/' + fn + '_%d'%i + '.fits'
            new_filenames.append(new_filename)
            new_fcntr_tmp = frange[0] + deltaf * new_width * (i + 0.5)
            new_fcntrs.append(new_fcntr_tmp)
            new_primary_header['FCNTR'] = new_fcntr_tmp
            ind = (i*new_width, min(fftlen, (i+1)*new_width))
            indices.append(ind)
            if os.path.isfile(new_filename):
                logger.error("file %s already existed!"%new_filename)
                to_create.append(False)
                continue
            to_create.append(True)
            data = first_row.data[0][ind[0]:ind[1]]
            prihdu = pyfits.PrimaryHDU(data, header = new_primary_header)
            prihdu.writeto(new_filename)
            logger.info("Created new file: %s"%new_filename)

        for i, ohdu in enumerate(bighdulist[1:]):
            logger.debug("Dealing with row %d"%i)
            new_header = copy.deepcopy(ohdu.header)
            for j, new_filename in enumerate(new_filenames):
                if not to_create[j]:
                    continue
                new_header['FCNTR'] = new_fcntrs[j]
                ind = indices[j]
                data = ohdu.data[0][ind[0]:ind[1]]
                pyfits.append(new_filename, data, new_header)

##EE This, so that dedoppler is run in all the little new fits files
        for new_filename in new_filenames:
            fits_obj = FITS(new_filename)
            fits_list.append(fits_obj)

        return fits_list
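split_fits picks the largest power-of-two channel width that still yields at least ceil(filesize / size_limit) output files. A worked sketch of that arithmetic with assumed sizes:

import math

filesize, size_limit = 3000.0, 1024.0   # MB, illustrative values
fftlen = 2 ** 20                        # channels per row in the input

nfiles_min = int(math.ceil(filesize / size_limit))                 # 3
new_width = 2 ** int(math.floor(math.log2(fftlen // nfiles_min)))  # 2**18
nfiles = int(math.ceil(fftlen / float(new_width)))                 # 4 files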
Example #36
def kepdiffim(infile, outfile, plotfile=None, imscale='logarithmic',
              colmap='PuBu', filterlc=False, function='boxcar', cutoff=1.0,
              overwrite=False, verbose=False, logfile='kepdiffim.log'):
    """
    kepdiffim -- difference imaging of pixels within a target mask

    kepdiffim plots the mean, standard deviation and chi distribution images
    for the mask contained within a target pixel file. The variance on each
    pixel is defined as :math:`[flux - mean]^2 / [N - 1]`. The chi
    distribution is :math:`\sqrt{[mean - flux] ^ 2 / \sigma ^ 2}`. If required,
    the data can be fed through a **boxcar**, **gaussian** or **sinc** function
    high bandpass filter in order to remove low frequency signal from the data.
    kepdiffim is a diagnostic tool for identifying source contaminants in the
    background or foreground of the target. It can be employed to identify
    pixels for inclusion or exclusion when re-extracting a Kepler light curve
    from target pixel files.

    Parameters
    ----------
    infile : str
        The name of a MAST standard format FITS file containing Kepler Target
        Pixel data within the first data extension.
    outfile : str
        The name of the output FITS file. This file has four data extensions.
        The first called 'FLUX' contains an image of the pixel-by-pixel
        mean flux within target mask. The second contains the pixel variance
        image of the mask pixels over time. The third contains the standard
        deviation image, in this case the variance image is normalized to the
        median 1-:math:`\sigma` error for each pixel. The fourth extension is
        the pixel mask, as defined in the second extension of the target pixel
        file.
    plotfile : str
        Name of an optional diagnostic output plot file containing the results
        of kepdiffim. Typically this is a PNG format file. If no diagnostic
        file is required, plotfile can be **None**. If **plotfile** is **None**
        the plot will be generated but the plot will not be saved to a file.
    imscale : str
        **kepdiffim** can plot images with three choices of image scales. The
        choice is made using this argument.
        The options are:

        * linear

        * logarithmic

        * squareroot
    colmap : str
        color intensity scheme for the image display.
    filterlc : bool
        If **filterlc** is **True**, the light curve for each pixel will be
        treated by a high band-pass filter to remove long-term trends from
        e. g. differential velocity aberration.
    function : str
        The functional form of the high pass-band filter. The options are:

        * boxcar

        * gauss

        * sinc
    cutoff : float
        The frequency of the high pass-band cutoff in units of :math:`days^{-1}`.
    overwrite : bool
        Overwrite the output file?
    verbose : bool
        Print informative messages and warnings to the shell and logfile?
    logfile : str
        Name of the logfile containing error and warning messages.

    Examples
    --------
    .. code-block:: bash

        $ kepdiffim kplr011390659-2010355172524_lpd-targ.fits.gz kepdiffim.fits
        --filter --function boxcar --cutoff 0.1 --plotfile kepdiffim.png
        --cmap YlOrBr --imscale linear --verbose

    .. image:: ../_static/images/api/kepdiffim.png
        :align: center
    """
    # log the call
    hashline = '--------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = ('KEPDIFFIM -- '
            + ' infile={}'.format(infile)
            + ' outfile={}'.format(outfile)
            + ' plotfile={}'.format(plotfile)
            + ' imscale={}'.format(imscale)
            + ' cmap={}'.format(colmap)
            + ' filterlc={}'.format(filterlc)
            + ' function={}'.format(function)
            + ' cutoff={}'.format(cutoff)
            + ' overwrite={}'.format(overwrite)
            + ' verbose={}'.format(verbose)
            + ' logfile={}'.format(logfile))
    kepmsg.log(logfile, call+'\n', verbose)

    # start time
    kepmsg.clock('KEPDIFFIM started at: ', logfile, verbose)

    # overwrite output file
    if overwrite:
        kepio.overwrite(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        errmsg = 'ERROR -- KEPDIFFIM: {} exists. Use --overwrite'.format(outfile)
        kepmsg.err(logfile, errmsg, verbose)

    # open TPF FITS file
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, barytime = \
        kepio.readTPF(infile, 'TIME', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, tcorr = \
        kepio.readTPF(infile, 'TIMECORR', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, cadno = \
        kepio.readTPF(infile, 'CADENCENO',logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, fluxpixels = \
        kepio.readTPF(infile, 'FLUX', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, errpixels = \
        kepio.readTPF(infile, 'FLUX_ERR', logfile, verbose)
    kepid, channel, skygroup, module, output, quarter, season, \
        ra, dec, column, row, kepmag, xdim, ydim, qual = \
        kepio.readTPF(infile, 'QUALITY', logfile, verbose)

    # read mask definition data from TPF file
    maskimg, pixcoord1, pixcoord2 = kepio.readMaskDefinition(infile, logfile,
                                                             verbose)

    # print target data
    print('')
    print('      KepID:  %s' % kepid)
    print(' RA (J2000):  %s' % ra)
    print('Dec (J2000): %s' % dec)
    print('     KepMag:  %s' % kepmag)
    print('   SkyGroup:    %2s' % skygroup)
    print('     Season:    %2s' % str(season))
    print('    Channel:    %2s' % channel)
    print('     Module:    %2s' % module)
    print('     Output:     %1s' % output)
    print('')

    # how many quality = 0 rows?
    npts = 0
    nrows = len(fluxpixels)
    for i in range(nrows):
        if (qual[i] == 0 and np.isfinite(barytime[i])
            and np.isfinite(fluxpixels[i, int(ydim * xdim / 2)])):
            npts += 1
    time = np.empty((npts))
    timecorr = np.empty((npts))
    cadenceno = np.empty((npts))
    quality = np.empty((npts))
    pixseries = np.empty((ydim * xdim, npts))
    errseries = np.empty((ydim * xdim, npts))

    # construct output light curves
    nptsx = 0
    for i in range(ydim*xdim):
        npts = 0
        for k in range(nrows):
            if (qual[k] == 0
                and np.isfinite(barytime[k])
                and np.isfinite(fluxpixels[k, int(ydim * xdim / 2)])):
                time[npts] = barytime[k]
                timecorr[npts] = tcorr[k]
                cadenceno[npts] = cadno[k]
                quality[npts] = qual[k]
                pixseries[i, npts] = fluxpixels[k, nptsx]
                errseries[i, npts] = errpixels[k, nptsx]
                npts += 1
        nptsx += 1

    # define data sampling
    if filterlc:
        tpf = pyfits.open(infile)
        cadence = kepkey.cadence(tpf[1], infile, logfile, verbose)
        tr = 1.0 / (cadence / 86400)
        timescale = 1.0 / (cutoff / tr)

        # define convolution function
        if function == 'boxcar':
            filtfunc = np.ones(int(np.ceil(timescale)))
        elif function == 'gauss':
            timescale /= 2
            dx = np.ceil(timescale * 10 + 1)
            filtfunc = kepfunc.gauss()
            filtfunc = filtfunc([1.0, dx / 2 - 1.0, timescale],
                                np.linspace(0, dx - 1, int(dx)))
        elif function == 'sinc':
            dx = np.ceil(timescale * 12 + 1)
            fx = np.linspace(0, dx - 1, int(dx))
            fx = fx - dx / 2 + 0.5
            fx /= timescale
            filtfunc = np.sinc(fx)
        filtfunc /= np.sum(filtfunc)

        # pad time series at both ends with noise model
        for i in range(ydim * xdim):
            ave, sigma  = (np.mean(pixseries[i, :len(filtfunc)]),
                           np.std(pixseries[i, :len(filtfunc)]))
            padded = np.append(kepstat.randarray(np.ones(len(filtfunc)) * ave,
                               np.ones(len(filtfunc)) * sigma), pixseries[i, :])
            ave, sigma  = (np.mean(pixseries[i, -len(filtfunc):]),
                           np.std(pixseries[i, -len(filtfunc):]))
            padded = np.append(padded,
                               kepstat.randarray(np.ones(len(filtfunc)) * ave,
                               np.ones(len(filtfunc)) * sigma))

            # convolve data
            convolved = np.convolve(padded, filtfunc, 'same')
            # remove padding from the output array
            outdata = convolved[len(filtfunc):-len(filtfunc)]
            # subtract low frequencies
            outmedian = np.nanmedian(outdata)
            pixseries[i, :] = pixseries[i, :] - outdata + outmedian

    # sum pixels over cadence
    nptsx = 0
    nrows = len(fluxpixels)
    pixsum = np.zeros((ydim*xdim))
    errsum = np.zeros((ydim*xdim))
    for i in range(npts):
        if quality[i] == 0:
            pixsum += pixseries[:, i]
            errsum += errseries[:, i] **2
            nptsx += 1
    pixsum /= nptsx
    errsum = np.sqrt(errsum) / nptsx

    # calculate standard deviation pixels
    pixvar = np.zeros((ydim*xdim))
    for i in range(npts):
        if quality[i] == 0:
            pixvar += ((pixsum - pixseries[:, i]) / errseries[:, i])**2
    pixvar = np.sqrt(pixvar)

    # median pixel errors
    errmed = np.empty((ydim*xdim))
    for i in range(ydim*xdim):
        errmed[i] = np.median(errseries[i, :])

    # calculate chi distribution pixels
    pixdev = np.zeros((ydim*xdim))
    for i in range(npts):
        if quality[i] == 0:
            pixdev += ((pixsum - pixseries[:, i]) / errmed)**2
    pixdev = np.sqrt(pixdev)

    # image scale and intensity limits
    pixsum_pl, zminsum, zmaxsum = kepplot.intScale1D(pixsum, imscale)
    pixvar_pl, zminvar, zmaxvar = kepplot.intScale1D(pixvar, imscale)
    pixdev_pl, zmindev, zmaxdev = kepplot.intScale1D(pixdev, imscale)

    # construct output summed image
    imgsum = np.empty((ydim, xdim))
    imgvar = np.empty((ydim, xdim))
    imgdev = np.empty((ydim, xdim))
    imgsum_pl = np.empty((ydim, xdim))
    imgvar_pl = np.empty((ydim, xdim))
    imgdev_pl = np.empty((ydim, xdim))
    n = 0
    for i in range(ydim):
        for j in range(xdim):
            imgsum[i, j] = pixsum[n]
            imgvar[i, j] = pixvar[n]
            imgdev[i, j] = pixdev[n]
            imgsum_pl[i, j] = pixsum_pl[n]
            imgvar_pl[i, j] = pixvar_pl[n]
            imgdev_pl[i, j] = pixdev_pl[n]
            n += 1

    # construct output file
    instruct = pyfits.open(infile)
    kepkey.history(call, instruct[0], outfile, logfile, verbose)
    hdulist = pyfits.HDUList(instruct[0])
    hdulist.writeto(outfile)
    kepkey.new('EXTNAME', 'FLUX', 'name of extension', instruct[2], outfile,
               logfile, verbose)
    pyfits.append(outfile, imgsum, instruct[2].header)
    kepkey.new('EXTNAME', 'CHI', 'name of extension', instruct[2], outfile,
               logfile, verbose)
    pyfits.append(outfile, imgvar, instruct[2].header)
    kepkey.new('EXTNAME', 'STDDEV', 'name of extension', instruct[2], outfile,
               logfile, verbose)
    pyfits.append(outfile, imgdev, instruct[2].header)
    kepkey.new('EXTNAME', 'APERTURE', 'name of extension', instruct[2], outfile,
               logfile, verbose)
    pyfits.append(outfile, instruct[2].data,instruct[2].header)
    instruct.close()

    # pixel limits of the subimage
    ymin = row
    ymax = ymin + ydim
    xmin = column
    xmax = xmin + xdim

    # plot limits for summed image
    ymin = float(ymin) - 0.5
    ymax = float(ymax) - 0.5
    xmin = float(xmin) - 0.5
    xmax = float(xmax) - 0.5

    # plot style
    plotimage(imgsum_pl, imgvar_pl, imgdev_pl, zminsum, zminvar, zmindev,
              zmaxsum, zmaxvar, zmaxdev, xmin, xmax, ymin, ymax, colmap,
              plotfile)

    # stop time
    kepmsg.clock('KEPDIFFIM ended at: ',logfile,verbose)
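The four image extensions written above are named via their EXTNAME keywords, so they can be retrieved by name rather than by index. A hedged read-back sketch ('kepdiffim.fits' stands in for outfile):

from astropy.io import fits

with fits.open('kepdiffim.fits') as hdul:
    flux = hdul['FLUX'].data          # summed-flux image
    chi = hdul['CHI'].data
    stddev = hdul['STDDEV'].data
    aperture = hdul['APERTURE'].data  # mask definition copied from the input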
Example #37
def ex2d_patch(image,
               ivar,
               psf,
               specmin,
               nspec,
               wavelengths,
               xyrange=None,
               full_output=False,
               regularize=0.0,
               ndecorr=False):
    """
    2D PSF extraction of flux from image patch given pixel inverse variance.
    
    Inputs:
        image : 2D array of pixels
        ivar  : 2D array of inverse variance for the image
        psf   : PSF object
        specmin : index of first spectrum to extract
        nspec : number of spectra to extract
        wavelengths : 1D array of wavelengths to extract
        
    Optional Inputs:
        xyrange = (xmin, xmax, ymin, ymax): treat image as a subimage
            cutout of this region from the full image
        full_output : if True, return a dictionary of outputs including
            intermediate outputs such as the projection matrix.
        ndecorr : if True, decorrelate the noise between fibers, at the
            cost of residual signal correlations between fibers.
        
    Returns (flux, ivar, R):
        flux[nspec, nwave] = extracted resolution convolved flux
        ivar[nspec, nwave] = inverse variance of flux
        R : 2D resolution matrix to convert between deconvolved flux and
            resolution-convolved flux
    """

    #- Range of image to consider
    waverange = (wavelengths[0], wavelengths[-1])
    specrange = (specmin, specmin + nspec)

    if xyrange is None:
        xmin, xmax, ymin, ymax = xyrange = psf.xyrange(specrange, waverange)
        image = image[ymin:ymax, xmin:xmax]
        ivar = ivar[ymin:ymax, xmin:xmax]
    else:
        xmin, xmax, ymin, ymax = xyrange

    nx, ny = xmax - xmin, ymax - ymin
    npix = nx * ny

    nspec = specrange[1] - specrange[0]
    nwave = len(wavelengths)

    #- Solve AT W pix = (AT W A) flux

    #- Projection matrix and inverse covariance
    A = psf.projection_matrix(specrange, wavelengths, xyrange)

    #- Pixel weights matrix
    w = ivar.ravel()
    W = spdiags(ivar.ravel(), 0, npix, npix)

    #-----
    #- Extend A with an optional regularization term to limit ringing.
    #- If any flux bins don't contribute to these pixels,
    #- also use this term to constrain those flux bins to 0.

    #- Original: exclude flux bins with 0 pixels contributing
    # ibad = (A.sum(axis=0).A == 0)[0]

    #- Identify fluxes with very low weights of pixels contributing
    fluxweight = W.dot(A).sum(axis=0).A[0]
    minweight = 0.01 * np.max(fluxweight)
    ibad = fluxweight < minweight

    #- Original version; doesn't work on older versions of scipy
    # I = regularize*scipy.sparse.identity(nspec*nwave)
    # I.data[0,ibad] = minweight - fluxweight[ibad]

    #- Add regularization of low weight fluxes
    Idiag = regularize * np.ones(nspec * nwave)
    Idiag[ibad] = minweight - fluxweight[ibad]
    I = scipy.sparse.identity(nspec * nwave)
    I.setdiag(Idiag)

    #- Only need to extend A if regularization is non-zero
    if np.any(I.diagonal()):
        pix = np.concatenate((image.ravel(), np.zeros(nspec * nwave)))
        Ax = scipy.sparse.vstack((A, I))
        wx = np.concatenate((w, np.ones(nspec * nwave)))
    else:
        pix = image.ravel()
        Ax = A
        wx = w

    #- Inverse covariance
    Wx = spdiags(wx, 0, len(wx), len(wx))
    iCov = Ax.T.dot(Wx.dot(Ax))

    #- Solve (image = A flux) weighted by Wx:
    #-     A^T W image = (A^T W A) flux = iCov flux
    y = Ax.T.dot(Wx.dot(pix))

    xflux = spsolve(iCov, y).reshape((nspec, nwave))

    #- TODO: could check for outliers, remask and re-extract
    #- Be careful in case masking blocks off all inputs to a flux bin and
    #- thus creates a singular array.  May need to keep regularization piece.
    # model = A.dot(xflux.ravel())
    # chi = (image.ravel() - model) * np.sqrt(ivar.ravel())
    # good = np.abs(chi)<5
    # ...

    #- Solve for Resolution matrix
    try:
        if ndecorr:
            R, fluxivar = resolution_from_icov(iCov)
        else:
            R, fluxivar = resolution_from_icov(
                iCov, decorr=[nwave for x in range(nspec)])
    except np.linalg.LinAlgError as err:
        outfile = 'LinAlgError_{}-{}_{}-{}.fits'.format(
            specrange[0], specrange[1], waverange[0], waverange[1])
        print("ERROR: Linear Algebra didn't converge")
        print("Dumping {} for debugging".format(outfile))
        from astropy.io import fits
        fits.writeto(outfile, image, clobber=True)
        fits.append(outfile, ivar, name='IVAR')
        fits.append(outfile, A.data, name='ADATA')
        fits.append(outfile, A.indices, name='AINDICES')
        fits.append(outfile, A.indptr, name='AINDPTR')
        fits.append(outfile, iCov.toarray(), name='ICOV')
        raise err

    #- Convolve with Resolution matrix to decorrelate errors
    fluxivar = fluxivar.reshape((nspec, nwave))
    rflux = R.dot(xflux.ravel()).reshape(xflux.shape)

    if full_output:
        results = dict(flux=rflux,
                       ivar=fluxivar,
                       R=R,
                       xflux=xflux,
                       A=A,
                       iCov=iCov)
        results['options'] = dict(specmin=specmin,
                                  nspec=nspec,
                                  wavelengths=wavelengths,
                                  xyrange=xyrange,
                                  regularize=regularize,
                                  ndecorr=ndecorr)
        return results
    else:
        return rflux, fluxivar, R
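The heart of ex2d_patch is the weighted normal-equations solve A^T W pix = (A^T W A) flux carried out with sparse matrices. A self-contained toy version of the same solve (dimensions and noise level are illustrative):

import numpy as np
import scipy.sparse
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve

rng = np.random.default_rng(0)
A = scipy.sparse.random(100, 10, density=0.3, random_state=0, format='csr')
truth = rng.normal(size=10)
pix = A.dot(truth) + rng.normal(scale=0.01, size=100)
w = np.full(100, 1.0 / 0.01 ** 2)    # inverse-variance pixel weights

W = spdiags(w, 0, 100, 100)
iCov = A.T.dot(W.dot(A))             # A^T W A
y = A.T.dot(W.dot(pix))              # A^T W pix
flux = spsolve(iCov.tocsc(), y)      # weighted least-squares estimate of truth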
Example #38
def main(specpath, tblpath, obj_ind, outfile, *normto):
    tbl = fits.getdata(tblpath, ext=1)

    #input arrays
    masklist = tbl['maskname'][obj_ind]
    idlist = tbl['id'][obj_ind]
    zarray = tbl['z_mosfire'][obj_ind]
    ha6565_lum = tbl['ha6565_lum'][obj_ind]
    hb4863_lum = tbl['hb4863_lum'][obj_ind]
    luvcorr = tbl['luv'][obj_ind]
    ha6565_abs_flux = tbl['ha6565_abs_flux'][obj_ind]
    hb4863_abs_flux = tbl['hb4863_abs_flux'][obj_ind]
    ha6565_lum_err = tbl['ha6565_lum_err'][obj_ind]
    hb4863_lum_err = tbl['hb4863_lum_err'][obj_ind]
    idliststr = [str(e) for e in idlist]

    specfile = specpath + masklist.strip() + '.*.' + idliststr + '.ell.1d.fits'

    #counting the number of files
    speccount = 0
    for i, f in enumerate(specfile):
        filelisttemp = glob(f)
        for n in filelisttemp:
            speccount += 1

    #making a list of file paths
    filelist = ['' for i in range(speccount)]
    zlist = np.zeros(speccount)
    halumlist = np.zeros(speccount)
    hblumlist = np.zeros(speccount)
    uvlumlist = np.zeros(speccount)
    ha6565_abs_fluxlist = np.zeros(speccount)
    hb4863_abs_fluxlist = np.zeros(speccount)
    specind = 0
    for i, f in enumerate(specfile):
        filelisttemp = glob(f)
        for n in filelisttemp:
            filelist[specind] = n
            zlist[specind] = zarray[i]
            halumlist[specind] = ha6565_lum[i]
            hblumlist[specind] = hb4863_lum[i]
            uvlumlist[specind] = luvcorr[i]
            ha6565_abs_fluxlist[specind] = ha6565_abs_flux[i]
            hb4863_abs_fluxlist[specind] = hb4863_abs_flux[i]
            specind += 1
    # use the normto argument rather than re-reading sys.argv
    normkey = normto[0] if normto else 'none'
    if normkey == 'Ha':
        norm = 1. / halumlist
        normbalm = 1. / ha6565_lum
    elif normkey == 'Hb':
        norm = 1. / hblumlist
        normbalm = 1. / hb4863_lum
    elif normkey == 'UV':
        norm = 1. / uvlumlist
        normbalm = 1. / luvcorr
    elif normkey == 'none':
        norm = np.ones(len(halumlist))
        normbalm = np.ones(len(ha6565_lum))
    else:
        print('normto keyword should be set to one of these: "Ha", "Hb", "UV", or "none"')

    # make a grid of wavelengths with the desired resolution
    wavemin = 3250
    wavemax = 10000
    delwave = 0.5  # in AA
    gridwl = np.arange(wavemin, wavemax, delwave)
    nwave = len(gridwl)
    specall = np.zeros((nwave, speccount))
    specerrall = np.zeros((nwave, speccount))

    t0 = time.time()
    print('# Stacking ', speccount, ' spectra of ', len(idlist), ' objects')
    for i, specfile in enumerate(filelist):

        header, spec, specerr, specwl = read_spec(specfile)

        #cut the bad beginning and end of the spectra
        goodpix = removebad(spec)
        spec = spec[goodpix]
        specerr = specerr[goodpix]
        specwl = specwl[goodpix]

        ldist = cosmo.luminosity_distance(zlist[i]).to(u.cm)
        lspec = 1e-40 * ldist * ldist * 4 * np.pi * (1 + zlist[i]) * spec
        lspecerr = 1e-40 * ldist * ldist * 4 * np.pi * (1 + zlist[i]) * specerr

        lspec = lspec * norm[i]
        lspecerr = lspecerr * norm[i]

        #calculate rest-frame wavelength
        specwl /= 1. + zlist[i]

        #interpolate to the new wavelength grid
        UNDEF = -999.
        gridspec = np.interp(gridwl, specwl, lspec, left=UNDEF, right=UNDEF)
        gridspecerr = np.interp(gridwl,
                                specwl,
                                lspecerr,
                                left=UNDEF,
                                right=UNDEF)

        #take into account the resampling for the error spectrum
        errfac = np.sqrt(header['CDELT1'] / (1. + zlist[i]) / delwave)
        gridspecerr = gridspecerr * errfac

        # remove sky lines (mask pixels with error > 3 * median(err))
        mederr = np.median(gridspecerr[gridspecerr > 0.])
        keep = (np.abs(gridspecerr) < mederr * 3.)
        gridspec[~keep] = UNDEF
        # assign the spectra and error spectra to arrays:
        specall[:, i] = gridspec
        specerrall[:, i] = gridspecerr

    #declare arrays
    wt_avg = np.zeros(nwave)
    nwt_avg = np.zeros(nwave)
    med = np.zeros(nwave)
    clip_avg = np.zeros(nwave)
    wt_err = np.zeros(nwave)
    disp = np.zeros(nwave)
    z = np.zeros(nwave)
    num = np.zeros(nwave)
    #stacking
    for j in range(nwave):
        speccol = specall[j, :]
        specerrcol = specerrall[j, :]
        good = np.where(speccol != UNDEF)
        ngood = len(speccol[good])
        if ngood == 0: continue
        if ngood == 1:
            speccol = speccol[good]
            specerrcol = specerrcol[good]
            zcol = zlist[good]
            weight = 1 / (specerrcol * specerrcol)
            wt_avg[j] = np.nansum(speccol * weight) / np.nansum(weight)
            nwt_avg[j] = np.nanmean(speccol)
            med[j] = speccol
            wt_err[j] = np.sqrt(1. / np.nansum(weight))
            disp[j] = 0.
            z[j] = zcol.mean()
            num[j] = ngood
        if ngood > 1:
            speccol = speccol[good]
            specerrcol = specerrcol[good]
            zcol = zlist[good]

            weight = 1 / (specerrcol * specerrcol)
            wt_avg[j] = np.nansum(speccol * weight) / np.nansum(weight)
            nwt_avg[j] = np.nanmean(speccol)
            med[j] = np.nanmedian(speccol)
            clip_avg[j] = clip(speccol, 3., .01)
            wt_err[j] = np.sqrt(1. / np.nansum(weight))
            disp[j] = np.nanstd(speccol)
            z[j] = zcol.mean()
            num[j] = ngood

    #Measure weighted average Balmer absorption
    ldistarr = cosmo.luminosity_distance(zarray).to(u.cm)
    goodha = np.where((ha6565_abs_flux != -999.) & (ha6565_lum_err != 0.)
                      & (np.isfinite(ha6565_abs_flux)))
    n1 = len(ha6565_lum_err[goodha])
    goodhb = np.where((hb4863_abs_flux != -999.)
                      & (np.isfinite(hb4863_abs_flux)))
    n2 = len(hb4863_abs_flux[goodhb])
    print('# Number of objs with Balmer absorption of two lines: ', n1,
          ' and ', n2)
    lhaabs = 1e-40 * ldistarr * ldistarr * 4 * np.pi * ha6565_abs_flux * normbalm
    lhbabs = 1e-40 * ldistarr * ldistarr * 4 * np.pi * hb4863_abs_flux * normbalm
    habalm = (np.ma.masked_invalid(
        lhaabs[goodha] /
        (ha6565_lum_err[goodha] * ha6565_lum_err[goodha])).sum() /
              np.ma.masked_invalid(
                  1 / (ha6565_lum_err[goodha] * ha6565_lum_err[goodha])).sum())
    hbbalm = (np.ma.masked_invalid(
        lhbabs[goodhb] /
        (hb4863_lum_err[goodhb] * hb4863_lum_err[goodhb])).sum() /
              np.ma.masked_invalid(
                  1 / (hb4863_lum_err[goodhb] * hb4863_lum_err[goodhb])).sum())

    #defining the columns for the output
    #wt_avg_col=fits.Column(name='wt_avg',format='D',array=wt_avg)
    #nwt_avg_col=fits.Column(name='nwt_avg',format='D',array=nwt_avg)
    #med_col=fits.Column(name='med',format='D',array=med)
    #wt_err_col=fits.Column(name='wt_err',format='D',array=wt_err)
    #disp_col=fits.Column(name='disp',format='D',array=disp)
    #z_col=fits.Column(name='z',format='D',array=z)
    #num_col=fits.Column(name='num',format='D',array=num)

    #cols = fits.ColDefs([wt_avg_col,nwt_avg_col,med_col,wt_err_col,disp_col,z_col,num_col])
    #define the output file
    #out = fits.BinTableHDU.from_columns(cols)

    out = fits.PrimaryHDU()
    hdr = out.header

    #making the header of the output file
    out.header['UNITS'] = '1.d40 erg/s/A'
    out.header['CTYPE1'] = 'LINEAR'
    out.header['CRPIX1'] = 1.0
    out.header['CRVAL1'] = wavemin
    out.header['CDELT1'] = delwave
    out.header['CD1_1'] = delwave
    out.header['haabs'] = habalm.value
    out.header['hbabs'] = hbbalm.value
    out.header['uvcor'] = np.median(uvlumlist)
    out.header['uvcorerr'] = uvlumlist.std() / len(uvlumlist)
    out.header['COMMENT'] = 'Ext 1: weighted average'
    out.header['COMMENT'] = 'Ext 2: error'
    out.header['COMMENT'] = 'Ext 3: unweighted average'
    out.header['COMMENT'] = 'Ext 4: median'
    out.header['COMMENT'] = 'Ext 5: 3sigma-clipped average'
    out.header['COMMENT'] = 'Ext 6: standard deviation in each wavelength bin'
    out.header['COMMENT'] = ('Ext 7: average redshift of objs contributing '
                             'to each wavelength bin')
    out.header['COMMENT'] = 'Ext 8: number of objs in each wavelength bin'
    for i in range(len(idlist)):
        hdrspecname = masklist[i] + '_' + str(idlist[i])
        out.header['COMMENT'] = hdrspecname

    #Writing the output
    #out.writeto('test_py.fits', clobber=True)
    outname = outfile
    out.writeto(outname, clobber=True)
    hdr['OBJ'] = 'wt_avg'
    fits.append(outname, wt_avg, hdr)
    hdr['OBJ'] = 'wt_err'
    fits.append(outname, wt_err, hdr)
    hdr['OBJ'] = 'nwt_avg'
    fits.append(outname, nwt_avg, hdr)
    hdr['OBJ'] = 'med'
    fits.append(outname, med, hdr)
    hdr['OBJ'] = 'clip_avg'
    fits.append(outname, clip_avg, hdr)
    hdr['OBJ'] = 'disp'
    fits.append(outname, disp, hdr)
    hdr['OBJ'] = 'z'
    fits.append(outname, z, hdr)
    hdr['OBJ'] = 'num'
    fits.append(outname, num, hdr)

    print('# Stacking took: {0:3.2f}'.format((time.time() - t0) / 60.),
          ' min.')
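Each stacked quantity above goes into its own image extension, labeled only by the OBJ keyword in its header. A hedged sketch of recovering one by label ('stack.fits' stands in for outname):

from astropy.io import fits

def read_stack(path, obj):
    """Return the data of the first extension whose OBJ keyword matches obj."""
    with fits.open(path) as hdul:
        for hdu in hdul[1:]:
            if hdu.header.get('OBJ') == obj:
                return hdu.data
    raise KeyError('no extension with OBJ = %s' % obj)

wt_avg = read_stack('stack.fits', 'wt_avg')  # the weighted-average spectrum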
Example #39
def lc_from_target_list(yourpath, targetList, fname_time_intensities_raw,
                             fname_targets, fname_notes, path='./',
                             custom_mask=[], apply_nan_mask=False):
    """ runs getting the files and data for all targets on the list
    then appends the time & intensity arrays and the TIC number into text files
    that can later be accessed
    modified [lcg 07092020]
    """
    intensity = []
    ticids = []
    for n in range(len(targetList)):  # for each item on the list

        if n == 0:  # only the first target needs to save the time index
            target = targetList[n][0]  # get that target number
            time1, i1, tic = get_lc_file_and_data(yourpath, target)  # grab that data

            if isinstance(i1, np.ndarray):  # valid data returned
                intensity.append(i1)
                ticids.append(tic)
            else:  # no valid data returned
                print("First target failed, no time index was saved")
                with open(fname_notes, 'a') as file_object:
                    file_object.write("\n")
                    file_object.write(str(int(target)))
        else:  # only the light curve needs to be saved for later targets
            target = targetList[n][0]  # get that target number
            time1, i1, tic = get_lc_file_and_data(yourpath, target)
            if isinstance(i1, np.ndarray):
                intensity.append(i1)
                ticids.append(tic)
            else:  # no valid data returned
                print("File failed to return targets")
                with open(fname_notes, 'a') as file_object:
                    file_object.write("\n")
                    file_object.write(str(int(target)))

        print(str(n), "completed")
            
    intensity = np.array(intensity)
    ticids = np.array(ticids)
    
    # >> interpolate and nan mask
    print('Interpolating and applying nan mask')
    intensity_interp, time, ticids, flagged, ticid_flagged = \
        interpolate_all(intensity, time1, ticids, custom_mask=custom_mask,
                        apply_nan_mask=apply_nan_mask)
    
    print('Saving to fits file')
    # i_interp = np.array(i_interp)
    hdr = fits.Header() # >> make the header
    hdu = fits.PrimaryHDU(time, header=hdr)
    hdu.writeto(fname_time_intensities_raw)
    fits.append(fname_time_intensities_raw, intensity_interp)
    fits.append(fname_time_intensities_raw, ticids)
    
    # >> actually i'm going to save the raw intensities just in case
    fits.append(fname_time_intensities_raw, intensity)
    
    # with open(fname_time_intensities_raw, 'rb+') as f:
    #     # >> don't want to save 2x data we need to, so only save interpolated
    #     # fits.append(fname_time_intensities_raw, intensity)
    #     fits.append(fname_time_intensities_raw, i_interp)
    #     fits.append(fname_time_intensities_raw, ticids)
    
    confirmation = "lc_from_target_list has finished running"
    return confirmation
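A hedged sketch of reading the file back, assuming only the layout written above (time in the primary HDU, then interpolated intensities, TICIDs, and the raw intensities):

from astropy.io import fits

with fits.open(fname_time_intensities_raw) as f:
    time = f[0].data
    intensity_interp = f[1].data
    ticids = f[2].data.astype(int)
    intensity_raw = f[3].data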
Example #40
infile = os.environ[
    'SPECTER_DIR'] + "/data/bigboss/designs/20120827difdet/bbpsf-I.fits"
outfile = os.environ['SPECTER_DIR'] + "/test/data/psf-spot.fits"

fx = fits.open(infile)
x, xhdr = fx[0].data, fx[0].header
y, yhdr = fx[1].data, fx[1].header
w, whdr = fx[2].data, fx[2].header

spots, spotshdr = fx[3].data, fx[3].header
spotx, spotxhdr = fx[4].data, fx[4].header
spoty, spotyhdr = fx[5].data, fx[5].header

fiberpos, fposhdr = fx[6].data, fx[6].header
spotpos, sposhdr = fx[7].data, fx[7].header
spotwave, swavehdr = fx[8].data, fx[8].header
throughput, thruhdr = fx[9].data, fx[9].header

fits.writeto(outfile, x[:, 0::10], header=xhdr, overwrite=True)
fits.append(outfile, y[:, 0::10], header=yhdr)
fits.append(outfile, w[:, 0::10], header=whdr)

fits.append(outfile, spots[0::5, 0::5, :, :], header=spotshdr)
fits.append(outfile, spotx[0::5, 0::5], header=spotxhdr)
fits.append(outfile, spoty[0::5, 0::5], header=spotyhdr)

fits.append(outfile, fiberpos, header=fposhdr)
fits.append(outfile, spotpos[0::5], header=sposhdr)
fits.append(outfile, spotwave[0::5], header=swavehdr)

fits.append(outfile, throughput, header=thruhdr)
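A quick sanity check on the squeezed file (hedged: the expected count of ten HDUs just mirrors the one writeto plus nine append calls above):

from astropy.io import fits

with fits.open(outfile) as hdus:
    assert len(hdus) == 10  # primary + 9 appended extensions
fits.info(outfile)  # prints one summary row per HDU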
Example #41
def targetwise_lc(yourpath, target_list, fname_time_intensities,fname_notes):
    """ runs getting the files and data for all targets on the list
    then appends the time & intensity arrays and the TIC number into text files
    that can later be accessed
    parameters: 
        * yourpath = folder into which things will get saved
        * target_list = list of ticids, as integers
        * fname_time_intensities = direct path to the file to save into
        * fname_notes = direct path to file to save TICIDS of targets that 
            return no data into
    returns: list of ticids as an array
    requires: get_lc_file_and_data(), interpolate_lc()
    modified [lcg 07112020]
    """

    ticids = []
    for n in range(len(target_list)): #for each item on the list
        
        if n == 0: # only the first target needs to set up the time index
            target = target_list[0] #get that target number
            time1, i1, tic = get_lc_file_and_data(yourpath, target) #grab that data
            
            if isinstance(i1, np.ndarray): # if valid data was returned
                i_interp, flag = interpolate_lc(i1, time1, flux_err=False, interp_tol=20./(24*60),
                                   num_sigma=10, DEBUG_INTERP=False,
                                   output_dir=yourpath, prefix='')
                TI = [time1, i1]
                TI_array = np.asarray(TI)
                hdr = fits.Header() # >> make the header
                hdu = fits.PrimaryHDU(TI_array, header=hdr)
                hdu.writeto(fname_time_intensities)
                ticids.append(tic)
                
            else: # if no valid data was returned
                print("First target failed, no time index was saved")
                with open(fname_notes, 'a') as file_object:
                    file_object.write("\n")
                    file_object.write(str(int(target)))
        else: 
            target = target_list[n] #get that target number
            time1, i1, tic = get_lc_file_and_data(yourpath, target) #grab that data
            
            if isinstance(i1, np.ndarray): # if valid data was returned
                i_interp, flag = interpolate_lc(i1, time1, flux_err=False, interp_tol=20./(24*60),
                                   num_sigma=10, DEBUG_INTERP=False,
                                   output_dir=yourpath, prefix='')
                TI = [time1, i1]
                TI_array = np.asarray(TI)
                fits.append(fname_time_intensities, TI_array)
                ticids.append(tic)
                
            else: # if no valid data was returned
                print("Target failed to return a light curve")
                with open(fname_notes, 'a') as file_object:
                    file_object.write("\n")
                    file_object.write(str(int(target)))
        print(n, " completed")
    fits.append(fname_time_intensities, np.asarray(ticids))
        
    print("lc_from_target_list has finished running")
    return np.asarray(ticids)
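A design note on the loop above: every fits.append call reopens the file and seeks to its end, which is fine for modest target lists; for very long lists it is usually faster to collect HDUs in an HDUList and write once. A hedged sketch (TI_arrays and ticids are hypothetical stand-ins for the per-target arrays built above):

import numpy as np
from astropy.io import fits

hdul = fits.HDUList([fits.PrimaryHDU(TI_arrays[0])])
for TI_array in TI_arrays[1:]:
    hdul.append(fits.ImageHDU(TI_array))
hdul.append(fits.ImageHDU(np.asarray(ticids)))  # TICIDs go in the last extension
hdul.writeto(fname_time_intensities, overwrite=True)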
Example #42
def createVoronoiInput(cubeFile=None):
    # makes a version of the median flux map that is all positive, and a
    # version of the error array that is directly scaled from this new
    # flux map and the SNR map created empirically (via ifu.map_snr)
    if cubeFile:
        cubefits = pyfits.open(cubeFile)
    else:
        cubefits = pyfits.open(datadir + cuberoot + '.fits')
    
    cube = cubefits[0].data
    hdr = cubefits[0].header
    errors = cubefits[1].data
    quality = cubefits[2].data
    #nframes = cubefits[3].data

    #snrimg = pyfits.getdata(datadir + cuberoot + '_snr.fits')
    if cubeFile:
        #snrimg = pyfits.getdata(cubeFile.replace('_bulgesub.fits','_snr.fits'))
        snrimg = pyfits.getdata(cubeFile.replace('.fits','_snr.fits'))
    else:
        snrimg = pyfits.getdata(datadir+'m31_all_scalederr_cleanhdr_snr.fits')
    
    xx = np.arange(cube.shape[0])
    yy = np.arange(cube.shape[1])
    imgShape = (cube.shape[0],cube.shape[1])

    cubeVor = np.zeros(imgShape, dtype=float)
    errVor = np.zeros(imgShape, dtype=float)

    for nx in xx:
        for ny in yy:
            tmpcube = cube[nx,ny,:]
            # add a constant to the spectrum to make it above 0
            #print "old cube mean is %f " % tmpcube.mean()
            #minFlux = tmpcube.mean() - (1.0 * tmpcube.std())
            #print "minflux is %f" % minFlux
            #tmpcube += np.abs(minFlux)
            #print "new cube mean is %f " % tmpcube.mean()
            tmpcubeavg = np.median(tmpcube)  # use the median; the mean is more outlier-prone
            tmpsnr = np.abs(snrimg[ny,nx])
            #tmpsnr = np.abs(snrimg[nx,ny])
            # calc errors for tessellation based on the empirical
            # S/N already calculated
            tmperr = tmpcubeavg/tmpsnr
            cubeVor[nx,ny] = tmpcubeavg
            errVor[nx,ny] = tmperr
            #if ny==71:
            #    print 'ny = 71'
            #    if nx==28:
            #        pdb.set_trace()
            
    # change NaN to 0
    errVor = np.nan_to_num(errVor)

    if cubeFile:
        outfile = cubeFile.replace('.fits','_vor.fits')
    else:
        outfile = datadir + cuberoot + '_vor.fits'
    pyfits.writeto(outfile, cubeVor, header=hdr)
    pyfits.append(outfile, errVor)
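Reading the pair back is symmetric (hedged: pyfits is the astropy.io.fits alias this snippet already uses, and outfile is the name written above):

vorfits = pyfits.open(outfile)
cubeVor = vorfits[0].data  # positive median-flux map
errVor = vorfits[1].data   # errors scaled from the empirical S/N
vorfits.close()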
Example #43
        ##
        print('Writing meta to %s' % meta_name)
        meta.write(meta_name, format='ascii', overwrite=True)

        ##
        print('Writing spectra to %s' % infile)

        ##
        hdr = fits.Header()

        hdr['EXTNAME'] = 'WAVELENGTH'
        hdr['BUNIT'] = 'Angstrom'

        fits.writeto(infile, wave, header=hdr, overwrite=True)

        hdr['EXTNAME'] = 'FLUX'
        hdr['BUNIT'] = '10^-17 erg/(s*cm^2*Angstrom)'  # Satisfies FITS standard AND Astropy-compatible.

        fits.append(infile, flux, header=hdr)

        hdr['EXTNAME'] = 'REDSHIFTS'
        hdr['BUNIT'] = 'DIMENSIONLESS'  # Satisfies FITS standard AND Astropy-compatible.
        fits.append(infile, redshifts, header=hdr)

        hdr['EXTNAME'] = 'MAGNITUDES'
        hdr['BUNIT'] = 'DIMENSIONLESS'  # Satisfies FITS standard AND Astropy-compatible.
        fits.append(infile, magnitudes, header=hdr)

    print('\n\nDone.\n\n')
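Because every HDU above gets an EXTNAME, the extensions can be indexed by name rather than position when the file is read back; a hedged sketch:

from astropy.io import fits

with fits.open(infile) as hdus:
    wave = hdus[0].data  # primary HDU (WAVELENGTH)
    flux = hdus['FLUX'].data
    redshifts = hdus['REDSHIFTS'].data
    magnitudes = hdus['MAGNITUDES'].data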
Example #44
            print(files)
        elif '.' not in a and '106' in a:
            files.append(findfiles(html + '/' + a))
            print(files)
    return files


fits = []  # NB: this list shadows the astropy.io.fits module name if it was imported
tbl = []
dir = findfiles(
    'https://irsa.ipac.caltech.edu/ibe/data/wise/allsky/4band_p1bm_frm/6a/02206a'
)
print('dir: ', dir[0])
for i in dir[0]:
    if '.fits' in i:
        fits.append(i)
    elif '.tbl' in i:
        tbl.append(i)

print('fits files: ', fits)
print('tbl files: ', tbl)

with open('fits_files.csv', 'w') as csvFile:
    for name in fits:
        csvFile.write(name)
        csvFile.write('\n')

with open('tbl_files.csv', 'w') as csvFile:
    for name in tbl:
        csvFile.write(name)
        csvFile.write('\n')
Example #45
                        default=numpy.inf,
                        help='pixel brightness limit for saturation')
    args = parser.parse_args()
    imagefn = args.imagefn[0]
    ivarfn = args.ivarfn[0]
    flagfn = args.flagfn[0]
    if getattr(args, 'psffn', None):
        # stamp = numpy.clip(fits.getdata(args.psffn), 1e-10, numpy.inf)
        stamp = fits.getdata(args.psffn)
        stamp[stamp < 0] = 0.
        stamp = stamp / numpy.sum(stamp)
        psf = psfmod.SimplePSF(stamp)
        from functools import partial
        psf.fitfun = partial(psfmod.wise_psf_fit, fname=args.psffn)
    else:
        print('using moffat')
        psf = psfmod.SimplePSF(psfmod.moffat_psf(2.5, beta=2.5)[0])
    res = process(imagefn,
                  ivarfn,
                  flagfn,
                  psf,
                  refit_psf=args.refit_psf,
                  verbose=args.verbose,
                  nx=4,
                  ny=4,
                  satlimit=args.satlimit)
    outfn = args.outfn[0]
    fits.writeto(outfn, res[0])
    fits.append(outfn, res[1])
    fits.append(outfn, res[2])
def process_whites(white_list,
                   MB=None,
                   ronmask=None,
                   MD=None,
                   gain=None,
                   P_id=None,
                   scalable=False,
                   fancy=False,
                   remove_bg=True,
                   clip=5.,
                   savefile=True,
                   saveall=False,
                   diffimg=False,
                   path=None,
                   debug_level=0,
                   timit=False):
    """
    This routine processes all whites from a given list of files. It corrects the orientation of the image and crops the overscan regions,
    and subtracts both the MASTER BIAS frame [in ADU], and the MASTER DARK frame [in e-] from every image before combining them to create a MASTER WHITE frame.
    NOTE: the input image has units of ADU, but the output image has units of electrons!!!
    
    INPUT:
    'white_list'  : list of filenames of raw white images (incl. directories)
    'MB'          : the master bias frame (bias only, excluding OS levels) [ADU]
    'ronmask'     : the read-noise mask (or frame) [e-]
    'MD'          : the master dark frame [e-]
    'gain'        : the gains for each quadrant [e-/ADU]
    'P_id'        : order tracing dictionary (only needed if remove_bg is set to TRUE)
    'scalable'    : boolean - do you want to normalize the dark current to an exposure time of 1s? (ie do you want to make it "scalable"?)
    'fancy'       : boolean - do you want to use the 'fancy' method for creating the master white frame? (otherwise a simple median image will be used)
    'remove_bg'   : boolean - do you want to remove the background from the output master white?
    'clip'        : number of 'expected-noise sigmas' a pixel has to deviate from the median pixel value across all images to be considered an outlier when using the 'fancy' method
    'savefile'    : boolean - do you want to save the master white frame as a FITS file?
    'saveall'     : boolean - do you want to save all individual bias- & dark-corrected images as well?
    'diffimg'     : boolean - do you want to save the difference image (ie containing the outliers)? only used if 'fancy' is set to TRUE
    'path'        : path to the output file directory (only needed if savefile is set to TRUE)
    'debug_level' : for debugging...
    'timit'       : boolean - do you want to measure execution run time?
    
    OUTPUT:
    'master'      : the master white image [e-] (also has been brought to 'correct' orientation, overscan regions cropped, and (if desired) bg-corrected) 
    'err_master'  : the corresponding uncertainty array [e-]    
    
    """

    if timit:
        start_time = time.time()

    if debug_level >= 1:
        print('Creating master white frame from ' + str(len(white_list)) +
              ' fibre flats...')

    # if INPUT arrays are not given, read them from default files
    if path is None:
        print('WARNING: output file directory not provided!!!')
        print('Using same directory as input file...')
        dum = white_list[0].split('/')
        path = white_list[0][0:-len(dum[-1])]

    date = path.split('/')[-2]

    if MB is None:
        # no need to fix orientation, this is already a processed file [ADU]
        #         MB = pyfits.getdata(path+'master_bias.fits')
        MB = pyfits.getdata(path + date + '_median_bias.fits')
    if ronmask is None:
        # no need to fix orientation, this is already a processed file [e-]
        ronmask = pyfits.getdata(path + date + '_read_noise_mask.fits')
    if MD is None:
        if scalable:
            # no need to fix orientation, this is already a processed file [e-]
            MD = pyfits.getdata(path + date + '_master_dark_scalable.fits', 0)
#             err_MD = pyfits.getdata(path+'master_dark_scalable.fits', 1)
        else:
            # no need to fix orientation, this is already a processed file [e-]
            texp = pyfits.getval(white_list[0], 'ELAPSED')
            MD = pyfits.getdata(
                path + date + '_master_dark_t' + str(int(np.round(texp, 0))) +
                '.fits', 0)


#             err_MD = pyfits.getdata(path+'master_dark_t'+str(int(np.round(texp,0)))+'.fits', 1)

    # prepare arrays
    allimg = []
    allerr = []

    # loop over all files in "white_list"; correct for bias and darks on the fly
    for n, fn in enumerate(sorted(white_list)):
        if debug_level >= 1:
            print('Now processing file ' + str(n + 1) + '/' +
                  str(len(white_list)) + '   (' + fn + ')')

        # call routine that does all the bias and dark correction stuff and converts from ADU to e-
        if scalable:
            # if the darks have a different exposure time than the whites, then we need to re-scale the master dark
            texp = pyfits.getval(white_list[0], 'ELAPSED')
            img = correct_for_bias_and_dark_from_filename(
                fn,
                MB,
                MD * texp,
                gain=gain,
                scalable=scalable,
                savefile=saveall,
                path=path,
                timit=timit
            )  #these are now bias- & dark-corrected images; units are e-
        else:
            img = correct_for_bias_and_dark_from_filename(
                fn,
                MB,
                MD,
                gain=gain,
                scalable=scalable,
                savefile=saveall,
                path=path,
                timit=timit
            )  # these are now bias- & dark-corrected images; units are e-

        if debug_level >= 2:
            print('min(img) = ' + str(np.min(img)))
        allimg.append(img)
        #         err_img = np.sqrt(img + ronmask*ronmask)   # [e-]
        # TEMPFIX: (how should I be doing this properly???)
        err_img = np.sqrt(np.clip(img, 0, None) + ronmask * ronmask)  # [e-]
        allerr.append(err_img)

    # list of individual exposure times for all whites (should all be the same, but just in case...)
    texp_list = [pyfits.getval(file, 'ELAPSED') for file in white_list]
    # scale to the median exposure time
    tscale = np.array(texp_list) / np.median(texp_list)

    #########################################################################
    ### now we do essentially what "CREATE_MASTER_IMG" does for whites... ###
    #########################################################################
    # add individual-image errors in quadrature (need it either way, not only for fancy method)
    err_summed = np.sqrt(np.sum((np.array(allerr)**2), axis=0))
    #     # get plain median image
    #     medimg = np.median(np.array(allimg), axis=0)
    # take median after scaling to median exposure time
    medimg = np.median(np.array(allimg) / tscale.reshape(len(allimg), 1, 1),
                       axis=0)

    if fancy:
        # need to create a co-added frame if we want to do outlier rejection the fancy way
        summed = np.sum((np.array(allimg)), axis=0)
        if diffimg:
            diff = np.zeros(summed.shape)

        master_outie_mask = np.zeros(summed.shape, dtype='int')

        # make sure we do not have any negative pixels for the sqrt
        medimgpos = medimg.copy()
        medimgpos[medimgpos < 0] = 0.
        med_sig_arr = np.sqrt(
            medimgpos + ronmask * ronmask
        )  # expected STDEV for the median image (from LB Eq 2.1); still in ADUs
        for n, img in enumerate(allimg):
            # outie_mask = np.abs(img - medimg) > clip*med_sig_arr
            outie_mask = (
                img - medimg
            ) > clip * med_sig_arr  # do we only want HIGH outliers, ie cosmics?
            # save info about which image contributes the outlier pixel using unique binary numbers technique
            master_outie_mask += (outie_mask * 2**n).astype(int)
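        # Worked illustration of the encoding: if images 0 and 2 both flag
        # pixel (i, j), master_outie_mask[i, j] = 2**0 + 2**2 = 5, and
        # binary_indices(5) is expected to recover [0, 2] (binary_indices is
        # assumed to decode the set bits; it is defined elsewhere).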
        # see which image(s) produced the outlier(s) and replace outies by mean of pixel value from remaining images
        n_outie = np.sum(master_outie_mask > 0)
        print('Correcting ' + str(n_outie) + ' outliers...')
        # loop over all outliers
        for i, j in zip(
                np.nonzero(master_outie_mask)[0],
                np.nonzero(master_outie_mask)[1]):
            # access binary numbers and retrieve component(s)
            outnum = binary_indices(
                master_outie_mask[i, j]
            )  # these are the indices (within allimg) of the images that contain outliers
            dumix = np.arange(len(white_list))
            # remove the images containing the outliers in order to compute mean from the remaining images
            useix = np.delete(dumix, outnum)
            if diffimg:
                diff[i, j] = summed[i, j] - (len(outnum) * np.mean(
                    np.array([allimg[q][i, j] for q in useix])) + np.sum(
                        np.array([allimg[q][i, j] for q in useix])))
            # now replace value in master image by the sum of all pixel values in the unaffected pixels
            # plus the number of affected images times the mean of the pixel values in the unaffected images
            summed[i, j] = len(outnum) * np.mean(
                np.array([allimg[q][i, j] for q in useix])) + np.sum(
                    np.array([allimg[q][i, j] for q in useix]))
        # once we have finished correcting the outliers, we want to "normalize" (ie divide by number of frames) the master image and the corresponding error array
        master = summed / len(white_list)
        err_master = err_summed / len(white_list)
    else:
        # ie not fancy, just take the median image to remove outliers
        # now set master image equal to median image
        master = medimg.copy()
        nw = len(white_list)  # number of whites
        #         # estimate of the corresponding error array (estimate only!!!)
        #         err_master = err_summed / nw     # I don't know WTF I was thinking here...
        # if roughly Gaussian distribution of values: error of median ~= 1.253*error of mean
        # err_master = 1.253 * np.std(allimg, axis=0) / np.sqrt(nw-1)     # normally it would be sigma/sqrt(n), but np.std is dividing by sqrt(n), not by sqrt(n-1)
        # need to rescale by exp time here, too
        if nw == 1:
            err_master = allerr[0]
        else:
            err_master = 1.253 * np.std(
                np.array(allimg) / tscale.reshape(len(allimg), 1, 1), axis=0
            ) / np.sqrt(
                nw - 1
            )  # normally it would be sigma/sqrt(n), but np.std is dividing by sqrt(n), not by sqrt(n-1)
        # err_master = np.sqrt( np.sum( (np.array(allimg) - np.mean(np.array(allimg), axis=0))**2 / (nw*(nw-1)) , axis=0) )   # that is equivalent, but slower

    # now subtract background (errors remain unchanged)
    if remove_bg:
        # identify and extract background
        bg = extract_background_pid(master,
                                    P_id,
                                    slit_height=30,
                                    exclude_top_and_bottom=True,
                                    timit=timit)
        # fit background
        bg_coeffs, bg_img = fit_background(bg,
                                           clip=10,
                                           return_full=True,
                                           timit=timit)
        # subtract background
        master = master - bg_img

    # now save master white to file
    if savefile:
        outfn = path + date + '_master_white.fits'
        pyfits.writeto(outfn, np.float32(master), overwrite=True)
        pyfits.setval(outfn,
                      'HISTORY',
                      value='   MASTER WHITE frame - created ' +
                      time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) +
                      ' (GMT)')
        # pyfits.setval(outfn, 'EXPTIME', value=texp, comment='exposure time [s]')
        pyfits.setval(outfn, 'UNITS', value='ELECTRONS')
        if fancy:
            pyfits.setval(
                outfn,
                'METHOD',
                value='fancy',
                comment='method to create master white & remove outliers')
        else:
            pyfits.setval(
                outfn,
                'METHOD',
                value='median',
                comment='method to create master white & remove outliers')
        h = pyfits.getheader(outfn)
        h_err = h.copy()
        h_err[
            'HISTORY'] = 'estimated uncertainty in MASTER WHITE frame - created ' + time.strftime(
                "%Y-%m-%d %H:%M:%S", time.gmtime()) + ' (GMT)'
        pyfits.append(outfn, np.float32(err_master), h_err)

    # also save the difference image if desired (it only exists in 'fancy' mode,
    # and the header 'h' comes from the savefile branch above)
    if diffimg and fancy and savefile:
        hdiff = h.copy()
        hdiff['HISTORY'] = '   MASTER WHITE DIFFERENCE IMAGE - created ' + \
            time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + ' (GMT)'
        pyfits.writeto(path + date + '_master_white_diffimg.fits',
                       diff,
                       hdiff,
                       overwrite=True)

    if timit:
        print('Total time elapsed: ' +
              str(np.round(time.time() - start_time, 1)) + ' seconds')

    return master, err_master
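A hedged usage sketch for the routine above; the directory, file pattern and gain list are illustrative, and the default master bias / read-noise / dark files are assumed to already exist under 'path':

import glob

white_list = sorted(glob.glob('/data/20190503/*fibre_flat*.fits'))
master, err_master = process_whites(white_list, gain=[0.88, 0.93, 0.99, 0.91],
                                    remove_bg=False,  # skip bg removal so no P_id is needed
                                    savefile=True, path='/data/20190503/',
                                    timit=True)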
"""
Squeeze a DESI GAUSS-HERMITE PSF into something smaller for testing.
Also works for GAUSS-HERMITE2
"""

import sys
import numpy as np
from astropy.io import fits
from astropy.table import Table

infile, outfile = sys.argv[1:]  # skip argv[0], the script name

fx = fits.open(infile)

hdr = fx[0].header.copy()
fits.writeto(outfile, np.zeros(0), header=hdr)

hdr = fx[1].header.copy()
nspec = 25
hdr['FIBERMAX'] = nspec-1

data = fx[1].data
tx = Table()
tx['PARAM'] = data['PARAM']
tx['WAVEMIN'] = data['WAVEMIN']
tx['WAVEMAX'] = data['WAVEMAX']
if 'NCOEFF' in data.dtype.names:
    tx['NCOEFF'] = data['NCOEFF']
tx['COEFF'] = data['COEFF'][:, 0:nspec, :]

fits.append(outfile, np.array(tx), header=hdr)
Example #48
def feature_gen_from_lc_fits(path, sector, feature_version=0):
    """Given a path to a folder containing ALL the light curve metafiles 
    for a sector, produces the feature vector metafile for each group and then
    one main feature vector metafile containing ALL the features in [0] and the
    TICIDS in [1]. 
    Parameters: 
        * folderpath to where the light curve metafiles are saved
            * must end in a forward slash ('/')
        * sector number
        * what version of features you want generated (default is 0)
    modified [lcg 07112020]"""

    import datetime
    from datetime import datetime

    now = datetime.now()
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
    print("Starting Feature Generation at", dt_string)

    ticids_all = [10]  # dummy first entry, stripped off below
    ticids_all = np.asarray(ticids_all)
    for n in range(1, 5):
        camera = int(n)
        for m in range(1, 5):
            ccd = int(m)
            file_label = "Sector" + str(sector) + "Cam" + str(
                camera) + "CCD" + str(ccd)
            folderpath = path + "/" + file_label + "/"

            t, i1, targets = load_group_from_fits(folderpath, sector, camera,
                                                  ccd)
            ticids_all = np.concatenate((ticids_all, targets))

            i2, t2 = nan_mask(i1,
                              t,
                              flux_err=False,
                              DEBUG=False,
                              debug_ind=1042,
                              ticid=False,
                              output_dir=folderpath,
                              prefix='',
                              tol1=0.05,
                              tol2=0.1)

            i3 = normalize(i2, axis=1)

            now = datetime.now()
            dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
            print("Starting feature vectors for camera ", camera, "ccd ", ccd,
                  "at ", dt_string)

            create_save_featvec(folderpath,
                                t2,
                                i3,
                                file_label,
                                version=0,
                                save=True)

    ticids_all = ticids_all[1:]
    feats_all = np.zeros((2, 16))  # dummy rows, stripped off below

    #make main listing
    for n in range(1, 5):
        camera = int(n)
        for m in range(1, 5):
            ccd = int(m)
            file_label = "Sector" + str(sector) + "Cam" + str(
                camera) + "CCD" + str(ccd)
            folderpath = path + "/" + file_label + "/"
            f = fits.open(folderpath + file_label + "_features.fits",
                          memmap=False)
            feats = f[0].data
            feats_all = np.concatenate((feats_all, feats))
            f.close()
            #print(n,m)

    feats_all = feats_all[2:]

    hdr = fits.Header()  # >> make the header
    hdr["Sector"] = sector
    hdr["Version"] = feature_version
    hdr["Date"] = str(datetime.now())
    #hdr["Creator"] = "L. Gordon"
    hdu = fits.PrimaryHDU(feats_all, header=hdr)
    hdu.writeto(folderpath + "Sector" + str(sector) + "_features_v" +
                str(feature_version) + "_all.fits")
    fits.append(
        folderpath + "Sector" + str(sector) + "_features_v" +
        str(feature_version) + "_all.fits", ticids_all)

    return feats_all, ticids_all
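A hedged read-back of the combined feature file written above (the sector number and CCD folder are illustrative; the file sits in whichever folderpath the loop ended on):

from astropy.io import fits

with fits.open(path + "/Sector20Cam4CCD4/Sector20_features_v0_all.fits") as f:
    feats_all = f[0].data  # one 16-feature row per target
    ticids_all = f[1].data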
from astropy.io import fits

msName = 'HDFC0155MFSC_CAL_vis.ms'
x=fits.getdata(msName[:-3]+'_'+str(0)+'.fits',0)
header_primary = fits.getheader(msName[:-3]+'_'+str(0)+'.fits')
fits.writeto('HDFC0155_cube.fits', x, header_primary)

for i in range(1,8):
    x=fits.getdata(msName[:-3]+'_'+str(i)+'.fits',0)
    header_primary = fits.getheader(msName[:-3]+'_'+str(i)+'.fits')
    fits.append('HDFC0155_cube.fits', x, header_primary)

fits.info("HDFC0155_cube.fits")  # prints an HDU summary itself (returns None)
Example #50
def remove_cosmics(img,
                   ronmask,
                   obsname,
                   path,
                   Flim=3.0,
                   siglim=5.0,
                   maxiter=20,
                   savemask=True,
                   savefile=False,
                   save_err=False,
                   verbose=False,
                   timit=False):
    """
    Top-level wrapper function for the cosmic-ray cleaning of an image. 
    
    INPUT:
    'img'      : input image (2-dim numpy array)
    'ronmask'  : read-out noise mask (or ron-image really...) from "make_master_bias_and_ronmask"
    'obsname'  : the obsname in "obsname.fits"
    'path'     : the directory of the files
    'Flim'     : lower threshold for the identification of a pixel as a cosmic ray when using L+/F (ie Laplacian image divided by fine-structure image) (= lbarplus/F2 in the implementation below)
    'siglim'   : sigma threshold for identification as cosmic in S_prime
    'maxiter'  : maximum number of iterations
    'savemask' : boolean - do you want to save the cosmic-ray mask?
    'savefile' : boolean - do you want to save the cosmic-ray corrected image?
    'save_err' : boolean - do you want to save the corresponding error array as well? (remains unchanged though)
    'verbose'  : boolean - for user information / debugging...
    'timit'    : boolean - do you want to measure execution run time?
    
    OUTPUT:
    'cleaned'  : the cosmic-ray corrected image
    """

    if timit:
        start_time = time.time()

    if verbose:
        print('Cleaning cosmic rays...')

    #some preparations
    global_mask = np.cast['bool'](np.zeros(img.shape))
    n_cosmics = 0
    niter = 0
    n_new = 0
    cleaned = img.copy()

    #remove cosmics iteratively
    while ((niter == 0) or n_new > 0) and (niter < maxiter):
        print('Now running iteration ' + str(niter + 1) + '...')
        #go and identify cosmics
        mask = identify_cosmics(cleaned,
                                ronmask,
                                Flim=Flim,
                                siglim=siglim,
                                verbose=verbose,
                                timit=timit)
        n_new = np.sum(mask)
        #add to global mask
        global_mask = np.logical_or(global_mask, mask)
        n_cosmics += n_new
        #n_global = np.sum(global_mask)     #should be equal to n_cosmics!!!!! if they're not, this means that some of the "cleaned" cosmics from a previous round are identified as cosmics again!!! well, they're not...
        #now go and clean these newly found cosmics
        cleaned = clean_cosmics(cleaned, mask, verbose=verbose, timit=timit)
        niter += 1

    #save cosmic-ray mask
    if savemask:
        outfn = path + obsname + '_CR_mask.fits'
        #get header from the BIAS- & DARK-subtracted image if it exits; otherwise from the original image FITS file
        try:
            h = pyfits.getheader(path + obsname + '_BD.fits')
        except:
            h = pyfits.getheader(path + obsname + '.fits')
        h['HISTORY'] = '   (boolean) COSMIC-RAY MASK- created ' + time.strftime(
            "%Y-%m-%d %H:%M:%S", time.gmtime()) + ' (GMT)'
        pyfits.writeto(outfn, global_mask.astype(int), h, clobber=True)

    #save cosmic-ray corrected image
    if savefile:
        outfn = path + obsname + '_BD_CR.fits'
        #get header from the BIAS- & DARK-subtracted images if they exit; otherwise from the original image FITS file
        try:
            h = pyfits.getheader(path + obsname + '_BD.fits')
        except:
            h = pyfits.getheader(path + obsname + '.fits')
            h['UNITS'] = 'ELECTRONS'
        h['HISTORY'] = '   COSMIC-RAY corrected image - created ' + time.strftime(
            "%Y-%m-%d %H:%M:%S", time.gmtime()) + ' (GMT)'
        pyfits.writeto(outfn, cleaned, h, clobber=True)
        #also save the error array if desired
        if save_err:
            try:
                err = pyfits.getdata(path + obsname + '_BD.fits', 1)
                h_err = h.copy()
                h_err[
                    'HISTORY'] = 'estimated uncertainty in COSMIC-RAY corrected image - created ' + time.strftime(
                        "%Y-%m-%d %H:%M:%S", time.gmtime()) + ' (GMT)'
                pyfits.append(outfn, err, h_err, clobber=True)
            except:
                print(
                    'WARNING: error array not found - cannot save error array')

    if verbose:
        print('Done!')

    if timit:
        print('Total time elapsed: ' +
              str(np.round(time.time() - start_time, 1)) + ' seconds')

    return cleaned
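A hedged usage sketch; the observation name is a placeholder, and ronmask is assumed to come from make_master_bias_and_ronmask as the docstring notes:

img = pyfits.getdata(path + obsname + '_BD.fits')  # bias- & dark-corrected frame [e-]
cleaned = remove_cosmics(img, ronmask, obsname, path, Flim=3.0, siglim=5.0,
                         maxiter=20, savemask=True, savefile=False,
                         verbose=True)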
Example #51
def main(massfunction = 0, starformationhistory = 0, A_v = 10.0, sfr = .01, apera = 24000,\
 maxage = 2000000., distance = 8.0, appendix='default', quiet=0, precise=0):
    """main(massfunction = 0, starformationhistory = 0, A_v = 10.0, sfr = .01, apera = 24000,\
          maxage = 2000000., distance = 8.0, appendix='default', quiet=0, precise=0)

    Creates a sample of stars

    Parameters
    ----------
    massfunction            distribution:
        relatively in mass, with lower and upper restriction, see also what the distribution must provide
    starformation history   distribution:
        relatively in age, with lower and upper restriction, see also what the distribution must provide
    A_v       float:
        value for the visual extinction 
    sfr       float:
        average star formation rate in M_sun/year (only if precise = True)
    apera     float:
        used aperture size for selecting the fluxes of the protostars
    maxage    float:
        age of the star formation site, sfr is assumed to be constant
    distance  float:
        distance to the simulated starformation site
    appendix  String:
        sets the outputfilename, default is the starting time (via time.time())
    quiet     boolean:
        if true (=1) suppresses all standard output
    precise   boolean:
        if true (=1) sample single star till expected mass reached based on the 
        integrated starformationhistory times the starformationrate
        else sfr is the number of expected stars and the starformationrate is
        calculated by the cumulated mass divided by the formation time

    The distributions must provide an object which has the following members:
        callable cdf()          returns a function evaluating the integrated distribution
                                up to x; used to calculate the expected mass
        float    _upperbound    returns the upper limit of the distribution, is used to calculate
                                the expected mass
        float[]  sample(int n)  returns an array of n floats, sampled from the distribution
        float    mean()         returns the mean value of the distribution


    Returns
    ----------
    returns a fits file in the out-folder, either using the appendix as filename or the time of the
          starting of the script in order to prevent overwriting existing files
          In the header of the fits-file are the values: A_v, sfr, apera, maxage and distance recorded
          In the data part are the age, mass, modelnumber and the uncorrected and corrected fluxes
    """
    
    if quiet:
        output_stream = StringIO()
    else:
        output_stream = sys.stdout

    t0 = time()                 
    if appendix=='default':  # making sure not to overwrite former output
        appendix=t0          # by using the starting time as an unique id
    #parameter settings
    k_v = 211.4   # opacity in v_band in cm^2/g
    # wavelength of the corresponding filterband in microns
    wavelength = [1.235, 1.662, 2.159, 3.550, 4.493, 5.731, 7.872, 23.68, 71.42, 155.9] 
    models = ['2H', '2J', '2K', 'I1', 'I2', 'I3', 'I4', 'M1', 'M2', 'M3']


    if massfunction == 0 and starformationhistory == 0:
        # star mass function
        kroupa = np.vectorize(functions.kroupa)
        massfunction = dist.Distribution(kroupa, .1, 50.)

        #star formation history
        constant_sfr = np.vectorize(functions.constant_sfr)
        starformationhistory = dist.Distribution(constant_sfr, 1000., maxage)


    cumass = 0.  #sampled mass
    stars = []  #storing for the sample
    sfh = starformationhistory

    t1 = time()  #startup completed

    if precise:
        n = 0
        exmass = sfh.cdf()(sfh._upperbound)*sfr     #expected mass formed
        while cumass < exmass:
            mass, age = massfunction.sample(), sfh.sample()
            cumass = cumass + mass
            stars.append([n, age, mass])
            if n % 10000 == 0:
                print (n, cumass, file=output_stream)                                 #reporting progress
            n = n+1
    else:
        n = sfr
        mass, age = massfunction.sample(n), sfh.sample(n)
        cumass = np.sum(mass)
        exmass = n * massfunction.mean()
        stars = [[i, age[i], mass[i]] for i in range(n)]
    sfr = cumass/(sfh._upperbound-sfh._lowerbound)  #average star formation rate

    print ('number of sampled stars: %s' %n , file=output_stream)  
    print ('mass of sampled stars: %s' % cumass , file=output_stream)  
    print ('mean mass: %s' % (cumass/n), file=output_stream)
    print ('expected mass of stars: %s' % exmass , file=output_stream)
    t2 = time()  # sampleing completed


    # python code for model contact
    #initial parameters
    model = [ fits.open('models/%s.fits' % mod) for mod in models ]    # fits-data for the model
    param = fits.open('models/parameters.fits.gz')  # modelparameter
    app_num = [ np.interp(apera, model[i][2].data.field(0), range(model[i][2].data.field(0).size)) for i in range(len(models)) ] 


    # sampling viewing angle
    angle = np.random.randint(0, 10, len(stars))  # random_integers is deprecated; randint's upper bound is exclusive
    #reading model grid
    mass = param[1].data['MASSC'][::10]
    age = param[1].data['TIME'][::10]
    grid = np.vstack([age, mass]).transpose()

    #converting to logspace
    stars = np.asarray(stars)
    grid = np.log10(grid)
    stars[:,1:] = np.log10(stars[:,1:])

    output = stars.tolist()  #creating output
    
    #normalizing for nearest neighbor search (per column: age, then mass)
    age_range = grid[:,0].max() - grid[:,0].min()
    mass_range = grid[:,1].max() - grid[:,1].min()
    grid[:,0] = grid[:,0]/age_range
    grid[:,1] = grid[:,1]/mass_range
    stars[:,1] = stars[:,1]/age_range
    stars[:,2] = stars[:,2]/mass_range

    t3 = time()  #model data load complete

    tree = scipy.spatial.cKDTree(grid,leafsize=10)  #search tree
    matches = [tree.query(star[1:] , k=1)[1] for star in stars]  #saves matches with (dist, index)

    t4 = time()  #matching sample to data complete

    # extracting fluxes
    fluxes = [0 for j in range(len(models)) ]
    indices = 10*np.asarray(matches) + angle
    for j in range(len(models)):
        fluxes[j] = model[j][1].data[indices]['TOTAL_FLUX'][:,app_num[j]]



    # applying extinction
    extinction = np.loadtxt('models/extinction_law.ascii')
    k_lambda = np.interp(wavelength, extinction[:,0], extinction[:,1])
    correctionfactor = 10.**(-.4 * A_v * k_lambda / k_v)

    newfluxes = [0 for j in range(len(models)) ]
    for j in range(len(models)):
        newfluxes[j] = np.asarray(fluxes[j]) * correctionfactor[j] * (1./distance)**2


    t5 = time()  #extracting fluxes complete

    # saving data
    fluxes = np.asarray(fluxes)
    newfluxes = np.asarray(newfluxes)
    output = np.vstack([np.asarray(output).transpose(), matches, fluxes, newfluxes]).transpose()

    # create table
    # data table
    t = Table()
    t.add_column(Column(name='age', data=output[:,1]))
    t.add_column(Column(name='mass', data=output[:,2]))
    t.add_column(Column(name='model', data=output[:,3]))
    for i in range(len(models)):
        t.add_column(Column(name='%s' % models[i], data=output[:,4+i]))
    for i in range(len(models)):
        t.add_column(Column(name='c%s' % models[i], data=output[:,4+len(models)+i]))
    # head table
    header = Table()
    header.add_column(Column(name='AV', data = [A_v]))
    header.add_column(Column(name='SFR', data = [sfr]))
    header.add_column(Column(name='APPERA', data = [apera])  )   
    header.add_column(Column(name='MAXAGE', data = [maxage]))
    header.add_column(Column(name='DIST', data = [distance]))


    fits.writeto('out/%s' % appendix, np.array(t), overwrite=True)
    fits.append('out/%s' % appendix, np.array(header))
    
    t6 = time()  #saving complete

    # timing possibility for optimization efforts

    print( 'starting script at %f'  %(t0), file=output_stream)
    print( 'initializing       %f'  %(t1-t0), file=output_stream)
    print( "sampleing          %f"  %(t2-t1), file=output_stream)
    print( "model data load    %f"  %(t3-t2), file=output_stream)
    print( "matching model     %f"  %(t4-t3), file=output_stream)
    print( "extracting fluxes  %f"  %(t5-t4), file=output_stream)
    print( "saving             %f"  %(t6-t5), file=output_stream)
    print( "________________________", file=output_stream)
    print( "total runtime      %f"  %(t6-t0), file=output_stream)
    print( "finishing script   %f"  %t6, file=output_stream)

#main(sfr = .08)  # for testing purposes and directly called from bash
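The distribution interface in the docstring is small enough to sketch; here is a hedged toy implementation (note that main() actually calls cdf() with no argument and applies the returned callable, which the sketch mirrors):

import numpy as np

class UniformDist(object):
    """Toy distribution exposing the members main() expects."""
    def __init__(self, lower, upper):
        self._lowerbound, self._upperbound = lower, upper
    def cdf(self):
        # return a callable evaluating the (flat) integrated distribution up to x
        return lambda x: (x - self._lowerbound) / (self._upperbound - self._lowerbound)
    def sample(self, n=None):
        return np.random.uniform(self._lowerbound, self._upperbound, n)
    def mean(self):
        return 0.5 * (self._lowerbound + self._upperbound)

# e.g. main(massfunction=UniformDist(.1, 50.), starformationhistory=UniformDist(1000., 2e6))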
Example #52
    def _check_hdu_list(self, cutout_dimensions, hdu_list):
        has_match = False
        pixel_matches_left = len(cutout_dimensions)
        for curr_extension_idx, hdu in enumerate(hdu_list):
            # If we encounter a PrimaryHDU, write it at the top and continue.
            if isinstance(hdu, PrimaryHDU) and hdu.data is None:
                logger.debug('Appending Primary from index {}'.format(
                    curr_extension_idx))
                fits.append(filename=self.output_writer,
                            header=hdu.header,
                            data=None,
                            overwrite=False,
                            output_verify='silentfix',
                            checksum='remove')
            elif hdu.is_image:
                header = hdu.header
                ext_name = header.get('EXTNAME')
                ext_ver = header.get('EXTVER', 0)
                curr_ext_name_ver = None

                if ext_name is not None:
                    curr_ext_name_ver = (ext_name, ext_ver)

                try:
                    if isinstance(cutout_dimensions[0], PixelCutoutHDU):
                        for cutout_dimension in cutout_dimensions:
                            if self._is_extension_requested(
                                    curr_extension_idx, curr_ext_name_ver,
                                    cutout_dimension):
                                logger.debug(
                                    '*** Extension {} does match ({} | {})'.
                                    format(cutout_dimension.get_extension(),
                                           curr_extension_idx,
                                           curr_ext_name_ver))
                                pixel_matches_left -= 1
                                self._pixel_cutout(header, hdu.data,
                                                   cutout_dimension)
                                has_match = True

                        if pixel_matches_left == 0:
                            return has_match
                    else:
                        logger.debug('Handling WCS transform.')
                        # Handle WCS transform.
                        transform = Transform()
                        transformed_cutout_dimension = \
                            transform.world_to_pixels(cutout_dimensions, header)
                        logger.debug('Transformed {} into {}'.format(
                            cutout_dimensions, transformed_cutout_dimension))
                        self._pixel_cutout(header, hdu.data,
                                           transformed_cutout_dimension)
                        has_match = True

                except NoContentError:
                    # Skip for now as we're iterating the loop.
                    logger.debug('No overlap with extension {}'.format(
                        curr_extension_idx))

            logger.debug('Finished extension {}'.format(curr_extension_idx))

        logger.debug('Has match in list? -- {}'.format(has_match))
        return has_match
Example #53
        inv_sq_root_nuisance = 0
        white_covar = 0
        Un = 0
        Dn = 0
        ortho_r = 0
        Ln = 0
        Ln_minus = 0
        Pn = 0
        vv = 0

    Rq = 0
    Rq_nuisance = 0

    pyfits.append(
        'ilc_weights_' + output_suffix + "_" + str(j).strip() + '.fits',
        w_target)

    w_target = 0

############################################################################

##### Apply GNILC weights to wavelet maps

print "Applying the GNILC weights to the observed wavelet maps."

for i in range(0, nf):
    pyfits.append('wavelet_gnilc_target_' + str(i).strip() + '.fits',
                  bands[:, 0:relevant_band_max[i] + 1])

for j in range(0, nbands):  # loop over needlet bands
Example #54
def fitting_ve(name):

    image_path = name
    if not os.path.exists(image_path):
        # NB: i, N and keep are assumed to come from an enclosing batch loop
        print("{}/{} could not be found: {}".format(i + 1, N, image_path))
        keep[i] = False

    # We only store flux,ivar,inf_flux,parameters,parameters_new,parameters_sim,ve(n*3)(include ve, ve_new,ve_sim)
    try:
        image = fits.open(image_path, ignore_missing_end=True)
        dat = Table.read(image_path)

        flux = image[1].data
        flux_err = image[2].data

        flux = np.atleast_2d(flux)
        flux_err = np.atleast_2d(flux_err)


    except IOError:

        print("Oops, this one failed")
        em = 0

    else:

        em = 1

        badpix = get_pixmask(flux, flux_err)
        ivar = 1.0 / flux_err ** 2
        error = flux_err
        # badpix is a array and the length is 8575
        flux = np.array(flux, dtype=np.float64)
        ivar = np.array(ivar, dtype=np.float64)

        flux[badpix] = np.median(flux)
        ivar[badpix] = 0.0

        flux = np.array(flux)
        ivar = np.array(ivar)

        # normalize flux:
        # value

        tr_ID = image_path

        test_labels_all_i = np.array([5000, 1, 1])

        ds = dataset.Dataset(wl, tr_ID, flux, ivar,
                             test_labels_all_i, tr_ID, flux, ivar)

        ds.ranges = [[371, 3192], [3697, 5997], [6461, 8255]]

        # set sudo-continuous spectrum
        pseudo_tr_flux, pseudo_tr_ivar = ds.continuum_normalize_training_q \
            (q=0.90, delta_lambda=50)

        # set mask
        contmask = ds.make_contmask(pseudo_tr_flux, pseudo_tr_ivar, frac=0.07)

        # get continuous mask

        ds.set_continuum(contmask)

        # fit the normalized-spectrum in the continuous region

        cont = ds.fit_continuum(3, "sinusoid")

        # Obtain the normalized flux
        norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar = \
            ds.continuum_normalize(cont)

        norm_tr_flux = np.atleast_2d(norm_tr_flux)

        if len(norm_tr_flux[:, 0]) < 3:
            em = 0

        # infer labels


        # inf_labels = model.fit(norm_tr_flux, norm_tr_ivar)


        # Use inferred labels from the combined spectra:


        inf_labels = model.fit(norm_tr_flux, norm_tr_ivar)
        # only use the inf labels from the combined spectra

        com = len(inf_labels[:, 0])

        inf_labels_com = inf_labels[0, :]

        inf_labels = []
        for z in range(0, com):
            inf_labels.append(inf_labels_com)

        inf_labels = np.array(inf_labels)

        v = model.vectorizer.get_label_vector(inf_labels)
        inf_flux = np.dot(v, model.theta.T)
        opt_flux, parameters = model.fitting_spectrum_parameters_single \
            (norm_tr_flux, norm_tr_ivar, inf_flux)


        # calculate chi-squared!

        chi_inf = (norm_tr_flux-inf_flux)**2*norm_tr_ivar
        chi_inf = np.sum(chi_inf,axis=1)

        chi_mix = (norm_tr_flux-opt_flux)**2*norm_tr_ivar
        chi_mix = np.sum(chi_mix,axis=1)



        ve = (parameters[:, 2] - parameters[:, 0]) / (parameters[:, 0] + parameters[:, 1] + parameters[:, 2]) * 4144.68

        ve_un = model.uncertainty

        # old
        a0 = parameters
        a1 = ve
        a2 = ve_un

        # covariance matrix for abc
        a3 = model.un_cov

        # spectra

        a4 = norm_tr_flux
        a5 = norm_tr_ivar
        a6 = inf_flux
        a7 = opt_flux

        # inf_labels are from the
        a8 = inf_labels

        a9 = chi_inf

        a10 = chi_mix

        # VHELIO
        a11 = np.array(dat[0]["VHELIO"])

        # Fiber

        a12 = np.array(dat[0]["FIBER"])

        # Files

        # BJD

        RA = image[0].header["RA"]

        DEC = image[0].header["DEC"]

        SNR = image[0].header["SNR"]

        MJD = dat[0]["MJD"]

        c = SkyCoord(RA, DEC, frame='icrs', unit='deg')

        BJD = MJD2BJD(MJD, c)

        a13 = np.array(BJD)

        # calculate chi-squared:


        try:
            # save them

            # pay attention to the fits file saving

            path_fits_i = image_path.replace("/Volumes/Data_2TB/Data/DR13_rc/apStar-r6-",
                                             "/Users/caojunzhi/Desktop/Data/dr13_red_clump/")

            print("saving files" + path_fits_i)

            hdu = fits.PrimaryHDU(data=a0)
            hdu.header['COMMENT'] = "Simple orange juice"

            # add header info

            hdu.header['SNR'] = SNR
            hdu.header['RA'] = RA
            hdu.header['DEC'] = DEC

            hdu.writeto(path_fits_i, overwrite=True)

            ts.append(path_fits_i, a1)  # 'ts' is assumed to alias astropy.io.fits in this module
            ts.append(path_fits_i, a2)
            ts.append(path_fits_i, a3)
            ts.append(path_fits_i, a4)
            ts.append(path_fits_i, a5)
            ts.append(path_fits_i, a6)
            ts.append(path_fits_i, a7)
            ts.append(path_fits_i, a8)

            ts.append(path_fits_i, a9)
            ts.append(path_fits_i, a10)
            ts.append(path_fits_i, a11)
            ts.append(path_fits_i, a12)
            ts.append(path_fits_i, a13)

        except OSError:
            print("fail")
            em=0

    return em
def table_model(modelname, userparfile, specfile, outfile, clobber=False):

    # Create Xspec/Sherpa style table model
    #
    # :rtype: None, <outfile> fits file created.
    #
    # :param modelname: name of the table model displayed in Xspec/Sherpa
    # :param userparfile: file with user keywords for the table model
    # :specfile: file with grid of energy spectra, 1st column: energy grid
    #            in keV; consecutive columns: model spectra
    # :param outfile: name of the output fits file
    # :param clobber: T/F, if T outfile will be overwritten
    #
    # Tmp file 'tmp_tabmod.fits' created and removed.

    # ----------- READ IN USER INPUT --------------------

    if os.path.isfile(outfile) and not clobber:
        raise NameError("Output file " + outfile + " exists and clobber set to false\n")

    userdict = read_userparams(userparfile)

    # Convert userdict['value'] into a list of tuples; one tuple
    # for each model parameter

    # value_not_padded: used in get_paramval to calculate array with
    # combinations of model parameters
    idx = 0
    value_not_padded = []
    for item in userdict["numbvals"]:
        value_not_padded.append(tuple(userdict["value"][idx : idx + item]))
        idx += item

    # value_padded: format required for col10 of the fits file
    value_padded = []
    maxnum = max(userdict["numbvals"])
    for val in value_not_padded:
        if len(val) != maxnum:
            n = maxnum - len(val)
            value_padded.append(val + (0.0,) * n)
        else:
            value_padded.append(val)

    # model energy grid (bin edges!)
    energy = np.loadtxt(specfile)[:, 0]  # energy in keV
    energ_lo = energy[:-1]
    energ_hi = energy[1:]

    paramval = get_paramval(value_not_padded)

    # list of tuples with spectra
    input_spectra = read_input_spectra(specfile, userdict["numbvals"])

    # ----------- END ---------------------------------

    # initialize fits file by creating user parameters extension

    col1 = fits.Column(name="NAME", format="12A", array=userdict["name"])
    col2 = fits.Column(name="METHOD", format="J", array=userdict["method"])
    col3 = fits.Column(name="INITIAL", format="E", array=userdict["initial"])
    col4 = fits.Column(name="DELTA", format="E", array=userdict["delta"])
    col5 = fits.Column(name="MINIMUM", format="E", array=userdict["minimum"])
    col6 = fits.Column(name="BOTTOM", format="E", array=userdict["bottom"])
    col7 = fits.Column(name="TOP", format="E", array=userdict["top"])
    col8 = fits.Column(name="MAXIMUM", format="E", array=userdict["maximum"])
    col9 = fits.Column(name="NUMBVALS", format="J", array=userdict["numbvals"])
    col10 = fits.Column(name="VALUE", format=str(np.max(userdict["numbvals"])) + "E", array=value_padded)

    cols = fits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8, col9, col10])

    tbhdu = fits.BinTableHDU.from_columns(cols)

    # update header of user parameters extension

    nintparm = len(userdict["numbvals"])

    tbhdr = tbhdu.header

    tbhdr.set("EXTNAME", "PARAMETERS", "name of this binary table extension")
    tbhdr.set("HDUCLASS", "OGIP", "format conforms to OGIP standard")
    tbhdr.set("HDUCLAS1", "XSPEC TABLE MODEL", "model spectra for XSPEC")
    tbhdr.set("HDUCLAS2", "PARAMETERS", "extension containing parameter info")
    tbhdr.set("HDUVERS1", "1.0.0", "version of format")
    tbhdr.set("NINTPARM", nintparm, "Number of interpolation parameters")
    tbhdr.set("NADDPARM", 0, "Number of additional parameters")

    if os.path.isfile("tmp_tabmod.fits"):
        os.remove("tmp_tabmod.fits")

    tbhdu.writeto("tmp_tabmod.fits")

    # update primary header

    hdulist = fits.open("tmp_tabmod.fits")

    prihdr = hdulist[0].header
    prihdr["bitpix"] = 16
    prihdr.set("modlname", modelname, "model name")
    prihdr.set("modlunit", "photons/cm^2/s", "model units")
    prihdr.set("redshift", True, "If true then redshift will be included as a par")
    prihdr.set("addmodel", userdict["addmodel"], "If true then this is an additive table model")
    prihdr.set("hduclass", "OGIP", "format conforms to OGIP standard")
    prihdr.set("hduclas1", "XSPEC TABLE MODEL", "model spectra for XSPEC")
    prihdr.set("hduvers1", "1.0.0", "version of format")

    if os.path.isfile(outfile):
        os.remove(outfile)

    hdulist.writeto(outfile)

    hdulist.close()
    os.remove("tmp_tabmod.fits")

    # append extension energies and update its header

    col1 = fits.Column(name="ENERG_LO", format="E", array=energ_lo, unit="keV")
    col2 = fits.Column(name="ENERG_HI", format="E", array=energ_hi, unit="keV")

    cols = fits.ColDefs([col1, col2])

    tbhdu_energies = fits.BinTableHDU.from_columns(cols)

    hdr = tbhdu_energies.header
    hdr.set("EXTNAME", "ENERGIES", "name of this binary table extension")
    hdr.set("HDUCLASS", "OGIP", "format conforms to OGIP standard")
    hdr.set("HDUCLAS1", "XSPEC TABLE MODEL", "model spectra for XSPEC")
    hdr.set("HDUCLAS2", "ENERGIES", "extension containing energy bins info")
    hdr.set("HDUVERS1", "1.0.0", "version of format")

    fits.append(outfile, tbhdu_energies.data, hdr)

    # append extension spectra and update its header

    col1 = fits.Column(name="PARAMVAL", format=np.str(nintparm) + "E", array=paramval)
    col2 = fits.Column(name="INTPSPEC", format=np.str(len(energ_lo)) + "E", array=input_spectra, unit="photons/cm^2/s")

    cols = fits.ColDefs([col1, col2])

    tbhdu_spectra = fits.BinTableHDU.from_columns(cols)

    hdr = tbhdu_spectra.header
    hdr.set("EXTNAME", "SPECTRA", "name of this binary table extension")
    hdr.set("HDUCLASS", "OGIP", "format conforms to OGIP standard")
    hdr.set("HDUCLAS1", "XSPEC TABLE MODEL", "model spectra for XSPEC")
    hdr.set("HDUCLAS2", "MODEL SPECTRA", "extension containing model spectra")
    hdr.set("HDUVERS1", "1.0.0", "version of format")

    fits.append(outfile, tbhdu_spectra.data, hdr)

    return
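
A quick sanity check on the file written above (a minimal sketch, assuming `outfile` still names the table model on disk): XSPEC expects exactly the OGIP layout built here, i.e. PARAMETERS, ENERGIES and SPECTRA extensions after the primary HDU.

from astropy.io import fits

with fits.open(outfile) as hdul:
    hdul.info()
    assert [hdu.name for hdu in hdul[1:]] == ["PARAMETERS", "ENERGIES", "SPECTRA"]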
Example #56
0
import argparse
import warnings

import numpy as np
from astropy.io import fits

# The snippet begins mid-docstring; the imports and the ArgumentParser call
# are a minimal reconstruction implied by the closing `""")` and the
# parser.add_argument calls that follow.
parser = argparse.ArgumentParser(description="""
    ...
    Version  : $Rev: 514 $
    Last Update: $Date: 2015-10-16 10:30:00 +0530 (Fri, 16 Oct 2015) $

""")
parser.add_argument("infile",
                    help="Input Detector Plane Histogram (DPH) file name",
                    type=str)
parser.add_argument("relqefile",
                    nargs="?",
                    default="relativeQE.fits",
                    help="Input relative qe file name",
                    type=str)
parser.add_argument("outfile",
                    nargs="?",
                    default="output.dph",
                    help="Output Detector Plane Histrogram file name",
                    type=str)
args = parser.parse_args()
warnings.simplefilter(action="ignore", category=RuntimeWarning)
#------------------------------------------------------------------------------
dphhdu = fits.open(args.infile)
qehdu = fits.open(args.relqefile)
fits.writeto(args.outfile, dphhdu[0].data, dphhdu[0].header)
for hdunum in range(1, 5):
    dph = dphhdu[hdunum].data
    qe = qehdu[hdunum].data
    dph = dph / qe
    dph = np.where(np.isnan(dph), 0, dph)  # zero out pixels where the relative QE is undefined
    fits.append(args.outfile, dph, dphhdu[hdunum].header)
dphhdu.close()
qehdu.close()
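
Invocation sketch (hypothetical script name; the positional arguments are the ones declared above):

    python correct_dph_relqe.py input.dph relativeQE.fits output.dph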
Example #57
0
        night = args[0]
        expid = int(args[1])

        #- Make sure we have what we need as input
        assert 'DESI_SPECTRO_SIM' in os.environ
        assert 'PIXPROD' in os.environ
        if isinstance(expid, str):
            expid = int(expid)

        #- Where are the input files?
        simpath = os.path.join(os.getenv('DESI_SPECTRO_SIM'),
                               os.getenv('PIXPROD'), night)
        opts.inspec = '{}/simspec-{:08d}.fits'.format(simpath, expid)

    stdfiber, wave, flux = get_simstds(opts.inspec, opts.spectroid)

    if opts.outfile is None:
        assert 'DESI_SPECTRO_REDUX' in os.environ
        assert 'PRODNAME' in os.environ

        outdir = os.path.join(os.getenv('DESI_SPECTRO_REDUX'),
                              os.getenv('PRODNAME'), 'exposures', night,
                              '{:08d}'.format(expid))
        opts.outfile = '{}/stdflux-sp{}-{:08d}.fits'.format(
            outdir, opts.spectroid, expid)
        print(opts.outfile)

    fits.writeto(opts.outfile, flux, overwrite=True)  # clobber= was removed in astropy 2.0
    fits.append(opts.outfile, wave)
    fits.append(opts.outfile, stdfiber)
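
Read-back sketch (assuming astropy.io.fits is imported as fits, and the three-HDU layout written above: flux, then the wavelength grid, then the standard-star fiber indices):

    with fits.open(opts.outfile) as hdul:
        flux, wave, stdfiber = hdul[0].data, hdul[1].data, hdul[2].data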
Example #58
0
from time import perf_counter

import numpy as np
from astropy.io import fits
# ppxf_wrap is a project-local helper, defined elsewhere in the original module

def run_ppxf(flux, ivar, mask, wave, specres, flux_hdr, outname, redshift, verbose=True):
    dx = flux_hdr['CD1_1'] * 3600.  # deg to arcsec
    dy = flux_hdr['CD2_2'] * 3600.  # deg to arcsec

    ppxf_obj = ppxf_wrap(redshift, wave, specres)
    # axis 0 is indexed by j and axis 1 by i below, so unpack the shape as
    # (ny, nx); the original (nx, ny) unpacking only worked for square cubes
    ny, nx, _ = flux.shape
    velarr = np.zeros((ny, nx))
    sigarr = np.zeros((ny, nx))
    medsnarr = np.zeros((ny, nx))
    flagarr = np.zeros((ny, nx), dtype=np.int16)  # 1 where a spaxel was fitted
    
    do_not_use = (mask & 2**10) > 0
    flux_masked = np.ma.array(flux, mask=do_not_use)
    flux_median = np.ma.median(flux_masked, axis=2)  # np.median would ignore the mask
    nflux = np.sum(flux_median > 0)
    
    print('Start kinematic measurement')
    t_start=perf_counter()
    if verbose:
        print("%2s %2s %10s %10s %5s %11s %4s" % ('i', 'j', 'Velocity', 'Dispersion', 'Chi2', 'Median S/N','t'))
    
    count=0
    for j in range(ny):
        for i in range(nx):
            t = perf_counter()
            if flux_median[j,i] > 0:
                ppxf_obj.flux=flux[j,i]
                ppxf_obj.ivar=ivar[j,i]
                ppxf_obj.mask=((mask[j,i] & 2**10) == 0)
                res, medsn=ppxf_obj.run()
                
                if not res:
                    continue
                velarr[j,i]=res.sol[0]
                sigarr[j,i]=res.sol[1]
                medsnarr[j,i]=medsn
                flagarr[j,i]=1
                count+=1

                if verbose:
                    print("%02d %02d %10.3f %10.3f %5.2f %11.1f %4.1f" % (i, j, res.sol[0], res.sol[1], res.chi2, medsn, perf_counter()-t))
                else:
                    if count==1:
                        t1=perf_counter()
                    if count==int(nflux*0.1):
                        remain_time=(nflux-int(nflux*0.1))*(perf_counter()-t1)/int(nflux*0.1)*1.1
                        print('remaining time to finish kinematic measurement (approx.): '+('%d' % int(remain_time))+' sec')

                    
    print('End measurement')
    print('Elapsed time: '+str(int(perf_counter()-t_start))+' sec')
    print('Saving velocity measurements to FITS file:', outname)
    hdu = fits.PrimaryHDU()
    hdu.writeto(outname, overwrite=True)

    fits.append(outname, velarr)
    fits.append(outname, sigarr)
    fits.append(outname, medsnarr)
    fits.append(outname, flagarr.astype(np.int16))

    append_file=fits.open(outname, mode='update')

    extnames=['STELLAR_VEL','STELLAR_SIGMA','MEDSN','FLAG']
    for k in range(4):
        hdr=append_file[k+1].header
        hdr['EXTNAME']=extnames[k]
        hdr['CD1_1']=dx/3600
        hdr['CD2_2']=dy/3600

    append_file.close()
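
The reopen-in-update-mode step above can be avoided. A minimal alternative sketch (same arrays and extension names as above) sets the names at write time; the CD1_1/CD2_2 cards could likewise be set on each ImageHDU header before writing:

hdul = fits.HDUList([fits.PrimaryHDU()])
for arr, extname in zip([velarr, sigarr, medsnarr, flagarr],
                        ['STELLAR_VEL', 'STELLAR_SIGMA', 'MEDSN', 'FLAG']):
    hdul.append(fits.ImageHDU(data=arr, name=extname))
hdul.writeto(outname, overwrite=True)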
Example #59
0
import sys

import pyfits  # or: from astropy.io import fits as pyfits
from yt.mods import load  # yt 2.x import style, matching the pf.h API used below

name = sys.argv[1]
num = int(sys.argv[2])  # snapshot number; an int, not a float, for the %04i paths below
path = sys.argv[4]

fn = path + "/DD%04i/DD%04i" % (num, num)  # parameter file to load

print(fn)

pf = load(fn) # load data

# This is the resolution we will extract at
DIMS = int(sys.argv[3])
# Now, we construct an object that describes the data region and structure we
# want
cube = pf.h.covering_grid(0, # The level we are willing to extract to; higher levels than this will not contribute to the data
                          left_edge=[0.0, 0.0, 0.0],
                          right_edge=[1.0, 1.0, 1.0],
                          dims=[DIMS,DIMS,DIMS],
                          fields=["Density"])

#print cube

pyfits.writeto('%s_flatrho_%04i.fits' %(name, num), cube["Density"])
pyfits.append('%s_flatrho_%04i.fits' %(name, num), cube["x-velocity"])
pyfits.append('%s_flatrho_%04i.fits' %(name, num), cube["y-velocity"])
pyfits.append('%s_flatrho_%04i.fits' %(name, num), cube["z-velocity"])
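
Invocation sketch (hypothetical script name): extract a 256^3 unigrid cube from output DD0042 under /data/sim and write run1_flatrho_0042.fits:

    python extract_flatrho.py run1 42 256 /data/sim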
Example #60
0
 def _saveCube(self, total_cube, name):
     # store a masked cube as two HDUs: the data, then the mask cast to int
     fits_file_name = os.path.join(Caliball._storageFolder(), name)
     pyfits.writeto(fits_file_name, total_cube.data)
     pyfits.append(fits_file_name, total_cube.mask.astype(int))
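
Read-back sketch (assuming the two-HDU layout written by _saveCube above): the masked cube can be rebuilt from the two extensions.

    import numpy as np
    with pyfits.open(fits_file_name) as hdul:
        cube = np.ma.masked_array(hdul[0].data, mask=hdul[1].data.astype(bool))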