Code example #1
    def resize_image_from_rp(self, resize_rp=True):
        if resize_rp:
            rp_pixel_in_kpc = 0.016 * self.r_petro_kpc  # P. Torrey -- this is my target scale; was 0.008, upping to 0.016 for GZ based on feedback
            Ntotal_new = int((self.noisy_image.pixel_in_kpc / rp_pixel_in_kpc) * self.noisy_image.n_pixels)
            rebinned_image = congrid.congrid(self.noisy_image.image, (Ntotal_new, Ntotal_new))

            diff = n_pixels_galaxy_zoo - Ntotal_new

            if diff >= 0:
                # P. Torrey -- desired FOV is larger than already rendered;
                # this is not a problem if image edges have ~0 flux.
                # Otherwise, can cause artifacts.
                shiftc = int(np.floor(1.0 * diff / 2.0))
                fake_image = np.zeros((n_pixels_galaxy_zoo, n_pixels_galaxy_zoo))
                fake_image[shiftc:shiftc + Ntotal_new, shiftc:shiftc + Ntotal_new] = rebinned_image[0:Ntotal_new, 0:Ntotal_new]
                rp_image = fake_image

                # note: this immediately overwrites the padded image built above
                rp_image = congrid.congrid(self.noisy_image.image_in_nmaggies, (n_pixels_galaxy_zoo, n_pixels_galaxy_zoo))
            else:
                shift = int(np.floor(-1.0 * diff / 2.0))
                rp_image = rebinned_image[shift:shift + n_pixels_galaxy_zoo, shift:shift + n_pixels_galaxy_zoo]

            self.rp_image.init_image(rp_image, self, fov=424.0 * (0.016 * self.r_petro_kpc))
        else:
            self.rp_image.init_image(self.noisy_image.image, self, fov=self.noisy_image.pixel_in_kpc * self.noisy_image.n_pixels)
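Note: congrid here is the SciPy-cookbook-style regridding helper used throughout these examples; it is not on PyPI. Below is a minimal, self-contained sketch of the same rebin-then-pad-or-crop step, using scipy.ndimage.zoom as a stand-in for congrid (an assumption, not the project's code):

import numpy as np
from scipy.ndimage import zoom

def resize_to_fixed_fov(image, pixel_in_kpc, target_pixel_in_kpc, n_target=424):
    """Rebin `image` so one pixel = target_pixel_in_kpc, then zero-pad or
    centre-crop to an n_target x n_target frame, as in resize_image_from_rp."""
    n_new = int((pixel_in_kpc / target_pixel_in_kpc) * image.shape[0])
    rebinned = zoom(image, n_new / image.shape[0], order=1)  # stand-in for congrid
    diff = n_target - rebinned.shape[0]
    if diff >= 0:   # target FOV larger than rendered: pad with zeros
        out = np.zeros((n_target, n_target))
        s = diff // 2
        out[s:s + rebinned.shape[0], s:s + rebinned.shape[1]] = rebinned
    else:           # target FOV smaller: crop the centre
        s = (-diff) // 2
        out = rebinned[s:s + n_target, s:s + n_target]
    return out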
Code example #2
def do_psf(initial_image, PSF_use, image_pixsize, psf_pixsize):

    orig_size = initial_image.shape[0]

    # resample onto the PSF's pixel scale before convolving
    size = int(orig_size * image_pixsize / psf_pixsize)

    print(orig_size, size)

    binned_image = congrid.congrid(initial_image, (size, size),
                                   method='linear')

    print(binned_image.shape)

    conv_im = scipy.signal.convolve2d(binned_image,
                                      PSF_use / np.sum(PSF_use),
                                      mode='same',
                                      boundary='wrap')

    print(conv_im.shape)

    out_im = congrid.congrid(conv_im, (orig_size, orig_size), method='linear')

    print(out_im.shape)

    return out_im
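Note: do_psf resamples the image onto the PSF's pixel grid, convolves, and resamples back. A self-contained equivalent sketch (scipy.ndimage.zoom standing in for congrid, which is an assumption):

import numpy as np
import scipy.signal
from scipy.ndimage import zoom

def do_psf_sketch(image, psf, image_pixsize, psf_pixsize):
    orig = image.shape[0]
    size = int(orig * image_pixsize / psf_pixsize)       # match the PSF pixel scale
    binned = zoom(image, size / orig, order=1)
    conv = scipy.signal.convolve2d(binned, psf / np.sum(psf),
                                   mode='same', boundary='wrap')
    return zoom(conv, orig / conv.shape[0], order=1)     # back to the original grid

# A delta-function PSF should leave a flat image essentially unchanged:
img = np.ones((64, 64))
psf = np.zeros((7, 7)); psf[3, 3] = 1.0
out = do_psf_sketch(img, psf, image_pixsize=0.5, psf_pixsize=0.25)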
Code example #3
    def add_background(self,
                       seed=1,
                       add_background=True,
                       rebin_gz=False,
                       n_target_pixels=424):
        if add_background and (len(backgrounds[self.band]) > 0):
            #=== load *full* bg image, and its properties ===#
            bg_filename = (backgrounds[self.band])[0]
            file = pyfits.open(bg_filename)
            header = file[0].header
            pixsize = get_pixelsize_arcsec(header)
            Nx = header.get('NAXIS2')
            Ny = header.get('NAXIS1')

            #=== figure out how much of the image to extract ===#
            Npix_get = int(np.floor(self.rp_image.n_pixels *
                                    self.rp_image.pixel_in_arcsec / pixsize))

            if (
                    Npix_get > self.rp_image.n_pixels
            ):  # P. Torrey 9/10/14   -- sub optimal, but avoids strange noise ...
                Npix_get = self.rp_image.n_pixels  #		... in the images.  Could cause problems for automated analysis.

            im = file[0].data  # this is in some native units
            halfval_i = np.floor(float(Nx) / 1.3)
            halfval_j = np.floor(float(Ny) / 1.3)
            np.random.seed(seed=seed)

            starti = np.random.random_integers(5, halfval_i)
            startj = np.random.random_integers(5, halfval_j)

            bg_image_raw = im[starti:starti + Npix_get,
                              startj:startj + Npix_get]

            #=== need to convert to microJy / str ===#
            bg_image_muJy = bg_image_raw * 10.0**(
                -0.4 * (bg_zpt[self.band][0] - 23.9))
            pixel_area_in_str = pixsize**2 / n_arcsec_per_str
            bg_image = bg_image_muJy / pixel_area_in_str

            #=== need to rebin bg_image  ===#
            bg_image = congrid.congrid(
                bg_image, (self.rp_image.n_pixels, self.rp_image.n_pixels))
            new_image = bg_image + self.rp_image.image
            new_image[new_image <
                      self.rp_image.image.min()] = self.rp_image.image.min()
        else:
            new_image = self.rp_image.image

        if rebin_gz:
            new_image = congrid.congrid(new_image,
                                        (n_target_pixels, n_target_pixels))

        print(new_image.shape)

        self.bg_image.init_image(new_image,
                                 self,
                                 fov=self.rp_image.pixel_in_kpc *
                                 self.rp_image.n_pixels)
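Note: np.random.random_integers is deprecated (and removed in recent NumPy); the same seeded cutout selection can be written with randint, whose upper bound is exclusive. A sketch with placeholder sizes:

import numpy as np

rng = np.random.RandomState(seed=1)          # reproducible, as in add_background
Nx, Ny, Npix_get = 2048, 2048, 256           # placeholder dimensions
starti = rng.randint(5, int(Nx / 1.3) + 1)   # +1: randint's high is exclusive
startj = rng.randint(5, int(Ny / 1.3) + 1)
im = np.zeros((Nx, Ny))
cutout = im[starti:starti + Npix_get, startj:startj + Npix_get]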
Code example #4
File: myImage.py  Project: kirillzhuravlev/atrex
    def search_for_peaks (self, peaks, thr, max_peak_size, num_of_segments, perc):
        #thr=self.threshold                       # 100:       raw counts threshold for locating peaks
        #max_peak_size=self.mindist               # 10:        max allowed peak size with pixels above local background + Imin
        #num_of_segments = [self.pbox,self.pbox]  # [50.,50.]: number of segments in X and Y for local background estimation
        #perc=self.bbox                           # 1.0:       percent of median for background

        topX=self.imArraySize[0]
        topY=self.imArraySize[1]
        img1=cgd.congrid(self.imArray, [1000,1000])
        bg=self.estimate_local_background (img1, 50, 50, 100, 1.0)

        w=np.where(img1-bg > thr)
        for i in range(len(w[0])):
            XYs=[w[1][i],w[0][i]]
            if img1[XYs[1],XYs[0]]-bg[XYs[1],XYs[0]] > thr :
                XY=[0.0,0.0]
                aa=self.grow_peak(img1, bg, XYs[1],XYs[0], thr/2., 1000, 1000)
                if (max([aa[1]-aa[0], aa[3]-aa[2]]) < max_peak_size) and (aa[6] > thr) :
                    XY[0]=aa[5]*topX/1000
                    XY[1]=aa[4]*topY/1000
                    peak=myPeakTable.myPeak()
                    peak.setDetxy(XY)
                    #peak.setIntAD=img1[aa[4],aa[5]]
                    #ref_peak.setgonio=im.sts.gonio
                    peaks.addPeak(peak)
                img1[aa[2]:aa[3],aa[0]:aa[1]]=0
        peaks.find_multiple_peak_copies()
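Note: the search rescales every frame to a fixed 1000x1000 grid so the threshold and background box sizes are resolution-independent. A stripped-down sketch of just the thresholding step (scipy stand-ins, not the project's estimate_local_background):

import numpy as np
from scipy.ndimage import zoom, median_filter

def candidate_pixels(arr, thr):
    img = zoom(arr, (1000.0 / arr.shape[0], 1000.0 / arr.shape[1]), order=0)
    bg = median_filter(img, size=10)      # crude local-background estimate
    ys, xs = np.where(img - bg > thr)     # same (row, col) convention as w above
    return img, bg, list(zip(xs, ys))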
Code example #5
    def add_gaussian_psf(self, add_psf=True, sample_factor=1.0):  # operates on sunrise_image -> creates psf_image
        if add_psf:
            current_psf_sigma_pixels = self.telescope.psf_fwhm_arcsec * (1.0 / 2.355) / self.sunrise_image.pixel_in_arcsec

            if current_psf_sigma_pixels < 8:  # want the psf sigma to be resolved with (at least) 8 pixels...
                target_psf_sigma_pixels = 8.0
                n_pixel_new = int(np.floor(self.sunrise_image.n_pixels * target_psf_sigma_pixels / current_psf_sigma_pixels))

                if n_pixel_new > 2500:  # an upper limit owing to memory constraints...
                    # beyond this, the PSF is already very small...
                    n_pixel_new = 2500
                    target_psf_sigma_pixels = n_pixel_new * current_psf_sigma_pixels / self.sunrise_image.n_pixels

                new_image = congrid.congrid(self.sunrise_image.image, (n_pixel_new, n_pixel_new))
                current_psf_sigma_pixels = target_psf_sigma_pixels * (
                    (self.sunrise_image.n_pixels * target_psf_sigma_pixels / current_psf_sigma_pixels) / n_pixel_new)
            else:
                new_image = self.sunrise_image.image

            psf_image = np.zeros_like(new_image) * 1.0
            sp.ndimage.gaussian_filter(new_image, current_psf_sigma_pixels,
                                       output=psf_image, mode='constant')

            self.psf_image.init_image(psf_image, self)
        else:
            self.psf_image.init_image(self.sunrise_image.image, self)
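Note: the 1/2.355 factor converts a Gaussian FWHM to sigma, since FWHM = 2*sqrt(2*ln 2)*sigma, about 2.355*sigma. A quick check with made-up telescope numbers:

import numpy as np

fwhm_to_sigma = 1.0 / (2.0 * np.sqrt(2.0 * np.log(2.0)))   # about 0.42466 = 1/2.355
psf_fwhm_arcsec, pixel_in_arcsec = 0.8, 0.05               # example values only
sigma_pixels = psf_fwhm_arcsec * fwhm_to_sigma / pixel_in_arcsec
print(sigma_pixels)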
Code example #6
    def search_for_peaks(self, peaks, thr, max_peak_size, num_of_segments,
                         perc):
        #thr=self.threshold                       # 100:       raw counts threshold for locating peaks
        #max_peak_size=self.mindist               # 10:        max allowed peak size with pixels above local background + Imin
        #num_of_segments = [self.pbox,self.pbox]  # [50.,50.]: number of segments in X and Y for local background estimation
        #perc=self.bbox                           # 1.0:       percent of median for background

        #need to add intssd, gonio and any other parameters for the peak.
        topX = self.imArraySize[0]
        topY = self.imArraySize[1]
        img1 = cgd.congrid(self.imArray, [1000, 1000])
        bg = self.estimate_local_background(img1, self.locBcgr, self.locBcgr,
                                            100, 1.0)

        w = np.where(img1 - bg > thr)
        for i in range(len(w[0])):
            XYs = [w[1][i], w[0][i]]
            if img1[XYs[1], XYs[0]] - bg[XYs[1], XYs[0]] > thr:
                XY = [0.0, 0.0]
                aa = self.grow_peak(img1, bg, XYs[1], XYs[0], thr / 2., 1000,
                                    1000)
                if (max([aa[1] - aa[0], aa[3] - aa[2]]) <
                        max_peak_size) and (aa[6] > thr):
                    XY[0] = aa[5] * topX / 1000
                    XY[1] = aa[4] * topY / 1000
                    peak = myPeakTable.myPeak()
                    peak.setDetxy(XY)
                    #peak.setIntAD=img1[aa[4],aa[5]]
                    #ref_peak.setgonio=im.sts.gonio
                    peaks.addPeak(peak)
                img1[aa[2]:aa[3], aa[0]:aa[1]] = 0
        peaks.find_multiple_peak_copies()
Code example #7
def convert_galaxy(filename, outfilename, sfid_all, merger_bool_all):
    print(filename, outfilename)

    hdu = pyfits.open(filename)[0]

    array = hdu.data  # reuse the HDU opened above instead of re-reading the file
    new_array = congrid.congrid(array[61:162, 61:162], (64, 64))

    sfid = float(hdu.header['SUBH_ID'])

    #mbi = np.where(sfid_all==sfid)[0]
    #mb = merger_bool_all[mbi]
    #print(mb,mbi,sfid_all)

    index = sfid_all == sfid

    mb = merger_bool_all[sfid_all == sfid]

    fu = pyfits.PrimaryHDU(new_array)

    fu.header['SUBH_ID'] = sfid
    fu.header['ISMERGER'] = float(mb)

    ful = pyfits.HDUList([fu])

    ful.writeto(outfilename, overwrite=True)

    return
Code example #8
    def search_for_peaks_arr(self, arr, peaks, thr, max_peak_size,
                             num_of_segments, perc):
        sxy = arr.shape
        topX = sxy[1]
        topY = sxy[0]
        img1 = cgd.congrid(arr, [1000, 1000])
        bg = self.estimate_local_background(img1, self.locBcgr, self.locBcgr,
                                            100, 1.0)

        w = np.where(img1 - bg > thr)
        for i in range(len(w[0])):
            XYs = [w[1][i], w[0][i]]
            if img1[XYs[1], XYs[0]] - bg[XYs[1], XYs[0]] > thr:
                XY = [0.0, 0.0]
                aa = self.grow_peak(img1, bg, XYs[1], XYs[0], thr / 2., 1000,
                                    1000)
                if (max([aa[1] - aa[0], aa[3] - aa[2]]) <
                        max_peak_size) and (aa[6] > thr):
                    XY[0] = aa[5] * topX / 1000
                    XY[1] = aa[4] * topY / 1000
                    peak = myPeakTable.myPeak()
                    peak.setDetxy(XY)
                    #peak.setIntAD=img1[aa[4],aa[5]]
                    #ref_peak.setgonio=im.sts.gonio
                    peaks.addPeak(peak)
                img1[aa[2]:aa[3], aa[0]:aa[1]] = 0
        peaks.find_multiple_peak_copies()
Code example #9
File: test.py  Project: elehcim/simifucube
def congrid_debug(snap_name):
    sp, pos, my_last_valid_freq = spectra_from_snap(snap_name)
    print('Last valid freq:', my_last_valid_freq)
    my_idx = 26
    import matplotlib.pyplot as plt
    plt.step(sp.spectral_axis, sp[my_idx].flux)

    new_bins_limits = {}
    wvl = sp.wavelength.value
    # if wvl.min() < MUSE_LIMITS['start']:
    new_bins_limits['start'] = max(wvl.min(), MUSE_LIMITS['start'])
    new_bins_limits['stop'] = min(wvl.max(), MUSE_LIMITS['stop'])
    new_bins_limits['step'] = MUSE_LIMITS['step']
    # if wvl.max() > MUSE_LIMITS['stop']:
    #     new_bins_limits['stop'] = wvl.max()
    print(new_bins_limits)
    new_bins = np.arange(**new_bins_limits)

    # rebinned = rebin(sp, new_bins=new_bins)
    # wvl = sp.spectral_axis.value
    limit = np.where(np.logical_and(wvl > new_bins.min(), wvl < new_bins.max()))[0]
    limited_fl = sp.flux[:, limit].value
    nstar = sp.shape[0]
    n_channels = len(new_bins)
    new_fl = congrid(limited_fl, (nstar, n_channels))
    new_sp = Spectrum1D(spectral_axis=new_bins * sp.spectral_axis_unit, flux=new_fl * sp.flux.unit)
    # return new_sp
    plt.step(new_sp.spectral_axis, new_sp[my_idx].flux)

    plt.show()
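Note: the new wavelength grid is built by unpacking a start/stop/step dict into np.arange. A tiny standalone check of that construction (these MUSE_LIMITS values are placeholders, not taken from the project):

import numpy as np

MUSE_LIMITS = {'start': 4750.0, 'stop': 9350.0, 'step': 1.25}  # placeholder values
new_bins = np.arange(**MUSE_LIMITS)
print(new_bins[0], new_bins[-1], len(new_bins))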
Code example #10
    def add_background(self, seed=1, add_background=True, rebin_gz=False, n_target_pixels=424):
        if add_background and (len(backgrounds[self.band]) > 0):
            #=== load *full* bg image, and its properties ===#
            bg_filename = (backgrounds[self.band])[0]
            file = pyfits.open(bg_filename)
            header = file[0].header
            pixsize = get_pixelsize_arcsec(header)
            Nx = header.get('NAXIS2')
            Ny = header.get('NAXIS1')

            #=== figure out how much of the image to extract ===#
            Npix_get = int(np.floor(self.rp_image.n_pixels * self.rp_image.pixel_in_arcsec / pixsize))

            if Npix_get > self.rp_image.n_pixels:  # P. Torrey 9/10/14 -- sub optimal, but avoids strange noise ...
                Npix_get = self.rp_image.n_pixels  # ... in the images.  Could cause problems for automated analysis.

            im = file[0].data  # this is in some native units
            halfval_i = np.floor(float(Nx) / 1.3)
            halfval_j = np.floor(float(Ny) / 1.3)
            np.random.seed(seed=seed)

            starti = np.random.random_integers(5, halfval_i)
            startj = np.random.random_integers(5, halfval_j)

            bg_image_raw = im[starti:starti + Npix_get, startj:startj + Npix_get]

            #=== need to convert to microJy / str ===#
            bg_image_muJy = bg_image_raw * 10.0**(-0.4 * (bg_zpt[self.band][0] - 23.9))
            pixel_area_in_str = pixsize**2 / n_arcsec_per_str
            bg_image = bg_image_muJy / pixel_area_in_str

            #=== need to rebin bg_image ===#
            bg_image = congrid.congrid(bg_image, (self.rp_image.n_pixels, self.rp_image.n_pixels))
            new_image = bg_image + self.rp_image.image
            new_image[new_image < self.rp_image.image.min()] = self.rp_image.image.min()
        else:
            new_image = self.rp_image.image

        if rebin_gz:
            new_image = congrid.congrid(new_image, (n_target_pixels, n_target_pixels))

        print(new_image.shape)

        self.bg_image.init_image(new_image, self, fov=self.rp_image.pixel_in_kpc * self.rp_image.n_pixels)
Code example #11
File: myDetector.py  Project: comptech/atrex
 def local_background (self, myarr) :
     # scale this to a 1000 by 1000 grid but return an array in original format
     # the returned array is the median on a 10x10 blocksize
     s = myarr.shape
     myarr0 = cgd.congrid (myarr, (1000, 1000) , method='nearest', minusone=True)
     #myarr0 = imresize(myarr, (1000,1000))
     n = int (math.sqrt(s[0]*s[1]))
     out=np.zeros(myarr0.shape,myarr0.dtype)
     for i in range (100) :
         for j in range (100) :
             box = myarr0[i*10:(i+1)*10, j*10:(j+1)*10]
             nz = np.where (box != 0)
             npts = len(nz[0])
             if (npts == 0) :
                 m = 1
             else :
                 m = np.median(box[nz[0],nz[1]])
             out[i*10:(i+1)*10, j*10:(j+1)*10]=m
     outnew = cgd.congrid (out, s, method='nearest', minusone=True)
     #outnew = imresize (out, s)
     return outnew
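Note: the double loop takes the median of nonzero pixels in each 10x10 block and broadcasts it back; an equivalent vectorized sketch in pure NumPy (not the project's code):

import numpy as np

def block_median_background(arr1000):
    """Per-block (10x10) median of nonzero pixels of a 1000x1000 array,
    expanded back to full resolution, mirroring local_background above."""
    blocks = arr1000.reshape(100, 10, 100, 10).swapaxes(1, 2)    # (100,100,10,10)
    masked = np.ma.masked_equal(blocks.reshape(100, 100, 100), 0)
    med = np.ma.median(masked, axis=-1).filled(1.0)              # all-zero block -> 1
    return np.repeat(np.repeat(med, 10, axis=0), 10, axis=1)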
Code example #12
    def rebin_to_physical_scale(self, rebin_phys=True):
        if rebin_phys:
            print(self.psf_image.pixel_in_arcsec * self.psf_image.n_pixels)
            n_pixel_new = int(np.floor((self.psf_image.pixel_in_arcsec / self.telescope.pixelsize_arcsec) * self.psf_image.n_pixels))
            rebinned_image = congrid.congrid(self.psf_image.image, (n_pixel_new, n_pixel_new))
            self.rebinned_image.init_image(rebinned_image, self)
        else:
            self.rebinned_image.init_image(self.psf_image.image, self)
Code example #13
    def resize_image_from_rp(self, resize_rp=True):
        if resize_rp:
            rp_pixel_in_kpc = 0.016 * self.r_petro_kpc  # P. Torrey -- this is my target scale; was 0.008, upping to 0.016 for GZ based on feedback
            Ntotal_new = int((self.noisy_image.pixel_in_kpc /
                              rp_pixel_in_kpc) * self.noisy_image.n_pixels)
            rebinned_image = congrid.congrid(self.noisy_image.image,
                                             (Ntotal_new, Ntotal_new))

            diff = n_pixels_galaxy_zoo - Ntotal_new

            if diff >= 0:  # P. Torrey -- desired FOV is larger than already rendered...
                # this is not a problem if image edges have ~0 flux.
                # Otherwise, can cause artifacts.
                shiftc = int(np.floor(1.0 * diff / 2.0))
                fake_image = np.zeros(
                    (n_pixels_galaxy_zoo, n_pixels_galaxy_zoo))
                fake_image[shiftc:shiftc + Ntotal_new,
                           shiftc:shiftc + Ntotal_new] = rebinned_image[0:Ntotal_new,
                                                                        0:Ntotal_new]
                rp_image = fake_image

                # note: this immediately overwrites the padded image built above
                rp_image = congrid.congrid(
                    self.noisy_image.image_in_nmaggies,
                    (n_pixels_galaxy_zoo, n_pixels_galaxy_zoo))
            else:
                shift = int(np.floor(-1.0 * diff / 2.0))
                rp_image = rebinned_image[shift:shift + n_pixels_galaxy_zoo,
                                          shift:shift + n_pixels_galaxy_zoo]

            self.rp_image.init_image(rp_image,
                                     self,
                                     fov=424.0 * (0.016 * self.r_petro_kpc))
        else:
            self.rp_image.init_image(self.noisy_image.image,
                                     self,
                                     fov=self.noisy_image.pixel_in_kpc *
                                     self.noisy_image.n_pixels)
Code example #14
 def rebin_to_physical_scale(self, rebin_phys=True):
     if rebin_phys:
         print(self.psf_image.pixel_in_arcsec * self.psf_image.n_pixels)
         n_pixel_new = int(np.floor(
             (self.psf_image.pixel_in_arcsec /
              self.telescope.pixelsize_arcsec) * self.psf_image.n_pixels))
         rebinned_image = congrid.congrid(self.psf_image.image,
                                          (n_pixel_new, n_pixel_new))
         self.rebinned_image.init_image(rebinned_image, self)
     else:
         self.rebinned_image.init_image(self.psf_image.image, self)
Code example #15
File: myImage.py  Project: kirillzhuravlev/atrex
    def search_for_peaks_arr (self, arr, peaks, thr, max_peak_size, num_of_segments, perc):
        sxy = arr.shape
        topX=sxy[1]
        topY=sxy[0]
        img1=cgd.congrid(arr, [1000,1000])
        bg=self.estimate_local_background (img1, 50, 50, 100, 1.0)

        w=np.where(img1-bg > thr)
        for i in range(len(w[0])):
            XYs=[w[1][i],w[0][i]]
            if img1[XYs[1],XYs[0]]-bg[XYs[1],XYs[0]] > thr :
                XY=[0.0,0.0]
                aa=self.grow_peak(img1, bg, XYs[1],XYs[0], thr/2., 1000, 1000)
                if (max([aa[1]-aa[0], aa[3]-aa[2]]) < max_peak_size) and (aa[6] > thr) :
                    XY[0]=aa[5]*topX/1000
                    XY[1]=aa[4]*topY/1000
                    peak=myPeakTable.myPeak()
                    peak.setDetxy(XY)
                    #peak.setIntAD=img1[aa[4],aa[5]]
                    #ref_peak.setgonio=im.sts.gonio
                    peaks.addPeak(peak)
                img1[aa[2]:aa[3],aa[0]:aa[1]]=0
        peaks.find_multiple_peak_copies()
Code example #16
    def add_gaussian_psf(self,
                         add_psf=True,
                         sample_factor=1.0
                         ):  # operates on sunrise_image -> creates psf_image
        if add_psf:
            current_psf_sigma_pixels = self.telescope.psf_fwhm_arcsec * (
                1.0 / 2.355) / self.sunrise_image.pixel_in_arcsec

            if current_psf_sigma_pixels < 8:  # want the psf sigma to be resolved with (at least) 8 pixels...
                target_psf_sigma_pixels = 8.0
                n_pixel_new = int(np.floor(self.sunrise_image.n_pixels *
                                           target_psf_sigma_pixels /
                                           current_psf_sigma_pixels))

                if n_pixel_new > 2500:  # an upper limit owing to memory constraints...
                    # beyond this, the PSF is already very small...
                    n_pixel_new = 2500
                    target_psf_sigma_pixels = n_pixel_new * current_psf_sigma_pixels / self.sunrise_image.n_pixels

                new_image = congrid.congrid(self.sunrise_image.image,
                                            (n_pixel_new, n_pixel_new))
                current_psf_sigma_pixels = target_psf_sigma_pixels * (
                    (self.sunrise_image.n_pixels * target_psf_sigma_pixels /
                     current_psf_sigma_pixels) / n_pixel_new)
            else:
                new_image = self.sunrise_image.image

            psf_image = np.zeros_like(new_image) * 1.0
            sp.ndimage.gaussian_filter(new_image,
                                       current_psf_sigma_pixels,
                                       output=psf_image,
                                       mode='constant')

            self.psf_image.init_image(psf_image, self)
        else:
            self.psf_image.init_image(self.sunrise_image.image, self)
Code example #17
def do_redshifting(cube, factor, psf, convolve=True):
    outputShape = cube.shape[1] // factor, cube.shape[2] // factor
    rebinned = np.empty((cube.shape[0], outputShape[0], outputShape[1]), dtype=np.float64)
    diff = np.empty((cube.shape[0], 1), dtype=np.float64)
    print(cube.shape, 'cshape', psf.shape, 'psf', rebinned.shape, 'rebinned', outputShape, 'os')
    #fluxFactor = (cube.shape[1]/rebinned.shape[1])*(cube.shape[2]/rebinned.shape[2])

    for i in range(0, cube.shape[0]):
        cube_slice = cube[i, :, :]
        rebinned[i, :, :] = congrid(cube_slice, outputShape, method='linear', centre=True, minusone=False)
        fluxRatio = np.sum(cube_slice) / np.sum(rebinned[i, :, :])

        rebinned[i, :, :] = rebinned[i, :, :] * fluxRatio  # flux preserving
        diff[i] = np.sum(rebinned[i, :, :]) - np.sum(cube_slice)
        print(np.sum(cube_slice) / np.sum(rebinned[i, :, :]), 'FL2', np.sum(cube_slice), np.sum(rebinned[i, :, :]))
        if convolve:
            rebinned[i, :, :] = get_convolved_image(rebinned[i, :, :], psf)

    print(np.sum(cube), np.sum(rebinned), np.sum(diff), 'diff')
    return rebinned
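Note: the "flux preserving" step is just rescaling each rebinned slice by the ratio of total flux before and after interpolation. As a standalone sketch (scipy.ndimage.zoom in place of congrid, an assumption):

import numpy as np
from scipy.ndimage import zoom

def rebin_preserve_flux(plane, factor):
    small = zoom(plane, 1.0 / factor, order=1)
    small *= np.sum(plane) / np.sum(small)   # renormalize so total flux is unchanged
    return small

plane = np.random.rand(128, 128)
out = rebin_preserve_flux(plane, 4)
assert np.isclose(np.sum(out), np.sum(plane))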
Code example #18
def ngdc_get_image_from_file(file, resample_pct=None):

    fstruct = ngdc_get_fnames(file)

    if fstruct.fname == '':
        print('ERROR: FILE NOT FOUND %s' % file)
        return None
    if fstruct.fname_hdr == '':
        print('ERROR: FILE HEADER NOT FOUND FOR %s' % file)
        return None
    hdr_meta = ngdc_read_envi_hdr(fstruct.fname_hdr)
    ns = hdr_meta['n_samples']
    nl = hdr_meta['n_lines']
    dt = hdr_meta['data type']

    if resample_pct is not None:
        pct = resample_pct
    else:
        pct = 1
    ns_out = int(np.floor(pct * ns))
    nl_out = int(np.floor(pct * nl))
    dtype = dt_lookup(dt)

    if fstruct.compress:
        f = gzip.GzipFile(file)
        fc = f.read()
        image = np.frombuffer(fc, dtype=dtype)
        f.close()
    else:
        image = np.fromfile(file, dtype=dtype)
    image = np.reshape(image, (nl, ns))

    if pct != 1:
        return congrid(image, [nl_out, ns_out])
    else:
        return image
Code example #19
import astropy
import astropy.cosmology
import astropy.io.fits as pyfits
import astropy.units as u
from astropy.constants import G
from astropy.cosmology import WMAP7, z_at_value
from astropy.coordinates import SkyCoord
import copy
import medianstats_bootstrap as msbs
import numpy as np
import congrid

if __name__ == "__main__":
    bb = pyfits.open('bbdata.fits')
    Npix = bb['STELLAR_MASS'].data.shape[0]
    if Npix != 4096:
        mtot = np.sum(bb['STELLAR_MASS'].data)
        mstar = congrid.congrid(bb['STELLAR_MASS'].data, (4096, 4096),
                                centre=True)
        mnew = np.sum(mstar)
        mstar = mstar * (mtot / mnew)
        print(mnew, mtot)

        zs = bb['WEIGHTED_Z'].data

        #sfrtot = np.sum(bb['SFR'].data)
        #sfr = congrid.congrid( bb['SFR'].data, (4096,4096),centre=True)
        #sfrnew = np.sum(sfr)
        #sfr = sfr*(sfrtot/sfrnew)

        z_f160 = congrid.congrid(zs[10, :, :], (4096, 4096), centre=True)
        z_f444 = congrid.congrid(zs[18, :, :], (4096, 4096), centre=True)
    else:
        mstar = bb['STELLAR_MASS'].data
Code example #20
def single_run_test(ind,ysc1,ysc2,q,vd,pha,zl,zs):
    dsx_sdss     = 0.396         # pixel size of SDSS detector (arcsec).
    R  = 3.0000
    #zl = 0.2     #zl is the redshift of the lens galaxy.
    #zs = 1.0
    #vd = 520    #Velocity Dispersion.
    nnn = 128      #Image dimension
    bsz = dsx_sdss*nnn # arcsecs
    dsx = bsz/nnn         # pixel size of the output image (arcsec).
    nstd = 59             # noise variance (std = sqrt(nstd) below)

    xx01 = np.linspace(-bsz/2.0,bsz/2.0,nnn)+0.5*dsx
    xx02 = np.linspace(-bsz/2.0,bsz/2.0,nnn)+0.5*dsx
    xi2,xi1 = np.meshgrid(xx01,xx02)
    #----------------------------------------------------------------------
    #ysc1 = 0.2
    #ysc2 = 0.5
    dsi = 0.03
    g_source = pyfits.getdata("./439.0_149.482739_1.889989_processed.fits")
    g_source = np.array(g_source,dtype="<d")*10.0
    g_source[g_source<=0.0001] = 1e-6
    #print np.sum(g_source)
    #print np.max(g_source)
    #pl.figure()
    #pl.contourf(g_source)
    #pl.colorbar()
    #g_source = p2p.cosccd2mag(g_source)
    ##g_source = p2p.mag2sdssccd(g_source)
    ##print np.max(g_source*13*13*52.0)
    #pl.figure()
    #pl.contourf(g_source)
    #pl.colorbar()
    #----------------------------------------------------------------------
    xc1 = 0.0       #x coordinate of the center of lens (in units of Einstein radius).
    xc2 = 0.0       #y coordinate of the center of lens (in units of Einstein radius).
    #q   = 0.7       #Ellipticity of lens.
    rc  = 0.0       #Core size of lens (in units of Einstein radius).
    re  = re_sv(vd,zl,zs)       #Einstein radius of lens.
    #pha = 45.0      #Orientation of lens.
    lpar = np.asarray([xc1,xc2,q,rc,re,pha])
    #----------------------------------------------------------------------
    ai1,ai2,mua = lens_equation_sie(xi1,xi2,lpar)

    yi1 = xi1-ai1
    yi2 = xi2-ai2

    g_limage = lv4.call_ray_tracing(g_source,yi1,yi2,ysc1,ysc2,dsi)
    g_limage[g_limage<=0.0001] = 1e-6
    g_limage = p2p.cosccd2mag(g_limage)
    g_limage = p2p.mag2sdssccd(g_limage)

    #pl.figure()
    #pl.imshow((g_limage),interpolation='lanczos',cmap=cm.gray)
    #pl.colorbar()

    #-------------------------------------------------------------
    # Need to calibrate the mags
    dA = Planck13.comoving_distance(zl).value*1000./(1+zl)
    Re = dA*np.sin(R*np.pi/180./3600.)
    counts  =Brightness(Re,vd)
    vpar = np.asarray([counts,R,xc1,xc2,q,pha])
    #g_lens = deVaucouleurs(xi1,xi2,xc1,xc2,counts,R,1.0-q,pha)
    g_lens = de_vaucouleurs_2d(xi1,xi2,vpar)

    #pl.figure()
    #pl.imshow((g_lens),interpolation='nearest',cmap=cm.gray)
    #pl.colorbar()

    g_clean_ccd = g_lens+g_limage

    #pl.figure()
    #pl.imshow((g_clean_ccd),interpolation='nearest',cmap=cm.gray)
    #pl.colorbar()

    g_clean_ccd = congrid.congrid(g_clean_ccd,[128,128])

    #-------------------------------------------------------------
    file_psf = "../PSF_and_noise/sdsspsf.fits"
    g_psf = pyfits.getdata(file_psf)-1000.0
    g_psf = g_psf/np.sum(g_psf)

    #new_shape=[0,0]
    #new_shape[0]=np.shape(g_psf)[0]*dsx_sdss/dsx
    #new_shape[1]=np.shape(g_psf)[1]*dsx_sdss/dsx
    #g_psf = rebin_psf(g_psf,new_shape)

    g_images_psf = ss.fftconvolve(g_clean_ccd,g_psf,mode="same")
    #g_images_psf = ss.convolve(g_clean_ccd,g_psf,mode="same")
    #g_images_psf = g_clean_ccd

    #pl.figure()
    #pl.imshow((g_psf),interpolation='nearest',cmap=cm.gray)
    #pl.colorbar()

    #-------------------------------------------------------------
    # Need to calibrate the mags
    #g_noise = noise_map(nnn,nnn,np.sqrt(nstd),"Gaussian")
    g_noise = noise_map(128,128,np.sqrt(nstd),"Gaussian")
    g_final = g_images_psf+g_noise

    #pl.figure()
    #pl.imshow((g_final.T),interpolation='nearest',cmap=cm.gray)
    #pl.colorbar()

    g_final_rebin = congrid.congrid(g_final,[128,128])

    #pl.figure()
    #pl.imshow((g_final_rebin.T),interpolation='nearest',cmap=cm.gray)
    #pl.colorbar()

    #-------------------------------------------------------------

    output_filename = "../output_fits/"+str(ind)+".fits"
    pyfits.writeto(output_filename,g_final_rebin,overwrite=True)

    pl.show()

    return 0
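Note: noise_map is a project-local helper; the Gaussian case used here amounts to a seeded normal field. A hypothetical equivalent sketch:

import numpy as np

def noise_map_sketch(nx, ny, std, kind="Gaussian"):
    assert kind == "Gaussian"
    return np.random.normal(0.0, std, size=(nx, ny))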
Code example #21
def main():
    import sys

    filename = sys.argv[1]
    sdens = pyfits.getdata(filename)

    kappa0 = np.array(sdens,dtype='<d')
    kappa=congrid.congrid(kappa0,[512,512])

    #sdens = sdens.astype('<double')

    nnn = np.shape(kappa)[0]
    boxsize = 4.0
    zl = 0.1
    zs = 1.0
    p_mass = 1.0
    dsx = boxsize/nnn
    xi1 = np.linspace(-boxsize/2.0,boxsize/2.0-dsx,nnn)+0.5*dsx
    xi2 = np.linspace(-boxsize/2.0,boxsize/2.0-dsx,nnn)+0.5*dsx
    xi1,xi2 = np.meshgrid(xi1,xi2)
    #----------------------------------------------------
    # lens parameters for main halo
    xlc1 = 0.0
    xlc2 = 0.0
    ql0 = 0.999999999999
    rc0 = 0.000000000001
    re0 = 1.0
    phi0 = 0.0
    lpar = np.asarray([xlc1, xlc2, re0, rc0, ql0, phi0])

    lpars_list = []
    lpars_list.append(lpar)
    ##----------------------------------------------------
    ##sdens = lpar_nie_kappa(xi1,xi2,lpar)
    ##pii,pii1,pii2 = multiple_new_nie_all(xi1,xi2,lpars_list)

    #phi,phi1,phi2,td = lf.call_all_about_lensing(kappa,nnn,zl,zs,p_mass,dsx)

    #phi2,phi1 = np.gradient(phi,dsx)
    #phi12,phi11 = np.gradient(phi1,dsx)
    #phi22,phi21 = np.gradient(phi2,dsx)
    #kappac = 0.5*(phi11+phi22)
    #mu = 1.0/(1.0-(phi11+phi22)+phi11*phi22-phi12*phi21)
    #critical = lf.call_find_critical_curve(mu)
    #pl.figure(figsize=(10,10))
    #pl.contour(critical)

    ###----------------------------------------------------
    ### lens parameters for main halo
    ##xls1 = 0.7
    ##xls2 = 0.8
    ##qls = 0.999999999999
    ##rcs = 0.000000000001
    ##res = 0.5
    ##phis = 0.0
    ##lpars = np.asarray([xls1, xls2, res, rcs, qls, phis])
    ##lpars_list.append(lpars)

    ##sdens = lpar_nie_kappa(xi1,xi2,lpar)

    ##pii,pii1,pii2 = multiple_new_nie_all(xi1,xi2,lpars_list)

    ##phi,alpha1,alpha2,td = lf.call_all_about_lensing(sdens,nnn,zl,zs,p_mass,dsx)

    ##phi12,phi11 = np.gradient(alpha2,dsx)
    ##phi22,phi21 = np.gradient(alpha1,dsx)
    ##kappai = 0.5*(phi11+phi22)
##---#-----------------------------------------------------------------
#    sdens_pad = np.zeros((nnn*2,nnn*2))
#    sdens_pad[nnn/2:nnn/2*3,nnn/2:nnn/2*3] = kappa
#    green_in = green_iso(nnn*2,dsx)
#    phi,alpha1,alpha2,td,mu,kappas = fft_lensing_signals(sdens_pad,green_in,dsx)
##---#-----------------------------------------------------------------
    green_in = green_iso(nnn,dsx)
    phi,alpha1,alpha2,td,mu,kappas = fft_lensing_signals(kappa,green_in,dsx)
Code example #22
def single_run_test(ind, ysc1, ysc2, q, vd, pha, zl, zs):
    zeropoint = 18

    dsx_sdss = 0.396  # pixel size of SDSS detector.

    R = 2.9918
    #zl = 0.2     #zl is the redshift of the lens galaxy.
    #zs = 1.0
    #vd = 520    #Velocity Dispersion.
    nnn = 512  #Image dimension
    bsz = 30.0  # arcsecs
    dsx = bsz / nnn  # pixel size of SDSS detector.
    nstd = 59

    xx01 = np.linspace(-bsz / 2.0, bsz / 2.0, nnn) + 0.5 * dsx
    xx02 = np.linspace(-bsz / 2.0, bsz / 2.0, nnn) + 0.5 * dsx
    xi2, xi1 = np.meshgrid(xx01, xx02)
    #----------------------------------------------------------------------
    #ysc1 = 0.2
    #ysc2 = 0.5
    dsi = 0.03
    g_source = pyfits.getdata("./439.0_149.482739_1.889989_processed.fits")
    g_source = np.array(g_source, dtype="<d")
    g_source = p2p.pixcos2pixsdss(g_source)
    #----------------------------------------------------------------------
    xc1 = 0.0  #x coordinate of the center of lens (in units of Einstein radius).
    xc2 = 0.0  #y coordinate of the center of lens (in units of Einstein radius).
    #q   = 0.7       #Ellipticity of lens.
    rc = 0.0  #Core size of lens (in units of Einstein radius).
    re = re_sv(vd, zl, zs)  #Einstein radius of lens.
    #pha = 45.0      #Orintation of lens.
    lpar = np.asarray([xc1, xc2, q, rc, re, pha])
    #----------------------------------------------------------------------
    ai1, ai2, mua = lens_equation_sie(xi1, xi2, lpar)

    yi1 = xi1 - ai1
    yi2 = xi2 - ai2

    g_limage = lv4.call_ray_tracing(g_source, yi1, yi2, ysc1, ysc2, dsi)
    g_limage = mag_to_flux(g_limage, zeropoint)

    #pl.figure()
    #pl.contourf(xi1,xi2,g_limage)
    #pl.colorbar()
    #-------------------------------------------------------------
    # Need to calibrate the mags
    dA = Planck13.comoving_distance(zl).value * 1000. / (1 + zl)
    Re = dA * np.sin(R * np.pi / 180. / 3600.)
    counts = Brightness(R, vd)
    vpar = np.asarray([counts, Re, xc1, xc2, q, pha])
    #g_lens = deVaucouleurs(xi1,xi2,xc1,xc2,counts,R,1.0-q,pha)
    g_lens = de_vaucouleurs_2d(xi1, xi2, vpar)

    g_lens = ncounts_to_flux(g_lens * 1.5e-4, zeropoint)
    #-------------------------------------------------------------
    file_psf = "../PSF_and_noise/sdsspsf.fits"
    g_psf = pyfits.getdata(file_psf) - 1000.0
    g_psf = g_psf / np.sum(g_psf)
    new_shape = [0, 0]
    new_shape[0] = np.shape(g_psf)[0] * dsx_sdss / dsx
    new_shape[1] = np.shape(g_psf)[1] * dsx_sdss / dsx
    g_psf = rebin_psf(g_psf, new_shape)
    print(np.max(g_psf))
    g_limage = ss.fftconvolve(g_limage + g_lens, g_psf, mode="same")

    #pl.figure()
    #pl.contourf(xi1,xi2,g_limage)
    #pl.colorbar()
    #-------------------------------------------------------------
    # Need to calibrate the mags
    g_noise = noise_map(nnn, nnn, nstd, "Gaussian")
    g_noise = ncounts_to_flux(g_noise * 1e-0 + skycount, zeropoint)
    g_limage = g_limage + g_noise

    print(np.shape(g_limage))
    g_limage = congrid.congrid(g_limage, [128, 128])
    g_limage = g_limage - np.min(g_limage)

    pl.figure()
    #pl.contourf(xi1,xi2,g_limage)
    pl.contourf(g_limage)
    pl.colorbar()
    #-------------------------------------------------------------

    output_filename = "../output_fits/" + str(ind) + ".fits"
    pyfits.writeto(output_filename, g_limage, overwrite=True)

    pl.show()

    return 0
Code example #23
def process_single_filter_subimage(image_parameters, galaxy_data, lcdata,
                                   filname, lambda_eff_microns):

    print('**** Processing subimage:  ', image_parameters)

    single_filter_subimage = np.ndarray(
        (image_parameters['Npix'], image_parameters['Npix']))
    print(single_filter_subimage.shape)

    #find sources in this sub-image

    #use buffer to clear edge effects from lower left
    buf_deg = 10.0 / 3600.0  #10 arcsec buffer?
    sub_indices = (
        lcdata['ra_deg'] >= image_parameters['x1_deg'] - buf_deg) * (
            lcdata['ra_deg'] < image_parameters['x2_deg']) * (
                lcdata['dec_deg'] >= image_parameters['y1_deg'] - buf_deg) * (
                    lcdata['dec_deg'] < image_parameters['y2_deg'])

    sub_data = lcdata[sub_indices]

    success = 0

    image_catalog = {'filter': filname}

    #final source info
    xcen_list = []
    ycen_list = []
    final_flux_njy_list = []
    ab_appmag_list = []

    #galaxy_data=galaxy_data.fromkeys(['image_dir','scale','simlabel','Mvir','Mstar','Rhalf_stars'])

    #original image source data
    final_file_list = []
    image_dir_list = []
    scalefactor_list = []
    simlabel_list = []
    Mvir_list = []
    mstar_list = []
    rhalf_list = []

    #lightcone entry data... all of it???

    for i, entry in enumerate(sub_data):
        #need pos_i, pos_j
        pos_i = np.int64(
            (entry['ra_deg'] - image_parameters['x1_deg']) *
            np.float64(image_parameters['Npix']) /
            (image_parameters['x2_deg'] - image_parameters['x1_deg']))
        pos_j = np.int64(
            (entry['dec_deg'] - image_parameters['y1_deg']) *
            np.float64(image_parameters['Npix']) /
            (image_parameters['y2_deg'] - image_parameters['y1_deg']))

        #select image file to insert
        mass_value = entry['subhalo_mass_msun']
        scale_value = 1.0 / (1.0 + entry['true_z'])
        mstar_value = entry['mstar_msun_rad']

        galaxy_scale_indices = (galaxy_data['scale'] >=
                                scale_value / scale_window)
        galaxy_scale_indices *= (galaxy_data['scale'] <=
                                 scale_value * scale_window)

        galaxy_mass_indices = (galaxy_data['Mvir'] >= mass_value / mass_window)
        galaxy_mass_indices *= (galaxy_data['Mvir'] <=
                                mass_value * mass_window)

        galaxy_search_indices = galaxy_scale_indices * galaxy_mass_indices

        found_galaxy = False

        if np.sum(galaxy_search_indices
                  ) == 0 and np.sum(galaxy_scale_indices) > 0:
            #pick random galaxy and resize?
            #index into list of Trues
            random_index = random.randint(np.sum(galaxy_scale_indices))
            scale_where = np.where(galaxy_scale_indices == True)[0]
            galaxy_index = scale_where[random_index]
            success += 1
            found_galaxy = True
        elif np.sum(galaxy_search_indices) == 0 and np.sum(
                galaxy_scale_indices) == 0:
            galaxy_index = None
            pass
        else:
            random_index = random.randint(np.sum(galaxy_search_indices))
            galaxy_where = np.where(galaxy_search_indices == True)[0]
            galaxy_index = galaxy_where[random_index]
            success += 1
            found_galaxy = True

        found_data = False
        #now we have galaxy_index
        if galaxy_index is not None:
            mstar_factor = mstar_value / (galaxy_data['Mstar'][galaxy_index])
            size_factor = (mstar_factor)**0.5

            folder = galaxy_data['image_dir'][galaxy_index]
            label = galaxy_data['simlabel'][galaxy_index]
            possible_files = np.sort(
                np.asarray(
                    glob.glob(folder + '/hires_images_cam??/' + label +
                              'cam??_' + filname + '*.fits')))

            if possible_files.shape[0] > 0:
                #pick a random camera
                file_index = random.randint(possible_files.shape[0])
                this_file = possible_files[file_index]
                try:
                    #locate file and load
                    found_data = True
                    this_hdu = fits.open(this_file)[0]
                    this_image = this_hdu.data

                    pixstr = this_file.split('_')[-2][3:]

                    #this was foolish!

                    pixsize_arcsec = np.float64(pixstr)

                except Exception:
                    found_data = False

                if found_data == True:
                    #these are in nJy-- preserve integral!

                    original_flux = np.sum(this_image)

                    total_flux = original_flux * mstar_factor  #shrink luminosity by mstar factor

                    this_npix = this_image.shape[0]

                    #resize--preserve proper units

                    desired_npix = np.int32(
                        this_npix *
                        (pixsize_arcsec / image_parameters['pix_size_arcsec'])
                        * size_factor)

                    resized_image = congrid.congrid(
                        this_image, (desired_npix, desired_npix))
                    resized_flux = np.sum(resized_image)

                    resized_image = resized_image * (total_flux / resized_flux)

                    #is there a way to detect and avoid edge effects here??

                    #1. smooth image
                    #2. locate peak flux
                    #3. apply gaussian/exponential factor strong enough to eliminate to full-res image?
                    #4. use size info... to make sensible??

                    #add to image

                    npsub = desired_npix
                    i1 = pos_i
                    i2 = pos_i + npsub
                    j1 = pos_j
                    j2 = pos_j + npsub
                    icen = np.float64(pos_i) + np.float64(npsub) / 2.0
                    jcen = np.float64(pos_j) + np.float64(npsub) / 2.0

                    #determine overlap image_parameters['Npix']
                    im0 = 0
                    im1 = image_parameters['Npix']

                    sub_image_to_add = resized_image[im0 - i1:npsub -
                                                     (i2 - im1), im0 -
                                                     j1:npsub - (j2 - im1)]
                    print('Resized: ', mstar_factor, size_factor,
                          pixsize_arcsec, this_npix, desired_npix,
                          resized_image.shape, sub_image_to_add.shape, i1, i2,
                          j1, j2)

                    if sub_image_to_add.shape[
                            0] > 0 and sub_image_to_add.shape[1] > 0:
                        iplace = np.max([i1, 0])
                        jplace = np.max([j1, 0])
                        new_npi = sub_image_to_add.shape[0]
                        new_npj = sub_image_to_add.shape[1]
                        single_filter_subimage[iplace:iplace + new_npi,
                                               jplace:jplace +
                                               new_npj] += sub_image_to_add

                    if icen >= 0.0 and jcen >= 0.0:
                        #assemble catalog entries for this object
                        #final source info
                        xcen_list.append(icen)
                        ycen_list.append(jcen)

                        final_flux_njy_list.append(total_flux)
                        ab_appmag_list.append(-2.5 * np.log10(
                            (1.0e9) * (total_flux) / 3632.0))

                        #galaxy_data=galaxy_data.fromkeys(['image_dir','scale','simlabel','Mvir','Mstar','Rhalf_stars'])

                        #original image source data
                        final_file_list = []
                        image_dir_list = []
                        scalefactor_list = []
                        simlabel_list = []
                        Mvir_list = []
                        mstar_list = []
                        rhalf_list = []

                        #lightcone entry data... all of it???

        print('found galaxy? ', found_galaxy, 'found data? ', found_data)

    print('**** Subimage successes: ', str(success), '  out of  ', i + 1)

    return single_filter_subimage
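Note: the slice arithmetic above is meant to clip an inserted galaxy stamp to the subimage bounds. A compact, self-contained version of that clipping step (hypothetical helper, pure NumPy):

import numpy as np

def add_stamp(canvas, stamp, pos_i, pos_j):
    """Add `stamp` to `canvas` with its corner at (pos_i, pos_j), clipping
    any part that falls outside the canvas."""
    ni, nj = canvas.shape
    i1, j1 = max(pos_i, 0), max(pos_j, 0)
    i2 = min(pos_i + stamp.shape[0], ni)
    j2 = min(pos_j + stamp.shape[1], nj)
    if i2 > i1 and j2 > j1:
        canvas[i1:i2, j1:j2] += stamp[i1 - pos_i:i2 - pos_i, j1 - pos_j:j2 - pos_j]
    return canvas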
Code example #24
    def geomap_5kmto1km(self, geodata_5km):

        #print >> self.out, 'in geomap_5kmto1km() ...'
        #print >> self.out, 'geodata_5km: ', geodata_5km

        LowResDims = geodata_5km.shape
        #print >> self.out, 'LowResDims[0]: ', LowResDims[0], ', LowResDims[1]: ', LowResDims[1]
        ResolutionFactor = 5  # 5km res -> 1km res
        first_col_1km = 2  # first SDS pixel 5km has a coordinate (2,2) from 1km data
        first_row_1km = 2

        if self.Cell_Along_Swath_5km == 0:
            self.Cell_Along_Swath_5km = LowResDims[0]  # 406

        if self.Cell_Across_Swath_5km == 0:
            self.Cell_Across_Swath_5km = LowResDims[1]  # 270

        if self.Cell_Along_Swath_1km == 0:
            self.Cell_Along_Swath_1km = self.Cell_Along_Swath_5km * 5  # 2030

        if self.Cell_Across_Swath_1km == 0:
            self.Cell_Across_Swath_1km = self.Cell_Across_Swath_5km * 5 + 4  # 1354

        #print >> self.out, 'self.Cell_Along_Swath_5km: ', self.Cell_Along_Swath_5km
        #print >> self.out, 'self.Cell_Across_Swath_5km: ', self.Cell_Across_Swath_5km
        #print >> self.out, 'self.Cell_Along_Swath_1km: ', self.Cell_Along_Swath_1km
        #print >> self.out, 'self.Cell_Across_Swath_1km: ', self.Cell_Across_Swath_1km
        """
	print >> self.out, 'a row: '
	for i  in range(self.Cell_Across_Swath_5km):
	    print >> self.out, geodata_5km[0, i]
	"""

        # two beginning columns
        begin_col_exp = 2 * geodata_5km[:, 0] - geodata_5km[:, 1]
        #print >> self.out, 'len(begin_col_exp): ', len(begin_col_exp)
        ### print >> self.out, 'begin_col_exp: ', begin_col_exp

        # two end columns
        end_col_exp = (2 * geodata_5km[:, self.Cell_Across_Swath_5km - 1]
                       - geodata_5km[:, self.Cell_Across_Swath_5km - 2])
        ### print >> self.out, 'end_col_exp: ', end_col_exp

        end_col_exp_rest = 2 * end_col_exp - geodata_5km[:, self.Cell_Across_Swath_5km - 1]
        ### print >> self.out, 'end_col_exp_rest: ', end_col_exp_rest

        # a larger array for temp use
        exp_geodata_5km = N.array([0.0] * (self.Cell_Across_Swath_5km + 3) *
                                  self.Cell_Along_Swath_5km)
        exp_geodata_5km = exp_geodata_5km.reshape(
            self.Cell_Along_Swath_5km, (self.Cell_Across_Swath_5km + 3))
        ### print >> self.out, 'exp_geodata_5km: ', exp_geodata_5km
        exp_geodata_5km[:, 0] = begin_col_exp
        exp_geodata_5km[:, 1:self.Cell_Across_Swath_5km + 1] = geodata_5km
        ### print >> self.out, '1. exp_geodata_5km: ', exp_geodata_5km

        exp_geodata_5km[:, self.Cell_Across_Swath_5km + 1] = end_col_exp
        exp_geodata_5km[:, self.Cell_Across_Swath_5km + 2] = end_col_exp_rest
        ### print >> self.out, '2. exp_geodata_5km: ', exp_geodata_5km

        Internal_geodata_5km = exp_geodata_5km.copy()

        begin_row_exp = 2 * Internal_geodata_5km[0, :] - Internal_geodata_5km[1, :]
        end_row_exp = (2 * Internal_geodata_5km[self.Cell_Along_Swath_5km - 1, :]
                       - Internal_geodata_5km[self.Cell_Along_Swath_5km - 2, :])

        exp_geodata_5km = N.array([0.0] * ((self.Cell_Across_Swath_5km + 3) *
                                           (self.Cell_Along_Swath_5km + 2)))
        exp_geodata_5km = exp_geodata_5km.reshape(
            (self.Cell_Along_Swath_5km + 2), (self.Cell_Across_Swath_5km + 3))
        exp_geodata_5km[0, :] = begin_row_exp
        exp_geodata_5km[1:self.Cell_Along_Swath_5km +
                        1, :] = Internal_geodata_5km
        exp_geodata_5km[self.Cell_Along_Swath_5km + 1, :] = end_row_exp

        ### print >> self.out, '3. exp_geodata_5km: ', exp_geodata_5km

        Internal_geodata_5km = exp_geodata_5km.copy()
        #print >> self.out, 'Internal_geodata_5km: ', Internal_geodata_5km
        dims5km = Internal_geodata_5km.shape
        #print >> self.out, 'Internal_geodata_5km.shape: ', dims5km

        Expand_cell_Along_Swath_1km = (self.Cell_Along_Swath_5km +
                                       1) * ResolutionFactor + 1
        Expand_Cell_Across_Swath_1km = (self.Cell_Across_Swath_5km +
                                        2) * ResolutionFactor + 1

        dims1km = N.array([0, 0])

        dims1km[0] = Expand_cell_Along_Swath_1km
        dims1km[1] = Expand_Cell_Across_Swath_1km

        Expand_geodata_1km = congrid.congrid(Internal_geodata_5km, dims1km)

        geodata_1km = Expand_geodata_1km[(first_row_1km+1):self.Cell_Along_Swath_1km+first_row_1km, \
                (first_col_1km+1):self.Cell_Across_Swath_1km+first_col_1km]
        #print >> self.out, 'geodata_1km: ', geodata_1km
        #print >> self.out, 'geodata_1km.shape: ', geodata_1km.shape

        return geodata_1km
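Note: before interpolating onto the 1 km grid, the method linearly extrapolates extra 5 km geolocation columns (one on the left, two on the right) and rows. The column extension above is, in compact form (a sketch of the same arithmetic):

import numpy as np

def extend_columns(a):
    """One linearly-extrapolated column on the left, two on the right."""
    left = 2 * a[:, :1] - a[:, 1:2]            # begin_col_exp
    right = 2 * a[:, -1:] - a[:, -2:-1]        # end_col_exp
    right2 = 2 * right - a[:, -1:]             # end_col_exp_rest
    return np.hstack([left, a, right, right2])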
Code example #25
def show_map(inputdata,npol='none',filename='none',background='w',plotrange='none',colpol='w',cmap=cm.gist_heat):

    nlevels=256
    fontsize=18

    L   = inputdata.get('L')
    map = inputdata.get('Inu')
    if 'Inupol' in inputdata:
        mappol = inputdata.get('Inupol')


    xi = np.linspace(-L[0]/2.,L[0]/2.,map.shape[0])
    yi = np.linspace(-L[1]/2.,L[1]/2.,map.shape[1])
    xi2D=np.empty((map.shape[0],map.shape[1]))
    yi2D=np.empty((map.shape[0],map.shape[1]))
    for i in range(map.shape[1]):
        xi2D[:,i] = xi
    for i in range(map.shape[0]):
        yi2D[i,:] = yi
        
    plt.figure()
    plt.clf()

    if plotrange != 'none':
        levels = np.arange(nlevels)/float(nlevels-1)*(plotrange[1]-plotrange[0])+plotrange[0]
        cs = plt.contourf(xi2D,yi2D,map,levels=levels,cmap=cmap)
        cb = plt.colorbar()
        cb.set_ticks(plotrange[1]*np.arange(10)/10.)
    else:
        cs = plt.contourf(xi2D,yi2D,np.log10(map),nlevels,cmap=cmap)
        cb = plt.colorbar()

    ax = plt.gca()
    ax.set_aspect('equal')
    ax.patch.set_facecolor(background)
    for tick in ax.xaxis.get_major_ticks():
        tick.label1.set_fontsize(fontsize)
    for tick in ax.yaxis.get_major_ticks():
        tick.label1.set_fontsize(fontsize)
    ax.xaxis.get_offset_text().set_fontsize(fontsize-2)
    ax.yaxis.get_offset_text().set_fontsize(fontsize-2)

    for tick in cb.ax.get_yticklabels():
        tick.set_fontsize(fontsize)

    plt.plot(0,0,'k+')

    plt.ylabel('b [cm]',size=fontsize)
    plt.xlabel('a [cm]',size=fontsize)

    plt.title(''.join((['phi=',str(inputdata.get('phi'))[0:4],' ; theta=',str(inputdata.get('theta'))[0:4]])))

    if npol != 'none' and 'Inupol' in inputdata:
        xbeg=-0.5*L[0]*float(npol[0]-1)/float(npol[0])
        ybeg=-0.5*L[1]*float(npol[1]-1)/float(npol[1])
        x=np.linspace(xbeg,-xbeg,npol[0])
        y=np.linspace(ybeg,-ybeg,npol[1])
        pi=abs(mappol)/map
        u=congrid(pi*np.sqrt(mappol).real/np.sqrt(abs(mappol)),npol,method='linear',centre=True)
        v=congrid(pi*np.sqrt(mappol).imag/np.sqrt(abs(mappol)),npol,method='linear',centre=True)
        plt.quiver(x,y,u,v,width=0.002,headlength=0,headwidth=0,headaxislength=0,pivot='middle',color=colpol)


    if filename == 'none':
        plt.show()
    else: 
        plt.savefig(''.join([filename,'.pdf']), format="pdf", transparent=False)
        plt.close()
Code example #26
def main():

    nnn = 800
    boxsize = 24.0
    dsx = boxsize / nnn
    dsi = dsx * 0.06
    xi1 = np.linspace(-boxsize / 2.0, boxsize / 2.0 - dsx, nnn) + 0.5 * dsx
    xi2 = np.linspace(-boxsize / 2.0, boxsize / 2.0 - dsx, nnn) + 0.5 * dsx
    xi1, xi2 = np.meshgrid(xi1, xi2)

    g_snR = pyfits.getdata("/Users/uranus/Desktop/gamer1/galaxy0004R.fits")
    g_snG = pyfits.getdata("/Users/uranus/Desktop/gamer1/galaxy0004G.fits")
    g_snB = pyfits.getdata("/Users/uranus/Desktop/gamer1/galaxy0004B.fits")

    print(np.shape(g_snR))

    g_snR = np.array(g_snR, dtype="<d")
    g_snG = np.array(g_snG, dtype="<d")
    g_snB = np.array(g_snB, dtype="<d")

    g_snR = congrid.congrid(g_snR, [96, 96])
    g_snG = congrid.congrid(g_snG, [96, 96])
    g_snB = congrid.congrid(g_snB, [96, 96])
    dsi = dsx * 1.0

    #g_snR = snf.gaussian_filter(np.array(g_snR, dtype="<d"), 16.5)
    #g_snG = snf.gaussian_filter(np.array(g_snG, dtype="<d"), 16.5)
    #g_snB = snf.gaussian_filter(np.array(g_snB, dtype="<d"), 16.5)

    #cc = find_critical_curve(mu)

    pygame.init()
    FPS = 60
    fpsClock = pygame.time.Clock()

    screen = pygame.display.set_mode((nnn, nnn), 0, 32)

    pygame.display.set_caption("Gravitational Lensing Toy")

    mouse_cursor = pygame.Surface((nnn, nnn))

    #----------------------------------------------------

    base0 = np.zeros((nnn, nnn, 3), 'uint8')
    base1 = np.zeros((nnn, nnn, 3), 'uint8')
    base2 = np.zeros((nnn, nnn, 3), 'uint8')

    #----------------------------------------------------
    # lens parameters for main halo
    xlc1 = 0.0
    xlc2 = 0.0
    ql0 = 0.799999999999
    rc0 = 0.000000000001
    re0 = 4.0
    phi0 = 60.0
    lpar = np.asarray([xlc1, xlc2, re0, rc0, ql0, phi0])

    lpars_list = []
    lpars_list.append(lpar)
    #----------------------------------------------------
    # lens parameters for main halo

    xls1 = 0.7
    xls2 = 0.8
    qls = 0.999999999999
    rcs = 0.000000000001
    res = 0.0
    phis = 0.0
    lpars = np.asarray([xls1, xls2, res, rcs, qls, phis])
    lpars_list.append(lpars)

    scale_factor = 30
    ap0 = 1.0
    l_sig0 = 0.5
    glpar = np.asarray([ap0, l_sig0, xlc1, xlc2, ql0, phi0])

    g_lens = lens_galaxies(xi1, xi2, glpar)

    base0[:, :, 0] = g_lens * 255
    base0[:, :, 1] = g_lens * 180
    base0[:, :, 2] = g_lens * 0

    #rgb(255, 180, 0)

    x = 0
    y = 0
    step = 1
    gr_sig = 0.1

    LeftButton = 0

    g_xcen = 0.0175
    g_ycen = 0.375
    #----------------------------------------------------

    i = 0
    while True:
        i = i + 1
        for event in pygame.event.get():
            if event.type == QUIT:
                exit()
            if event.type == MOUSEMOTION:

                if event.buttons[LeftButton]:
                    rel = event.rel
                    x += rel[0]
                    y += rel[1]

            #----------------------------------------------
            if event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 4:
                    gr_sig -= 0.1
                    if gr_sig < 0.01:
                        gr_sig = 0.01

                elif event.button == 5:
                    gr_sig += 0.01
                    if gr_sig > 0.4:
                        gr_sig = 0.4

        keys = pygame.key.get_pressed()  # checking pressed keys
        if keys[pygame.K_RIGHT]:
            x += step
            if x > 500:
                x = 500
        if keys[pygame.K_LSHIFT] & keys[pygame.K_RIGHT]:
            x += 30 * step

        if keys[pygame.K_LEFT]:
            x -= step
            if x < -500:
                x = -500

        if keys[pygame.K_LSHIFT] & keys[pygame.K_LEFT]:
            x -= 30 * step

        if keys[pygame.K_UP]:
            y -= step
            if y < -500:
                y = -500
        if keys[pygame.K_LSHIFT] & keys[pygame.K_UP]:
            y -= 30 * step

        if keys[pygame.K_DOWN]:
            y += step
            if y > 500:
                y = 500
        if keys[pygame.K_LSHIFT] & keys[pygame.K_DOWN]:
            y += 30 * step

        #----------------------------------------------
        if keys[pygame.K_MINUS]:
            gr_sig -= 0.01
            if gr_sig < 0.01:
                gr_sig = 0.01

        if keys[pygame.K_EQUALS]:
            gr_sig += 0.01
            if gr_sig > 0.1:
                gr_sig = 0.1

        #gr_sig = 0.005

        #----------------------------------------------
        # parameters of source galaxies.
        #----------------------------------------------
        g_amp = 2.0         # peak brightness value
        g_sig = gr_sig * 1.5          # Gaussian "sigma" (i.e., size)
        #g_xcen = x * 2.0 / nnn  # x position of center
        #g_ycen = y * 2.0 / nnn  # y position of center
        g_xcen = 0.0175
        g_ycen = 0.375
        g_axrat = 1.0       # minor-to-major axis ratio
        # major-axis position angle (degrees) c.c.w. from y axis
        g_pa = 0.0
        gpar = np.asarray([g_amp, g_sig, g_ycen, g_xcen, g_axrat, g_pa])
        #----------------------------------------------

        #----------------------------------------------
        # parameters of SNs.
        #----------------------------------------------
        g_amp = 1.0         # peak brightness value
        g_sig = 0.1          # Gaussian "sigma" (i.e., size)
        g_xcen = y * 2.0 / nnn + 0.05  # x position of center
        g_ycen = x * 2.0 / nnn + 0.05  # y position of center
        g_axrat = 1.0       # minor-to-major axis ratio
        # major-axis position angle (degrees) c.c.w. from y axis
        g_pa = 0.0

        phi, td, ai1, ai2, kappa, mu, yi1, yi2 = nie_all(
            xi1, xi2, xlc1, xlc2, re0, rc0, ql0, phi0, g_ycen, g_xcen)

        g_lsB = lv4.call_ray_tracing(g_snB, yi1, yi2, g_xcen, g_ycen, dsi)
        g_lsR = lv4.call_ray_tracing(g_snR, yi1, yi2, g_xcen, g_ycen, dsi)
        g_lsG = lv4.call_ray_tracing(g_snG, yi1, yi2, g_xcen, g_ycen, dsi)

        base2[:, :, 0] = g_lsR / scale_factor * 256
        base2[:, :, 1] = g_lsG / scale_factor * 256
        base2[:, :, 2] = g_lsB / scale_factor * 256

        wf = base1 + base2

        idx1 = wf >= base0
        idx2 = wf < base0

        base = base0 * 0
        base[idx1] = wf[idx1]
        base[idx2] = base0[idx2]

        #base = wf*base0+(base1+base2)
        pygame.surfarray.blit_array(mouse_cursor, base)

        screen.blit(mouse_cursor, (0, 0))

        # font=pygame.font.SysFont(None,30)
        #text = font.render("( "+str(x)+", "+str(-y)+" )", True, (255, 255, 255))
        #screen.blit(text,(10, 10))
        pygame.display.update()
        fpsClock.tick(FPS)
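
The compositing step above (the idx1/idx2 masks) is just a per-pixel maximum of the lensed-source layers and the lens-galaxy layer. A minimal sketch of the same operation, assuming only numpy and two hypothetical single-channel layers:

import numpy as np

# stand-ins for base0 (lens galaxy) and base1 + base2 (lensed source + SN)
lens_layer = np.random.randint(0, 256, (4, 4), dtype=np.uint8)
source_layer = np.random.randint(0, 256, (4, 4), dtype=np.uint8)

# np.maximum reproduces the idx1/idx2 masking in one call:
# every output pixel keeps whichever layer is brighter there.
composite = np.maximum(source_layer, lens_layer)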
Code example #28
0
File: synchroSun.py  Project: yangyha/mpi-AMRVAC
def show_polmap(inputdata,npol='none',filename='none',background='w',vmin=None,vmax=None,colpol='w',cmap=cm.gist_heat_r):

    dpi=300
    fontsize=12
    plt.rcParams['xtick.direction'] = 'out'
    plt.rcParams['ytick.direction'] = 'out'


    L   = inputdata.get('L')
    mappol = inputdata.get('Inupol')
    map    = inputdata.get('Inu')

    fig1 = plt.figure(figsize=(4,4))
    ax  = fig1.add_subplot(1,1,1)
    ax.set_xlim(-L[0]/2.,L[0]/2.)
    ax.set_ylim(-L[1]/2.,L[1]/2.)

    extent=(-L[0]/2.,L[0]/2.,-L[1]/2.,L[1]/2.)

    if vmin is None:
        vmin=map.min()
    if vmax is None:
        vmax=map.max()
    map_clip = np.clip(np.abs(mappol),vmin,vmax)
    cs = plt.imshow(map_clip.transpose(),origin='lower',cmap=cmap,extent=extent,vmin=vmin,vmax=vmax)
#    cb = plt.colorbar()
#    for tick in cb.ax.get_yticklabels():
#        tick.set_fontsize(fontsize)
#    cb.ax.xaxis.get_offset_text().set_fontsize(fontsize-2)
#    cb.ax.yaxis.get_offset_text().set_fontsize(fontsize-2)

#    ax.patch.set_facecolor(background)
    for tick in ax.xaxis.get_major_ticks():
        tick.label1.set_fontsize(fontsize)
    for tick in ax.yaxis.get_major_ticks():
        tick.label1.set_fontsize(fontsize)
    ax.xaxis.get_offset_text().set_fontsize(fontsize-2)
    ax.yaxis.get_offset_text().set_fontsize(fontsize-2)

#    for tick in cb.ax.get_yticklabels():
#        tick.set_fontsize(fontsize)

    plt.plot(0,0,'w+',markersize=10)

#    plt.ylabel('b [cm]',size=fontsize)
#    plt.xlabel('a [cm]',size=fontsize)
#    plt.title(''.join((['phi=',str(inputdata.get('phi'))[0:4],' ; theta=',str(inputdata.get('theta'))[0:4]
#                        ,' ; delta=',str(inputdata.get('delta'))[0:4]])))

    if npol != 'none' and 'Inupol' in inputdata:
        xbeg=0.5*L[0]*float(npol[0]-1)/float(npol[0])
        ybeg=0.5*L[1]*float(npol[1]-1)/float(npol[1])
        x=np.linspace(-xbeg,xbeg,npol[0])
        y=np.linspace(-ybeg,ybeg,npol[1])
        x2D=np.empty((npol[0],npol[1]))
        y2D=np.empty((npol[0],npol[1]))
        for i in range(npol[1]):
            x2D[:,i] = x
        for i in range(npol[0]):
            y2D[i,:] = y
        ex=-congrid(np.sqrt(mappol).imag,npol,method='linear',centre=True)
        ey=congrid(np.sqrt(mappol).real,npol,method='linear',centre=True) 
        pi=congrid(abs(mappol)/map,npol,method='linear',centre=True)
        
        norm=np.sqrt(ex**2+ey**2)
        ex=ex/norm * pi
        ey=ey/norm * pi
# evectors:
#        plt.quiver(x2D,y2D,ex,ey,width=0.002,headlength=0,headwidth=0,headaxislength=0,pivot='middle',color=colpol)
# bvectors:
        plt.quiver(x2D,y2D,-ey,ex,width=0.002,headlength=0,headwidth=0,headaxislength=0,pivot='middle',color=colpol,scale=npol[0]/4.,scale_units='inches')
        
# make a little inset showing the scaling of the polarization
        alpha = inputdata.get('alpha')
        pimax = (alpha+1.)/(alpha+5./3.)
        plt.fill((-7.5e17,-6e17,-6e17,-7.5e17),(-7.5e17,-7.5e17,-6e17,-6e17),'w')
        plt.quiver(-6.75e17,-6.75e17,pimax,0,width=0.002,headlength=0,headwidth=0,headaxislength=0,pivot='middle',color=colpol,zorder=2,scale_units='inches',scale=npol[0]/4)


    if filename == 'none':
        plt.show()
    else: 
        plt.savefig(''.join([filename,'.pdf']), format="pdf", transparent=False,dpi=dpi,bbox_inches='tight')
        plt.close()
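
The small inset above scales its reference vector by the maximum fractional polarization of optically thin synchrotron emission, pimax = (alpha + 1)/(alpha + 5/3). A quick worked example, assuming a typical spectral index alpha = 0.6:

alpha = 0.6
pimax = (alpha + 1.) / (alpha + 5. / 3.)
print(pimax)  # ~0.706, i.e. the emission can be at most ~71% polarized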
Code example #29
0
def process_single_filter_subimage(image_parameters,
                                   galaxy_data,
                                   lcdata,
                                   filname,
                                   lambda_eff_microns,
                                   selected_catalog=None):

    print('**** Processing subimage:  ', image_parameters)

    single_filter_subimage = np.ndarray(
        (image_parameters['Npix'], image_parameters['Npix']))
    single_filter_subimage_smooth = np.ndarray(
        (image_parameters['Npix'], image_parameters['Npix']))

    print(single_filter_subimage.shape)

    #find sources in this sub-image

    #use buffer to clear edge effects from lower left
    buf_deg = 10.0 / 3600.0  #10 arcsec buffer?
    sub_indices = (
        lcdata['ra_deg'] >= image_parameters['x1_deg'] - buf_deg) * (
            lcdata['ra_deg'] < image_parameters['x2_deg']) * (
                lcdata['dec_deg'] >= image_parameters['y1_deg'] - buf_deg) * (
                    lcdata['dec_deg'] < image_parameters['y2_deg'])

    sub_data = lcdata[sub_indices]

    success = 0

    #image_catalog={'filter':filname}

    #final source info
    xcen_list = []
    ycen_list = []
    final_flux_njy_list = []
    ab_appmag_list = []

    #galaxy_data=galaxy_data.fromkeys(['image_dir','scale','simlabel','Mvir','Mstar','Rhalf_stars'])

    #original image source data
    final_file_list = []
    image_dir_list = []
    scalefactor_list = []
    simlabel_list = []
    Mvir_list = []
    mstar_list = []
    rhalf_list = []

    #lightcone entry data... all of it???

    number = len(sub_data)

    if selected_catalog is not None:
        assert (len(selected_catalog['galaxy_indices']) == number)
        build_catalog = False
    else:
        selected_catalog = {'filter': filname}
        selected_catalog['galaxy_indices'] = np.ndarray((number), dtype=object)
        selected_catalog['found_galaxy'] = np.ndarray((number), dtype=object)
        selected_catalog['this_camstr'] = np.ndarray((number), dtype=object)
        selected_catalog['simlabel'] = np.ndarray((number), dtype=object)
        selected_catalog['numcams'] = np.ndarray((number), dtype=object)
        selected_catalog['image_dir'] = np.ndarray((number), dtype=object)
        selected_catalog['scalefactor'] = np.ndarray((number), dtype=object)
        selected_catalog['Mvir'] = np.ndarray((number), dtype=object)
        selected_catalog['mstar'] = np.ndarray((number), dtype=object)
        selected_catalog['rhalf'] = np.ndarray((number), dtype=object)
        selected_catalog['lc_entry'] = np.ndarray((number), dtype=object)
        selected_catalog['orig_pix_arcsec'] = np.ndarray((number),
                                                         dtype=object)

        selected_catalog['mstar_factor'] = np.ndarray((number), dtype=object)
        selected_catalog['size_factor'] = np.ndarray((number), dtype=object)

        selected_catalog['icen'] = np.ndarray((number), dtype=object)
        selected_catalog['jcen'] = np.ndarray((number), dtype=object)
        selected_catalog['flux_njy'] = np.ndarray((number), dtype=object)
        selected_catalog['ABmag'] = np.ndarray((number), dtype=object)
        selected_catalog['in_image'] = np.ndarray((number), dtype=object)

        selected_catalog['final_file'] = np.ndarray((number), dtype=object)
        selected_catalog['Kband_file'] = np.ndarray((number), dtype=object)

        build_catalog = True

    data_found = 0

    for i, entry in enumerate(sub_data):

        if i % 100 == 0:
            print('   processed ', i, ' out of ', number)

        #need pos_i, pos_j
        pos_i = np.int64(
            (entry['ra_deg'] - image_parameters['x1_deg']) *
            np.float64(image_parameters['Npix']) /
            (image_parameters['x2_deg'] - image_parameters['x1_deg']))
        pos_j = np.int64(
            (entry['dec_deg'] - image_parameters['y1_deg']) *
            np.float64(image_parameters['Npix']) /
            (image_parameters['y2_deg'] - image_parameters['y1_deg']))

        #select image file to insert
        mass_value = entry['subhalo_mass_msun']
        scale_value = 1.0 / (1.0 + entry['true_z'])
        mstar_value = entry['mstar_msun_rad']

        if build_catalog:
            galaxy_scale_indices = (galaxy_data['scale'] >=
                                    scale_value / scale_window)
            galaxy_scale_indices *= (galaxy_data['scale'] <=
                                     scale_value * scale_window)

            galaxy_mass_indices = (galaxy_data['Mvir'] >=
                                   mass_value / mass_window)
            galaxy_mass_indices *= (galaxy_data['Mvir'] <=
                                    mass_value * mass_window)

            galaxy_search_indices = galaxy_scale_indices * galaxy_mass_indices

            found_galaxy = False

            selected_catalog['lc_entry'][i] = entry

            if np.sum(galaxy_search_indices
                      ) == 0 and np.sum(galaxy_scale_indices) > 0:
                #pick random galaxy and resize?
                #index into list of Trues
                random_index = random.randint(np.sum(galaxy_scale_indices))
                scale_where = np.where(galaxy_scale_indices)[0]
                galaxy_index = scale_where[random_index]
                success += 1
                found_galaxy = True
            elif np.sum(galaxy_search_indices) == 0 and np.sum(
                    galaxy_scale_indices) == 0:
                galaxy_index = None
                pass
            else:
                random_index = random.randint(np.sum(galaxy_search_indices))
                galaxy_where = np.where(galaxy_search_indices)[0]
                galaxy_index = galaxy_where[random_index]
                success += 1
                found_galaxy = True

            selected_catalog['found_galaxy'][i] = found_galaxy
            selected_catalog['galaxy_indices'][i] = galaxy_index
        else:
            galaxy_index = selected_catalog['galaxy_indices'][i]
            found_galaxy = selected_catalog['found_galaxy'][i]

        found_data = False
        #now we have galaxy_index
        if galaxy_index is not None:
            mstar_factor = mstar_value / (galaxy_data['Mstar'][galaxy_index])
            size_factor = (mstar_factor)**0.5

            folder = galaxy_data['image_dir'][galaxy_index]
            label = galaxy_data['simlabel'][galaxy_index]
            if build_catalog:
                selected_catalog['simlabel'][i] = label
                selected_catalog['image_dir'][i] = folder

                possible_files = np.sort(
                    np.asarray(
                        glob.glob(folder + '/hires_images_cam??/' + label +
                                  'cam??_' + filname + '*.fits')))
                selected_catalog['numcams'][i] = possible_files.shape[0]

                if possible_files.shape[0] > 0:
                    #pick a random camera
                    file_index = random.randint(possible_files.shape[0])
                    #assert all filters exist
                    this_file = possible_files[file_index]
                    this_folder = os.path.dirname(this_file)
                    this_camstr = this_folder[-5:]
                    filter_files = np.sort(
                        np.asarray(
                            glob.glob(this_folder + '/' + label + '*.fits')))
                    if filter_files.shape[0] == 8:
                        kband_files = np.asarray(
                            glob.glob(folder + '/hires_images_' + this_camstr +
                                      '/' + label + this_camstr + '_' +
                                      'nircam_f200w*.fits'))
                        if kband_files.shape[0] == 0:
                            assert False
                        else:
                            selected_catalog['Kband_file'][i] = kband_files[0]
                    else:
                        this_camstr = None

                    selected_catalog['this_camstr'][i] = this_camstr
                else:
                    this_file = None
            else:
                this_camstr = selected_catalog['this_camstr'][i]
                if this_camstr is not None:
                    this_files = np.asarray(
                        glob.glob(folder + '/hires_images_' + this_camstr +
                                  '/' + label + this_camstr + '_' + filname +
                                  '*.fits'))
                    if this_files.shape[0] == 0:
                        assert (False)
                    this_file = this_files[0]

                else:
                    this_file = None

            selected_catalog['final_file'][i] = this_file

            if this_file is not None:
                found_data = True

                this_hdu = fits.open(this_file)[0]
                this_image = this_hdu.data

                pixstr = this_file.split('_')[-2][3:]

                pixsize_arcsec = np.float64(pixstr)

                #adaptive smoother here???  HOW?
                kband_hdu = fits.open(selected_catalog['Kband_file'][i])[0]
                kdata = kband_hdu.data

                #measure inverse? distance transform in K filter
                #not sure this is the best flux limit.. a higher one will give more aggressive blurring in the outer galaxy

                #inverse_im=np.where(kdata < 1.0e-4, np.ones_like(kdata), np.zeros_like(kdata))
                #idt=scipy.ndimage.distance_transform_cdt(inverse_im)

                #run generic_filter with adaptive_filter function
                #inarr=np.ndarray((kdata.shape[0],kdata.shape[0],2),dtype=np.float64)
                #outarr=np.zeros_like(inarr)
                #inarr[:,:,0]=kdata
                #inarr[:,:,1]=idt  #transform this

                #actually.. apply to re-sized images for efficiency???  need ultra-fast image transforms..

            else:
                found_data = False

            if found_data:
                #these are in nJy-- preserve integral!
                data_found += 1

                original_flux = np.sum(this_image)

                total_flux = original_flux * mstar_factor  #shrink luminosity by mstar factor

                this_npix = this_image.shape[0]

                #resize--preserve proper units

                desired_npix = np.int32(
                    this_npix *
                    (pixsize_arcsec / image_parameters['pix_size_arcsec']) *
                    size_factor)

                resized_image = congrid.congrid(this_image,
                                                (desired_npix, desired_npix))
                resized_flux = np.sum(resized_image)

                resized_image = resized_image * (total_flux / resized_flux)

                resized_k = congrid.congrid(kdata,
                                            (desired_npix, desired_npix))

                #inverse_k=np.where(resized_k < 1.0e-3,np.ones_like(resized_k),np.zeros_like(resized_k))
                #idt=scipy.ndimage.distance_transform_cdt(inverse_k)
                #inarr=np.ndarray((idt.shape[0],idt.shape[0],2),dtype=np.float64)
                #outarr=np.zeros_like(inarr)

                #inarr[:,:,0]=resized_image
                #inarr[:,:,1]=scipy.ndimage.gaussian_filter(2.5*idt**0.5,5)

                #scipy.ndimage.generic_filter(inarr,adaptive_filter,size=(10,10,2),output=outarr,origin=0.5)

                #tform_image=outarr[:,:,1]

                #is there a way to detect and avoid edge effects here??

                #1. smooth image
                #2. locate peak flux
                #3. apply gaussian/exponential factor strong enough to eliminate to full-res image?
                #4. use size info... to make sensible??

                #add to image

                npsub = desired_npix
                i1 = pos_i
                i2 = pos_i + npsub
                j1 = pos_j
                j2 = pos_j + npsub
                icen = np.float64(pos_i) + np.float64(npsub) / 2.0
                jcen = np.float64(pos_j) + np.float64(npsub) / 2.0

                #determine overlap with the [0, image_parameters['Npix']) mosaic bounds
                im0 = 0
                im1 = image_parameters['Npix']

                #I think this is wrong?
                #sub_image_to_add=resized_image[im0-i1:npsub-(i2-im1),im0-j1:npsub-(j2-im1)]
                #k_subimage=resized_k[im0-i1:npsub-(i2-im1),im0-j1:npsub-(j2-im1)]

                if i1 < im0:
                    is1 = np.abs(im0 - i1)
                else:
                    is1 = 0

                if i2 >= im1:
                    is2 = npsub - (i2 - im1) - 1
                else:
                    is2 = npsub - 1

                if j1 < im0:
                    js1 = np.abs(im0 - j1)
                else:
                    js1 = 0

                if j2 >= im1:
                    js2 = npsub - (j2 - im1) - 1
                else:
                    js2 = npsub - 1

                sub_image_to_add = resized_image[is1:is2, js1:js2]
                k_subimage = resized_k[is1:is2, js1:js2]

                new_image_to_add = adaptive_fast(sub_image_to_add, k_subimage,
                                                 size_factor)
                k_smoothed = adaptive_fast(k_subimage, k_subimage, size_factor)
                orig_image_to_add = sub_image_to_add

                #detect edges?

                if k_subimage.shape[0] > 2 and k_subimage.shape[1] > 2:
                    edge1 = np.mean(k_smoothed[0:2, :])
                    edge2 = np.mean(k_smoothed[:, -2:])
                    edge3 = np.mean(k_smoothed[:, 0:2])
                    edge4 = np.mean(k_smoothed[-2:, :])
                    maxedge = np.max(np.asarray([edge1, edge2, edge3, edge4]))
                else:
                    maxedge = 0.0

                #print('edge ratios: ', edge1/total_flux, edge2/total_flux, edge3/total_flux, edge4/total_flux, ' file ', this_file, size_factor)

                in_image = False

                if maxedge > 0.03 and resized_image.shape[
                        0] > 20 and new_image_to_add.shape[
                            0] == new_image_to_add.shape[1]:
                    print('omitting edge effect, max: ', maxedge,
                          os.path.basename(this_file), size_factor,
                          new_image_to_add.shape)
                    new_image_to_add *= 0.0
                    orig_image_to_add *= 0.0
                    in_image = False
                elif maxedge > 0.001 and resized_image.shape[
                        0] > 200 and new_image_to_add.shape[
                            0] == new_image_to_add.shape[1]:
                    print('omitting edge effect, max: ', maxedge,
                          os.path.basename(this_file), size_factor,
                          new_image_to_add.shape)
                    new_image_to_add *= 0.0
                    orig_image_to_add *= 0.0
                    in_image = False
                else:
                    in_image = True

                if icen >= 0.0 and jcen >= 0.0:
                    #assemble catalog entries for this object
                    #final source info
                    xcen_list.append(icen)
                    ycen_list.append(jcen)

                    final_flux_njy_list.append(np.sum(new_image_to_add))
                    if np.sum(new_image_to_add) == 0.0:
                        ab_appmag_list.append(-1)
                    else:
                        ab_appmag_list.append(-2.5 * np.log10(
                            (1.0e9) * (np.sum(new_image_to_add)) / 3632.0))
                else:
                    in_image = False

                if sub_image_to_add.shape[0] > 0 and sub_image_to_add.shape[
                        1] > 0:
                    iplace = np.max([i1, 0])
                    jplace = np.max([j1, 0])
                    new_npi = sub_image_to_add.shape[0]
                    new_npj = sub_image_to_add.shape[1]
                    single_filter_subimage[iplace:iplace + new_npi,
                                           jplace:jplace +
                                           new_npj] += orig_image_to_add
                    single_filter_subimage_smooth[iplace:iplace + new_npi,
                                                  jplace:jplace +
                                                  new_npj] += new_image_to_add

                selected_catalog['scalefactor'][i] = galaxy_data['scale'][
                    galaxy_index]
                selected_catalog['Mvir'][i] = galaxy_data['Mvir'][galaxy_index]
                selected_catalog['mstar'][i] = galaxy_data['Mstar'][
                    galaxy_index]
                selected_catalog['rhalf'][i] = galaxy_data['Rhalf_stars'][
                    galaxy_index]
                selected_catalog['orig_pix_arcsec'][i] = pixsize_arcsec

                selected_catalog['mstar_factor'][i] = mstar_factor
                selected_catalog['size_factor'][i] = size_factor

                selected_catalog['icen'][i] = icen
                selected_catalog['jcen'][i] = jcen
                selected_catalog['flux_njy'][i] = total_flux
                selected_catalog['ABmag'][i] = -2.5 * np.log10(
                    (1.0e9) * (total_flux) / 3632.0)
                selected_catalog['in_image'][i] = in_image

    print('**** Subimage data found: ', str(data_found), '  out of  ', i + 1)

    return single_filter_subimage, single_filter_subimage_smooth, selected_catalog
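
The resize step in the loop above follows the flux-preserving pattern used throughout these examples: congrid interpolates per-pixel values, so the result is renormalized by the ratio of summed fluxes. A standalone sketch, assuming the same congrid module is importable (rebin_preserve_flux is a hypothetical helper name):

import numpy as np
import congrid

def rebin_preserve_flux(image, new_npix, flux_scale=1.0):
    """Resize with congrid, then rescale so the summed flux is preserved,
    optionally scaled by flux_scale (e.g. the mstar_factor above)."""
    total = np.sum(image) * flux_scale
    out = congrid.congrid(image, (new_npix, new_npix))
    return out * (total / np.sum(out))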
Code example #30
0
File: pb_drc_MVISR_L1.py  Project: NingAnMe/pb
    def Load(self, in_file):
        if self.error:
            return
        # try:
        hdf4 = SD(in_file, SDC.READ)
        # filter out invalid values
        year_dataset = hdf4.select('Year_Count')[:]
        idx_valid = np.where(year_dataset != 0)[0]

        year = year_dataset
        day = hdf4.select('Day_Count')[:]
        msec_dataset = hdf4.select('Msec_Count')[:]
        year = year[idx_valid][0]
        day = day[idx_valid][0]
        msec_dataset = msec_dataset[idx_valid]

        dn_dataset = hdf4.select('Earth_View')[:]
        sv_dataset = hdf4.select('Space_View')[:]
        bb_dataset = hdf4.select('Black_Body_View')[:]

        dn_dataset = dn_dataset[:, idx_valid, :]
        sv_dataset = sv_dataset[:, idx_valid, :]
        bb_dataset = bb_dataset[:, idx_valid, :]

        sensor_zenith_dataset = hdf4.select('Sensor_Zenith')[:]
        solar_zenith_dataset = hdf4.select('Solar_Zenith')[:]
        relative_azimuth = hdf4.select('Relative_Azimuth')[:]
        sensor_zenith_dataset = sensor_zenith_dataset[idx_valid, :]
        solar_zenith_dataset = solar_zenith_dataset[idx_valid, :]
        relative_azimuth = relative_azimuth[idx_valid, :]

        longitude_dataset = hdf4.select('Longitude')[:]
        latitude_dataset = hdf4.select('Latitude')[:]
        longitude_dataset = longitude_dataset[idx_valid, :]
        latitude_dataset = latitude_dataset[idx_valid, :]

        coeff_dataset = hdf4.select('Calibration_coeff')[:]
        coeff_dataset = coeff_dataset[idx_valid, :]

        time = self.create_time(year, day, msec_dataset)

        self.data_shape = dn_dataset[0].shape
        shape = self.data_shape
        cols_count = shape[1]

        self.Time = self.extend_matrix_2d(time, 1, cols_count)
        self.Lats = congrid(latitude_dataset, shape, method='spline')
        self.Lons = congrid(longitude_dataset, shape, method='spline')
        self.satZenith = congrid(sensor_zenith_dataset, shape, method='spline')
        self.sunZenith = congrid(solar_zenith_dataset, shape, method='spline')
        self.RelativeAzimuth = congrid(relative_azimuth,
                                       shape,
                                       method='spline')

        for i in xrange(self.Band):
            channel_name = 'CH_{:02d}'.format(i + 1)
            self.Dn[channel_name] = dn_dataset[i, :]
            self.SV[channel_name] = congrid(sv_dataset[i, :],
                                            shape,
                                            method='spline')
            self.BB[channel_name] = congrid(bb_dataset[i, :],
                                            shape,
                                            method='spline')

            k0_dataset = self.change_1d_to_2d(coeff_dataset[:, i * 2 + 1])
            k1_dataset = self.change_1d_to_2d(coeff_dataset[:, i * 2])
            self.k0[channel_name] = self.extend_matrix_2d(
                k0_dataset, 1, cols_count)
            self.k1[channel_name] = self.extend_matrix_2d(
                k1_dataset, 1, cols_count)
            self.Ref[channel_name] = self.Dn[channel_name] * self.k0[channel_name] + \
                self.k1[channel_name]
        hdf4.end()
        # except Exception as why:
        #     print "{}.{}: {}".format(self.__class__, 'Load', why)
        #     self.error = True

        # copy the file attributes
        self.file_attr = self.read_file_attr(in_file)
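
The per-channel reflectance above is a plain linear calibration, Ref = k0 * DN + k1, with the coefficients broadcast to the image shape. A minimal sketch of that step with hypothetical numbers:

import numpy as np

dn = np.array([[512., 530.], [498., 505.]])  # hypothetical raw counts
k0 = 0.01                                    # hypothetical gain
k1 = -1.2                                    # hypothetical offset

ref = dn * k0 + k1  # same form as self.Ref[channel_name] above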
Code example #31
0
def do_hst_illustris(fieldstr, alph, Q, rf, gf, bf):

    #in nJy
    fielda_f435 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_hst-acs_f435w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
    fielda_f606 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_hst-acs_f606w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
    fielda_f775 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_hst-acs_f775w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
    fielda_f814 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_hst-acs_f814w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
    fielda_f850 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_hst-acs_f850lp_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')['IMAGE_PSF'].data

    fielda_f105 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_hst-wfc3_f105w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
    fielda_f125 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_hst-wfc3_f125w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
    fielda_f140 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_hst-wfc3_f140w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
    fielda_f160 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_hst-wfc3_f160w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')['IMAGE_PSF'].data

    ra = rf * (0.25 * fielda_f105 + 0.25 * fielda_f125 + 0.25 * fielda_f140 +
               0.25 * fielda_f160)
    ga = gf * (0.50 * fielda_f850 + 0.50 * fielda_f775)
    ba = bf * (0.50 * fielda_f435 + 0.50 * fielda_f606)

    gnew = congrid.congrid(ga, ra.shape) * 4.0  #preserve surface brightness
    bnew = congrid.congrid(ba, ra.shape) * 4.0

    print(fielda_f435.shape)
    print(ra.shape, ga.shape, gnew.shape)

    rgb_field = make_color_image.make_interactive_nasa(bnew, gnew, ra, alph, Q)

    f1 = pyplot.figure(figsize=(10.0, 10.0), dpi=600)
    pyplot.subplots_adjust(left=0.0,
                           right=1.0,
                           bottom=0.0,
                           top=1.0,
                           wspace=0.0,
                           hspace=0.0)

    axi = f1.add_subplot(111)
    axi.imshow(rgb_field,
               interpolation='nearest',
               aspect='auto',
               origin='lower')

    f1.savefig('illustris_render_' + fieldstr + '.pdf', dpi=600)
    pyplot.close(f1)
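
The hard-coded factor of 4.0 after congrid is a pixel-area correction: congrid samples per-pixel values, so rebinning an image that is (presumably) 2x finer per axis onto the coarser grid must be scaled by the area ratio to conserve total flux, and hence surface brightness. A general sketch of that correction (rebin_conserve_total is a hypothetical helper name):

import numpy as np
import congrid

def rebin_conserve_total(image, new_shape):
    """Interpolating rebin that conserves the summed flux by applying
    the pixel-area ratio; a 2:1 sampling ratio gives the factor 4.0 above."""
    area_ratio = (image.shape[0] / float(new_shape[0])) * \
                 (image.shape[1] / float(new_shape[1]))
    return congrid.congrid(image, new_shape) * area_ratio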
Code example #32
0
def psf_match(f1,f2, test=False, data1_res=1.375):

    # following tutorial here:
    # http://photutils.readthedocs.io/en/latest/photutils/psf_matching.html
    # WARNING: this is sensitive to the windowing!
    # how to properly choose smoothing?

    from photutils import create_matching_kernel, CosineBellWindow, TopHatWindow

    #### how large do we want our kernel?
    # images are 1200 x 1200 pixels
    # each pixel is 0.25", WISE PSF FWHM is ~6"
    # take 52" x 52" here for maximum safety
    limits = (500,700)

    #### generate PSFs
    psf1, res1 = load_wise_psf(f1)
    psf2, res2 = load_wise_psf(f2)

    if res1 != res2:
        raise ValueError('PSF pixel scales do not match')

    #### shrink
    psf1 = psf1[limits[0]:limits[1],limits[0]:limits[1]]
    psf2 = psf2[limits[0]:limits[1],limits[0]:limits[1]]

    ### rebin to proper pixel scale
    # following http://scipy-cookbook.readthedocs.io/items/Rebinning.html
    from congrid import congrid
    xdim = np.round(psf1.shape[0]/(data1_res/res1))
    ydim = np.round(psf1.shape[1]/(data1_res/res1))
    psf1 = congrid(psf1,[xdim,ydim])

    xdim = np.round(psf2.shape[0]/(data1_res/res2))
    ydim = np.round(psf2.shape[1]/(data1_res/res2))
    psf2 = congrid(psf2,[xdim,ydim])

    ### normalize
    psf1 /= psf1.sum()
    psf2 /= psf2.sum()

    #window = CosineBellWindow(alpha=1.5)
    window = TopHatWindow(beta=0.7) #0.42
    kernel = create_matching_kernel(psf1, psf2,window=window)
    
    if test:
        fig, ax = plt.subplots(2,3, figsize=(15, 10))
        ax = np.ravel(ax)

        ### plot PSFs
        img = ax[0].imshow(psf1/psf1.max(), cmap='Greys_r', origin='lower')
        plt.colorbar(img,ax=ax[0])
        ax[0].set_title(f1+' PSF')

        convolved_psf1 = convolve_fft(psf1, kernel,interpolate_nan='fill')
        img = ax[1].imshow(convolved_psf1/convolved_psf1.max(), cmap='Greys_r', origin='lower')
        plt.colorbar(img,ax=ax[1])
        ax[1].set_title(f1+' PSF convolved')

        img = ax[2].imshow(psf2/psf2.max(), cmap='Greys_r', origin='lower')
        plt.colorbar(img,ax=ax[2])
        ax[2].set_title(f2+' PSF')

        ### plot kernel
        img = ax[3].imshow(kernel, cmap='Greys_r', origin='lower')
        plt.colorbar(img,ax=ax[3])
        ax[3].set_title('Convolution Kernel')

        ### plot unconvolved residual
        img = ax[4].imshow((psf1-psf2)/psf2, cmap='Greys_r', origin='lower',vmin=-0.05,vmax=0.05)
        cbar = plt.colorbar(img,ax=ax[4])
        cbar.ax.set_title('percent deviation')
        ax[4].set_title('[f1-f2]/f2')

        ### plot residual
        img = ax[5].imshow((convolved_psf1-psf2)/psf2, cmap='Greys_r', origin='lower',vmin=-0.05,vmax=0.05)
        cbar = plt.colorbar(img,ax=ax[5])
        cbar.ax.set_title('percent deviation')
        ax[5].set_title('[f1(convolved)-f2]/f2')

        plt.show()

    return kernel, res1
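
A hypothetical call, assuming load_wise_psf accepts WISE band names like these (the band strings are placeholders, not confirmed by the source):

kernel, native_res = psf_match('wise_w1', 'wise_w4', test=False, data1_res=1.375)

# the kernel can then be applied to match a W1 image to the W4 resolution:
# from astropy.convolution import convolve_fft
# w1_matched = convolve_fft(w1_image, kernel)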
Code example #33
0
def single_run_test(ind,ysc1,ysc2,q,vd,pha,zl,zs):
    zeropoint = 18

    dsx_sdss     = 0.396         # pixel size of SDSS detector.


    R  = 2.9918     # R is a radius in arcsec; vd is the velocity dispersion.
    #zl = 0.2     #zl is the redshift of the lens galaxy.
    #zs = 1.0
    #vd = 520    #Velocity Dispersion.
    nnn = 512      #Image dimension
    bsz = 30.0 # arcsecs
    dsx = bsz/nnn         # pixel size of the simulated image.
    nstd = 59

    xx01 = np.linspace(-bsz/2.0,bsz/2.0,nnn)+0.5*dsx
    xx02 = np.linspace(-bsz/2.0,bsz/2.0,nnn)+0.5*dsx
    xi2,xi1 = np.meshgrid(xx01,xx02)
    #----------------------------------------------------------------------
    #ysc1 = 0.2
    #ysc2 = 0.5
    dsi = 0.03
    g_source = pyfits.getdata("./439.0_149.482739_1.889989_processed.fits")
    g_source = np.array(g_source,dtype="<d")
    g_source = p2p.pixcos2pixsdss(g_source)
    #----------------------------------------------------------------------
    xc1 = 0.0       #x coordinate of the center of lens (in units of Einstein radius).
    xc2 = 0.0       #y coordinate of the center of lens (in units of Einstein radius).
    #q   = 0.7       #Ellipticity of lens.
    rc  = 0.0       #Core size of lens (in units of Einstein radius).
    re  = re_sv(vd,zl,zs)       #Einstein radius of lens.
    #pha = 45.0      #Orientation of lens.
    lpar = np.asarray([xc1,xc2,q,rc,re,pha])
    #----------------------------------------------------------------------
    ai1,ai2,mua = lens_equation_sie(xi1,xi2,lpar)

    yi1 = xi1-ai1
    yi2 = xi2-ai2

    g_limage = lv4.call_ray_tracing(g_source,yi1,yi2,ysc1,ysc2,dsi)
    g_limage = mag_to_flux(g_limage,zeropoint)

    #pl.figure()
    #pl.contourf(xi1,xi2,g_limage)
    #pl.colorbar()
    #-------------------------------------------------------------
    # Need to calibrate the mags
    dA = Planck13.comoving_distance(zl).value*1000./(1+zl)
    Re = dA*np.sin(R*np.pi/180./3600.)
    counts  =Brightness(R,vd)
    vpar = np.asarray([counts,Re,xc1,xc2,q,pha])
    #g_lens = deVaucouleurs(xi1,xi2,xc1,xc2,counts,R,1.0-q,pha)
    g_lens = de_vaucouleurs_2d(xi1,xi2,vpar)

    g_lens = ncounts_to_flux(g_lens*1.5e-4,zeropoint)
    #-------------------------------------------------------------
    file_psf = "../PSF_and_noise/sdsspsf.fits"
    g_psf = pyfits.getdata(file_psf)-1000.0
    g_psf = g_psf/np.sum(g_psf)
    new_shape=[0,0]
    new_shape[0]=np.shape(g_psf)[0]*dsx_sdss/dsx
    new_shape[1]=np.shape(g_psf)[1]*dsx_sdss/dsx
    g_psf = rebin_psf(g_psf,new_shape)
    print(np.max(g_psf))
    g_limage = ss.fftconvolve(g_limage+g_lens,g_psf,mode="same")

    #pl.figure()
    #pl.contourf(xi1,xi2,g_limage)
    #pl.colorbar()
    #-------------------------------------------------------------
    # Need to calibrate the mags
    g_noise = noise_map(nnn,nnn,nstd,"Gaussian")
    g_noise = ncounts_to_flux(g_noise*1e-0+skycount,zeropoint)
    g_limage = g_limage+g_noise

    print(np.shape(g_limage))
    g_limage = congrid.congrid(g_limage,[128,128])
    g_limage = g_limage-np.min(g_limage)

    pl.figure()
    #pl.contourf(xi1,xi2,g_limage)
    pl.contourf(g_limage)
    pl.colorbar()
    #-------------------------------------------------------------

    output_filename = "../output_fits/"+str(ind)+".fits"
    pyfits.writeto(output_filename,g_limage,clobber=True)

    pl.show()

    return 0
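
mag_to_flux and ncounts_to_flux are project helpers whose source is not shown here. A sketch of the standard magnitude-to-flux conversion they presumably implement, under the zeropoint convention mag = zeropoint - 2.5*log10(flux):

def mag_to_flux_sketch(mag, zeropoint):
    """Hypothetical stand-in for the project's mag_to_flux helper."""
    return 10.0 ** (-0.4 * (mag - zeropoint))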
Code example #34
0
    def testCalibration(self, myim):
        #def detector_calibration_test_points (self) :
        en = 37.077
        cut = 30.
        dist_tol = 1.8
        IovS = float(self.topLevel.ui.det_snrLE.text())
        start_dist = self.dist - self.dist * 0.5
        end_dist = self.dist + self.dist * .5
        im = myim.imArray.astype(np.int64)
        imarr = cgd.congrid(im, [500, 500], method='nearest',
                            minusone=True).astype(np.int64)
        zarr = np.zeros((500, 500), dtype=np.uint8)

        bg = self.local_background(imarr)
        #imarr.tofile ("/home/harold/imarr.dat")
        # only for debug

        hpf = imarr / bg.astype(np.int64)

        self.ff = np.where((hpf > IovS) & (imarr > 20.))

        nn = len(self.ff[0])
        print 'number of pixels meeting the peak condition is %d' % (nn)

        ### equal proximity coarse search
        # in 5 pixel steps
        h = np.zeros((100, 100), dtype=np.float32)
        for i in range(100):
            print i
            for j in range(100):
                dist = self.compdist(self.ff, [i * 5, j * 5])
                mx = int(dist.max()) + 1
                mn = int(dist.min())

                #nbins = int(dist.max() - dist.min()+1)
                histo, edges = np.histogram(dist,
                                            range=[mn, mx],
                                            bins=(mx - mn))
                h[i, j] = np.max(histo)
        maxsub = np.argmax(h)
        maxrow = maxsub / 100
        maxcol = maxsub - maxrow * 100
        print maxsub, maxrow, maxcol

        self.eqprox[0] = maxrow / 100.
        self.eqprox[1] = maxcol / 100.

        # is this even being used
        dist = self.compdist(self.ff, [maxrow, maxcol])
        nbins = int(dist.max() - dist.min() + 1)
        h = np.histogram(dist, bins=nbins)

        ### equal proximity search fine (in 500 space)
        h = np.zeros((11, 11), dtype=np.float32)
        for i in range(-5, 6):
            for j in range(-5, 6):
                # note in gse_ada , there is a 5 + in the index calculation
                dist = self.compdist(self.ff,
                                     [5. * maxrow + i, 5. * maxcol + j])
                nbins = int(dist.max() - dist.min() + 1)
                mx = int(dist.max() + 1)
                mn = int(dist.min())
                histo, edges = np.histogram(dist, range=[mn, mx], bins=mx - mn)
                h[i + 5][j + 5] = np.max(histo)
        maxsub = np.argmax(h)

        # then back in 100 space
        xy = self.xy_from_ind(11, 11, maxsub)
        maxrow = maxrow + (xy[0] - 5) / 5.
        maxcol = maxcol + (xy[1] - 5) / 5.
        xy0 = [maxrow, maxcol]
        self.eqproxfine[0] = maxrow / 100.
        self.eqproxfine[1] = maxcol / 100.

        self.beamx = xy0[0] / 100. * self.nopixx
        self.beamy = xy0[1] / 100. * self.nopixy
        xy0[0] *= 5.
        xy0[1] *= 5.
        dist = self.compdist(self.ff, xy0)
        mn = int(dist.min())
        mx = int(dist.max() + 1)
        nbins = mx - mn
        h, edges = np.histogram(dist, range=[mn, mx], bins=nbins)
        h1 = np.copy(h)
        #h = h[0]
        numH = len(h)

        while (np.max(h1) > cut):
            i = np.argmax(h1)
            m = np.max(h1)
            h1[i] = 0.
            if (i > 0 and i < numH - 1):
                j = i - 1
                while (j >= 0):
                    if (h1[j] > cut / 2.):
                        h[i] += h[j]
                        h[j] = 0.
                        h1[j] = 0.
                    else:
                        j = 0
                    j = j - 1

                j = i + 1
                while (j <= numH - 1):
                    if (h1[j] > cut / 2.):
                        h[i] += h[j]
                        h[j] = 0
                        h1[j] = 0
                    else:
                        j = numH - 1
                    j = j + 1
        # NOTE - should be cut not cut/2.
        fh = np.where(h > cut)[0]
        numB = len(fh)
        # number of different rings with sufficient number of points
        rings = np.zeros(nn, dtype=np.int64)
        for i in range(nn):
            c = np.absolute(np.subtract(dist[i], edges[fh]))
            ri = np.min(c)
            kk = np.argmin(c)
            if (ri < dist_tol):
                rings[i] = kk
            else:
                rings[i] = -1

        nr = np.zeros(numB, dtype=np.int64)
        ds = np.zeros(numB, dtype=np.float32)
        for k in range(numB):
            r = np.where(rings == k)[0]
            nr[k] = len(r)
        print "Classes Done ...\r\n"
        m = np.max(nr)
        print 'Max of nr is : %d' % (m)

        # x,y coords of points in ring
        self.rgx = np.zeros((numB, m), dtype=np.float32)
        self.rgy = np.zeros((numB, m), dtype=np.float32)
        self.rgN = np.zeros(numB, dtype=np.uint16)

        self.numRings = numB
        for k in range(numB):
            r = np.where(rings == k)[0]
            ds[k] = np.mean(dist[r]) * self.nopixx / 500. * self.psizex
            print 'ds of %d is : %f' % (k, ds[k])
            #xya=self.xy_from_indArr(500,500,self.ff[r])
            self.rgy[k, 0:nr[k]] = self.ff[0][r] / 500.
            self.rgx[k, 0:nr[k]] = self.ff[1][r] / 500.
            self.rgN[k] = len(r)

        step = (end_dist - start_dist) / 1000.
        ddists = np.zeros((2, 1000), dtype=np.float32)
        for i in range(1000):
            thisstep = start_dist + i * step
            ddists[0][i] = thisstep
            ddists[1][i] = self.sum_closest_refs(ds, thisstep)
        aa = np.argmin(ddists[1][:])
        dst = ddists[0][aa]

        print 'Coarse estimated detector distance : %f' % (dst)

        # fine tune detector distance
        start_dist = dst - step * 5.
        end_dist = dst + step * 5.
        step = (end_dist - start_dist) / 1000.
        for i in range(1000):
            ddists[0][i] = start_dist + i * step
            ddists[1][i] = self.sum_closest_refs(ds, ddists[0][i])
        aa = np.argmin(ddists[1][:])
        dst = ddists[0][aa]
        print 'Refined estimated detector distance : %f' % (dst)

        # use only peaks which match standard and are unique
        cr = np.zeros((2, numB), dtype=np.float32)
        for i in range(numB):
            cr[0][i] = self.closest_ref(ds[i], dst)
            cr[1][i] = self.closest_ref_d(ds[i], dst)

        X = self.rgx[0][0:nr[0]] * self.nopixx / 500.
        Y = self.rgy[0][0:nr[0]] * self.nopixx / 500.

        dspcc = np.ones(nr[0]) * cr[1][0]

        self.calPeaks.emit()
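
The coarse search above scores each trial beam center by the tallest peak in the histogram of pixel-to-center distances: the correct center makes whole diffraction rings collapse into single distance bins. A compact sketch of that scoring, assuming peak pixel coordinates in ys/xs arrays (center_score is a hypothetical helper name):

import numpy as np

def center_score(ys, xs, cy, cx):
    """Peak height of the radial-distance histogram for a trial center;
    larger means the detected peaks are more concentric about (cy, cx)."""
    dist = np.sqrt((ys - cy) ** 2 + (xs - cx) ** 2)
    lo, hi = int(dist.min()), int(dist.max()) + 1
    histo, _ = np.histogram(dist, range=[lo, hi], bins=hi - lo)
    return histo.max()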
Code example #35
0
    def testCalibration_esd(self, myim):
        esd = np.zeros(9, dtype=np.float32)
        en = A_to_kev(self.wavelength)
        cut = 30.
        dist_tol = 1.8
        IovS = float(self.topLevel.ui.det_snrLE.text())
        start_dist = self.dist - self.dist * 0.5
        end_dist = self.dist + self.dist * .5
        im = myim.imArray.astype(np.int64)
        imarr = cgd.congrid(im, [500, 500], method='nearest',
                            minusone=True).astype(np.int64)
        zarr = np.zeros((500, 500), dtype=np.uint8)

        bg = self.local_background(imarr)
        imarr.tofile("/home/harold/imarr.dat")
        # only for debug

        hpf = imarr / bg.astype(np.int64)

        self.ff = np.where((hpf > IovS) & (imarr > 20.))

        nn = len(self.ff[0])
        print 'number of pixels meeting the peak condition is %d' % (nn)

        ### equal proximity coarse search
        # in 5 pixel steps
        h = np.zeros((100, 100), dtype=np.float32)
        for i in range(100):
            print i
            for j in range(100):
                dist = self.compdist(self.ff, [i * 5, j * 5])
                mx = int(dist.max()) + 1
                mn = int(dist.min())

                #nbins = int(dist.max() - dist.min()+1)
                histo, edges = np.histogram(dist,
                                            range=[mn, mx],
                                            bins=(mx - mn))
                h[i, j] = np.max(histo)
        maxsub = np.argmax(h)
        maxrow = maxsub / 100
        maxcol = maxsub - maxrow * 100
        print maxsub, maxrow, maxcol

        self.eqprox[0] = maxrow / 100.
        self.eqprox[1] = maxcol / 100.

        # is this even being used
        dist = self.compdist(self.ff, [maxrow, maxcol])
        nbins = int(dist.max() - dist.min() + 1)
        h = np.histogram(dist, bins=nbins)

        ### equal proximity search fine (in 500 space)
        h = np.zeros((11, 11), dtype=np.float32)
        for i in range(-5, 6):
            for j in range(-5, 6):
                # note in gse_ada , there is a 5 + in the index calculation
                dist = self.compdist(self.ff,
                                     [5. * maxrow + i, 5. * maxcol + j])
                nbins = int(dist.max() - dist.min() + 1)
                mx = int(dist.max() + 1)
                mn = int(dist.min())
                histo, edges = np.histogram(dist, range=[mn, mx], bins=mx - mn)
                h[i + 5][j + 5] = np.max(histo)
        maxsub = np.argmax(h)

        # then back in 100 space
        xy = self.xy_from_ind(11, 11, maxsub)
        maxrow = maxrow + (xy[0] - 5) / 5.
        maxcol = maxcol + (xy[1] - 5) / 5.
        xy0 = [maxrow, maxcol]
        self.eqproxfine[0] = maxrow / 100.
        self.eqproxfine[1] = maxcol / 100.

        self.beamx = xy0[0] / 100. * self.nopixx
        self.beamy = xy0[1] / 100. * self.nopixy
        xy0[0] *= 5.
        xy0[1] *= 5.
        dist = self.compdist(self.ff, xy0)
        mn = int(dist.min())
        mx = int(dist.max() + 1)
        nbins = mx - mn
        h, edges = np.histogram(dist, range=[mn, mx], bins=nbins)
        h1 = np.copy(h)
        #h = h[0]
        numH = len(h)

        while (np.max(h1) > cut):
            i = np.argmax(h1)
            m = np.max(h1)
            h1[i] = 0.
            if (i > 0 and i < numH - 1):
                j = i - 1
                while (j >= 0):
                    if (h1[j] > cut / 2.):
                        h[i] += h[j]
                        h[j] = 0.
                        h1[j] = 0.
                    else:
                        j = 0
                    j = j - 1

                j = i + 1
                while (j <= numH - 1):
                    if (h1[j] > cut / 2.):
                        h[i] += h[j]
                        h[j] = 0
                        h1[j] = 0
                    else:
                        j = numH - 1
                    j = j + 1
        # NOTE - should be cut not cut/2.
        fh = np.where(h > cut)[0]
        numB = len(fh)
        # number of different rings with sufficient number of points
        rings = np.zeros(nn, dtype=np.int64)
        for i in range(nn):
            c = np.absolute(np.subtract(dist[i], edges[fh]))
            ri = np.min(c)
            kk = np.argmin(c)
            if (ri < dist_tol):
                rings[i] = kk
            else:
                rings[i] = -1

        nr = np.zeros(numB, dtype=np.int64)
        ds = np.zeros(numB, dtype=np.float32)
        for k in range(numB):
            r = np.where(rings == k)[0]
            nr[k] = len(r)
        print "Classes Done ...\r\n"
        m = np.max(nr)
        print 'Max of nr is : %d' % (m)

        # x,y coords of points in ring
        self.rgx = np.zeros((numB, m), dtype=np.float32)
        self.rgy = np.zeros((numB, m), dtype=np.float32)
        self.rgN = np.zeros(numB, dtype=np.uint16)

        self.numRings = numB
        for k in range(numB):
            r = np.where(rings == k)[0]
            ds[k] = np.mean(dist[r]) * self.nopixx / 500. * self.psizex
            print 'ds of %d is : %f' % (k, ds[k])
            # xya=self.xy_from_indArr (500,500,self.ff[r])
            self.rgy[k, 0:nr[k]] = self.ff[0][r]
            self.rgx[k, 0:nr[k]] = self.ff[1][r]
            self.rgN[k] = len(r)

        step = (end_dist - start_dist) / 1000.
        ddists = np.zeros((2, 1000), dtype=np.float32)
        for i in range(1000):
            thisstep = start_dist + i * step
            ddists[0][i] = thisstep
            ddists[1][i] = self.sum_closest_refs(ds, thisstep)
        aa = np.argmin(ddists[1][:])
        dst = ddists[0][aa]

        print 'Coarse estimated detector distance : %f' % (dst)

        # fine tune detector distance
        start_dist = dst - step * 5.
        end_dist = dst + step * 5.
        step = (end_dist - start_dist) / 1000.
        for i in range(1000):
            ddists[0][i] = start_dist + i * step
            ddists[1][i] = self.sum_closest_refs(ds, ddists[0][i])
        aa = np.argmin(ddists[1][:])
        dst = ddists[0][aa]
        print 'Refined estimated detector distance : %f' % (dst)

        # use only peaks which match standard and are unique
        cr = np.zeros((2, numB), dtype=np.float32)
        omissions = np.zeros(numB, dtype=np.int32)
        for i in range(numB):
            cr[0][i] = self.closest_ref(ds[i], dst)
            cr[1][i] = self.closest_ref_d(ds[i], dst)
            if (cr[0][i] > .2):
                omissions[i] = 1

        rrr = 0
        X = self.rgx[0][0:nr[0]] * self.nopixx / 500.
        Y = self.rgy[0][0:nr[0]] * self.nopixx / 500.
        Z = np.zeros(nr[0], dtype=np.float32)
        crval = cr[1][0]
        dspcc = np.ones(nr[0]) * crval
        nus = np.zeros(nr[0], dtype=np.float32)
        tths = np.zeros(nr[0], dtype=np.float32)
        tthval = tth_from_en_and_d(en, crval)
        tths = np.ones(nr[0]) * tthval

        for rrr in range(1, numB):
            if (omissions[rrr] == 0):
                pos = len(X)
                X = np.concatenate((X, self.rgx[rrr][0:nr[rrr]]))
                Y = np.concatenate((Y, self.rgy[rrr][0:nr[rrr]]))
                crval = cr[1][rrr]
                newcr = np.ones(nr[rrr], dtype=np.float32) * crval
                dspcc = np.concatenate((dspcc, newcr))
                tt = np.zeros(nr[rrr], dtype=np.float32)
                nu = np.zeros(nr[rrr], dtype=np.float32)
                tths = np.concatenate((tths, tt))
                nus = np.concatenate((nus, nu))
                newlen = pos + nr[rrr]
                print 'RRR is ', rrr
                for i in range(pos, pos + nr[rrr]):
                    print i

                    tths[i] = tth_from_en_and_d(en, dspcc[i])
                    nus[i] = self.get_nu_from_pix([X[i], Y[i]])

        p = np.zeros(6, dtype=np.float32)
        p[0] = self.dist
        p[1] = self.beamx
        p[2] = self.beamy
        print 'Starting calibration refinement'

        pars = {'value': 0., 'fixed': 0, 'limited': [0, 0], 'limits': [0., 0]}
        parinfo = []
        for i in range(6):
            parinfo.append(pars.copy())

        NNN = len(X)
        arr1 = self.nopixx
        arr2 = self.nopixy
        imdat = myim.imArray
        for i in range(NNN):
            if not (X[i] < 5 or X[i] > (arr1 - 6) or Y[i] < 5 or Y[i] >
                    (arr2 - 6)):
                if (imdat[X[i], Y[i]] < imdat[X[i] - 1, Y[i]]):
                    X[i] = X[i] - 1
                if (imdat[X[i], Y[i]] < imdat[X[i] + 1, Y[i]]):
                    X[i] = X[i] + 1
                if (imdat[X[i], Y[i]] < imdat[X[i], Y[i] - 1]):
                    Y[i] = Y[i] - 1
                if (imdat[X[i], Y[i]] < imdat[X[i], Y[i] + 1]):
                    Y[i] = Y[i] + 1

                if (imdat[X[i], Y[i]] < imdat[X[i] - 1, Y[i]]):
                    X[i] = X[i] - 1
                if (imdat[X[i], Y[i]] < imdat[X[i] + 1, Y[i]]):
                    X[i] = X[i] + 1
                if (imdat[X[i], Y[i]] < imdat[X[i], Y[i] - 1]):
                    Y[i] = Y[i] - 1
                if (imdat[X[i], Y[i]] < imdat[X[i], Y[i] + 1]):
                    Y[i] = Y[i] + 1

                if (imdat[X[i], Y[i]] < imdat[X[i] - 1, Y[i]]):
                    X[i] = X[i] - 1
                if (imdat[X[i], Y[i]] < imdat[X[i] + 1, Y[i]]):
                    X[i] = X[i] + 1
                if (imdat[X[i], Y[i]] < imdat[X[i], Y[i] - 1]):
                    Y[i] = Y[i] - 1
                if (imdat[X[i], Y[i]] < imdat[X[i], Y[i] + 1]):
                    Y[i] = Y[i] + 1

            if not (X[i] < 5 or X[i] > (arr1 - 6) or Y[i] < 5 or Y[i] >
                    (arr2 - 6)):
                xxx = np.arange(-5, 6, 1)
                a = np.zeros(3, dtype=np.float32)
                nus[i] = self.get_nu_from_pix((X[i], Y[i]))
                if (nus[i] > 45. and nus[i] < 135.):
                    ix = int(round(X[i], 0))
                    iy = int(round(Y[i], 0))
                    sec = myim.imArray[iy, ix - 5:ix + 6]
                    a[0] = np.max(sec) - np.min(sec)
                    a[1] = 0
                    a[2] = 2
                    #res = gaussfit (sec, xxx, a, nterms=4)
                    res = gaussfit1d(xxx, sec, a)
                    #print res
                else:
                    ix = int(round(X[i], 0))
                    iy = int(round(Y[i], 0))
                    sec = myim.imArray[iy - 5:iy + 6, ix]

                    #res = gaussfit (sec, xxx, a, nterms=4)
                    #res =  gauss_lsq(xxx, sec)
                    a[0] = np.max(sec) - np.min(sec)
                    a[1] = 0
                    a[2] = 2
                    res = gaussfit1d(xxx, sec, a)
                    #print res
        er = 1. / dspcc
        Z = np.zeros(NNN, dtype=np.float32)
        # seed the fit: load the starting parameter values into parinfo
        for idx in range(6):
            parinfo[idx]['value'] = p[idx]

        self.calPeaks.emit()
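
A_to_kev and tth_from_en_and_d are project helpers; the physics behind them is Bragg's law. A sketch under that assumption (the name with _sketch is hypothetical):

import numpy as np

def tth_from_en_and_d_sketch(en_kev, d_angstrom):
    """Two-theta in degrees from photon energy (keV) and d-spacing (Angstrom),
    via lambda = 2 d sin(theta) with lambda[A] ~ 12.398 / E[keV]."""
    lam = 12.398 / en_kev
    return 2.0 * np.degrees(np.arcsin(lam / (2.0 * d_angstrom)))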
Code example #36
0
        axi = fig.add_subplot(ny, nx, totalcount + 1)
        axi.set_xticks([])
        axi.set_yticks([])

        #plot grayscale galaxy image
        data = bb[camera].data[fili[i], 350:450, 350:450] * (lams[i]**2)
        print fils[fili[i]], np.max(data), 0.01 * np.max(data), sigma_pix[i]
        cdata = np.zeros_like(data)

        resc = sp.ndimage.filters.gaussian_filter(data * 1.0,
                                                  sigma_pix[i],
                                                  output=cdata)
        print fils[fili[i]], np.max(cdata), 0.01 * np.max(
            cdata), sigma_pix[i], np.sum(data) / np.sum(cdata)

        cdata = congrid.congrid(cdata, (40, 40))
        print fils[fili[i]], np.max(cdata), 0.01 * np.max(
            cdata), sigma_pix[i], np.sum(data) / np.sum(cdata)

        norm = ImageNormalize(stretch=LogStretch(),
                              vmin=0.01,
                              vmax=0.35,
                              clip=True)
        axi.imshow(cdata * np.max(data) / np.max(cdata),
                   origin='lower',
                   cmap='Greys',
                   norm=norm,
                   interpolation='nearest')
        #axi.annotate('{:3.2f}$\mu m$'.format(image_hdu.header['EFLAMBDA']),xy=(0.05,0.05),xycoords='axes fraction',color='white',ha='left',va='center',size=6)

        totalcount = totalcount + 1
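
ImageNormalize with LogStretch (from astropy.visualization) is what compresses the dynamic range in the panel above. A self-contained sketch of the same display pattern on toy data:

import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import ImageNormalize, LogStretch

img = np.random.lognormal(mean=-3.0, sigma=1.0, size=(40, 40))  # toy image
norm = ImageNormalize(stretch=LogStretch(), vmin=0.01, vmax=0.35, clip=True)
plt.imshow(img, origin='lower', cmap='Greys', norm=norm,
           interpolation='nearest')
plt.show()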
Code example #37
0
File: sebi-cf.py  Project: jrgcolin/sebi-cf
def process(fproject):
    """Backbone: Tasks are processed the following order:
    - Get time
    - Setting common constants
    - Instantiate a new project
    - Load images
    - Try load optional images, else use surrogate
    - Crop images
    - Generate atmospheric forcing layers
    - Input diagnostic
    - kB-1 sequence
    - Radiative budget sequence
    - Downscaling
    - Stability sequence
    - Low resolution at-limit parameters
    - Upscaling
    - External resistances and gradients
    - Saving to files
    - Get time and save logs
    
    """
    # Dedicated logger
    process_logger = logging.getLogger("SEBI-CF.Process")

    #    try:
    # Get time
    time0 = time.time()
    # Setting common constants
    cd = 0.2
    ct = 0.01
    cp = 1005.
    es0 = 610.7  #Pa
    g = 9.81
    hs = 0.009
    k = 0.4
    p0 = 101325.
    pdtl = 0.71
    gamma = 67.
    rd = 287.04
    rv = 461.05
    sigma = 5.678E-8

    # Instantiate a new project
    myproj = Project.project(fproject)
    process_logger.info("Instantiate a new project")

    #    except Exception, err:
    #        sys.stderr.write('ERROR: %s\n' % str(err))

    #try:
    # Calculate scales
    myproj.setGrids()

    # Load images
    widgets = [
        ' Loading surface data:    ',
        progressBar.Percentage(), ' ',
        progressBar.Bar(marker='-', left='[', right=']'), ' ', ' ', ' ', ' '
    ]
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=8).start()

    albedo = myproj.read(myproj.falbedo)
    pbar.update(1)
    ts = myproj.read(myproj.flst)
    pbar.update(2)
    ndvi = myproj.read(myproj.fndvi)
    pbar.update(3)

    # Cleanup
    albedo, log = l.cleanup(albedo, "albedo")
    process_logger.info(log)
    ts, log = l.cleanup(ts, "ts")
    process_logger.info(log)
    ndvi, log = l.cleanup(ndvi, "ndvi")
    process_logger.info(log)

    # Try load optional images, else use surrogate
    if myproj.femissivity != 'undef':
        emi = myproj.read(myproj.femissivity)
        process_logger.info("Emissivity image found")
    else:
        emi = l.ndvi2emi(ndvi)

    pbar.update(4)

    if myproj.ffc != 'undef':
        fc = myproj.read(myproj.ffc)
        process_logger.info("Fc image found")
    else:
        fc = l.ndvi2fc(ndvi)

    pbar.update(5)

    if myproj.fhv != 'undef':
        hv = myproj.read(myproj.fhv)
        process_logger.info("Hv image found")
        hv = l.substitute(hv, 0., 0.01)
        z0m = l.hv2z0m(hv)
    else:
        z0m = l.ndvi2z0m(ndvi)
        z0m = l.substitute(z0m, 0., 0.000001)
        hv = l.z0m2hv(z0m)
        hv = l.substitute(hv, 0., 0.01)
    pbar.update(6)

    if myproj.flai != 'undef':
        lai = myproj.read(myproj.flai)
        process_logger.info("LAI image found")
    else:
        lai = l.ndvi2lai(ndvi)
    pbar.update(7)

    if myproj.mask != 'undef':
        mask = myproj.read(myproj.mask)
        process_logger.info("Mask image found")
    else:
        mask = ndvi - ndvi

    if myproj.RnDaily != 'undef':
        RnDaily = myproj.read(myproj.RnDaily)
        process_logger.info("RnDaily image found")
    else:
        pass  # no daily net radiation supplied; ETdaily is skipped downstream

    pbar.update(8)

    pbar.finish()

    # Crop images
    albedo = albedo[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    ts = ts[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    ndvi = ndvi[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    emi = emi[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    fc = fc[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    hv = hv[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    z0m = z0m[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    lai = lai[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    mask = mask[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    if myproj.RnDaily != 'undef':
        RnDaily = RnDaily[0:myproj.imgDims[0], 0:myproj.imgDims[1]]

    # Generate atmospheric forcing layers
    widgets = [
        ' Loading PBL data:        ',
        progressBar.Percentage(), ' ',
        progressBar.Bar(marker='-', left='[', right=']'), ' ', ' ', ' ', ' '
    ]
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=10).start()

    if myproj.atmmode == '1D':
        hg = n.zeros(myproj.imgDims, dtype=float) + myproj.hg
        pbar.update(1)
        hr = n.zeros(myproj.imgDims, dtype=float) + myproj.hr
        pbar.update(2)
        lwdw = n.zeros(myproj.imgDims, dtype=float) + myproj.lwdw
        pbar.update(3)
        pg = n.zeros(myproj.imgDims, dtype=float) + myproj.pg
        pbar.update(4)
        pr = n.zeros(myproj.imgDims, dtype=float) + myproj.pr
        pbar.update(5)
        qg = n.zeros(myproj.imgDims, dtype=float) + myproj.qg
        pbar.update(6)
        qr = n.zeros(myproj.imgDims, dtype=float) + myproj.qr
        pbar.update(7)
        swdw = n.zeros(myproj.imgDims, dtype=float) + myproj.swdw
        pbar.update(8)
        tr = n.zeros(myproj.imgDims, dtype=float) + myproj.tr
        pbar.update(9)
        ur = n.zeros(myproj.imgDims, dtype=float) + myproj.ur
        pbar.update(10)
    if myproj.atmmode == '2D':
        hg = myproj.read(myproj.fhg)
        pbar.update(1)
        hr = myproj.read(myproj.fhr)
        pbar.update(2)
        lwdw = myproj.read(myproj.flwdw)
        pbar.update(3)
        pg = myproj.read(myproj.fpg)
        pbar.update(4)
        pr = myproj.read(myproj.fpr)
        pbar.update(5)
        qg = myproj.read(myproj.fqg)
        pbar.update(6)
        qr = myproj.read(myproj.fqr)
        pbar.update(7)
        swdw = myproj.read(myproj.fswdw)
        pbar.update(8)
        tr = myproj.read(myproj.ftr)
        pbar.update(9)
        ur = myproj.read(myproj.fur)
        pbar.update(10)

        # Additional cleanup
        swdw, log = l.cleanup(swdw, "swdw")
        process_logger.info(log)
        lwdw, log = l.cleanup(lwdw, "lwdw")
        process_logger.info(log)
        if myproj.RnDaily != 'undef':
            RnDaily, log = l.cleanup(RnDaily, "RnDaily")
            process_logger.info(log)

        # Crop images
        hg = hg[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        hr = hr[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        lwdw = lwdw[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        pg = pg[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        pr = pr[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        qg = qg[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        qr = qr[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        swdw = swdw[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        tr = tr[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        ur = ur[0:myproj.imgDims[0], 0:myproj.imgDims[1]]

        if myproj.pressureUnit == "hPa":
            pg = pg * 100.
            pr = pr * 100.
        if myproj.pressureLevel == "SL":
            pg = l.ps_sea2gnd(pg, hg)

        #TMP TMP TMP
        #search = n.where(mask == 5.5)
        #ur[search] = ur[search]*1.2

    pbar.finish()
    # Apply mask
    if myproj.mask != 'undef':
        search = n.where(mask == 0)

        albedo[search] = n.nan
        ts[search] = n.nan
        ndvi[search] = n.nan
        emi[search] = n.nan
        fc[search] = n.nan
        hv[search] = n.nan
        z0m[search] = n.nan
        lai[search] = n.nan
        hg[search] = n.nan
        hr[search] = n.nan
        lwdw[search] = n.nan
        pg[search] = n.nan
        pr[search] = n.nan
        qg[search] = n.nan
        qr[search] = n.nan
        swdw[search] = n.nan
        tr[search] = n.nan
        ur[search] = n.nan
        if myproj.RnDaily != 'undef':
            RnDaily[search] = n.nan

    # Input diagnostic
    widgets = [
        ' Input diagnostic:        ',
        progressBar.Percentage(), ' ',
        progressBar.Bar(marker='-', left='[', right=']'), ' ', ' ', ' ', ' '
    ]
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=18).start()
    l.getStats(albedo, 'albedo')
    pbar.update(1)
    l.getStats(ts, 'ts')
    pbar.update(2)
    l.getStats(ndvi, 'ndvi')
    pbar.update(3)
    l.getStats(emi, 'emi')
    pbar.update(4)
    l.getStats(fc, 'fc')
    pbar.update(5)
    l.getStats(hv, 'hv')
    pbar.update(6)
    l.getStats(z0m, 'z0m')
    pbar.update(7)
    l.getStats(lai, 'lai')
    pbar.update(8)
    l.getStats(hg, 'hg')
    pbar.update(9)
    l.getStats(hr, 'hr')
    pbar.update(10)
    l.getStats(lwdw, 'lwdw')
    pbar.update(11)
    l.getStats(pg, 'pg')
    pbar.update(12)
    l.getStats(pr, 'pr')
    pbar.update(13)
    l.getStats(qg, 'qg')
    pbar.update(14)
    l.getStats(qr, 'qr')
    pbar.update(15)
    l.getStats(swdw, 'swdw')
    pbar.update(16)
    l.getStats(tr, 'tr')
    pbar.update(17)
    l.getStats(ur, 'ur')
    pbar.update(18)

    pbar.finish()

    # kB-1 sequence
    widgets = [
        ' Running kB-1 model:      ',
        progressBar.Percentage(), ' ',
        progressBar.Bar(marker='-', left='[', right=']'), ' ', ' ', ' ', ' '
    ]
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=100).start()
    if myproj.kbMode == "Massman":
        process_logger.info("Launching kB-1 model")
        kB_1, z0h = l.kB(cd, ct, fc, k, hg, hr, hs, hv, lai, ndvi, p0, pr, tr,
                         ur, z0m)

    else:
        kB_1 = n.zeros(myproj.imgDims, dtype=float) + 4.
        z0h = z0m / n.exp(kB_1)

    l.getStats(kB_1, 'kB_1')
    l.getStats(z0h, 'z0h')

    pbar.update(100)
    pbar.finish()

    # Radiative budget
    widgets = [
        ' Radiative budget:        ',
        progressBar.Percentage(), ' ',
        progressBar.Bar(marker='-', left='[', right=']'), ' ', ' ', ' ', ' '
    ]
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=3).start()
    process_logger.info("Launching Radiative budget")
    myproj.logs += '\n\nRadiative budget:'

    Rn = l.Rn(albedo, emi, lwdw, sigma, swdw, ts)
    l.getStats(Rn, 'Rn')
    pbar.update(1)

    G0 = l.G0(fc, Rn)
    l.getStats(G0, 'G0')
    pbar.update(2)

    G0_Rn = G0 / Rn
    l.getStats(G0_Rn, 'G0_Rn')

    G0_Rn, log = l.cleanup(G0_Rn, "G0_Rn")
    myproj.logs += log

    pbar.update(3)

    pbar.finish()

    # Downscaling
    widgets = [
        ' Downscaling:             ',
        progressBar.Percentage(), ' ',
        progressBar.Bar(marker='-', left='[', right=']'), ' ', ' ', ' ', ' '
    ]
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=12).start()
    process_logger.info("Launching Downscaling")

    low_z0m = l.downscaling(z0m, myproj)
    l.getStats(low_z0m, 'low_z0m')
    pbar.update(1)

    low_z0h = l.downscaling(z0h, myproj)
    l.getStats(low_z0h, 'low_z0h')
    pbar.update(2)

    low_ts = l.downscaling(ts, myproj)
    l.getStats(low_ts, 'low_ts')
    pbar.update(3)

    low_Rn = l.downscaling(Rn, myproj)
    l.getStats(low_Rn, 'low_Rn')
    pbar.update(4)

    low_G0 = l.downscaling(G0, myproj)
    l.getStats(low_G0, 'low_G0')
    pbar.update(5)

    low_ur = l.downscaling(ur, myproj)
    l.getStats(low_ur, 'low_ur')
    pbar.update(6)

    low_hr = l.downscaling(hr, myproj)
    l.getStats(low_hr, 'low_hr')
    pbar.update(7)

    low_pr = l.downscaling(pr, myproj)
    l.getStats(low_pr, 'low_pr')
    pbar.update(8)

    low_pg = l.downscaling(pg, myproj)
    l.getStats(low_pg, 'low_pg')
    pbar.update(9)

    low_qr = l.downscaling(qr, myproj)
    l.getStats(low_qr, 'low_qr')
    pbar.update(10)

    low_qg = l.downscaling(qg, myproj)
    l.getStats(low_qg, 'low_qg')
    pbar.update(11)

    low_tr = l.downscaling(tr, myproj)
    l.getStats(low_tr, 'low_tr')
    pbar.update(12)

    pbar.finish()

    # Stability sequence
    widgets = [
        ' Stability sequence:      ',
        progressBar.Percentage(), ' ',
        progressBar.Bar(marker='-', left='[', right=']'), ' ', ' ', ' ', ' '
    ]
    pbar = progressBar.ProgressBar(widgets=widgets,
                                   maxval=myproj.gridNb[0] *
                                   myproj.gridNb[1]).start()

    process_logger.info("Launching Stability sequence")

    low_d0 = l.z0m2d0(low_z0m)
    l.getStats(low_d0, 'low_d0')
    ustar_i = l.u2ustar(low_d0, low_hr, k, low_ur, low_z0m)
    l.getStats(ustar_i, 'ustar_i')
    ra_i = l.ra(low_d0, low_hr, low_z0h)
    l.getStats(ra_i, 'ra_i')
    low_delta_a = l.tpot(cp,low_pg,p0,low_qg,rd,low_ts) - \
                    l.tpot(cp,low_pr,p0,low_qr,rd,low_tr)
    l.getStats(low_delta_a, 'low_delta_a')
    low_es = l.esat(es0, low_ts)
    l.getStats(low_es, 'low_es')
    low_e = l.eact(low_pg, low_qg, rd, rv)
    l.getStats(low_e, 'low_e')
    low_rho = l.rho(low_e,low_pg,low_qg,\
                    rd,low_ts)
    l.getStats(low_rho, 'low_rho')
    H_i = l.H(cp, low_delta_a, k, ra_i, low_rho, ustar_i)
    l.getStats(H_i, 'H_i')
    delta = l.delta(low_es, low_ts)
    l.getStats(delta, 'delta')
    Le_i = (delta * rd * (low_ts)**2) / (0.622 * low_es)
    l.getStats(Le_i, 'Le_i')
    L_i = (-ustar_i**3 * low_rho) / (k * g * 0.61 * (low_Rn - low_G0) / Le_i)
    l.getStats(L_i, 'L_i')
    #pbar.update(1)

    # Modif >><<

    H_ic = low_delta_a * k * low_rho * cp
    L_ic = -low_rho * cp * (low_ts * (1 + 0.61 * low_qr)) / (k * g)
    ustar_i = k * low_ur / n.log((low_hr - low_d0) / low_z0m)
    H_i = H_ic * ustar_i / n.log((low_hr - low_d0) / low_z0h)
    H_target = H_i
    L_i = L_i - L_i  # reset L_i to a zero array of the right shape before iterating

    # >><<

    # Starting the iterative sequence
    vars = n.zeros([11, 1], dtype=float)
    # Variables for output
    slvUstar = n.zeros([myproj.gridNb[0], myproj.gridNb[1]], dtype=float)
    slvH = n.zeros([myproj.gridNb[0], myproj.gridNb[1]], dtype=float)
    slvL = n.zeros([myproj.gridNb[0], myproj.gridNb[1]], dtype=float)
    iterator = n.zeros([myproj.gridNb[0], myproj.gridNb[1]], dtype=float)

    if myproj.iterate == "True":
        for i in n.arange(0, myproj.gridNb[0]):
            for j in n.arange(0, myproj.gridNb[1]):

                stabDif = 10.
                stabLoops = 0

                while stabDif > 0.01 and stabLoops < 100:
                    L_i[i, j] = L_ic[i, j] * (ustar_i[i, j]**3) / H_i[i, j]
                    ustar_i[i, j] = k * low_ur[i, j] / (n.log(
                        (low_hr[i, j] - low_d0[i, j]) / low_z0m[i, j]) - l.Bw(
                            low_hr[i, j], L_i[i, j], low_z0h[i, j],
                            low_z0m[i, j]))
                    H_i[i, j] = H_ic[i, j] * ustar_i[i, j] / (
                        n.log((low_hr[i, j] - low_d0[i, j]) / low_z0h[i, j]) -
                        (-7.6 * n.log(low_hr[i, j] / L_i[i, j])))
                    stabDif = n.abs(H_target[i, j] - H_i[i, j])
                    H_target[i, j] = H_i[i, j]
                    stabLoops += 1

                slvUstar[i, j] = ustar_i[i, j]
                slvH[i, j] = H_i[i, j]
                slvL[i, j] = L_i[i, j]
                iterator[i, j] = stabLoops

                ## Grid stability functions
                #Cw = -7.6*n.log(low_hr[i,j]/L_i[i,j])
                #ra = n.log((low_hr[i,j]-low_d0[i,j])/low_z0h[i,j])
                #Bw = l.Bw(low_hr[i,j],L_i[i,j],low_z0h[i,j],low_z0m[i,j])
                ## Prepare the file to provide to l.stabFunc
                #vars[0] = low_ur[i,j]
                #vars[1] = low_hr[i,j]
                #vars[2] = low_d0[i,j]
                #vars[3] = low_z0m[i,j]
                #vars[4] = Bw
                #vars[5] = low_delta_a[i,j]
                #vars[6] = low_rho[i,j]
                #vars[7] = ra
                #vars[8] = l.tpot(cp,low_pg[i,j],p0,low_qg[i,j],rd,0.5*(low_ts[i,j]+low_tr[i,j]))
                #vars[9] = Le_i[i,j]
                #vars[10] = low_Rn[i,j] - low_G0[i,j]
                #vars.tofile('tmp000')
                #
                ##slvUstar[i,j],slvH[i,j],slvL[i,j] = fsolve(l.stabFunc,[ustar_i[i,j],H_i[i,j],L_i[i,j]],warning=False)
                #try:
                #    slvUstar[i,j],slvH[i,j],slvL[i,j] = broyden2(\
                #            l.stabFunc,[ustar_i[i,j],H_i[i,j],L_i[i,j]],iter=40,verbose=False)
                #except(OverflowError):
                #    slvUstar[i,j] = ustar_i[i,j]
                #    slvH[i,j] = H_i[i,j]
                #    slvL[i,j] = L_i[i,j]

                pbar.update(myproj.gridNb[1] * i + j)

        # add some stats
        l.getStats(slvUstar, 'slvUstar')
        l.getStats(slvH, 'slvH')
        l.getStats(slvL, 'slvL')
        l.getStats(iterator, 'iterator')
        pbar.finish()

    else:
        # 2010-02-05: TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP
        slvUstar = ustar_i
        slvH = H_i
        slvL = L_i

        # TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP

    # Low resolution at-limit parameters
    low_Ld = l.L(cp,
                 low_delta_a,
                 g,
                 k,
                 Le_i,
                 low_rho,
                 low_Rn,
                 low_G0,
                 slvUstar,
                 state='dry')
    low_Lw = l.L(cp,
                 low_delta_a,
                 g,
                 k,
                 Le_i,
                 low_rho,
                 low_Rn,
                 low_G0,
                 slvUstar,
                 state='wet')
    low_Cwd = l.Cw(low_hr, low_Ld, low_z0h, low_z0m)
    low_Cww = l.Cw(low_hr, low_Lw, low_z0h, low_z0m)
    low_Cw = l.Cw(low_hr, slvL, low_z0h, low_z0m)
    l.getStats(low_Ld, 'low_Ld')
    l.getStats(low_Lw, 'low_Lw')
    l.getStats(low_Cwd, 'low_Cwd')
    l.getStats(low_Cww, 'low_Cww')

    # Upscaling
    widgets = [
        ' Upscaling:               ',
        progressBar.Percentage(), ' ',
        progressBar.Bar(marker='-', left='[', right=']'), ' ', ' ', ' ', ' '
    ]
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=6).start()
    process_logger.info("Launching Upscaling")

    # TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP
    low_rho = l.nan2flt(low_rho, n.nansum(low_rho) / low_rho.size)
    slvUstar = l.nan2flt(slvUstar, n.nansum(slvUstar) / slvUstar.size)
    low_Cwd = l.nan2flt(low_Cwd, n.nansum(low_Cwd) / low_Cwd.size)
    low_Cww = l.nan2flt(low_Cww, n.nansum(low_Cww) / low_Cww.size)
    low_Cw = l.nan2flt(low_Cw, n.nansum(low_Cw) / low_Cw.size)
    slvL = l.nan2flt(slvL, n.nansum(slvL) / slvL.size)
    # TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP

    rho = congrid.congrid(low_rho, [myproj.imgDims[0], myproj.imgDims[1]],
                          method='spline',
                          minusone=True)
    pbar.update(1)
    ustar = congrid.congrid(slvUstar, [myproj.imgDims[0], myproj.imgDims[1]],
                            method='spline',
                            minusone=True)
    pbar.update(2)
    Cwd = congrid.congrid(low_Cwd, [myproj.imgDims[0], myproj.imgDims[1]],
                          method='spline',
                          minusone=True)
    pbar.update(3)
    Cww = congrid.congrid(low_Cww, [myproj.imgDims[0], myproj.imgDims[1]],
                          method='spline',
                          minusone=True)
    pbar.update(4)
    Cw = congrid.congrid(low_Cw, [myproj.imgDims[0], myproj.imgDims[1]],
                         method='spline',
                         minusone=True)
    pbar.update(5)
    L = congrid.congrid(slvL, [myproj.imgDims[0], myproj.imgDims[1]],
                        method='spline',
                        minusone=True)
    pbar.update(6)

    pbar.finish()

    # External resistances and gradients
    widgets = [
        ' Processing SEBI:         ',
        progressBar.Percentage(), ' ',
        progressBar.Bar(marker='-', left='[', right=']'), ' ', ' ', ' ', ' '
    ]
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=16).start()

    process_logger.info("Launching external resistances and gradients")

    d0 = l.z0m2d0(z0m)
    es = l.esat(es0, 0.5 * (ts + tr))
    e = l.eact(0.5 * (pg + pr), 0.5 * (qg + qr), rd, rv)
    delta = l.delta(es, 0.5 * (ts + tr))
    pbar.update(1)

    # dry limit
    red = l.re(Cwd, d0, hr, k, ustar, z0h)
    deltad = l.deltad(cp, red, rho, Rn, G0)
    l.getStats(Cwd, 'Cwd')
    l.getStats(red, 'red')
    l.getStats(deltad, 'deltad')
    pbar.update(2)

    # wet limit
    rew = l.re(Cww, d0, hr, k, ustar, z0h)
    deltaw = l.deltaw(cp, delta, e, es, gamma, rew, rho, Rn, G0)
    l.getStats(Cww, 'Cww')
    l.getStats(rew, 'rew')
    l.getStats(deltaw, 'deltaw')
    pbar.update(3)

    # actual conditions
    Cw = l.Cw(hr, L, z0h, z0m)
    pbar.update(4)

    re = l.re(Cw, d0, hr, k, ustar, z0h)
    pbar.update(5)

    deltaa = l.tpot(cp, pg, p0, qg, rd, ts) - l.tpot(cp, pr, p0, qr, rd, tr)
    pbar.update(6)

    SEBI = (deltaa / re - deltaw / rew) / (deltad / red - deltaw / rew)
    pbar.update(7)

    ef = 1 - SEBI
    pbar.update(8)

    search = n.where(ef > 1.)
    ef[search] = 1.
    pbar.update(9)

    LE = (Rn - G0) * ef
    H = Rn - LE - G0
    pbar.update(10)

    # relative evap (alternate)
    Hd = Rn - G0
    Hw = ((Rn - G0) - (rho * cp / rew) * es / gamma) / (1.0 + delta / gamma)
    Ha = rho * cp * deltaa / re
    pbar.update(11)

    search = n.where(Ha > Hd)
    Ha[search] = Hd[search]
    pbar.update(12)

    search = n.where(Ha < Hw)
    Ha[search] = Hw[search]
    pbar.update(13)

    Le_re = 1. - ((Ha - Hw) / (Hd - Hw))

    search = n.where(Hd <= Hw)
    Le_re[search] = 1.
    pbar.update(14)

    Le_fr = Le_re * (Rn - G0 - Hw) / (Rn - G0)

    search = n.where((Rn - G0) <= 0)
    Le_fr[search] = 1.
    pbar.update(15)

    if myproj.RnDaily != 'undef':
        ETdaily = Le_fr * (RnDaily * (1 - G0_Rn)) * 24 * 3600 / 2.45E6

    pbar.update(16)
    pbar.finish()

    widgets = [
        ' Output statistics        ',
        progressBar.Percentage(), ' ',
        progressBar.Bar(marker='-', left='[', right=']'), ' ', ' ', ' ', ' '
    ]
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=12).start()

    l.getStats(L, 'L')
    pbar.update(1)
    l.getStats(Cw, 'Cw')
    pbar.update(2)
    l.getStats(re, 're')
    pbar.update(3)
    l.getStats(deltaa, 'deltaa')
    pbar.update(4)
    l.getStats(SEBI, 'SEBI')
    pbar.update(5)
    l.getStats(ef, 'ef')
    pbar.update(6)
    l.getStats(Ha, 'Ha')
    pbar.update(7)
    l.getStats(Hd, 'Hd')
    pbar.update(8)
    l.getStats(Hw, 'Hw')
    pbar.update(9)
    l.getStats(Le_re, 'Le_re')
    pbar.update(10)
    l.getStats(Le_fr, 'Le_fr')
    pbar.update(11)

    # Stats for daily ET only if the variable exists (it is not calculated when no daily Rn is provided)
    try:
        l.getStats(ETdaily, 'ETdaily')
    except UnboundLocalError:
        pass

    pbar.update(12)
    pbar.finish()

    # Saving to files
    widgets = [
        ' Saving to files:         ',
        progressBar.Percentage(), ' ',
        progressBar.Bar(marker='-', left='[', right=']'), ' ', ' ', ' ', ' '
    ]
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=8).start()

    # Data layers:
    #ascIO.ascWritter(myproj.path+myproj.prefix+'ef',
    #                 ef,myproj.imgDims[1],myproj.imgDims[0],xllcorner=myproj.xllcorner,yllcorner=myproj.yllcorner,cellsize=myproj.cellsize,NODATA_value='nan')
    myproj.writeRaw(ef, 'ef')
    pbar.update(1)

    #ascIO.ascWritter(myproj.path+myproj.prefix+'kb',
    #                 kB_1,myproj.imgDims[1],myproj.imgDims[0],xllcorner=myproj.xllcorner,yllcorner=myproj.yllcorner,cellsize=myproj.cellsize,NODATA_value='nan')
    myproj.writeRaw(kB_1, 'kb')
    pbar.update(2)

    #ascIO.ascWritter(myproj.path+myproj.prefix+'LE',
    #                 LE,myproj.imgDims[1],myproj.imgDims[0],xllcorner=myproj.xllcorner,yllcorner=myproj.yllcorner,cellsize=myproj.cellsize,NODATA_value='nan')
    myproj.writeRaw(LE, 'LE')
    pbar.update(3)

    #ascIO.ascWritter(myproj.path+myproj.prefix+'H',
    #                 H,myproj.imgDims[1],myproj.imgDims[0],xllcorner=myproj.xllcorner,yllcorner=myproj.yllcorner,cellsize=myproj.cellsize,NODATA_value='nan')
    myproj.writeRaw(H, 'H')
    pbar.update(4)

    #ascIO.ascWritter(myproj.path+myproj.prefix+'Rn',
    #                 Rn,myproj.imgDims[1],myproj.imgDims[0],xllcorner=myproj.xllcorner,yllcorner=myproj.yllcorner,cellsize=myproj.cellsize,NODATA_value='nan')
    myproj.writeRaw(Rn, 'Rn')
    pbar.update(5)

    #ascIO.ascWritter(myproj.path+myproj.prefix+'G0',
    #                 G0,myproj.imgDims[1],myproj.imgDims[0],xllcorner=myproj.xllcorner,yllcorner=myproj.yllcorner,cellsize=myproj.cellsize,NODATA_value='nan')
    myproj.writeRaw(G0, 'G0')
    pbar.update(6)

    myproj.writeRaw(Le_fr, 'Le_fr')
    pbar.update(7)

    if myproj.RnDaily != 'undef':
        myproj.writeRaw(ETdaily, 'ETdaily')

    pbar.update(8)

    pbar.finish()

    print 'Done...'

    #except Exception, err:
    #log.exception('Error from process():')
    #myproj.logs += log.exception('Error from process():')
    #process_logger.info("ERROR: process aborted")

    #finally:
    print "Check sebi-cf.log for more information."
Code example #38
def main():

    #nnn = 512
    #boxsize = 4.0
    #dsx = boxsize/nnn
    #xi1 = np.linspace(-boxsize/2.0,boxsize/2.0-dsx,nnn)+0.5*dsx
    #xi2 = np.linspace(-boxsize/2.0,boxsize/2.0-dsx,nnn)+0.5*dsx
    #xi1,xi2 = np.meshgrid(xi1,xi2)


    #cc = find_critical_curve(mu)
    import sys
    filename = sys.argv[1]

    #sdens = pyfits.getdata("/Users/uranus/Desktop/hlsp_frontier_model_abell2744_cats_v1_kappa.fits")
    sdens = pyfits.getdata(filename)
    kappa0 = np.array(sdens,dtype='<d')
    kappa=congrid.congrid(kappa0,[768,768])
    nnn = np.shape(kappa)[0]
    boxsize = 4.0

    dsx = boxsize/nnn
    xi1 = np.linspace(-boxsize/2.0,boxsize/2.0-dsx,nnn)+0.5*dsx
    xi2 = np.linspace(-boxsize/2.0,boxsize/2.0-dsx,nnn)+0.5*dsx
    xi1,xi2 = np.meshgrid(xi1,xi2)

    pygame.init()
    FPS = 30
    fpsClock = pygame.time.Clock()

    screen = pygame.display.set_mode((nnn, nnn), 0, 32)

    pygame.display.set_caption("Gravitational Lensing Toy")

    mouse_cursor = pygame.Surface((nnn,nnn))

    #----------------------------------------------------

    base0 = np.zeros((nnn,nnn,3),'uint8')
    base1 = np.zeros((nnn,nnn,3),'uint8')
    base2 = np.zeros((nnn,nnn,3),'uint8')
    base3 = np.zeros((nnn,nnn,3),'uint8')
    base4 = np.zeros((nnn,nnn,3),'uint8')

    #----------------------------------------------------
    # lens parameters for main halo
    xlc1 = 0.0
    xlc2 = 0.0
    ql0 = 0.699999999999
    rc0 = 0.000000000001
    re0 = 1.0
    phi0 = 30.0
    lpar = np.asarray([xlc1, xlc2, re0, rc0, ql0, phi0])

    lpars_list = []
    lpars_list.append(lpar)
    #----------------------------------------------------
    # lens parameters for the secondary (satellite) halo
    xls1 = 0.7
    xls2 = 0.8
    qls = 0.999999999999
    rcs = 0.000000000001
    res = 0.0
    phis = 0.0
    lpars = np.asarray([xls1, xls2, res, rcs, qls, phis])
    lpars_list.append(lpars)

    ap0 = 1.0
    l_sig0 = 0.5
    glpar  = np.asarray([ap0,l_sig0,xlc1,xlc2,ql0,phi0])

    g_lens = lens_galaxies(xi1,xi2,glpar)

    base0[:,:,0] = g_lens*256
    base0[:,:,1] = g_lens*128
    base0[:,:,2] = g_lens*0
    x = 0
    y = 0
    gr_sig = dsx*2

    LeftButton=0

    #----------------------------------------------------

    ic = FPS/4.0

    i = 0

    zl = 0.1
    zs = 1.0
    p_mass = 1.0
    #----------------------------------------------------
    phi,phi1,phi2,td = lf.call_all_about_lensing(kappa,nnn,zl,zs,p_mass,dsx)

    phi2,phi1 = np.gradient(phi,dsx)

    phi12,phi11 = np.gradient(phi1,dsx)
    phi22,phi21 = np.gradient(phi2,dsx)
    #kappac = 0.5*(phi11+phi22)
    mu = 1.0/(1.0-(phi11+phi22)+phi11*phi22-phi12*phi21)
    critical = lf.call_find_critical_curve(mu)

    yi1 = xi1-phi1
    yi2 = xi2-phi2

    idx = critical > 0.0
    yif1 = yi1[idx]
    yif2 = yi2[idx]
    caustic = lf.call_find_caustic(yif1,yif2,nnn,nnn,boxsize)

    while True:
        i = i+1
        for event in pygame.event.get():
            if event.type == QUIT:
                exit()
            if event.type == MOUSEMOTION:

                if event.buttons[LeftButton]:
                    rel = event.rel
                    x += rel[0]
                    y += rel[1]

            #----------------------------------------------
            if event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 4:
                    gr_sig -= 0.1*dsx
                    if gr_sig <0.01:
                        gr_sig = 0.01

                elif event.button == 5:
                    gr_sig += 0.01*dsx
                    if gr_sig >0.4:
                        gr_sig = 0.4

        #----------------------------------------------
        #parameters of source galaxies.
        #----------------------------------------------
        g_amp = 1.0         # peak brightness value
        g_sig = gr_sig          # Gaussian "sigma" (i.e., size)
        g_xcen = x*2.0/nnn  # x position of center
        g_ycen = y*2.0/nnn  # y position of center
        g_axrat = 1.0       # minor-to-major axis ratio
        g_pa = 0.0          # major-axis position angle (degrees) c.c.w. from y axis
        gpar = np.asarray([g_amp, g_sig, g_ycen, g_xcen, g_axrat, g_pa])
        #----------------------------------------------
        g_image,g_lensimage = lensed_images(xi1,xi2,yi1,yi2,gpar)

        base1[:,:,0] = g_image*256
        base1[:,:,1] = g_image*256
        base1[:,:,2] = g_image*256

        ##sktd = (td-td.min())/(td.max()-td.min())*ic/2
        #sktd = (td)/(1.5)*ic/2
        #itmp = (i+FPS/8-sktd)%(FPS)
        #ratio = (ic-itmp)*itmp/(ic/2.0)**2.0
        #ratio[ratio<0]=0.0

        base2[:,:,0] = g_lensimage*102#*(1.0+ratio)/2
        base2[:,:,1] = g_lensimage*178#*(1.0+ratio)/2
        base2[:,:,2] = g_lensimage*256#*(1.0+ratio)/2

        base3[:,:,0] = critical*255
        base3[:,:,1] = critical*0
        base3[:,:,2] = critical*0

        base4[:,:,0] = caustic.T*0
        base4[:,:,1] = caustic.T*255
        base4[:,:,2] = caustic.T*0

        base = base1+base2+base3+base4

        #idx1 = wf>=base0
        #idx2 = wf<base0

        #base = base0*0
        #base[idx1] = wf[idx1]
        #base[idx2] = base0[idx2]


        #base = wf*base0+(base1+base2)
        pygame.surfarray.blit_array(mouse_cursor,base)


        screen.blit(mouse_cursor, (0, 0))

        #font=pygame.font.SysFont(None,30)
        #text = font.render("( "+str(x)+", "+str(-y)+" )", True, (255, 255, 255))
        #screen.blit(text,(10, 10))
        pygame.display.update()
        fpsClock.tick(FPS)
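The magnification map above comes from second derivatives of the lensing potential, mu = 1/det(A) with A = I - d2phi; a self-contained numpy sketch of that step on a toy circular potential (grid size and softening are illustrative only):

import numpy as np

nnn, boxsize = 256, 4.0
dsx = boxsize / nnn
x = np.linspace(-boxsize / 2.0, boxsize / 2.0 - dsx, nnn) + 0.5 * dsx
xi1, xi2 = np.meshgrid(x, x)

phi = np.sqrt(xi1**2 + xi2**2 + 0.01)  # toy softened isothermal-like potential
phi2, phi1 = np.gradient(phi, dsx)     # first derivatives (deflection components)
phi12, phi11 = np.gradient(phi1, dsx)
phi22, phi21 = np.gradient(phi2, dsx)

# inverse magnification: det(A) = (1 - phi11)(1 - phi22) - phi12*phi21
mu = 1.0 / (1.0 - (phi11 + phi22) + phi11 * phi22 - phi12 * phi21)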
Code example #39
def single_run_test(ind, ysc1, ysc2, q, vd, pha, zl, zs):
    dsx_sdss = 0.396  # pixel size of SDSS detector.
    R = 3.0000  #
    #zl = 0.2     #zl is the redshift of the lens galaxy.
    #zs = 1.0
    #vd = 520    #Velocity Dispersion.
    nnn = 128  #Image dimension
    bsz = dsx_sdss * nnn  # arcsecs
    dsx = bsz / nnn  # pixel size of SDSS detector.
    nstd = 59  # noise variance; sqrt(nstd) is used below as the Gaussian sigma

    xx01 = np.linspace(-bsz / 2.0, bsz / 2.0, nnn) + 0.5 * dsx
    xx02 = np.linspace(-bsz / 2.0, bsz / 2.0, nnn) + 0.5 * dsx
    xi2, xi1 = np.meshgrid(xx01, xx02)
    #----------------------------------------------------------------------
    #ysc1 = 0.2
    #ysc2 = 0.5
    dsi = 0.03
    g_source = pyfits.getdata("./439.0_149.482739_1.889989_processed.fits")
    g_source = np.array(g_source, dtype="<d") * 10.0
    g_source[g_source <= 0.0001] = 1e-6
    #print np.sum(g_source)
    #print np.max(g_source)
    #pl.figure()
    #pl.contourf(g_source)
    #pl.colorbar()
    #g_source = p2p.cosccd2mag(g_source)
    ##g_source = p2p.mag2sdssccd(g_source)
    ##print np.max(g_source*13*13*52.0)
    #pl.figure()
    #pl.contourf(g_source)
    #pl.colorbar()
    #----------------------------------------------------------------------
    xc1 = 0.0  #x coordinate of the center of lens (in units of Einstein radius).
    xc2 = 0.0  #y coordinate of the center of lens (in units of Einstein radius).
    #q   = 0.7       #Ellipticity of lens.
    rc = 0.0  #Core size of lens (in units of Einstein radius).
    re = re_sv(vd, zl, zs)  #Einstein radius of lens.
    #pha = 45.0      #Orientation of lens.
    lpar = np.asarray([xc1, xc2, q, rc, re, pha])
    #----------------------------------------------------------------------
    ai1, ai2, mua = lens_equation_sie(xi1, xi2, lpar)

    yi1 = xi1 - ai1
    yi2 = xi2 - ai2

    g_limage = lv4.call_ray_tracing(g_source, yi1, yi2, ysc1, ysc2, dsi)
    g_limage[g_limage <= 0.0001] = 1e-6
    g_limage = p2p.cosccd2mag(g_limage)
    g_limage = p2p.mag2sdssccd(g_limage)

    #pl.figure()
    #pl.imshow((g_limage),interpolation='lanczos',cmap=cm.gray)
    #pl.colorbar()

    #-------------------------------------------------------------
    # The magnitudes still need to be calibrated
    dA = Planck13.comoving_distance(zl).value * 1000. / (1 + zl)
    Re = dA * np.sin(R * np.pi / 180. / 3600.)
    counts = Brightness(Re, vd)
    vpar = np.asarray([counts, R, xc1, xc2, q, pha])
    #g_lens = deVaucouleurs(xi1,xi2,xc1,xc2,counts,R,1.0-q,pha)
    g_lens = de_vaucouleurs_2d(xi1, xi2, vpar)

    #pl.figure()
    #pl.imshow((g_lens),interpolation='nearest',cmap=cm.gray)
    #pl.colorbar()

    g_clean_ccd = g_lens + g_limage

    #pl.figure()
    #pl.imshow((g_clean_ccd),interpolation='nearest',cmap=cm.gray)
    #pl.colorbar()

    g_clean_ccd = congrid.congrid(g_clean_ccd, [128, 128])

    #-------------------------------------------------------------
    file_psf = "../PSF_and_noise/sdsspsf.fits"
    g_psf = pyfits.getdata(file_psf) - 1000.0
    g_psf = g_psf / np.sum(g_psf)

    #new_shape=[0,0]
    #new_shape[0]=np.shape(g_psf)[0]*dsx_sdss/dsx
    #new_shape[1]=np.shape(g_psf)[1]*dsx_sdss/dsx
    #g_psf = rebin_psf(g_psf,new_shape)

    g_images_psf = ss.fftconvolve(g_clean_ccd, g_psf, mode="same")
    #g_images_psf = ss.convolve(g_clean_ccd,g_psf,mode="same")
    #g_images_psf = g_clean_ccd

    #pl.figure()
    #pl.imshow((g_psf),interpolation='nearest',cmap=cm.gray)
    #pl.colorbar()

    #-------------------------------------------------------------
    # The magnitudes still need to be calibrated
    #g_noise = noise_map(nnn,nnn,np.sqrt(nstd),"Gaussian")
    g_noise = noise_map(128, 128, np.sqrt(nstd), "Gaussian")
    g_final = g_images_psf + g_noise

    #pl.figure()
    #pl.imshow((g_final.T),interpolation='nearest',cmap=cm.gray)
    #pl.colorbar()

    g_final_rebin = congrid.congrid(g_final, [128, 128])

    #pl.figure()
    #pl.imshow((g_final_rebin.T),interpolation='nearest',cmap=cm.gray)
    #pl.colorbar()

    #-------------------------------------------------------------

    output_filename = "../output_fits/" + str(ind) + ".fits"
    pyfits.writeto(output_filename, g_final_rebin, clobber=True)

    pl.show()

    return 0
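An example call for single_run_test above; the values mirror the commented-out defaults in the function body (source offset 0.2/0.5, axis ratio 0.7, velocity dispersion 520 km/s, position angle 45 degrees, lens at z=0.2, source at z=1.0) and are illustrative only:

# arguments: ind, ysc1, ysc2, q, vd, pha, zl, zs
single_run_test(0, 0.2, 0.5, 0.7, 520.0, 45.0, 0.2, 1.0)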
Code example #40
    def find_image(self,mstar,redshift,sfr,seed,xpix,ypix,hmag):
        sim_simname = self.simdata['col1']
        sim_expfact = self.simdata['col2']
        sim_sfr = self.simdata['col54']
        sim_mstar = self.simdata['col56']
        sim_redshift = 1.0/sim_expfact - 1.0
        metalmass = self.simdata['col53']
        sim_res_pc = self.simdata['col62']
        sim_string = self.simdata['col60']

        simage_loc = '/Users/gsnyder/Documents/Projects/HydroART_Morphology/Hyades_Data/images_rsync/'

        self.mstar_list.append(mstar)
        self.redshift_list.append(redshift)

        adjust_size=False

        print " "
        print "Searching for simulation with mstar,z,seed : ", mstar, redshift, seed
        wide_i = np.where(np.logical_and(np.logical_and(np.abs(sim_redshift-redshift)<0.3,np.abs(np.log10(sim_mstar)-mstar)<0.1),sim_sfr > -1))[0]
        Nwi = wide_i.shape[0]
        if Nwi==0:
            wide_i = np.where(np.logical_and(np.logical_and(np.abs(sim_redshift-redshift)<0.5,np.abs(np.log10(sim_mstar)-mstar)<0.4),sim_sfr > -1))[0]
            Nwi = wide_i.shape[0]
        if Nwi==0 and (mstar < 7.1):
            print "  Can't find good sim, adjusting image parameters to get low mass things "
            wide_i = np.where(np.abs(sim_redshift-redshift)<0.3)[0]  #wide_i is a z range
            llmi = np.argmin(np.log10(sim_mstar[wide_i]))  #the lowest mass in this z range
            wlmi = np.where(np.abs(np.log10(sim_mstar[wide_i]) - np.log10(sim_mstar[wide_i[llmi]])) < 0.3)[0] #search within 0.3 dex of lowest available sims
            print "   ", wide_i.shape, llmi, wlmi.shape
            wide_i = wide_i[wlmi]
            Nwi = wide_i.shape[0]
            print "   ", Nwi
            adjust_size=True

        #assert(wide_i.shape[0] > 0)

        if Nwi==0:
            print "    Could not find roughly appropriate simulation for mstar,z: ", mstar, redshift
            print " "
            self.image_files.append('')
            return 0#np.zeros(shape=(600,600)), -1

        print "    Found N candidates: ", wide_i.shape


        np.random.seed(seed)

        #choose random example and camera
        rps = np.random.random_integers(0,Nwi-1,1)[0]
        cn = str(np.random.random_integers(5,8,1)[0])

        prefix = os.path.basename(sim_string[wide_i[rps]])
        sim_realmstar = np.log10(sim_mstar[wide_i[rps]]) #we picked a sim with this log mstar
        mstar_factor = sim_realmstar - mstar  

        rad_factor = 1.0
        lum_factor = 1.0

        if adjust_size==True:
            rad_factor = 10.0**(mstar_factor*0.5) #must **shrink** images by this factor, total flux by mstar factor
            lum_factor = 10.0**(mstar_factor)
    

        print ">>>FACTORS<<<   ", prefix, sim_realmstar, mstar_factor, rad_factor, lum_factor

        im_folder = simage_loc + prefix +'_skipir/images'

        im_file = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.filter_string+'_simulation.fits')
        cn_file = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.filter_string+'_candelized_noise.fits')
        req1 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[0]+'_simulation.fits')
        req2 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[1]+'_simulation.fits')
        req3 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[2]+'_simulation.fits')
        req4 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[3]+'_simulation.fits')
        req5 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[4]+'_simulation.fits')


        ## Actually, probably want to keep trying some possible galaxies/files...
        is_file = os.path.lexists(im_file) and os.path.lexists(cn_file) and os.path.lexists(req1) and os.path.lexists(req2) and os.path.lexists(req3) and os.path.lexists(req4) and os.path.lexists(req5)
        #is_file = os.path.lexists(im_file) and os.path.lexists(cn_file) #and os.path.lexists(req1) and os.path.lexists(req2) and os.path.lexists(req3)

        if is_file==False:
            print "    Could not find appropriate files: ", im_file, cn_file
            print " "
            self.image_files.append('')
            return 0 #np.zeros(shape=(600,600)), -1


        self.image_files.append(im_file)

        cn_header = pyfits.open(cn_file)[0].header

        im_hdu = pyfits.open(im_file)[0]
        scalesim = cn_header.get('SCALESIM') #pc/pix
        Ps = cosmocalc.cosmocalc(redshift)['PS_kpc'] #kpc/arcsec
        print "    Simulation pixel size at z: ", scalesim
        print "    Plate scale for z: ", Ps
        print "    Desired Kpc/pix at z: ", Ps*self.Pix_arcsec
        sunrise_image = np.float32(im_hdu.data)  #W/m/m^2/Sr

        Sim_Npix = sunrise_image.shape[0]
        New_Npix = int( Sim_Npix*(scalesim/(1000.0*Ps*self.Pix_arcsec))/rad_factor )  #rad_factor reduces number of pixels (total size) desired
        if New_Npix==0:
            New_Npix=1

        print "    New galaxy pixel count: ", New_Npix
        rebinned_image = congrid.congrid(sunrise_image,(New_Npix,New_Npix)) #/lum_factor  #lum_factor shrinks surface brightness by mass factor... but we're shrinking size first, so effective total flux already adjusted by this; may need to ^^ SB instead???  or fix size adjust SB?
        print "    New galaxy image shape: ", rebinned_image.shape
        print "    New galaxy image max: ", np.max(rebinned_image)
        #finite_bool = np.isfinite(rebinned_image)
        #num_infinite = np.where(finite_bool==False)[0].shape[0]
        #print "    Number of INF pixels: ", num_infinite, prefix
        #self.N_inf.append(num_infinite)

        if xpix==-1:
            xpix = int( (self.Npix-1)*np.random.rand()) #np.random.random_integers(0,self.Npix-1,1)[0]
            ypix = int( (self.Npix-1)*np.random.rand()) #np.random.random_integers(0,self.Npix-1,1)[0]

        self.x_array.append(xpix)
        self.y_array.append(ypix)

        x1_choice = np.asarray([int(xpix-float(New_Npix)/2.0),0])
        x1i = np.argmax(x1_choice)
        x1 = x1_choice[x1i]
        diff=0
        if x1==0:
            diff = x1_choice[1]-x1_choice[0]
        x2_choice = np.asarray([x1 + New_Npix - diff,self.Npix])
        x2i = np.argmin(x2_choice)
        x2 = int(x2_choice[x2i])
        x1sim = abs(np.min(x1_choice))
        x2sim = min(New_Npix,self.Npix-x1)

        y1_choice = np.asarray([int(ypix-float(New_Npix)/2.0),0])
        y1i = np.argmax(y1_choice)
        y1 = y1_choice[y1i]
        diff=0
        if y1==0:
            diff = y1_choice[1]-y1_choice[0]
        y2_choice = np.asarray([y1 + New_Npix - diff,self.Npix])
        y2i = np.argmin(y2_choice)
        y2 = int(y2_choice[y2i])
        y1sim = abs(np.min(y1_choice))
        y2sim = min(New_Npix,self.Npix-y1)


        print "    Placing new image at x,y in x1:x2, y1:y2  from xsim,ysim, ", xpix, ypix, x1,x2,y1,y2, x1sim, x2sim, y1sim, y2sim
        #image_slice = np.zeros_like(self.blank_array)
        print "    done creating image slice"
        #bool_slice = np.int32( np.zeros(shape=(self.Npix,self.Npix)))

        image_cutout = rebinned_image[x1sim:x2sim,y1sim:y2sim]
        print "    New image shape: ", image_cutout.shape



        pixel_Sr = (self.Pix_arcsec**2)/sq_arcsec_per_sr  #pixel area in steradians:  Sr/pixel


        to_nJy_per_Sr = (1.0e9)*(1.0e14)*(self.eff_lambda_microns**2)/c   #((pixscale/206265.0)^2)*
        #sigma_nJy = 0.3*(2.0**(-0.5))*((1.0e9)*(3631.0/5.0)*10.0**(-0.4*self.maglim))*self.Pix_arcsec*(3.0*self.FWHM_arcsec)
        to_Jy_per_pix = to_nJy_per_Sr*(1.0e-9)*pixel_Sr
        

        #b = b*(to_nJy_per_Sr_b*fluxscale*bluefact)   # + np.random.randn(Npix,Npix)*sigma_nJy/pixel_Sr
        image_cutout = image_cutout*to_Jy_per_pix #image_cutout*to_nJy_per_Sr


        #image_slice[x1:x2,y1:y2] = image_cutout*1.0
        #bool_slice[x1:x2,y1:y2]=1
        print "    done slicing"

        #self.final_array += image_slice
        self.final_array[x1:x2,y1:y2] += image_cutout

        print "    done adding image to final array"

        #finite_bool = np.isfinite(self.final_array)
        #num_infinite = np.where(finite_bool==False)[0].shape[0]

        #print "    Final array INF count and max:", num_infinite, np.max(self.final_array)

        print " "
        return 1 #sunrise_image,scalesim
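The unit conversion above maps the Sunrise surface brightness (W/m/m^2/sr) to Jy per detector pixel; a stand-alone numeric sketch of those factors, assuming an illustrative 0.06''/pixel scale and 1.6 micron effective wavelength:

import numpy as np

c = 2.998e8                                     # m/s
sq_arcsec_per_sr = (180.0 / np.pi * 3600.0)**2  # ~4.25e10 arcsec^2 per steradian
Pix_arcsec, eff_lambda_microns = 0.06, 1.6      # assumed detector parameters

pixel_Sr = Pix_arcsec**2 / sq_arcsec_per_sr                  # steradians per pixel
to_nJy_per_Sr = 1.0e9 * 1.0e14 * eff_lambda_microns**2 / c   # W/m/m^2/sr -> nJy/sr
to_Jy_per_pix = to_nJy_per_Sr * 1.0e-9 * pixel_Sr            # nJy/sr -> Jy/pixel
print(to_Jy_per_pix)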
Code example #41
def make_niriss_trace(bbfile='grism.fits',
                      outname='grismtrace',
                      ybox=None,
                      xbox=None,
                      noisemaxfact=0.05,
                      alph=1.0,
                      Q=1.0,
                      rotate=False,
                      resize=None):

    go = pyfits.open(bbfile)

    redshift = go['BROADBAND'].header['REDSHIFT']
    niriss_pix_as = 0.065
    f200_nm_per_pix = 9.5 / 2.0

    min_lam = 1.750
    max_lam = 2.220

    hdu = go['CAMERA0-BROADBAND-NONSCATTER']
    cube = hdu.data  #L_lambda units!
    #cube=np.flipud(cube) ; print(cube.shape)

    fil = go['FILTERS']
    lamb = fil.data['lambda_eff'] * 1.0e6
    flux = fil.data['L_lambda_eff_nonscatter0']

    g_i = (lamb >= min_lam) & (lamb <= max_lam)

    arcsec_per_kpc = gsu.illcos.arcsec_per_kpc_proper(redshift)
    kpc_per_arcsec = 1.0 / arcsec_per_kpc.value

    im_kpc = hdu.header['CD1_1']
    print('pix size kpc: ', im_kpc)

    niriss_kpc_per_pix = niriss_pix_as * kpc_per_arcsec
    total_width_pix = (1.0e3) * (max_lam - min_lam) / f200_nm_per_pix
    total_width_kpc = total_width_pix * niriss_kpc_per_pix

    total_width_impix = int(total_width_kpc / im_kpc)

    delta_lam = (max_lam - min_lam) / total_width_impix  #microns/pix

    psf_arcsec = 0.13  #????
    psf_kpc = psf_arcsec * kpc_per_arcsec
    psf_impix = psf_kpc / im_kpc

    print(delta_lam)

    imw_cross = 33  # keep integral: used below in array shapes and slice offsets
    imw_disp = total_width_impix + imw_cross
    Np = cube.shape[-1]
    mid = np.int64(Np / 2)
    delt = np.int64(imw_cross / 2)
    output_image = np.zeros((imw_disp, imw_cross), dtype='float')
    #r = r[mid-delt:mid+delt,mid-delt:mid+delt]
    small_cube = cube[g_i, mid - delt:mid + delt, mid - delt:mid + delt]

    for i, l in enumerate(lamb[g_i]):
        di = int((l - min_lam) / delta_lam)
        this_cube = small_cube[i, :, :] * l**2  #convert to Janskies-like
        if rotate is True:
            this_cube = np.rot90(this_cube)

        #if i==17:
        #    this_cube[30,30] = 1.0e3
        #print(i,l/(1.0+redshift),int(di),np.sum(this_cube),this_cube.shape,output_image.shape,output_image[di:di+imw_cross,:].shape)
        output_image[di:di +
                     imw_cross, :] = output_image[di:di +
                                                  imw_cross, :] + this_cube

    output_image = scipy.ndimage.gaussian_filter(
        output_image, sigma=[4.0, psf_impix / 2.355])

    new_thing = np.transpose(np.flipud(output_image))
    if resize is not None:
        new_thing = congrid.congrid(new_thing, resize)

    nr = noisemaxfact * np.max(new_thing) * random.randn(
        new_thing.shape[0], new_thing.shape[1])

    #thing=make_color_image.make_interactive(new_thing+nr,new_thing+nr,new_thing+nr,alph=alph,Q=Q)
    #thing=1.0-np.fliplr(np.transpose(thing,axes=[1,0,2]))
    thing = np.fliplr(new_thing + nr)

    f = plt.figure(figsize=(25, 6))
    f.subplots_adjust(wspace=0.0,
                      hspace=0.0,
                      top=0.99,
                      right=0.99,
                      left=0,
                      bottom=0)
    axi = f.add_subplot(1, 1, 1)
    axi.imshow((thing),
               aspect='auto',
               origin='lower',
               interpolation='nearest',
               cmap='Greys_r')
    f.savefig(outname + '.png', dpi=500)
    plt.close(f)

    #[ybox[0]:ybox[1],xbox[0]:xbox[1]]
    #[50:125,120:820,:]

    new_hdu = pyfits.PrimaryHDU(thing)
    new_list = pyfits.HDUList([new_hdu])
    new_list.writeto(outname + '.fits', overwrite=True)

    f, l, f_im, l_im = get_integrated_spectrum(bbfile)

    return thing, new_thing, f, l, f_im, l_im
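An example call for make_niriss_trace above, with a hypothetical broadband file name; rotate and resize are optional and shown only to illustrate the signature:

trace, raw, f, l, f_im, l_im = make_niriss_trace(bbfile='grism.fits',
                                                 outname='grismtrace',
                                                 rotate=True,
                                                 resize=(75, 700))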
Code example #42
File: myDetector.py  Project: comptech/atrex
    def testCalibration_esd (self, myim) :
        esd = np.zeros(9, dtype=np.float32)
        en = A_to_kev (self.wavelength)
        cut = 30.
        dist_tol = 1.8
        IovS = float(self.topLevel.ui.det_snrLE.text())
        start_dist = self.dist - self.dist * 0.5
        end_dist = self.dist + self.dist *.5
        im = myim.imArray.astype(np.int64)
        imarr = cgd.congrid (im, [500, 500], method='nearest',minusone=True).astype(np.int64)
        zarr = np.zeros ((500,500),dtype=np.uint8)

        bg = self.local_background (imarr)
        imarr.tofile ("/home/harold/imarr.dat")
        # only for debug

        hpf = imarr / bg.astype(np.int64)

        self.ff = np.where ((hpf > IovS) & (imarr>20.))

        nn = len(self.ff[0])
        print 'number of pixels meeting the peak condition is %d'%(nn)


        ### equal proximity coarse search
        # in 5 pixel steps
        h = np.zeros((100,100), dtype=np.float32)
        for i in range (100) :
            print i
            for j in range (100) :
                dist = self.compdist (self.ff, [i*5,j*5])
                mx = int(dist.max())+1
                mn = int(dist.min())

                #nbins = int(dist.max() - dist.min()+1)
                histo,edges = np.histogram(dist, range=[mn,mx],bins=(mx-mn))
                h[i,j] = np.max (histo)
        maxsub = np.argmax(h)
        maxrow = maxsub / 100
        maxcol = maxsub - maxrow * 100
        print maxsub, maxrow, maxcol

        self.eqprox[0]=maxrow/100.
        self.eqprox[1]=maxcol/100.

        # is this even being used
        dist = self.compdist (self.ff, [maxrow, maxcol])
        nbins = int (dist.max()-dist.min()+1)
        h = np.histogram(dist, bins=nbins)

        ### equal proximity search, fine (in 500 space)
        h = np.zeros((11,11),dtype=np.float32)
        for i in range (-5,6) :
            for j in range(-5,6) :
                # note in gse_ada , there is a 5 + in the index calculation
                dist = self.compdist(self.ff, [5.*maxrow+i, 5.*maxcol+j])
                nbins = int(dist.max() - dist.min() +1)
                mx = int(dist.max()+1)
                mn = int(dist.min())
                histo, edges = np.histogram(dist, range=[mn,mx],bins=mx-mn)
                h[i+5][j+5]=np.max(histo)
        maxsub = np.argmax (h)

        # then back in 100 space
        xy = self.xy_from_ind (11,11,maxsub)
        maxrow = maxrow + (xy[0] - 5)/5.
        maxcol = maxcol + (xy[1] - 5) / 5.
        xy0 = [maxrow, maxcol]
        self.eqproxfine[0]=maxrow/100.
        self.eqproxfine[1]=maxcol/100.

        self.beamx = xy0[0]/100. * self.nopixx
        self.beamy = xy0[1]/100. * self.nopixy
        xy0[0]*=5.
        xy0[1]*=5.
        dist = self.compdist (self.ff, xy0)
        mn = int(dist.min())
        mx = int(dist.max()+1)
        nbins = mx-mn
        h,edges = np.histogram (dist, range=[mn,mx],bins=nbins)
        h1 = np.copy(h)
        #h = h[0]
        numH = len(h)

        while (np.max(h1)>cut) :
            i = np.argmax (h1)
            m = np.max(h1)
            h1[i] = 0.
            if (i > 0 and i < numH-1) :
                j = i - 1
                while (j >= 0) :
                    if (h1[j] > cut/2.) :
                        h[i] += h[j]
                        h[j] =0.
                        h1[j]=0.
                    else :
                        j = 0
                    j=j-1

                j=i+1
                while (j <= numH-1) :
                    if (h1[j]>cut/2.) :
                        h[i]+=h[j]
                        h[j]=0
                        h1[j]=0
                    else :
                        j=numH-1
                    j=j+1
        # NOTE - should be cut not cut/2.
        fh = np.where (h > cut)[0]
        numB = len(fh)
        # number of different rings with sufficient number of points
        rings = np.zeros(nn, dtype=np.int64)
        for i in range (nn) :
            c = np.absolute (np.subtract(dist[i],edges[fh]))
            ri = np.min (c)
            kk = np.argmin (c)
            if (ri < dist_tol) :
                rings[i] = kk
            else :
                rings[i] = -1


        nr = np.zeros(numB, dtype=np.int64)
        ds = np.zeros (numB, dtype=np.float32)
        for k in range (numB) :
            r = np.where(rings == k)[0]
            nr[k]= len(r)
        print "Classes Done ...\r\n"
        m = np.max(nr)
        print 'Max of nr is : %d'%(m)

        # x,y coords of points in ring
        self.rgx = np.zeros((numB,m), dtype=np.float32)
        self.rgy = np.zeros((numB,m), dtype=np.float32)
        self.rgN = np.zeros (numB,dtype=np.uint16)

        self.numRings = numB
        for k in range (numB) :
            r = np.where (rings == k)[0]
            ds[k] = np.mean(dist[r])*self.nopixx/500. * self.psizex
            print 'ds of %d is : %f'%(k, ds[k])
            # xya=self.xy_from_indArr (500,500,self.ff[r])
            self.rgy[k,0:nr[k]] = self.ff[0][r]
            self.rgx[k,0:nr[k]] = self.ff[1][r]
            self.rgN[k] = len(r)

        step = (end_dist - start_dist) / 1000.
        ddists = np.zeros((2,1000), dtype = np.float32)
        for i in range (1000) :
            thisstep = start_dist + i * step
            ddists[0][i] = thisstep
            ddists[1][i] = self.sum_closest_refs (ds, thisstep)
        aa=np.argmin (ddists[1][:])
        dst = ddists[0][aa]

        print 'Coarse estimated detector distance : %f'%(dst)


        # fine tune detector distance
        start_dist = dst- step*5.
        end_dist = dst + step * 5.
        step = (end_dist-start_dist) / 1000.
        for i in range (1000):
            ddists[0][i] = start_dist + i * step
            ddists[1][i] = self.sum_closest_refs (ds, ddists[0][i])
        aa=np.argmin (ddists[1][:])
        dst = ddists[0][aa]
        print 'Refined estimated detector distance : %f'%(dst)


        # use only peaks which match standard and are unique
        cr = np.zeros ((2,numB), dtype=np.float32)
        omissions = np.zeros (numB, dtype=np.int32)
        for i in range (numB) :
            cr[0][i]= self.closest_ref (ds[i], dst)
            cr[1][i]= self.closest_ref_d(ds[i], dst)
            if (cr[0][i] > .2) :
                omissions[i] = 1

        rrr=0
        X = self.rgx[0][0:nr[0]]*self.nopixx/500.
        Y = self.rgy[0][0:nr[0]]*self.nopixx/500.
        Z = np.zeros(nr[0],dtype=np.float32)
        crval = cr[1][0]
        dspcc = np.ones(nr[0]) * crval
        nus = np.zeros(nr[0],dtype=np.float32)
        tths = np.zeros(nr[0],dtype=np.float32)
        tthval = tth_from_en_and_d (en, crval)
        tths = np.ones (nr[0])*tthval

        for rrr in range (1, numB) :
            if (omissions[rrr]==0) :
                pos = len (X)
                X=np.concatenate ((X, self.rgx[rrr][0:nr[rrr]]))
                Y=np.concatenate ((Y, self.rgy[rrr][0:nr[rrr]]))
                crval = cr[1][rrr]
                newcr = np.ones(nr[rrr],dtype=np.float32)*crval
                dspcc = np.concatenate ((dspcc, newcr))
                tt = np.zeros(nr[rrr], dtype=np.float32)
                nu = np.zeros(nr[rrr], dtype=np.float32)
                tths = np.concatenate ((tths, tt))
                nus=np.concatenate ((nus, nu))
                newlen = pos+nr[rrr]
                print 'RRR is ', rrr
                for i in range (pos, pos+nr[rrr]) :
                    print i

                    tths [i] = tth_from_en_and_d (en, dspcc[i])
                    nus[i]=self.get_nu_from_pix([X[i],Y[i]])

        p = np.zeros(6,dtype=np.float32)
        p[0] = self.dist
        p[1] = self.beamx
        p[2] = self.beamy
        print 'Starting calibration refinement'

        pars = {'value':0.,'fixed':0,'limited':[0,0],'limits':[0.,0]}
        parinfo = []
        for i in range (6) :
            parinfo.append (pars.copy())

        NNN = len(X)
        arr1 = self.nopixx
        arr2 = self.nopixy
        imdat = myim.imArray
        for i in range (NNN) :
            if not (X[i]<5 or X[i] > (arr1 - 6) or Y[i] < 5 or Y[i] > (arr2-6))  :
                if (imdat[X[i],Y[i]]< imdat[X[i]-1,Y[i]]):
                    X[i] = X[i]-1
                if (imdat[X[i],Y[i]]< imdat[X[i]+1,Y[i]]):
                    X[i] = X[i]+1
                if (imdat[X[i],Y[i]]< imdat[X[i],Y[i]-1]):
                    Y[i] = Y[i]-1
                if (imdat[X[i],Y[i]]< imdat[X[i],Y[i]+1]):
                    Y[i] = Y[i]+1

                if (imdat[X[i],Y[i]]< imdat[X[i]-1,Y[i]]):
                    X[i] = X[i]-1
                if (imdat[X[i],Y[i]]< imdat[X[i]+1,Y[i]]):
                    X[i] = X[i]+1
                if (imdat[X[i],Y[i]]< imdat[X[i],Y[i]-1]):
                    Y[i] = Y[i]-1
                if (imdat[X[i],Y[i]]< imdat[X[i],Y[i]+1]):
                    Y[i] = Y[i]+1

                if (imdat[X[i],Y[i]]< imdat[X[i]-1,Y[i]]):
                    X[i] = X[i]-1
                if (imdat[X[i],Y[i]]< imdat[X[i]+1,Y[i]]):
                    X[i] = X[i]+1
                if (imdat[X[i],Y[i]]< imdat[X[i],Y[i]-1]):
                    Y[i] = Y[i]-1
                if (imdat[X[i],Y[i]]< imdat[X[i],Y[i]+1]):
                    Y[i] = Y[i]+1

            if not (X[i]<5 or X[i] > (arr1 - 6) or Y[i] < 5 or Y[i] > (arr2-6))  :
                xxx = np.arange(-5,6,1)
                a=np.zeros(3, dtype=np.float32)
                nus[i]= self.get_nu_from_pix((X[i],Y[i]))
                if (nus[i]>45. and nus[i]<135.):
                    ix = int (round(X[i],0))
                    iy = int (round(Y[i],0))
                    sec = myim.imArray[iy,ix-5:ix+6]
                    a[0] = np.max(sec)-np.min(sec)
                    a[1]= 0
                    a[2] =2
                    #res = gaussfit (sec, xxx, a, nterms=4)
                    res =  gaussfit1d(xxx, sec, a)
                    #print res
                else :
                    ix = int (round(X[i],0))
                    iy = int (round(Y[i],0))
                    sec = myim.imArray[iy-5:iy+6,ix]

                    #res = gaussfit (sec, xxx, a, nterms=4)
                    #res =  gauss_lsq(xxx, sec)
                    a[0] = np.max(sec)-np.min(sec)
                    a[1]= 0
                    a[2] =2
                    res =  gaussfit1d(xxx, sec, a)
                    #print res
        er = 1./dspcc
        Z = np.zeros (NNN, dtype=np.float32)
        for i in range(6):
            parinfo[i]['value'] = p[i]  # seed the solver with the starting parameters (the original line here was broken)


        self.calPeaks.emit()
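The coarse beam-centre search above relies on peaks of one powder ring being equidistant from the true centre, so the candidate point whose distance histogram has the tallest bin wins; a compact numpy sketch of that idea on synthetic ring data:

import numpy as np

# synthetic peak pixels on a ring of radius 80 around (260, 240), in 500x500 space
ang = np.linspace(0.0, 2.0 * np.pi, 200, endpoint=False)
py, px = 260 + 80 * np.sin(ang), 240 + 80 * np.cos(ang)

best, best_score = None, -1
for cy in range(0, 500, 5):          # 5-pixel coarse grid, as in the method above
    for cx in range(0, 500, 5):
        dist = np.sqrt((py - cy)**2 + (px - cx)**2)
        histo, edges = np.histogram(dist, bins=int(dist.max() - dist.min() + 1))
        if histo.max() > best_score:
            best_score, best = histo.max(), (cy, cx)
print(best)  # close to (260, 240)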
Code example #43

    #resB = sp.ndimage.filters.gaussian_filter(b,sigma[2],output=sB)

    for i in range(nx):
        
        axi = fig.add_subplot(ny,nx,totalcount+1) 
        axi.set_xticks([]) ; axi.set_yticks([])

        #plot grayscale galaxy image
        data = bb[camera].data[fili[i],350:450,350:450]*(lams[i]**2)
        print fils[fili[i]], np.max(data), 0.01*np.max(data), sigma_pix[i]
        cdata = np.zeros_like(data)

        resc = sp.ndimage.filters.gaussian_filter(data*1.0,sigma_pix[i],output=cdata)
        print fils[fili[i]], np.max(cdata), 0.01*np.max(cdata), sigma_pix[i], np.sum(data)/np.sum(cdata)

        cdata = congrid.congrid(cdata,(40,40))
        print fils[fili[i]], np.max(cdata), 0.01*np.max(cdata), sigma_pix[i], np.sum(data)/np.sum(cdata)

        norm = ImageNormalize(stretch=LogStretch(),vmin=0.01,vmax=0.35,clip=True)
        axi.imshow(cdata*np.max(data)/np.max(cdata), origin='lower', cmap='Greys', norm=norm, interpolation='nearest')
        #axi.annotate('{:3.2f}$\mu m$'.format(image_hdu.header['EFLAMBDA']),xy=(0.05,0.05),xycoords='axes fraction',color='white',ha='left',va='center',size=6)

        totalcount = totalcount+1



    fig.savefig('jwst.pdf',dpi=600)
    pyplot.close(fig)
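The panels above use astropy's log stretch for display; a minimal stand-alone version of that normalization with the same illustrative cut levels:

import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import ImageNormalize, LogStretch

img = np.random.rand(40, 40)
norm = ImageNormalize(stretch=LogStretch(), vmin=0.01, vmax=0.35, clip=True)
plt.imshow(img, origin='lower', cmap='Greys', norm=norm, interpolation='nearest')
plt.savefig('stretch_demo.png', dpi=150)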
Code example #44
File: sebi-cf.py  Project: jrgcolin/sebi-cf
def process(fproject):
    """Backbone: Tasks are processed the following order:
    - Get time
    - Setting common constants
    - Instantiate a new project
    - Load images
    - Try load optional images, else use surrogate
    - Crop images
    - Generate atmospheric forcing layers
    - Input diagnostic
    - kB-1 sequence
    - Radiative budget sequence
    - Downscaling
    - Stability sequence
    - Low resolution at-limit parameters
    - Upscaling
    - External resistances and gradients
    - Saving to files
    - Get time and save logs
    
    """
    # Dedicated logger
    process_logger = logging.getLogger("SEBI-CF.Process")

    #    try:
    # Get time
    time0 = time.time()
    # Setting common constants
    cd = 0.2
    ct = 0.01
    cp = 1005.
    es0 = 610.7 #Pa
    g = 9.81
    hs = 0.009
    k = 0.4
    p0 = 101325.
    pdtl = 0.71
    gamma = 67.
    rd = 287.04
    rv = 461.05
    sigma = 5.678E-8

    # Instantiate a new project
    myproj = Project.project(fproject)
    process_logger.info("Instantiate a new project")
        
#    except Exception, err:
#        sys.stderr.write('ERROR: %s\n' % str(err))
      
    #try:
    # Calculate scales
    myproj.setGrids()

    # Load images
    widgets = [' Loading surface data:    ', progressBar.Percentage(), ' ', progressBar.Bar(marker='-',left='[',right=']'),
                       ' ', ' ', ' ', ' ']
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=8).start()

    albedo = myproj.read(myproj.falbedo)
    pbar.update(1)
    ts = myproj.read(myproj.flst)
    pbar.update(2)
    ndvi = myproj.read(myproj.fndvi)
    pbar.update(3)

    # Cleanup
    albedo,log = l.cleanup(albedo,"albedo")
    process_logger.info(log)
    ts,log = l.cleanup(ts,"ts")
    process_logger.info(log)
    ndvi,log = l.cleanup(ndvi,"ndvi")
    process_logger.info(log)


    # Try load optional images, else use surrogate
    if myproj.femissivity != 'undef':
        emi = myproj.read(myproj.femissivity)
        process_logger.info("Emissivity image found")
    else:
        emi = l.ndvi2emi(ndvi)

    pbar.update(4)

    if myproj.ffc != 'undef':
        fc = myproj.read(myproj.ffc)
        process_logger.info("Fc image found")
    else:
        fc = l.ndvi2fc(ndvi)

    pbar.update(5)

    if myproj.fhv != 'undef':
        hv = myproj.read(myproj.fhv)
        process_logger.info("Hv image found")
        hv = l.substitute(hv, 0., 0.01)
        z0m = l.hv2z0m(hv)
    else:
        z0m = l.ndvi2z0m(ndvi)
        z0m = l.substitute(z0m, 0., 0.000001)
        hv = l.z0m2hv(z0m)
        hv = l.substitute(hv, 0., 0.01)
    pbar.update(6)

    if myproj.flai != 'undef':
        lai = myproj.read(myproj.flai)
        process_logger.info("LAI image found")
    else:
        lai = l.ndvi2lai(ndvi)
    pbar.update(7)

    if myproj.mask != 'undef':
        mask = myproj.read(myproj.mask)
        process_logger.info("Mask image found")
    else:
        mask = ndvi - ndvi   # surrogate: zero mask with the same shape as ndvi

    if myproj.RnDaily != 'undef':
        RnDaily = myproj.read(myproj.RnDaily)
        process_logger.info("RnDaily image found")
    else:
        pass   # no daily Rn image provided; ETdaily will not be computed



    pbar.update(8)

    pbar.finish()

    # Crop images
    albedo = albedo[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    ts = ts[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    ndvi = ndvi[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    emi = emi[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    fc = fc[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    hv = hv[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    z0m = z0m[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    lai = lai[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    mask = mask[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
    if myproj.RnDaily != 'undef':
        RnDaily = RnDaily[0:myproj.imgDims[0], 0:myproj.imgDims[1]]

    # Generate atmospheric forcing layers
    widgets = [' Loading PBL data:        ', progressBar.Percentage(), ' ', progressBar.Bar(marker='-',left='[',right=']'),
                       ' ', ' ', ' ', ' ']
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=10).start()

    if myproj.atmmode == '1D':
        hg = n.zeros(myproj.imgDims,dtype=float) + myproj.hg
        pbar.update(1)
        hr = n.zeros(myproj.imgDims,dtype=float) + myproj.hr
        pbar.update(2)
        lwdw = n.zeros(myproj.imgDims,dtype=float) + myproj.lwdw
        pbar.update(3)
        pg = n.zeros(myproj.imgDims,dtype=float) + myproj.pg
        pbar.update(4)
        pr = n.zeros(myproj.imgDims,dtype=float) + myproj.pr
        pbar.update(5)
        qg = n.zeros(myproj.imgDims,dtype=float) + myproj.qg
        pbar.update(6)
        qr = n.zeros(myproj.imgDims,dtype=float) + myproj.qr
        pbar.update(7)
        swdw = n.zeros(myproj.imgDims,dtype=float) + myproj.swdw
        pbar.update(8)
        tr = n.zeros(myproj.imgDims,dtype=float) + myproj.tr
        pbar.update(9)
        ur = n.zeros(myproj.imgDims,dtype=float) + myproj.ur
        pbar.update(10)
    if myproj.atmmode == '2D':
        hg = myproj.read(myproj.fhg)
        pbar.update(1)
        hr = myproj.read(myproj.fhr)
        pbar.update(2)
        lwdw = myproj.read(myproj.flwdw)
        pbar.update(3)
        pg = myproj.read(myproj.fpg)
        pbar.update(4)
        pr = myproj.read(myproj.fpr)
        pbar.update(5)
        qg = myproj.read(myproj.fqg)
        pbar.update(6)
        qr = myproj.read(myproj.fqr)
        pbar.update(7)
        swdw = myproj.read(myproj.fswdw)
        pbar.update(8)
        tr = myproj.read(myproj.ftr)
        pbar.update(9)
        ur = myproj.read(myproj.fur)
        pbar.update(10)

        # Additional cleanup
        swdw,log = l.cleanup(swdw,"swdw")
        process_logger.info(log)
        lwdw,log = l.cleanup(lwdw,"lwdw")
        process_logger.info(log)
        if myproj.RnDaily != 'undef':
            RnDaily, log = l.cleanup(RnDaily, "RnDaily")
            process_logger.info(log)

        # Crop images
        hg = hg[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        hr = hr[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        lwdw = lwdw[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        pg = pg[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        pr = pr[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        qg = qg[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        qr = qr[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        swdw = swdw[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        tr = tr[0:myproj.imgDims[0], 0:myproj.imgDims[1]]
        ur = ur[0:myproj.imgDims[0], 0:myproj.imgDims[1]]

        if myproj.pressureUnit == "hPa":
            pg = pg*100.
            pr = pr*100.
        if myproj.pressureLevel == "SL":
            pg = l.ps_sea2gnd(pg,hg)

        #TMP TMP TMP
        #search = n.where(mask == 5.5)
        #ur[search] = ur[search]*1.2

    pbar.finish()
    # Apply mask
    if myproj.mask != 'undef':
        search = n.where(mask==0)

        albedo[search] = n.nan
        ts[search] = n.nan
        ndvi[search] = n.nan
        emi[search] = n.nan
        fc[search] = n.nan
        hv[search] = n.nan
        z0m[search] = n.nan
        lai[search] = n.nan
        hg[search] = n.nan
        hr[search] = n.nan
        lwdw[search] = n.nan
        pg[search] = n.nan
        pr[search] = n.nan
        qg[search] = n.nan
        qr[search] = n.nan
        swdw[search] = n.nan
        tr[search] = n.nan
        ur[search] = n.nan
        if myproj.RnDaily != 'undef':
            RnDaily[search] = n.nan


#    # Input diagnostic
    widgets = [' Input diagnostic:        ', progressBar.Percentage(), ' ', progressBar.Bar(marker='-',left='[',right=']'),
                       ' ', ' ', ' ', ' ']
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=18).start()
    l.getStats(albedo,'albedo')
    pbar.update(1)
    l.getStats(ts,'ts')
    pbar.update(2)
    l.getStats(ndvi,'ndvi')
    pbar.update(3)
    l.getStats(emi,'emi')
    pbar.update(4)
    l.getStats(fc,'fc')
    pbar.update(5)
    l.getStats(hv,'hv')
    pbar.update(6)
    l.getStats(z0m,'z0m')
    pbar.update(7)
    l.getStats(lai,'lai')
    pbar.update(8)
    l.getStats(hg,'hg')
    pbar.update(9)
    l.getStats(hr,'hr')
    pbar.update(10)
    l.getStats(lwdw,'lwdw')
    pbar.update(11)
    l.getStats(pg,'pg')
    pbar.update(12)
    l.getStats(pr,'pr')
    pbar.update(13)
    l.getStats(qg,'qg')
    pbar.update(14)
    l.getStats(qr,'qr')
    pbar.update(15)
    l.getStats(swdw,'swdw')
    pbar.update(16)
    l.getStats(tr,'tr')
    pbar.update(17)
    l.getStats(ur,'ur')
    pbar.update(18)

    pbar.finish()

    # kB-1 sequence
    widgets = [' Running kB-1 model:      ', progressBar.Percentage(), ' ', progressBar.Bar(marker='-',left='[',right=']'),
                       ' ', ' ', ' ', ' ']
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=100).start()
    if myproj.kbMode == "Massman":
        process_logger.info("Launching kB-1 model")
        kB_1,z0h = l.kB(cd,ct,fc,k,hg,hr,hs,hv,lai,ndvi,p0,pr,tr,ur,z0m)
        
    else:
        kB_1 = n.zeros(myproj.imgDims,dtype=float) + 4.
        z0h = z0m / n.exp(kB_1)

    l.getStats(kB_1,'kB_1')
    l.getStats(z0h,'z0h')

    pbar.update(100)
    pbar.finish()

    # Radiative budget
    widgets = [' Radiative budget:        ', progressBar.Percentage(), ' ', progressBar.Bar(marker='-',left='[',right=']'),
                       ' ', ' ', ' ', ' ']
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=3).start()
    process_logger.info("Launching Radiative budget")
    myproj.logs += '\n\nRadiative budget:'

    Rn = l.Rn(albedo,emi,lwdw,sigma,swdw,ts)
    l.getStats(Rn,'Rn')
    pbar.update(1)

    G0 = l.G0(fc,Rn)
    l.getStats(G0,'G0')
    pbar.update(2)

    G0_Rn = G0/Rn
    l.getStats(G0_Rn,'G0_Rn')

    G0_Rn,log = l.cleanup(G0_Rn,"G0_Rn")
    myproj.logs += log

    pbar.update(3)

    pbar.finish()

    # Downscaling
    widgets = [' Downscaling:             ', progressBar.Percentage(), ' ', progressBar.Bar(marker='-',left='[',right=']'),
                       ' ', ' ', ' ', ' ']
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=12).start()
    process_logger.info("Launching Downscaling")

    low_z0m = l.downscaling(z0m,myproj)
    l.getStats(low_z0m,'low_z0m')
    pbar.update(1)

    low_z0h = l.downscaling(z0h,myproj)
    l.getStats(low_z0h,'low_z0h')
    pbar.update(2)

    low_ts = l.downscaling(ts,myproj)
    l.getStats(low_ts,'low_ts')
    pbar.update(3)

    low_Rn = l.downscaling(Rn,myproj)
    l.getStats(low_Rn,'low_Rn')
    pbar.update(4)

    low_G0 = l.downscaling(G0,myproj)
    l.getStats(low_G0,'low_G0')
    pbar.update(5)

    low_ur = l.downscaling(ur,myproj)
    l.getStats(low_ur,'low_ur')
    pbar.update(6)

    low_hr = l.downscaling(hr,myproj)
    l.getStats(low_hr,'low_hr')
    pbar.update(7)

    low_pr = l.downscaling(pr,myproj)
    l.getStats(low_pr,'low_pr')
    pbar.update(8)

    low_pg = l.downscaling(pg,myproj)
    l.getStats(low_pg,'low_pg')
    pbar.update(9)

    low_qr = l.downscaling(qr,myproj)
    l.getStats(low_qr,'low_qr')
    pbar.update(10)

    low_qg = l.downscaling(qg,myproj)
    l.getStats(low_qg,'low_qg')
    pbar.update(11)

    low_tr = l.downscaling(tr,myproj)
    l.getStats(low_tr,'low_tr')
    pbar.update(12)

    pbar.finish()

    # Stability sequence
    widgets = [' Stability sequence:      ', progressBar.Percentage(), ' ', progressBar.Bar(marker='-',left='[',right=']'),
                       ' ', ' ', ' ', ' ']
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=myproj.gridNb[0]*myproj.gridNb[1]).start()

    process_logger.info("Launching Stability sequence")

    low_d0 = l.z0m2d0(low_z0m)
    l.getStats(low_d0,'low_d0')
    ustar_i = l.u2ustar(low_d0,low_hr,k,low_ur,low_z0m)
    l.getStats(ustar_i,'ustar_i')
    ra_i = l.ra(low_d0,low_hr,low_z0h)
    l.getStats(ra_i,'ra_i')
    low_delta_a = l.tpot(cp,low_pg,p0,low_qg,rd,low_ts) - \
                    l.tpot(cp,low_pr,p0,low_qr,rd,low_tr)
    l.getStats(low_delta_a,'low_delta_a')
    low_es = l.esat(es0,low_ts)
    l.getStats(low_es,'low_es')
    low_e = l.eact(low_pg,low_qg,rd,rv)
    l.getStats(low_e,'low_e')
    low_rho = l.rho(low_e,low_pg,low_qg,\
                    rd,low_ts)
    l.getStats(low_rho,'low_rho')
    H_i = l.H(cp,low_delta_a,k,ra_i,low_rho,ustar_i)
    l.getStats(H_i,'H_i')
    delta = l.delta(low_es,low_ts)
    l.getStats(delta,'delta')
    Le_i = (delta*rd*(low_ts)**2)/(0.622*low_es)
    l.getStats(Le_i,'Le_i')
    L_i = (-ustar_i**3 *low_rho)/(k*g*0.61*(low_Rn-low_G0)/Le_i)
    l.getStats(L_i,'L_i')
    #pbar.update(1)

    # Modif >><<

    H_ic = low_delta_a * k * low_rho * cp
    L_ic = -low_rho * cp * (low_ts * (1 + 0.61 * low_qr)) / (k * g)
    ustar_i = k * low_ur / n.log((low_hr-low_d0) / low_z0m)
    H_i = H_ic * ustar_i / n.log((low_hr-low_d0) / low_z0h)
    H_target = H_i
    L_i = L_i - L_i   # reset the Obukhov length field to zero

    # >><<

    # Starting the iterative sequence
    vars = n.zeros([11,1],dtype=float)
    # Variables for output
    slvUstar = n.zeros([myproj.gridNb[0],myproj.gridNb[1]],dtype=float)
    slvH = n.zeros([myproj.gridNb[0],myproj.gridNb[1]],dtype=float)
    slvL = n.zeros([myproj.gridNb[0],myproj.gridNb[1]],dtype=float)
    iterator = n.zeros([myproj.gridNb[0],myproj.gridNb[1]],dtype=float)

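    # The cell-by-cell loop below is a fixed-point iteration: holding H from
    # the previous pass, it updates the Obukhov length L, then the friction
    # velocity u* with the Bw stability correction, then H itself, stopping
    # once H changes by less than 0.01 or after 100 iterations.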
    if myproj.iterate == "True":
        for i in n.arange(0,myproj.gridNb[0]):
            for j in n.arange(0,myproj.gridNb[1]):

                stabDif = 10.
                stabLoops = 0

                while stabDif > 0.01 and stabLoops < 100:
                    L_i[i,j] = L_ic[i,j] * (ustar_i[i,j]**3) / H_i[i,j]
                    ustar_i[i,j] = k* low_ur[i,j] / (n.log((low_hr[i,j]-low_d0[i,j]) / low_z0m[i,j]) - l.Bw(low_hr[i,j],L_i[i,j],low_z0h[i,j],low_z0m[i,j]))
                    H_i[i,j] = H_ic[i,j] * ustar_i[i,j] / (n.log((low_hr[i,j]-low_d0[i,j]) / low_z0h[i,j]) - (-7.6*n.log(low_hr[i,j]/L_i[i,j])))
                    stabDif   = n.abs(H_target[i,j] - H_i[i,j])
                    H_target[i,j] = H_i[i,j]
                    stabLoops+=1

                slvUstar[i,j] = ustar_i[i,j]
                slvH[i,j] = H_i[i,j]
                slvL[i,j] = L_i[i,j]
                iterator[i,j] = stabLoops

                ## Grid stability functions
                #Cw = -7.6*n.log(low_hr[i,j]/L_i[i,j])
                #ra = n.log((low_hr[i,j]-low_d0[i,j])/low_z0h[i,j])
                #Bw = l.Bw(low_hr[i,j],L_i[i,j],low_z0h[i,j],low_z0m[i,j])
                ## Prepare the file to provide to l.stabFunc
                #vars[0] = low_ur[i,j]
                #vars[1] = low_hr[i,j]
                #vars[2] = low_d0[i,j]
                #vars[3] = low_z0m[i,j]
                #vars[4] = Bw
                #vars[5] = low_delta_a[i,j]
                #vars[6] = low_rho[i,j]
                #vars[7] = ra
                #vars[8] = l.tpot(cp,low_pg[i,j],p0,low_qg[i,j],rd,0.5*(low_ts[i,j]+low_tr[i,j]))
                #vars[9] = Le_i[i,j]
                #vars[10] = low_Rn[i,j] - low_G0[i,j]
                #vars.tofile('tmp000')
                #
                ##slvUstar[i,j],slvH[i,j],slvL[i,j] = fsolve(l.stabFunc,[ustar_i[i,j],H_i[i,j],L_i[i,j]],warning=False)
                #try:
                #    slvUstar[i,j],slvH[i,j],slvL[i,j] = broyden2(\
                #            l.stabFunc,[ustar_i[i,j],H_i[i,j],L_i[i,j]],iter=40,verbose=False)
                #except(OverflowError):
                #    slvUstar[i,j] = ustar_i[i,j]
                #    slvH[i,j] = H_i[i,j]
                #    slvL[i,j] = L_i[i,j]

                pbar.update(myproj.gridNb[1]*i+j)

        # add some stats
        l.getStats(slvUstar,'slvUstar')
        l.getStats(slvH,'slvH')
        l.getStats(slvL,'slvL')
        l.getStats(iterator,'iterator')
        pbar.finish()

    else:
        # 2010-02-05: TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP
        slvUstar = ustar_i
        slvH = H_i
        slvL = L_i

        # TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP

    # Low resolution at-limit parameters
    low_Ld = l.L(cp,low_delta_a,g,k,Le_i,low_rho,low_Rn,low_G0,slvUstar,state='dry')
    low_Lw = l.L(cp,low_delta_a,g,k,Le_i,low_rho,low_Rn,low_G0,slvUstar,state='wet')
    low_Cwd = l.Cw(low_hr,low_Ld,low_z0h,low_z0m)
    low_Cww = l.Cw(low_hr,low_Lw,low_z0h,low_z0m)
    low_Cw = l.Cw(low_hr,slvL,low_z0h,low_z0m)
    l.getStats(low_Ld,'low_Ld')
    l.getStats(low_Lw,'low_Lw')
    l.getStats(low_Cwd,'low_Cwd')
    l.getStats(low_Cww,'low_Cww')

    # Upscaling
    widgets = [' Upscaling:               ', progressBar.Percentage(), ' ', progressBar.Bar(marker='-',left='[',right=']'),
                       ' ', ' ', ' ', ' ']
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=6).start()
    process_logger.info("Launching Upscaling")

    # TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP
    low_rho = l.nan2flt(low_rho,n.nansum(low_rho)/low_rho.size)
    slvUstar = l.nan2flt(slvUstar,n.nansum(slvUstar)/slvUstar.size)
    low_Cwd = l.nan2flt(low_Cwd,n.nansum(low_Cwd)/low_Cwd.size)
    low_Cww = l.nan2flt(low_Cww,n.nansum(low_Cww)/low_Cww.size)
    low_Cw = l.nan2flt(low_Cw,n.nansum(low_Cw)/low_Cw.size)
    slvL = l.nan2flt(slvL,n.nansum(slvL)/slvL.size)
    # TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP TMP

    rho = congrid.congrid(low_rho,[myproj.imgDims[0],myproj.imgDims[1]],method='spline',minusone=True)
    pbar.update(1)
    ustar = congrid.congrid(slvUstar,[myproj.imgDims[0],myproj.imgDims[1]],method='spline',minusone=True)
    pbar.update(2)
    Cwd = congrid.congrid(low_Cwd,[myproj.imgDims[0],myproj.imgDims[1]],method='spline',minusone=True)
    pbar.update(3)
    Cww = congrid.congrid(low_Cww,[myproj.imgDims[0],myproj.imgDims[1]],method='spline',minusone=True)
    pbar.update(4)
    Cw = congrid.congrid(low_Cw,[myproj.imgDims[0],myproj.imgDims[1]],method='spline',minusone=True)
    pbar.update(5)
    L = congrid.congrid(slvL,[myproj.imgDims[0],myproj.imgDims[1]],method='spline',minusone=True)
    pbar.update(6)

    pbar.finish()

    # External resistances and gradients
    widgets = [' Processing SEBI:         ', progressBar.Percentage(), ' ', progressBar.Bar(marker='-',left='[',right=']'),
                       ' ', ' ', ' ', ' ']
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=16).start()

    process_logger.info("Launching external resistances and gradients")

    d0 = l.z0m2d0(z0m)
    es = l.esat(es0,0.5*(ts+tr))
    e = l.eact(0.5*(pg+pr),0.5*(qg+qr),rd,rv)
    delta = l.delta(es,0.5*(ts+tr))
    pbar.update(1)

    # dry limit
    red = l.re(Cwd,d0,hr,k,ustar,z0h)
    deltad = l.deltad(cp,red,rho,Rn,G0)
    l.getStats(Cwd,'Cwd')
    l.getStats(red,'red')
    l.getStats(deltad,'deltad')
    pbar.update(2)

    # wet limit
    rew = l.re(Cww,d0,hr,k,ustar,z0h)
    deltaw = l.deltaw(cp,delta,e,es,gamma,rew,rho,Rn,G0)
    l.getStats(Cww,'Cww')
    l.getStats(rew,'rew')
    l.getStats(deltaw,'deltaw')
    pbar.update(3)

    # actual conditions
    Cw = l.Cw(hr,L,z0h,z0m)
    pbar.update(4)

    re = l.re(Cw,d0,hr,k,ustar,z0h)
    pbar.update(5)

    deltaa = l.tpot(cp,pg,p0,qg,rd,ts) - l.tpot(cp,pr,p0,qr,rd,tr)
    pbar.update(6)

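    # SEBI interpolates the actual gradient-to-resistance ratio between its
    # dry limit (all available energy goes into sensible heat) and its wet
    # limit (evaporation at the potential rate); the evaporative fraction
    # then follows as ef = 1 - SEBI.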
    SEBI = (deltaa/re - deltaw/rew) / (deltad/red - deltaw/rew)
    pbar.update(7)

    ef = 1 - SEBI
    pbar.update(8)

    search = n.where(ef > 1.)
    ef[search] = 1.
    pbar.update(9)

    LE = (Rn-G0)*ef
    H = Rn - LE - G0
    pbar.update(10)

    # relative evap (alternate)
    Hd = Rn - G0
    Hw = ((Rn - G0) - (rho*cp / rew) * es / gamma) / (1.0 + delta / gamma)
    Ha = rho*cp * deltaa / re
    pbar.update(11)

    search = n.where(Ha>Hd)
    Ha[search] = Hd[search]
    pbar.update(12)

    search = n.where(Ha<Hw)
    Ha[search] = Hw[search]
    pbar.update(13)

    Le_re = 1. - ((Ha - Hw) / (Hd - Hw))

    search = n.where(Hd<=Hw)
    Le_re[search] = 1.
    pbar.update(14)

    Le_fr = Le_re * (Rn - G0 - Hw) / (Rn - G0)

    search = n.where((Rn-G0)<=0)
    Le_fr[search] = 1.
    pbar.update(15)

    if myproj.RnDaily != 'undef':
        ETdaily = Le_fr * (RnDaily * (1 - G0_Rn)) * 24*3600 / 2.45E6   # W m-2 -> mm/day (latent heat 2.45 MJ kg-1)

    pbar.update(16)
    pbar.finish()

    widgets = [' Output statistics        ', progressBar.Percentage(), ' ', progressBar.Bar(marker='-',left='[',right=']'),
                       ' ', ' ', ' ', ' ']
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=12).start()

    l.getStats(L,'L')
    pbar.update(1)
    l.getStats(Cw,'Cw')
    pbar.update(2)
    l.getStats(re,'re')
    pbar.update(3)
    l.getStats(deltaa,'deltaa')
    pbar.update(4)
    l.getStats(SEBI,'SEBI')
    pbar.update(5)
    l.getStats(ef,'ef')
    pbar.update(6)
    l.getStats(Ha,'Ha')
    pbar.update(7)
    l.getStats(Hd,'Hd')
    pbar.update(8)
    l.getStats(Hw,'Hw')
    pbar.update(9)
    l.getStats(Le_re,'Le_re')
    pbar.update(10)
    l.getStats(Le_fr,'Le_fr')
    pbar.update(11)

    # Stats for daily ET only if the variable exists (it may not be computed when no daily Rn is provided)
    try:
        l.getStats(ETdaily,'ETdaily')
    except UnboundLocalError :
        pass
    
    pbar.update(12)
    pbar.finish()



    # Saving to files
    widgets = [' Saving to files:         ', progressBar.Percentage(), ' ', progressBar.Bar(marker='-',left='[',right=']'),
                       ' ', ' ', ' ', ' ']
    pbar = progressBar.ProgressBar(widgets=widgets, maxval=8).start()

    # Data layers:
    #ascIO.ascWritter(myproj.path+myproj.prefix+'ef',
    #                 ef,myproj.imgDims[1],myproj.imgDims[0],xllcorner=myproj.xllcorner,yllcorner=myproj.yllcorner,cellsize=myproj.cellsize,NODATA_value='nan')
    myproj.writeRaw(ef,'ef')
    pbar.update(1)

    #ascIO.ascWritter(myproj.path+myproj.prefix+'kb',
    #                 kB_1,myproj.imgDims[1],myproj.imgDims[0],xllcorner=myproj.xllcorner,yllcorner=myproj.yllcorner,cellsize=myproj.cellsize,NODATA_value='nan')
    myproj.writeRaw(kB_1,'kb')
    pbar.update(2)

    #ascIO.ascWritter(myproj.path+myproj.prefix+'LE',
    #                 LE,myproj.imgDims[1],myproj.imgDims[0],xllcorner=myproj.xllcorner,yllcorner=myproj.yllcorner,cellsize=myproj.cellsize,NODATA_value='nan')
    myproj.writeRaw(LE,'LE')
    pbar.update(3)

    #ascIO.ascWritter(myproj.path+myproj.prefix+'H',
    #                 H,myproj.imgDims[1],myproj.imgDims[0],xllcorner=myproj.xllcorner,yllcorner=myproj.yllcorner,cellsize=myproj.cellsize,NODATA_value='nan')
    myproj.writeRaw(H,'H')
    pbar.update(4)

    #ascIO.ascWritter(myproj.path+myproj.prefix+'Rn',
    #                 Rn,myproj.imgDims[1],myproj.imgDims[0],xllcorner=myproj.xllcorner,yllcorner=myproj.yllcorner,cellsize=myproj.cellsize,NODATA_value='nan')
    myproj.writeRaw(Rn,'Rn')
    pbar.update(5)

    #ascIO.ascWritter(myproj.path+myproj.prefix+'G0',
    #                 G0,myproj.imgDims[1],myproj.imgDims[0],xllcorner=myproj.xllcorner,yllcorner=myproj.yllcorner,cellsize=myproj.cellsize,NODATA_value='nan')
    myproj.writeRaw(G0,'G0')
    pbar.update(6)

    myproj.writeRaw(Le_fr,'Le_fr')
    pbar.update(7)

    if myproj.RnDaily != 'undef':
        myproj.writeRaw(ETdaily,'ETdaily')

    pbar.update(8)

    pbar.finish()

    print 'Done...'
        
    #except Exception, err:
        #log.exception('Error from process():')
        #myproj.logs += log.exception('Error from process():')
        #process_logger.info("ERROR: process aborted")
        
    #finally:
    print "Check sebi-cf.log for more information."
Code example #45
File: geomap_5kmto1km.py Project: CMDA-CMU/CMDA
    def geomap_5kmto1km(self, geodata_5km):

	#print >> self.out, 'in geomap_5kmto1km() ...'
	#print >> self.out, 'geodata_5km: ', geodata_5km

	LowResDims = geodata_5km.shape
	#print >> self.out, 'LowResDims[0]: ', LowResDims[0], ', LowResDims[1]: ', LowResDims[1]
	ResolutionFactor = 5   # 5km res -> 1km res
	first_col_1km = 2      # the first 5km SDS pixel has coordinate (2,2) in the 1km grid
	first_row_1km =	2

	if self.Cell_Along_Swath_5km == 0:
	    self.Cell_Along_Swath_5km =	LowResDims[0]   # 406

	if self.Cell_Across_Swath_5km == 0:
	    self.Cell_Across_Swath_5km = LowResDims[1]   # 270

	if self.Cell_Along_Swath_1km == 0:
	    self.Cell_Along_Swath_1km = self.Cell_Along_Swath_5km*5   # 2030

	if self.Cell_Across_Swath_1km == 0:
	    self.Cell_Across_Swath_1km = self.Cell_Across_Swath_5km*5 + 4   # 1354

	#print >> self.out, 'self.Cell_Along_Swath_5km: ', self.Cell_Along_Swath_5km
	#print >> self.out, 'self.Cell_Across_Swath_5km: ', self.Cell_Across_Swath_5km
	#print >> self.out, 'self.Cell_Along_Swath_1km: ', self.Cell_Along_Swath_1km
	#print >> self.out, 'self.Cell_Across_Swath_1km: ', self.Cell_Across_Swath_1km

	"""
	print >> self.out, 'a row: '
	for i  in range(self.Cell_Across_Swath_5km):
	    print >> self.out, geodata_5km[0, i]
	"""


	# two beginning columns
	begin_col_exp = 2 * geodata_5km[:, 0] - geodata_5km[:, 1]
	#print >> self.out, 'len(begin_col_exp): ', len(begin_col_exp)
	### print >> self.out, 'begin_col_exp: ', begin_col_exp

	# two end columns
	end_col_exp = 2 * geodata_5km[:, self.Cell_Across_Swath_5km-1] - geodata_5km[:, self.Cell_Across_Swath_5km-2]
	### print >> self.out, 'end_col_exp: ', end_col_exp

	end_col_exp_rest = 2 * end_col_exp - geodata_5km[:, self.Cell_Across_Swath_5km-1]
	### print >> self.out, 'end_col_exp_rest: ', end_col_exp_rest

	# a larger array for temp use
	exp_geodata_5km = N.array([0.0]*(self.Cell_Across_Swath_5km + 3)*self.Cell_Along_Swath_5km)
	exp_geodata_5km = exp_geodata_5km.reshape(self.Cell_Along_Swath_5km, (self.Cell_Across_Swath_5km + 3))
	### print >> self.out, 'exp_geodata_5km: ', exp_geodata_5km
	exp_geodata_5km[:, 0] = begin_col_exp
	exp_geodata_5km[:, 1:self.Cell_Across_Swath_5km+1] = geodata_5km
	### print >> self.out, '1. exp_geodata_5km: ', exp_geodata_5km

	exp_geodata_5km[:, self.Cell_Across_Swath_5km + 1] = end_col_exp
	exp_geodata_5km[:, self.Cell_Across_Swath_5km + 2] = end_col_exp_rest
	### print >> self.out, '2. exp_geodata_5km: ', exp_geodata_5km


	Internal_geodata_5km = exp_geodata_5km.copy()

	begin_row_exp =	2 * Internal_geodata_5km[0, :] - Internal_geodata_5km[1, :]
	end_row_exp = 2 * Internal_geodata_5km[self.Cell_Along_Swath_5km-1, :] - Internal_geodata_5km[self.Cell_Along_Swath_5km-2, :]

	exp_geodata_5km	= N.array([0.0]*((self.Cell_Across_Swath_5km + 3)*(self.Cell_Along_Swath_5km + 2)))
	exp_geodata_5km = exp_geodata_5km.reshape((self.Cell_Along_Swath_5km+2), (self.Cell_Across_Swath_5km + 3))
	exp_geodata_5km[0, :] =	begin_row_exp
	exp_geodata_5km[1:self.Cell_Along_Swath_5km+1, :] = Internal_geodata_5km
	exp_geodata_5km[self.Cell_Along_Swath_5km+1, :] = end_row_exp

	### print >> self.out, '3. exp_geodata_5km: ', exp_geodata_5km

	Internal_geodata_5km = exp_geodata_5km.copy()
	#print >> self.out, 'Internal_geodata_5km: ', Internal_geodata_5km
	dims5km = Internal_geodata_5km.shape
	#print >> self.out, 'Internal_geodata_5km.shape: ', dims5km

	Expand_cell_Along_Swath_1km = ( self.Cell_Along_Swath_5km + 1 ) * ResolutionFactor + 1
	Expand_Cell_Across_Swath_1km = ( self.Cell_Across_Swath_5km + 2 ) * ResolutionFactor + 1

	dims1km = N.array([0,0])

	dims1km[0] = Expand_cell_Along_Swath_1km
	dims1km[1] = Expand_Cell_Across_Swath_1km

	Expand_geodata_1km = congrid.congrid(Internal_geodata_5km, dims1km)

	geodata_1km = Expand_geodata_1km[(first_row_1km+1):self.Cell_Along_Swath_1km+first_row_1km, \
					    (first_col_1km+1):self.Cell_Across_Swath_1km+first_col_1km]
	#print >> self.out, 'geodata_1km: ', geodata_1km
	#print >> self.out, 'geodata_1km.shape: ', geodata_1km.shape

	return geodata_1km
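
The expansion above pads the 5km geolocation grid with linearly extrapolated rows and columns before interpolating, so the 1km grid can reach beyond the stored 5km samples. A minimal numpy sketch of that edge extrapolation (symmetric one-column version; the method above adds a second column on the right to cover the extra 1km pixels):

import numpy as np

def pad_linear(grid):
    """Add one linearly extrapolated column on each side of a 2-D grid."""
    left = 2 * grid[:, 0] - grid[:, 1]      # continue the local slope outward
    right = 2 * grid[:, -1] - grid[:, -2]
    return np.column_stack([left, grid, right])
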
Code example #46
def process_single_filter(data,
                          lcdata,
                          filname,
                          fil_index,
                          output_dir,
                          image_filelabel,
                          image_suffix,
                          eff_lambda_microns,
                          lim=None,
                          minz=None):

    data = copy.copy(data)

    print('Processing:  ', filname)
    full_npix = data['full_npix'][0]
    pixsize_arcsec = data['pixsize_arcsec'][0]
    n_galaxies = data['full_npix'].shape[0]

    if filname.startswith('WFI'):
        fn = filname[-4:]
        filname = 'wfirst/wfidrm15_' + fn

    try:
        pbi = filters_to_analyze == filname
        this_psf_file = os.path.join(psf_dir, psf_names[pbi][0])
        this_psf_pixsize_arcsec = psf_pix_arcsec[pbi][0]
        this_psf_fwhm = psf_fwhm[pbi][0]
        this_photfnu_Jy = photfnu_Jy[pbi][0]
        print('PSF info: ', this_psf_file, this_psf_pixsize_arcsec,
              this_psf_fwhm, this_photfnu_Jy)
        do_psf = True
    except:
        print('Missing filter info, skipping PSF: ', filname)
        do_psf = False
        this_psf_pixsize_arcsec = pixsize_arcsec
        #return None

    desired_pixsize_arcsec = this_psf_pixsize_arcsec

    full_fov = full_npix * pixsize_arcsec

    desired_npix = full_fov / desired_pixsize_arcsec

    print('Orig pix: ', full_npix, ' Desired pix: ', desired_npix)

    if do_psf is True:
        orig_psf_kernel = pyfits.open(this_psf_file)[0].data

        #psf kernel shape must be odd for astropy.convolve??
        if orig_psf_kernel.shape[0] % 2 == 0:
            new_psf_shape = orig_psf_kernel.shape[0] - 1
            psf_kernel = congrid.congrid(orig_psf_kernel,
                                         (new_psf_shape, new_psf_shape))
        else:
            psf_kernel = orig_psf_kernel

        assert (psf_kernel.shape[0] % 2 != 0)

    image_cube = np.zeros((full_npix, full_npix), dtype=np.float64)

    success = []
    mag = []

    #for bigger files, may need to split by filter first
    index = np.arange(n_galaxies)

    for pos_i, pos_j, origin_i, origin_j, run_dir, this_npix, this_z, num in zip(
            data['pos_i'], data['pos_j'], data['origin_i'], data['origin_j'],
            data['run_dir'], data['this_npix'], data['z'], index):
        if lim is not None:
            if num > lim:
                success.append(False)
                mag.append(99.0)
                continue
        if minz is not None:
            if this_z < minz:
                success.append(False)
                mag.append(99.0)
                continue

        try:
            bblist = pyfits.open(os.path.join(run_dir, 'broadbandz.fits'))
            this_cube = bblist['CAMERA0-BROADBAND-NONSCATTER'].data
            this_mag = ((
                bblist['FILTERS'].data)['AB_mag_nonscatter0'])[fil_index]

            bblist.close()

            #if catalog and image have different npix, this is a failure somewhere
            cube_npix = this_cube.shape[-1]
            assert (cube_npix == this_npix)

            success.append(True)
            mag.append(this_mag)
        except:
            print('Missing file or mismatched shape, ', run_dir, this_npix)
            success.append(False)
            mag.append(99.0)
            continue

        i_tc = 0
        j_tc = 0
        i_tc1 = this_npix
        j_tc1 = this_npix

        if origin_i < 0:
            i0 = 0
            i_tc = -1 * origin_i
        else:
            i0 = origin_i

        if origin_j < 0:
            j0 = 0
            j_tc = -1 * origin_j
        else:
            j0 = origin_j

        if i0 + this_npix > full_npix:
            i1 = full_npix
            i_tc1 = full_npix - i0  #this_npix - (i0+this_npix-full_npix)
        else:
            i1 = i0 + this_npix - i_tc

        if j0 + this_npix > full_npix:
            j1 = full_npix
            j_tc1 = full_npix - j0
        else:
            j1 = j0 + this_npix - j_tc

        sub_cube1 = image_cube[i0:i1, j0:j1]
        this_subcube = this_cube[fil_index, i_tc:i_tc1, j_tc:j_tc1]
        print(run_dir, this_subcube.shape)

        image_cube[i0:i1, j0:j1] = sub_cube1 + this_subcube

    #convolve here

    #first, re-grid to desired scale

    new_image = congrid.congrid(image_cube, (desired_npix, desired_npix))
    new_i = data['pos_i'] * desired_npix / full_npix
    new_j = data['pos_j'] * desired_npix / full_npix

    pixel_Sr = (desired_pixsize_arcsec**
                2) / sq_arcsec_per_sr  #pixel area in steradians:  Sr/pixel
    to_nJy_per_Sr = (1.0e9) * (1.0e14) * (eff_lambda_microns**
                                          2) / c  #((pixscale/206265.0)^2)*
    #sigma_nJy = 0.3*(2.0**(-0.5))*((1.0e9)*(3631.0/5.0)*10.0**(-0.4*self.maglim))*self.Pix_arcsec*(3.0*self.FWHM_arcsec)
    to_nJy_per_pix = to_nJy_per_Sr * pixel_Sr

    nopsf_im = new_image * to_nJy_per_pix

    if do_psf is True:
        conv_im = convolve_fft(new_image,
                               psf_kernel,
                               boundary='fill',
                               fill_value=0.0,
                               normalize_kernel=True,
                               allow_huge=True)
        final_im = conv_im * to_nJy_per_pix

    outname = os.path.join(
        output_dir, image_filelabel + '_' + filname.replace('/', '-') + '_' +
        image_suffix + '_v1_lightcone.fits')
    print('saving:', outname)

    primary_hdu = pyfits.PrimaryHDU(nopsf_im)
    primary_hdu.header['FILTER'] = filname.replace('/', '-')
    primary_hdu.header['PIXSIZE'] = (desired_pixsize_arcsec, 'arcsec')
    primary_hdu.header['UNIT'] = ('nanoJanskies', 'per pixel')
    abzp = -2.5 * (-9.0) + 2.5 * np.log10(3631.0)  #images in nanoJanskies
    primary_hdu.header['ABZP'] = (abzp, 'AB mag zeropoint')
    if do_psf is True:
        primary_hdu.header['PHOTFNU'] = (this_photfnu_Jy,
                                         'Jy; approx flux[Jy] at 1 count/sec')

    primary_hdu.header['EXTNAME'] = 'IMAGE_NOPSF'

    if do_psf is True:
        psfim_hdu = pyfits.ImageHDU(final_im)
        psfim_hdu.header['EXTNAME'] = 'IMAGE_PSF'

        psf_hdu = pyfits.ImageHDU(psf_kernel)
        psf_hdu.header['EXTNAME'] = 'MODELPSF'
        psf_hdu.header['PIXSIZE'] = (desired_pixsize_arcsec, 'arcsec')

    if 'success' not in data.colnames:
        newcol = astropy.table.column.Column(data=success, name='success')
        data.add_column(newcol)

    if 'new_i' not in data.colnames:
        newicol = astropy.table.column.Column(data=new_i, name='new_i')
        newjcol = astropy.table.column.Column(data=new_j, name='new_j')

        data.add_column(newicol)
        data.add_column(newjcol)

    magcol = astropy.table.column.Column(data=mag,
                                         name='AB_absmag_' +
                                         filname.replace('/', '-'))
    data.add_column(magcol)

    data_df = data.to_pandas()
    lc_df = lcdata.to_pandas()
    lc_df.rename(columns=lcfile_cols, inplace=True)

    assert (lc_df.shape[0] == data_df.shape[0])

    new_df = lc_df.join(data_df)

    failures = new_df.where(new_df['success'] == False).dropna()
    successes = new_df.drop(failures.index)
    print('N successes: ', successes.shape[0])

    new_data = astropy.table.Table.from_pandas(successes)

    table_hdu = pyfits.table_to_hdu(new_data)
    table_hdu.header['EXTNAME'] = 'Catalog'

    if do_psf is True:
        output_list = pyfits.HDUList(
            [primary_hdu, psfim_hdu, psf_hdu, table_hdu])
    else:
        output_list = pyfits.HDUList([primary_hdu, table_hdu])

    tempfile = os.path.join(os.path.expandvars('/scratch/$USER/$SLURM_JOBID'),
                            os.path.basename(outname))
    print('saving to scratch first.. , ', tempfile)
    output_list.writeto(tempfile, overwrite=True)
    output_list.close()

    shutil.copy(tempfile, output_dir)

    return success
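
Much of the index bookkeeping above (i0/i1, i_tc/i_tc1 and so on) implements a paste-with-clipping: a source stamp whose origin may fall partly outside the mosaic is trimmed on each side before being co-added. A compact sketch of the same logic (hypothetical helper, not part of the original module):

import numpy as np

def add_stamp(canvas, stamp, origin_i, origin_j):
    """Co-add stamp into canvas at (origin_i, origin_j), clipping any part
    of the stamp that falls outside the canvas."""
    ni, nj = canvas.shape
    si, sj = stamp.shape
    i0, j0 = max(origin_i, 0), max(origin_j, 0)
    i1, j1 = min(origin_i + si, ni), min(origin_j + sj, nj)
    if i0 >= i1 or j0 >= j1:
        return canvas                   # stamp lies entirely off the canvas
    canvas[i0:i1, j0:j1] += stamp[i0 - origin_i:i1 - origin_i,
                                  j0 - origin_j:j1 - origin_j]
    return canvas
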
Code example #47
def step_through_images(
    iim,
    vim,
    bim,
    iom,
    vom,
    bom,
    cli,
    clv,
    clb,
    star1i,
    star1v,
    star1b,
    star2i,
    star2v,
    star2b,
    star3i,
    star3v,
    star3b,
    war1,
    war2,
    war3,
    psfi,
    psfv,
    psfb,
):

    # scaling variables to add 'real' bright stars...
    war1 = war1 * 0.25
    nar1 = 1 - war1
    war2 = war2 * 0.25
    nar2 = 1 - war2
    war3 = war3 * 0.25
    nar3 = 1 - war3

    # in sim cluster gals
    # vflux = 0.440 iflux
    # bflux = 0.105 iflux
    # in SPT obs of z=0.55 cluster in gri
    # vflux = 1.09  iflux
    # bflux = 0.054 iflux
    # print np.shape(bom),np.shape(bim),np.shape(clb)

    # hence adjust summed gri frames to match spt fluxes << ***
    # here are adding simmed cluster gals with scaling, to lensed images
    skyi = 2.5515003687
    sigi = 6.69451075049
    skyv = 3.43434568331
    sigv = 8.14642639767
    skyb = 0.661582802206
    sigb = 2.38341579459

    save_to_jpeg(iim * 5, vim * 5, bim * 5, 2048, skyi, sigi, skyv, sigv, skyb, sigb, "slides_0.jpg")

    igm = iim + cli * 1.00
    vgm = vim + clv * 2.48
    bgm = bim + clb * 0.52

    save_to_jpeg(igm * 5, vgm * 5, bgm * 5, 2048, skyi, sigi, skyv, sigv, skyb, sigb, "slides_1.jpg")

    # igm=igm+iom[2048-1024:2048+1024,2048-1024:2048+1024]
    # vgm=vgm+vom[2048-1024:2048+1024,2048-1024:2048+1024]
    # bgm=bgm+bom[2048-1024:2048+1024,2048-1024:2048+1024]

    # save_to_jpeg(igm*5,vgm*5,bgm*5,2048,skyi,sigi,skyv,sigv,skyb,sigb,"slides_2.jpg")

    # bgm=bim[2048-1024:2048+1024,2048-1024:2048+1024]+clb*0.52
    # vgm=vim[2048-1024:2048+1024,2048-1024:2048+1024]+clv*2.48
    # igm=iim[2048-1024:2048+1024,2048-1024:2048+1024]+cli*1.00

    # bgm=clb*0.52
    # vgm=clv*2.48
    # igm=cli*1.00

    # then convolve with moffat profile...

    bgm = scipy.signal.fftconvolve(bgm, psfb, mode="same")
    vgm = scipy.signal.fftconvolve(vgm, psfv, mode="same")
    igm = scipy.signal.fftconvolve(igm, psfi, mode="same")

    # save_to_jpeg(igm*5,vgm*5,bgm*5,2048,skyi,sigi,skyv,sigv,skyb,sigb,"slides_30.jpg")

    # arbitrary scaling? why? no idea...
    bgm = bgm * 5.0
    vgm = vgm * 5.0
    igm = igm * 5.0

    # rebin to desired pixel scale
    bgm = congrid.congrid(bgm, [1174, 1174])
    vgm = congrid.congrid(vgm, [1174, 1174])
    igm = congrid.congrid(igm, [1174, 1174])

    save_to_jpeg(igm, vgm, bgm, 1174, skyi, sigi, skyv, sigv, skyb, sigb, "slides_3.jpg")

    # NOW, add noise to each image to get desired noise scaling
    # scaling comes from measuring a real SPT image
    nb = 4.59
    nv = 15.02
    ni = 12.3

    bgm = bgm + np.random.normal(0, 1, (1174L, 1174L)) * nb * 0.95
    vgm = vgm + np.random.normal(0, 1, (1174L, 1174L)) * nv * 0.95
    igm = igm + np.random.normal(0, 1, (1174L, 1174L)) * ni * 0.95

    save_to_jpeg(igm, vgm, bgm, 1174, skyi, sigi, skyv, sigv, skyb, sigb, "slides_4.jpg")

    # adds 'real' star images
    x1 = 200
    y1 = 1000

    bgm[x1 - 110 : x1 + 110, y1 - 110 : y1 + 110] = star1b * war1 + nar1 * bgm[x1 - 110 : x1 + 110, y1 - 110 : y1 + 110]
    vgm[x1 - 110 : x1 + 110, y1 - 110 : y1 + 110] = star1v * war1 + nar1 * vgm[x1 - 110 : x1 + 110, y1 - 110 : y1 + 110]
    igm[x1 - 110 : x1 + 110, y1 - 110 : y1 + 110] = star1i * war1 + nar1 * igm[x1 - 110 : x1 + 110, y1 - 110 : y1 + 110]

    x1 = 250
    y1 = 550

    bgm[x1 - 45 : x1 + 45, y1 - 45 : y1 + 45] = star2b * war2 + nar2 * bgm[x1 - 45 : x1 + 45, y1 - 45 : y1 + 45]
    vgm[x1 - 45 : x1 + 45, y1 - 45 : y1 + 45] = star2v * war2 + nar2 * vgm[x1 - 45 : x1 + 45, y1 - 45 : y1 + 45]
    igm[x1 - 45 : x1 + 45, y1 - 45 : y1 + 45] = star2i * war2 + nar2 * igm[x1 - 45 : x1 + 45, y1 - 45 : y1 + 45]

    x1 = 900
    y1 = 400

    igm[x1 - 43 : x1 + 43, y1 - 43 : y1 + 43] = star3i * war3 + nar3 * igm[x1 - 43 : x1 + 43, y1 - 43 : y1 + 43]
    vgm[x1 - 43 : x1 + 43, y1 - 43 : y1 + 43] = star3v * war3 + nar3 * vgm[x1 - 43 : x1 + 43, y1 - 43 : y1 + 43]
    bgm[x1 - 43 : x1 + 43, y1 - 43 : y1 + 43] = star3b * war3 + nar3 * bgm[x1 - 43 : x1 + 43, y1 - 43 : y1 + 43]

    ##add a little more noise to 'smooth' real stars into background
    # iim=iim+np.random.normal(0.0,1.0,(1174L,1174L))*ni/4.0
    # vim=vim+np.random.normal(0.0,1.0,(1174L,1174L))*nv/4.0
    # bim=bim+np.random.normal(0.0,1.0,(1174L,1174L))*nb/4.0

    save_to_jpeg(igm, vgm, bgm, 1174, skyi, sigi, skyv, sigv, skyb, sigb, "slides_5.jpg")

    igm = igm + np.random.normal(0.0, 1.0, (1174L, 1174L)) * ni / 2.0
    vgm = vgm + np.random.normal(0.0, 1.0, (1174L, 1174L)) * nv / 2.0
    bgm = bgm + np.random.normal(0.0, 1.0, (1174L, 1174L)) * nb / 2.0

    save_to_jpeg(igm, vgm, bgm, 1174, skyi, sigi, skyv, sigv, skyb, sigb, "slides_6.jpg")

    return igm, vgm, bgm
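
The noise terms above are tuned so the rebinned frames match the pixel scatter measured on a real SPT image. A hedged sketch of that pattern as a reusable helper (the 0.95 factor and the per-band sigmas are the example's own values, passed in rather than hard-coded):

import numpy as np

def add_sky_noise(image, sigma, factor=0.95, seed=None):
    """Add zero-mean Gaussian noise with standard deviation sigma*factor."""
    rng = np.random.RandomState(seed)   # seedable for reproducible frames
    return image + rng.normal(0.0, 1.0, image.shape) * sigma * factor
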
Code example #48
def do_jwst_illustris(fieldstr, alph, Q, rf, gf, bf, x=None, y=None, n=None):

    #in nJy
    ui = 'IMAGE_PSF'

    fielda_f435 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_hst-acs_f435w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')[ui].data
    fielda_f606 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_hst-acs_f606w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')[ui].data
    fielda_f775 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_hst-acs_f775w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')[ui].data

    fielda_f090 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_jwst-nircam_f090w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')[ui].data

    fielda_f115 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_jwst-nircam_f115w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')[ui].data
    fielda_f150 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_jwst-nircam_f150w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')[ui].data
    fielda_f200 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_jwst-nircam_f200w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')[ui].data

    fielda_f277 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_jwst-nircam_f277w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')[ui].data
    fielda_f356 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_jwst-nircam_f356w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')[ui].data
    fielda_f444 = fits.open(
        hlsp_dir + 'mag30-' + fieldstr +
        '-11-10/hlsp_misty_illustris_jwst-nircam_f444w_mag30-' + fieldstr +
        '-11-10_v1_lightcone.fits')[ui].data

    ra = rf * (0.50 * fielda_f150 + 0.50 * fielda_f200)
    ga = gf * (0.50 * fielda_f090 + 0.50 * fielda_f115)
    ba = bf * (0.33 * fielda_f435 + 0.33 * fielda_f606 + 0.33 * fielda_f775)

    gnew = congrid.congrid(ga, ra.shape) * 1.0  #preserve surface brightness
    bnew = congrid.congrid(ba, ra.shape) * 1.0

    print(fielda_f435.shape)
    print(ra.shape, ga.shape, gnew.shape)

    if n is not None:
        rgb_field = make_color_image.make_interactive_nasa(
            bnew[x:x + n, y:y + n], gnew[x:x + n, y:y + n],
            ra[x:x + n, y:y + n], alph, Q)
    else:
        rgb_field = make_color_image.make_interactive_nasa(
            bnew, gnew, ra, alph, Q)

    f1 = pyplot.figure(figsize=(10.0, 10.0), dpi=600)
    pyplot.subplots_adjust(left=0.0,
                           right=1.0,
                           bottom=0.0,
                           top=1.0,
                           wspace=0.0,
                           hspace=0.0)

    axi = f1.add_subplot(111)
    axi.imshow(rgb_field,
               interpolation='nearest',
               aspect='auto',
               origin='lower')

    if n is not None:
        saven = 'illustris_jwstzoom_' + fieldstr + '.pdf'
    else:
        saven = 'illustris_jwst_' + fieldstr + '.pdf'

    f1.savefig(saven, dpi=600)
    pyplot.close(f1)
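
The "* 1.0  #preserve surface brightness" factors above mark a deliberate choice: congrid interpolates pixel values, which preserves surface brightness but not total flux when the pixel scale changes. If flux conservation were wanted instead, the regridded image would be rescaled by the pixel-area ratio, as in this sketch (assuming the same congrid module):

import numpy as np
import congrid

def regrid_conserve_flux(image, out_shape):
    """Interpolate to out_shape, then rescale so the summed flux is kept."""
    out = congrid.congrid(image, out_shape)
    area_ratio = image.size / float(out.size)   # old pixels per new pixel
    return out * area_ratio
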
Code example #49
File: myDetector.py Project: comptech/atrex
    def testCalibration(self, myim):
    #def detector_calibration_test_points (self) :
        en = 37.077
        cut = 30.
        dist_tol = 1.8
        IovS = float(self.topLevel.ui.det_snrLE.text())
        start_dist = self.dist - self.dist * 0.5
        end_dist = self.dist + self.dist * 0.5
        im = myim.imArray.astype(np.int64)
        imarr = cgd.congrid(im, [500, 500], method='nearest', minusone=True).astype(np.int64)
        zarr = np.zeros ((500,500),dtype=np.uint8)

        bg = self.local_background (imarr)
        #imarr.tofile ("/home/harold/imarr.dat")
        # only for debug

        hpf = imarr / bg.astype(np.int64)

        self.ff = np.where ((hpf > IovS) & (imarr>20.))

        nn = len(self.ff[0])
        print 'number of pixels meeting the peak condition is %d'%(nn)


        ### equal proximity coarse search
        # in 5 pixel steps
        h = np.zeros((100,100), dtype=np.float32)
        for i in range (100) :
            print i
            for j in range (100) :
                dist = self.compdist (self.ff, [i*5,j*5])
                mx = int(dist.max())+1
                mn = int(dist.min())

                #nbins = int(dist.max() - dist.min()+1)
                histo,edges = np.histogram(dist, range=[mn,mx],bins=(mx-mn))
                h[i,j] = np.max (histo)
        maxsub = np.argmax(h)
        maxrow = maxsub / 100            # flattened index -> (row, col) on the 100x100 grid
        maxcol = maxsub - maxrow * 100
        print maxsub, maxrow, maxcol

        self.eqprox[0]=maxrow/100.
        self.eqprox[1]=maxcol/100.

        # is this even being used
        dist = self.compdist (self.ff, [maxrow, maxcol])
        nbins = int (dist.max()-dist.min()+1)
        h = np.histogram(dist, bins=nbins)

        ### equal proximity search, fine (in 500 space)
        h = np.zeros((11,11),dtype=np.float32)
        for i in range (-5,6) :
            for j in range(-5,6) :
                # note in gse_ada , there is a 5 + in the index calculation
                dist = self.compdist(self.ff, [5.*maxrow+i, 5.*maxcol+j])
                nbins = int(dist.max() - dist.min() +1)
                mx = int(dist.max()+1)
                mn = int(dist.min())
                histo, edges = np.histogram(dist, range=[mn,mx],bins=mx-mn)
                h[i+5][j+5]=np.max(histo)
        maxsub = np.argmax (h)

        # then back in 100 space
        xy = self.xy_from_ind (11,11,maxsub)
        maxrow = maxrow + (xy[0] - 5)/5.
        maxcol = maxcol + (xy[1] - 5)/5.
        xy0 = [maxrow, maxcol]
        self.eqproxfine[0]=maxrow/100.
        self.eqproxfine[1]=maxcol/100.

        self.beamx = xy0[0]/100. * self.nopixx
        self.beamy = xy0[1]/100. * self.nopixy
        xy0[0]*=5.
        xy0[1]*=5.
        dist = self.compdist (self.ff, xy0)
        mn = int(dist.min())
        mx = int(dist.max()+1)
        nbins = mx-mn
        h,edges = np.histogram (dist, range=[mn,mx],bins=nbins)
        h1 = np.copy(h)
        #h = h[0]
        numH = len(h)

        # merge neighbouring histogram bins into the strongest remaining
        # peak until no bin exceeds the cut
        while np.max(h1) > cut:
            i = np.argmax(h1)
            h1[i] = 0.
            if i > 0 and i < numH-1:
                j = i - 1
                while j >= 0:
                    if h1[j] > cut/2.:
                        h[i] += h[j]
                        h[j] = 0.
                        h1[j] = 0.
                    else:
                        j = 0           # gap below half the cut: stop merging downward
                    j = j - 1

                j = i + 1
                while j <= numH-1:
                    if h1[j] > cut/2.:
                        h[i] += h[j]
                        h[j] = 0
                        h1[j] = 0
                    else:
                        j = numH-1      # gap: stop merging upward
                    j = j + 1
        # NOTE - should be cut not cut/2.
        fh = np.where (h > cut)[0]
        numB = len(fh)
        # number of different rings with sufficient number of points
        rings = np.zeros(nn, dtype=np.int64)
        for i in range (nn) :
            c = np.absolute (np.subtract(dist[i],edges[fh]))
            ri = np.min (c)
            kk = np.argmin (c)
            if (ri < dist_tol) :
                rings[i] = kk
            else :
                rings[i] = -1


        nr = np.zeros(numB, dtype=np.int64)
        ds = np.zeros (numB, dtype=np.float32)
        for k in range (numB) :
            r = np.where(rings == k)[0]
            nr[k]= len(r)
        print "Classes Done ...\r\n"
        m = np.max(nr)
        print 'Max of nr is : %d'%(m)

        # x,y coords of points in ring
        self.rgx = np.zeros((numB,m), dtype=np.float32)
        self.rgy = np.zeros((numB,m), dtype=np.float32)
        self.rgN = np.zeros (numB,dtype=np.uint16)

        self.numRings = numB
        for k in range (numB) :
            r = np.where (rings == k)[0]
            ds[k] = np.mean(dist[r])*self.nopixx/500. * self.psizex
            print 'ds of %d is : %f'%(k, ds[k])
            #xya=self.xy_from_indArr(500,500,self.ff[r])
            self.rgy[k,0:nr[k]] = self.ff[0][r]/500.
            self.rgx[k,0:nr[k]] = self.ff[1][r]/500.
            self.rgN[k] = len(r)

        step = (end_dist - start_dist) / 1000.
        ddists = np.zeros((2,1000), dtype = np.float32)
        for i in range (1000) :
            thisstep = start_dist + i * step
            ddists[0][i] = thisstep
            ddists[1][i] = self.sum_closest_refs (ds, thisstep)
        aa=np.argmin (ddists[1][:])
        dst = ddists[0][aa]

        print 'Coarse estimated detector distance : %f'%(dst)


        # fine tune detector distance
        start_dist = dst- step*5.
        end_dist = dst + step * 5.
        step = (end_dist-start_dist) / 1000.
        for i in range (1000):
            ddists[0][i] = start_dist + i * step
            ddists[1][i] = self.sum_closest_refs (ds, ddists[0][i])
        aa=np.argmin (ddists[1][:])
        dst = ddists[0][aa]
        print 'Refined estimated detector distance : %f'%(dst)


        # use only peaks which match standard and are unique
        cr = np.zeros ((2,numB), dtype=np.float32)
        for i in range (numB) :
            cr[0][i]= self.closest_ref (ds[i], dst)
            cr[1][i]= self.closest_ref_d(ds[i], dst)

        X = self.rgx[0][0:nr[0]]*self.nopixx/500.
        Y = self.rgy[0][0:nr[0]]*self.nopixx/500.

        dspcc = np.ones(nr[0]) * cr[1][0]   # d-spacing of the closest reference ring, one entry per peak

        self.calPeaks.emit()