def getWeightedAvg(a, neighbourArray):
	kernSize = 2
	kernel = utils.gauss_kern(kernSize)
	maxNeighbours = np.max(neighbourArray)
	#all 24 (horizontal, vertical) shifts in the 5x5 neighbourhood, centre excluded
	neighbours = ((-2, -2), (-2, -1), (-2, 0), (-2, 1), (-2, 2), (-1, -2), (-1, -1), (-1, 0), (-1, 1), (-1, 2), (0, -2), (0, -1), (0, 1), (0, 2), (1, -2), (1, -1), (1, 0), (1, 1), (1, 2), (2, -2), (2, -1), (2, 0), (2, 1), (2, 2))
	#for pixels further away from the edges:
	a_reduced = a[2:-2, 2:-2].copy()
	pixVal = np.zeros(a_reduced.shape)    #initialising, pixel value for inpainting
	weightSum = np.zeros(a_reduced.shape) #initialising, value for later normalisation
	maxNMask = ma.make_mask_none(a.shape)
	maxNMask[np.where(neighbourArray == maxNeighbours)] = 1
	for hor_shift, vert_shift in neighbours:
		a_shifted = np.roll(a_reduced, shift=hor_shift, axis=1)
		a_shifted = np.roll(a_shifted, shift=vert_shift, axis=0)
		#pixels to inpaint whose shifted-in neighbour is valid
		idx = ~a_shifted.mask & maxNMask[2:-2, 2:-2]
		weightSum[idx] += kernel[2 - vert_shift, 2 - hor_shift]
		pixVal[idx] += a_shifted[idx] * kernel[2 - vert_shift, 2 - hor_shift]
	b = a.copy()
	#normalise wherever at least one valid neighbour contributed
	interior = maxNMask[2:-2, 2:-2] & (weightSum > 0)
	b[2:-2, 2:-2][interior] = np.divide(pixVal[interior], weightSum[interior])
	#the outer two rows/columns cannot use the roll trick above,
	#so fall back to the per-pixel edge routine
	for i in range(0, 2):
		for j in range(0, a.shape[1]):
			if neighbourArray[i, j] == maxNeighbours:
				b[i, j] = getWeightedAvgEdges(a, i, j)
	for i in range(a.shape[0] - 2, a.shape[0]):
		for j in range(0, a.shape[1]):
			if neighbourArray[i, j] == maxNeighbours:
				b[i, j] = getWeightedAvgEdges(a, i, j)
	for i in range(0, a.shape[0]):
		for j in range(0, 2):
			if neighbourArray[i, j] == maxNeighbours:
				b[i, j] = getWeightedAvgEdges(a, i, j)
	for i in range(0, a.shape[0]):
		for j in range(a.shape[1] - 2, a.shape[1]):
			if neighbourArray[i, j] == maxNeighbours:
				b[i, j] = getWeightedAvgEdges(a, i, j)
	return b
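#Usage sketch (not part of the original source). Assumes utils.gauss_kern
#and getWeightedAvgEdges are in scope. How neighbourArray is built is an
#assumption here: valid-neighbour counts in each 5x5 window, recorded only
#at the missing pixels so np.max() picks the best-constrained ones first.
import numpy as np
import numpy.ma as ma
from scipy.ndimage import uniform_filter

img = np.random.rand(64, 64)
img[10:14, 20:25] = np.nan                    #hole to inpaint
masked = ma.masked_invalid(img)
valid = np.isfinite(img).astype(float)
counts = np.rint(uniform_filter(valid, size=5) * 25).astype(int)
neighbourArray = np.where(np.isnan(img), counts, 0)
inpainted = getWeightedAvg(masked, neighbourArray)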
def fill(inputArray, neighbourArray, tree):
	maxNeighbours = np.max(neighbourArray)
	kernSize = 2
	kernel = utils.gauss_kern(kernSize)

	for ind, x in np.ndenumerate(inputArray):
		if neighbourArray[ind] == maxNeighbours:
			pts = np.array([ind])
			distances, indices = tree.query(pts, k=25, p=2)
			#keep only neighbours inside the 5x5 window (distance <= sqrt(8));
			#the [1:] drops the first match, which is the query point itself
			indices = indices[np.where(distances <= np.sqrt(8))][1:]
			distances = distances[np.where(distances <= np.sqrt(8))][1:]
			pixelVal = 0 #value of the pixel being interpolated over
			nPixUsed = 0 #effective number of pixels used
			for j in range(len(indices)):
				#map each grid distance to its Gaussian kernel weight;
				#np.isclose() instead of == for the irrational distances
				if distances[j] == 0:
					weight = kernel[2, 2]
				elif distances[j] == 1:
					weight = kernel[2, 1]
				elif np.isclose(distances[j], np.sqrt(2)):
					weight = kernel[1, 1]
				elif distances[j] == 2:
					weight = kernel[2, 0]
				elif np.isclose(distances[j], np.sqrt(5)):
					weight = kernel[1, 0]
				elif np.isclose(distances[j], np.sqrt(8)):
					weight = kernel[0, 0]
				else:
					raise ValueError('unexpected neighbour distance: %s' % distances[j])
				iy, ix = tree.data[indices[j]].astype(int)
				if np.isfinite(inputArray[iy, ix]):
					pixelVal += inputArray[iy, ix] * weight
					nPixUsed += weight
			if nPixUsed > 0: #guard against division by zero
				inputArray[ind] = pixelVal / nPixUsed
	return inputArray
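#Usage sketch (not part of the original source). The `tree` argument is
#assumed to be a scipy cKDTree built on the integer pixel coordinates, so
#that tree.query() returns pixel-grid distances as matched above, and
#neighbourArray is built the same way as in the previous example.
import numpy as np
from scipy.ndimage import uniform_filter
from scipy.spatial import cKDTree

img = np.random.rand(64, 64)
img[30:33, 40:44] = np.nan                      #missing region to fill
coords = np.array(list(np.ndindex(img.shape)))  #every (row, col) pair
tree = cKDTree(coords)
valid = np.isfinite(img).astype(float)
counts = np.rint(uniform_filter(valid, size=5) * 25).astype(int)
neighbourArray = np.where(np.isnan(img), counts, 0)
filled = fill(img, neighbourArray, tree)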
Example No. 3
    def __init__(self,
                 file_map,
                 file_noise,
                 psf,
                 color_correction=1.0,
                 beam_area=1.0,
                 wavelength=None,
                 fwhm=None):
        ''' This Class creates Objects for a set of
        maps/noisemaps/beams/TransferFunctions/etc.,
        at each Wavelength.
        This is a work in progress!
        Issues:  If the beam has a different pixel size from the map,
        it is not yet able to re-scale it.
        Just haven't found a convincing way to make it work.
        Future Work:
        Will shift some of the work into functions (e.g., read psf,
        color_correction) and increase flexibility.
        '''
        #READ MAPS
        if file_map == file_noise:
            #SPIRE Maps have Noise maps in the second extension.
            cmap, hd = fits.getdata(file_map, 1, header=True)
            cnoise, nhd = fits.getdata(file_map, 2, header=True)
        else:
            #This assumes that if Signal and Noise are different maps, they are contained in first extension
            cmap, hd = fits.getdata(file_map, 0, header=True)
            cnoise, nhd = fits.getdata(file_noise, 0, header=True)

        #GET MAP PIXEL SIZE
        if 'CD2_2' in hd:
            pix = hd['CD2_2'] * 3600.
        else:
            pix = hd['CDELT2'] * 3600.

        #READ BEAMS
        #Check first if beam is a filename (actual beam) or a number (approximate with Gaussian)
        if isinstance(psf, six.string_types):
            beam, phd = fits.getdata(psf, 0, header=True)
            #GET PSF PIXEL SIZE
            if 'CD2_2' in phd:
                pix_beam = phd['CD2_2'] * 3600.
            elif 'CDELT2' in phd:
                pix_beam = phd['CDELT2'] * 3600.
            else:
                pix_beam = pix
            #SCALE PSF IF NECESSARY
            if np.round(10. * pix_beam) != np.round(10. * pix):
                #Re-scaling (the rebin() path below) is not yet supported;
                #see the docstring. Mismatched pixel sizes are an error.
                raise ValueError("Beam and Map have different size pixels")
                #scale_beam = pix_beam / pix
                #pms = np.shape(beam)
                #new_shape = (np.round(pms[0] * scale_beam),
                #             np.round(pms[1] * scale_beam))
                #kern = rebin(clean_nans(beam),
                #             new_shape=new_shape,
                #             operation='ave')
            else:
                kern = clean_nans(beam)
            self.psf_pixel_size = pix_beam
        else:
            sig = psf / 2.355 / pix
            kern = gauss_kern(psf, np.floor(psf * 8.) / pix, pix)

        self.map = clean_nans(cmap) * color_correction
        self.noise = clean_nans(cnoise,
                                replacement_char=1e10) * color_correction
        if beam_area != 1.0:
            self.beam_area_correction(beam_area)
        self.header = hd
        self.pixel_size = pix
        self.psf = clean_nans(kern)
        self.rms = map_rms(self.map, silent=False)

        if wavelength is not None:
            self.add_wavelength(wavelength)

        if fwhm is not None:
            self.add_fwhm(fwhm)
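#Usage sketch (not part of the original source). Only __init__ is shown
#above, so `Skymap` is a hypothetical class name and the file names are
#placeholders; a numeric psf is treated as a Gaussian FWHM in arcsec.
sp250 = Skymap('spire_250um.fits',   #signal map
               'spire_250um.fits',   #same file: noise read from extension 2
               18.1,                 #beam FWHM in arcsec
               wavelength=250.,
               fwhm=18.1)
print(sp250.pixel_size, sp250.rms)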
Example No. 4
def stack_in_redshift_slices(
  cmap,
  hd,
  layers_radec,
  fwhm=None,
  psf_names=None,
  cnoise=None,
  mask=None,
  beam_area=None,
  err_ss=None,
  quiet=None):
  ''' The first iteration of the translation from IDL to Python.
      It still reads like an IDL function.  Wrappers such as
      viero_quick_stack.py can be used, but the more Pythonic
      stack_libraries_in_layers function found below is highly
      recommended instead.
  '''

  w = WCS(hd)
  #FIND SIZES OF MAP AND LISTS
  cms = np.shape(cmap)
  zeromask = np.zeros(cms)

  size_cube = np.shape(layers_radec)
  nsrcmax = size_cube[0]
  nlists = int(size_cube[1])

  ind_map_zero = np.where(np.isnan(cmap))
  nzero = np.shape(ind_map_zero)[1]

  if cnoise is None or np.sum(cnoise) == 0: cnoise = cmap * 0.0 + 1.0

  pix = hd["CD2_2"] * 3600. if "CD2_2" in hd else 0.
  if pix == 0: pix = hd["CDELT2"] * 3600.

  #[STEP 0] - Calibrate maps
  if beam_area is not None:
    cmap = cmap * beam_area * 1e6
    cnoise = cnoise * beam_area * 1e6

  # STEP 1  - Make Layers Cube
  layers=np.zeros([nlists,cms[0],cms[1]])

  for s in range(nlists):
    ind_src = np.where(layers_radec[:,s,0] != 0)
    if np.shape(ind_src)[1] > 0:
      ra = layers_radec[ind_src,s,0]
      dec = layers_radec[ind_src,s,1]
      # CONVERT FROM RA/DEC to X/Y
      # DANGER!!  NOTICE THAT I FLIP X AND Y HERE!!
      ty,tx = w.wcs_world2pix(ra, dec, 0)
      # CHECK FOR SOURCES THAT FALL OUTSIDE MAP
      ind_keep = np.where((tx[0] >= 0) & (np.round(tx[0]) < cms[0]) & (ty[0] >= 0) & (np.round(ty[0]) < cms[1]))
      nt0 = np.shape(ind_keep)[1]
      real_x=np.round(tx[0,ind_keep][0]).astype(int)
      real_y=np.round(ty[0,ind_keep][0]).astype(int)
      # CHECK FOR SOURCES THAT FALL ON ZEROS MAP
      if nzero > 0:
        tally = np.zeros(nt0)
        for d in range(nt0):
          if cmap[real_x[d],real_y[d]] != 0:
            tally[d]=1.
        ind_nz=np.where(tally == 1)
        nt = np.shape(ind_nz)[1]
        real_x = real_x[ind_nz]
        real_y = real_y[ind_nz]
      else: nt = nt0
      for ni in range(nt):
        layers[s, real_x[ni],real_y[ni]]+=1.0

  # STEP 2  - Convolve Layers and put in pixels
  radius = 1.1
  sig = fwhm / 2.355 / pix
  flattened_pixmap = np.sum(layers,axis=0)
  total_circles_mask = circle_mask(flattened_pixmap, radius * fwhm, pix)
  #zeromask is initialised to zeros and never updated, so additionally
  #requiring zeromask != 0 here would select no pixels at all
  ind_fit = np.where(total_circles_mask >= 1)
  nhits = np.shape(ind_fit)[1]
  cfits_maps = np.zeros([nlists,nhits])

  kern = gauss_kern(fwhm, np.floor(fwhm * 10) / pix, pix)
  for u in range(nlists):
    layer = layers[u,:,:]
    tmap = smooth_psf(layer, kern)
    #tmap[ind_fit] -= np.mean(tmap[ind_fit])
    cfits_maps[u,:] = tmap[ind_fit]

  # STEP 3 - Regress Layers with Map (i.e., stack!)

  cmap[ind_fit] -= np.mean(cmap[ind_fit], dtype=np.float32)

  fit_params = Parameters()

  for iarg in range(nlists):
    fit_params.add('layer'+str(iarg),value= 1e-3*np.random.randn())
  imap = cmap[ind_fit]
  ierr = cnoise[ind_fit]

  cov_ss_1d = minimize(simultaneous_stack_array_oned, fit_params,
    args=(np.ndarray.flatten(cfits_maps),), kws={'data1d':np.ndarray.flatten(imap),'err1d':np.ndarray.flatten(ierr)})

  return cov_ss_1d
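#Usage sketch (not part of the original source). Assumes a single-band FITS
#map plus toy catalogue coordinates; gauss_kern, smooth_psf, circle_mask and
#simultaneous_stack_array_oned come from the surrounding module, and the
#file name is a placeholder.
import numpy as np
from astropy.io import fits

cmap, hd = fits.getdata('map_250um.fits', 0, header=True)
ra = np.array([150.10, 150.22, 150.31])   #toy catalogue, degrees
dec = np.array([2.21, 2.28, 2.35])
#layers_radec has shape (nsrc, nlists, 2): RA in [..., 0], Dec in [..., 1]
layers_radec = np.zeros((len(ra), 1, 2))
layers_radec[:, 0, 0] = ra
layers_radec[:, 0, 1] = dec
result = stack_in_redshift_slices(cmap, hd, layers_radec, fwhm=18.1)
print(result.params)                      #one lmfit flux parameter per layer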
def replace_nans(array, max_iter, tol,kernel_size=1,method='localmean'):
    """Replace NaN elements in an array using an iterative image inpainting algorithm.
The algorithm is the following:
1) For each element in the input array, replace it by a weighted average
of the neighbouring elements which are not NaN themselves. The weights depends
of the method type. If ``method=localmean`` weight are equal to 1/( (2*kernel_size+1)**2 -1 )
2) Several iterations are needed if there are adjacent NaN elements.
If this is the case, information is "spread" from the edges of the missing
regions iteratively, until the variation is below a certain threshold.
Parameters
----------
array : 2d np.ndarray
an array containing NaN elements that have to be replaced
max_iter : int
the number of iterations
kernel_size : int
the size of the kernel, default is 1
method : str
the method used to replace invalid values. Valid options are
`localmean`.
Returns
-------
filled : 2d np.ndarray
a copy of the input array, where NaN elements have been replaced.
"""
    
    kernel = np.empty((2 * kernel_size + 1, 2 * kernel_size + 1), dtype=DTYPEf)

    # indices where array is NaN
    inans, jnans = np.nonzero( np.isnan(array) )
    
    # number of NaN elements
    n_nans = len(inans)
    
    # arrays which contain replaced values to check for convergence
    replaced_new = np.zeros( n_nans, dtype=DTYPEf)
    replaced_old = np.zeros( n_nans, dtype=DTYPEf)
    
    # depending on kernel type, fill kernel array
    if method == 'localmean':
        print('kernel_size', kernel_size)
        for i in range(2 * kernel_size + 1):
            for j in range(2 * kernel_size + 1):
                kernel[i, j] = 1
        print(kernel, 'kernel')
    elif method == 'idw':
        kernel = utils.gauss_kern(2)
        print(kernel, 'IDW kernel')
    else:
        raise ValueError('method not valid. Should be one of `localmean` or `idw`.')
    
    # work on a copy, so the input array is left untouched
    # as the docstring promises
    filled = array.copy()

    # make several passes until we reach convergence
    for it in range(max_iter):
        print('iteration', it)
        # for each NaN element
        for k in range(n_nans):
            i = inans[k]
            j = jnans[k]
            
            # initialize to zero
            filled[i,j] = 0.0
            n = 0
            
            # loop over the kernel
            for I in range(2*kernel_size+1):
                for J in range(2*kernel_size+1):
                   
                    # if we are not out of the boundaries
                    if i+I-kernel_size < array.shape[0] and i+I-kernel_size >= 0:
                        if j+J-kernel_size < array.shape[1] and j+J-kernel_size >= 0:
                                                
                            # if the neighbour element is not NaN itself.
                            if filled[i+I-kernel_size, j+J-kernel_size] == filled[i+I-kernel_size, j+J-kernel_size] :
                                
                                # do not sum the centre pixel itself
                                # (note `or`: with `and`, the whole centre
                                # row and column would be skipped too)
                                if I - kernel_size != 0 or J - kernel_size != 0:
                                    
                                    # convolve kernel with original array
                                    filled[i,j] = filled[i,j] + filled[i+I-kernel_size, j+J-kernel_size]*kernel[I, J]
                                    n = n + 1*kernel[I,J]
                                    #print n

            # divide value by effective number of added elements
            if n != 0:
                filled[i,j] = filled[i,j] / n
                replaced_new[k] = filled[i,j]
            else:
                filled[i,j] = np.nan
                
        # check if mean square difference between values of replaced
        # elements is below a certain tolerance
        print('tolerance', np.mean((replaced_new - replaced_old) ** 2))
        if np.mean( (replaced_new-replaced_old)**2 ) < tol:
            break
        else:
            replaced_old[:] = replaced_new
    
    return filled
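#Usage sketch (not part of the original source). DTYPEf is assumed to be
#defined at module level (np.float64 in the original Cython version of
#this routine).
import numpy as np

field = np.random.rand(50, 50)
field[20:24, 10:13] = np.nan                 #corrupt a patch
repaired = replace_nans(field, max_iter=10, tol=1e-6,
                        kernel_size=1, method='localmean')
assert np.isfinite(repaired).all()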
Example No. 6
def stack_in_redshift_slices(
  cmaps, 
  hd, 
  layers_radec, 
  wavelengths,
  fwhm=None, 
  psf_names=None,
  cnoise=None, 
  mask=None, 
  beam_area=None, 
  err_ss=None, 
  zed=0.01,
  quiet=None):
  
  w = WCS(hd)
  #FIND SIZES OF MAP AND LISTS
  cms = np.shape(cmaps) # should be a cube
  nwv = cms[0] 
  #zeromask = np.zeros(cms)

  size_cube = np.shape(layers_radec)
  nsrcmax = size_cube[0]
  nlists = int(size_cube[1])
  
  ind_map_zero = np.where(np.isnan(cmaps))
  nzero = np.shape(ind_map_zero)[1]

  if cnoise is None or np.sum(cnoise) == 0: cnoise = cmaps * 0.0 + 1.0

  pix = hd["CD2_2"] * 3600. if "CD2_2" in hd else 0.
  if pix == 0: pix = hd["CDELT2"] * 3600.

  for iwv in range(nwv):
    #[STEP 0] - Calibrate maps
    if beam_area is not None:
      cmaps[iwv,:,:] = cmaps[iwv,:,:] * beam_area[iwv] * 1e6
      cnoise[iwv,:,:] = cnoise[iwv,:,:] * beam_area[iwv] * 1e6

  # STEP 1  - Make Layers Cube
  layers=np.zeros([nlists,cms[1],cms[2]])

  for s in range(nlists):
    ind_src = np.where(layers_radec[:,s,0] != 0)
    if np.shape(ind_src)[1] > 0:
      ra = layers_radec[ind_src,s,0]
      dec = layers_radec[ind_src,s,1]
      ty,tx = w.wcs_world2pix(ra, dec, 0) 
      # CHECK FOR SOURCES THAT FALL OUTSIDE MAP
      ind_keep = np.where((tx[0] >= 0) & (np.round(tx[0]) < cms[1]) & (ty[0] >= 0) & (np.round(ty[0]) < cms[2]))
      nt0 = np.shape(ind_keep)[1]
      real_x=np.round(tx[0,ind_keep][0]).astype(int)
      real_y=np.round(ty[0,ind_keep][0]).astype(int)
      # CHECK FOR SOURCES THAT FALL ON ZEROS MAP
      # THIS NEEDS COMPLETE OVERHAUL, PARTICULARLY WHEN INCLUDING DIFFERENT AREA MAPS!!
      if nzero > 0:
        tally = np.zeros(nt0)
        for d in range(nt0):
          if cmaps[0,real_x[d],real_y[d]] != 0: 
            tally[d]=1.
        ind_nz=np.where(tally == 1)
        nt = np.shape(ind_nz)[1]
        real_x = real_x[ind_nz]
        real_y = real_y[ind_nz]
      else: nt = nt0
      for ni in range(nt):
        layers[s, real_x[ni],real_y[ni]]+=1.0

  # STEP 2  - Convolve Layers and put in pixels
  #all_map_layers = np.zeros(np.append(nwv,np.shape(layers)))

  cfits_flat = np.asarray([])
  cfits_flat2= np.asarray([])
  flat_maps= np.asarray([])
  flat_noise= np.asarray([])
  LenLayers= np.zeros([nwv])

  radius = 1.1
  for iwv in range(nwv):
    sig = fwhm[iwv] / 2.355 / pix 
    flattened_pixmap = np.sum(layers,axis=0)
    total_circles_mask = circle_mask(flattened_pixmap, radius * fwhm[iwv], pix)
    ind_fit = np.where(total_circles_mask >= 1) # & zeromask != 0)
    nhits = np.shape(ind_fit)[1]
    LenLayers[iwv] = nhits

    kern = gauss_kern(fwhm[iwv], np.floor(fwhm[iwv] * 10) / pix, pix) #kernel width in pixels, as in the single-band version
    for u in range(nlists):
      layer = layers[u,:,:]  
      tmap = smooth_psf(layer, kern)
      tmap[ind_fit] -= np.mean(tmap[ind_fit])
      cfits_flat = np.append(cfits_flat,np.ndarray.flatten(tmap[ind_fit]))

    lmap = cmaps[iwv]
    lnoise = cnoise[iwv]
    lmap[ind_fit] -= np.mean(lmap[ind_fit], dtype=np.float32)
    flat_maps = np.append(flat_maps,np.ndarray.flatten(lmap[ind_fit]))
    flat_noise = np.append(flat_noise,np.ndarray.flatten(lnoise[ind_fit]))

  # STEP 3 - Regress Layers with Map (i.e., stack!)

  fit_params = Parameters()

  fit_params.add('b',value= 2.0,vary=False)
  for iarg in range(nlists): 
    fit_params.add('T'+str(iarg),value= 25.,vary=True,min=8.,max=80.)
    fit_params.add('L'+str(iarg),value= 1e12,min=0.,max=1e14)

  cov_ss_1d = minimize(simultaneous_stack_sed_oned, fit_params, 
    args=(cfits_flat,), kws={'data1d':flat_maps,'err1d':flat_noise,'wavelengths':wavelengths,'LenLayers':LenLayers,'zed':zed})
    
  return cov_ss_1d
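#Usage sketch (not part of the original source). cmaps and cnoise are cubes
#of shape (nwv, ny, nx) sharing one WCS header hd, e.g. per-band maps
#stacked along axis 0; layers_radec is built as in the single-band example.
#The SED parameters (T*, L*) are fit with lmfit via
#simultaneous_stack_sed_oned from the surrounding module.
wavelengths = [250., 350., 500.]   #microns, hypothetical bands
fwhm = [18.1, 25.2, 36.6]          #arcsec per band
result = stack_in_redshift_slices(cmaps, hd, layers_radec, wavelengths,
                                  fwhm=fwhm, cnoise=cnoise, zed=1.5)
print(result.params)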
def getWeightedAvg(inputArray, y, x):
	'''Gaussian-weighted average of the finite pixels in the 5x5
	neighbourhood of (y, x); used for pixels near the array edges.
	Restructured from 24 near-identical try/except blocks into a loop;
	the explicit bounds check also fixes a subtle bug in the original,
	where negative indices silently wrapped around to the opposite
	edge of the array instead of raising IndexError.'''
	kernSize = 2
	kernel = utils.gauss_kern(kernSize)
	pixVal = 0.0    #initialising, pixel value for inpainting
	weightSum = 0.0 #initialising, value for later normalisation
	for dy in range(-kernSize, kernSize + 1):
		for dx in range(-kernSize, kernSize + 1):
			if dy == 0 and dx == 0:
				continue #skip the pixel being inpainted
			yy, xx = y + dy, x + dx
			if yy < 0 or xx < 0 or yy >= inputArray.shape[0] or xx >= inputArray.shape[1]:
				continue #neighbour falls outside the array
			if np.isfinite(inputArray[yy, xx]):
				#the kernel is symmetric, so centred indexing gives the
				#same weights as the hand-written lookups it replaces
				weight = kernel[kernSize + dy, kernSize + dx]
				pixVal += inputArray[yy, xx] * weight
				weightSum += weight
	return pixVal / weightSum if weightSum > 0 else np.nan
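#Usage sketch (not part of the original source): repair a single NaN on the
#top edge, where part of the 5x5 window falls outside the array.
import numpy as np

img = np.random.rand(32, 32)
img[0, 5] = np.nan
img[0, 5] = getWeightedAvg(img, 0, 5)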
Example No. 8
	def __init__(self,file_map,file_noise,psf,color_correction=1.0,beam_area=1.0,wavelength=None,fwhm=None):
		''' This Class creates Objects for a set of
		maps/noisemaps/beams/TransferFunctions/etc.,
		at each Wavelength.
		This is a work in progress!
		Issues:  If the beam has a different pixel size from the map,
		it is not yet able to re-scale it.
		Just haven't found a convincing way to make it work.
		Future Work:
		Will shift some of the work into functions (e.g., read psf,
		color_correction) and increase flexibility.
		'''
		#READ MAPS
		if file_map == file_noise:
			#SPIRE Maps have Noise maps in the second extension.
			cmap, hd = fits.getdata(file_map, 1, header = True)
			cnoise, nhd = fits.getdata(file_map, 2, header = True)
		else:
			#This assumes that if Signal and Noise are different maps, they are contained in first extension
			cmap, hd = fits.getdata(file_map, 0, header = True)
			cnoise, nhd = fits.getdata(file_noise, 0, header = True)

		#GET MAP PIXEL SIZE
		if 'CD2_2' in hd:
			pix = hd['CD2_2'] * 3600.
		else:
			pix = hd['CDELT2'] * 3600.

		#READ BEAMS
		#Check first if beam is a filename (actual beam) or a number (approximate with Gaussian)
		if isinstance(psf, six.string_types):
			beam, phd = fits.getdata(psf, 0, header = True)
			#GET PSF PIXEL SIZE
			if 'CD2_2' in phd:
				pix_beam = phd['CD2_2'] * 3600.
			elif 'CDELT2' in phd:
				pix_beam = phd['CDELT2'] * 3600.
			else: pix_beam = pix
			#SCALE PSF IF NECESSARY
			if np.round(10.*pix_beam) != np.round(10.*pix):
				#Re-scaling (the rebin() path below) is not yet supported;
				#see the docstring. Mismatched pixel sizes are an error.
				raise ValueError("Beam and Map have different size pixels")
				#scale_beam = pix_beam / pix
				#pms = np.shape(beam)
				#new_shape=(np.round(pms[0]*scale_beam),np.round(pms[1]*scale_beam))
				#kern = rebin(clean_nans(beam),new_shape=new_shape,operation='ave')
			else:
				kern = clean_nans(beam)
			self.psf_pixel_size = pix_beam
		else:
			sig = psf / 2.355 / pix
			kern = gauss_kern(psf, np.floor(psf * 8.)/pix, pix)

		self.map = clean_nans(cmap) * color_correction
		self.noise = clean_nans(cnoise,replacement_char=1e10) * color_correction
		if beam_area != 1.0:
			self.beam_area_correction(beam_area)
		self.header = hd
		self.pixel_size = pix
		self.psf = clean_nans(kern)
		self.rms = map_rms(self.map.copy(), silent=True)

		if wavelength is not None:
			self.add_wavelength(wavelength)

		if fwhm is not None:
			self.add_fwhm(fwhm)