def test_1d_mean(self):
    """Test 1D array with func=np.mean."""
    data = np.arange(4)
    block_size = 2.
    expected = block_reduce(data, block_size, func=np.sum) / block_size
    result_mean = block_reduce(data, block_size, func=np.mean)
    assert np.all(result_mean == expected)
def test_2d_mean(self):
    """Test 2D array with func=np.mean."""
    data = np.arange(4).reshape(2, 2)
    block_size = 2.
    expected = (block_reduce(data, block_size, func=np.sum) / block_size**2)
    result = block_reduce(data, block_size, func=np.mean)
    assert np.all(result == expected)
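# --- Added example (not from the original test suite): a standalone, hedged
# restatement of the identity the two tests above rely on. For an n-D block of
# size k per axis, the block mean equals the block sum divided by k**n.
import numpy as np
from astropy.nddata import block_reduce

data = np.arange(4).reshape(2, 2)
assert np.all(block_reduce(data, 2, func=np.mean) ==
              block_reduce(data, 2, func=np.sum) / 2**2)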
def test_2d_trim(self):
    """
    Test trimming of 2D array when size is not perfectly divisible
    by block_size.
    """
    data1 = np.arange(15).reshape(5, 3)
    result1 = block_reduce(data1, 2)
    data2 = data1[0:4, 0:2]
    result2 = block_reduce(data2, 2)
    assert np.all(result1 == result2)
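# --- Added example (standalone sketch, not part of the test suite) showing
# the trimming behavior test_2d_trim exercises: block_reduce silently discards
# trailing rows/columns that do not fill a complete block.
import numpy as np
from astropy.nddata import block_reduce

data = np.arange(15).reshape(5, 3)   # neither axis divisible by 2
reduced = block_reduce(data, 2)      # trailing row/column are trimmed
assert reduced.shape == (2, 1)       # only the leading 4x2 region contributes
assert np.all(reduced == block_reduce(data[:4, :2], 2))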
def downsample(self, factor, preserve_counts=True, axis_name=None, weights=None):
    if factor == 1 or factor is None:
        return self

    geom = self.geom.downsample(factor, axis_name=axis_name)

    if axis_name is None:
        block_size = (1,) * len(self.geom.axes) + (factor, factor)
    else:
        block_size = [1] * self.data.ndim
        idx = self.geom.axes.index_data(axis_name)
        block_size[idx] = factor

    func = np.nansum if preserve_counts else np.nanmean

    if weights is None:
        weights = 1
    else:
        weights = weights.data

    data = block_reduce(self.data * weights, tuple(block_size), func=func)
    return self._init_copy(geom=geom, data=data.astype(self.data.dtype))
def downsample(self, factor, preserve_counts=True, axis_name="energy", weights=None): """Downsample the non-spatial dimension by a given factor. Parameters ---------- factor : int Downsampling factor. preserve_counts : bool Preserve the integral over each bin. This should be true if the map is an integral quantity (e.g. counts) and false if the map is a differential quantity (e.g. intensity). axis_name : str Which axis to downsample. Default is "energy". weights : `RegionNDMap` Contains the weights to apply to the axis to reduce. Default is just weighs of one. Returns ------- map : `RegionNDMap` Downsampled region map. """ if axis_name is None: return self.copy() geom = self.geom.downsample(factor=factor, axis_name=axis_name) block_size = [1] * self.data.ndim idx = self.geom.axes.index_data(axis_name) block_size[idx] = factor if weights is None: weights = 1 else: weights = weights.data func = np.nansum if preserve_counts else np.nanmean if self.is_mask: func = np.all data = block_reduce(self.data * weights, tuple(block_size), func=func) return self._init_copy(geom=geom, data=data)
def cutout(ras, decs, scale=7):
    names = os.listdir('/project/r/rbond/jorlo/datasets/DESTileImages/')
    to_return = []
    for i in range(len(ras)):
        print(i, end='\r')
        temp = np.zeros((399, 399, 6))
        ra, dec = ras[i], decs[i]
        tileName = tiler.getTileName(ra, dec)
        if tileName is None:
            continue
        for name in names:
            if tileName in name:
                fileName = name[:21]
                break
        bands = ['g', 'r', 'i', 'z', 'Y']
        for j, band in enumerate(bands):
            hi_data = fits.open(
                '/project/r/rbond/jorlo/datasets/DESTileImages/{}_{}.fits.fz'.format(fileName, band))
            header = hi_data[1].header
            w = wcs.WCS(header)
            hdata = hi_data[1].data
            c = SkyCoord(ra=ra * u.deg, dec=dec * u.deg)
            px, py = wcs.utils.skycoord_to_pixel(c, w)
            size = u.Quantity([scale, scale], u.arcmin)
            cutout = Cutout2D(hdata, (px, py), size, wcs=w).data
            if cutout.shape != (1597, 1597):
                continue
            temp[..., j] = block_reduce(cutout, 4, func=np.mean)
        y_cut = y_cutout(ra, dec, 399)
        if y_cut is None:
            continue
        temp[..., 5] = y_cut
        temp = normalize_map(temp)
        # superseded by the list/stack approach below:
        # if np.all(to_return) == 1:
        #     to_return = temp
        # elif len(to_return.shape) == 3:
        #     print('here')
        #     to_return = np.stack((to_return, temp), axis=-1)
        # else:
        #     to_return = np.append(to_return, temp, axis=-1)
        to_return.append(temp)
    to_return = np.stack(to_return, axis=0)
    return to_return
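# --- Added shape check (illustrative, with a zero-filled stand-in cutout) for
# the binning in cutout(): a 1597x1597 cutout block-reduced by 4 is trimmed to
# 1596x1596 and comes out 399x399, matching the preallocated (399, 399, 6) stack.
import numpy as np
from astropy.nddata import block_reduce

assert block_reduce(np.zeros((1597, 1597)), 4, func=np.mean).shape == (399, 399)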
def get_masked_data_for_sep(filename, bin_factor=32, thresh=0.7):
    hdulist = fits.open(filename)
    data = hdulist['SCI'].data
    weight = hdulist['WHT'].data
    header = hdulist['SCI'].header

    # smooth the weight map by binning it down and replicating it back up
    binned = nddata.block_reduce(weight, bin_factor)
    debinned = nddata.block_replicate(binned, bin_factor)

    # block_reduce trims to a multiple of bin_factor, so pad the replicated
    # map back to the original shape, centered
    i0 = (weight.shape[0] - debinned.shape[0]) // 2
    i1 = (debinned.shape[0] - weight.shape[0]) // 2
    j0 = (weight.shape[1] - debinned.shape[1]) // 2
    j1 = (debinned.shape[1] - weight.shape[1]) // 2
    padded = np.pad(debinned, ((i0, -i1), (j0, -j1)), 'constant')

    mask = padded < thresh * np.max(weight)
    return data, mask, header
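# --- Added sketch (made-up array sizes) of the reduce/replicate round trip in
# get_masked_data_for_sep: the replicated map is a multiple of bin_factor per
# axis, generally smaller than the original, hence the centering pad above.
import numpy as np
from astropy.nddata import block_reduce, block_replicate

weight = np.ones((70, 70))
binned = block_reduce(weight, 32)        # trims to 64x64, then -> (2, 2)
debinned = block_replicate(binned, 32)   # -> (64, 64)
i0 = (weight.shape[0] - debinned.shape[0]) // 2
i1 = (debinned.shape[0] - weight.shape[0]) // 2   # negative, so -i1 pads the far side
padded = np.pad(debinned, ((i0, -i1), (i0, -i1)), 'constant')
assert padded.shape == weight.shape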
def image_thumbnails(dataMap, photCat, band='g', objid=None,
                     nbin=None, old=False, trim=None):
    from astropy.visualization import ZScaleInterval
    from matplotlib.backends.backend_pdf import PdfPages
    # load object database
    objs = photCat.bokPhot
    try:
        objs['frameIndex'] = objs['frameId']
    except:
        pass
    if objid is not None:
        obj_ii = np.where(objs['objId'] == objid)[0]
        objs = objs[obj_ii]
    tmpObsDb = dataMap.obsDb.copy()
    tmpObsDb['mjd_mid'] = tmpObsDb['mjd'] + (tmpObsDb['expTime']/2)/(3600*24.)
    objs = bokrmphot.join_by_frameid(objs, tmpObsDb)
    objs = objs.group_by(['objId', 'filter'])
    # configure figures
    nrows, ncols = 8, 6
    figsize = (7.0, 10.25)
    subplots = (0.11, 0.07, 0.89, 0.93, 0.00, 0.03)
    size = 65
    zscl = ZScaleInterval()
    nplot = nrows*ncols
    if old:
        outdir = 'bokcutouts_old/'
    else:
        outdir = 'bokcutouts/'
    ccdcolors = ['darkblue', 'darkgreen', 'darkred', 'darkmagenta']
    if True and photCat.name == 'rmqso':
        diffphot = Table.read('bok%s_photflags.fits' % band)
        errlog = open('bokflags_%s_err.log' % band, 'a')
        bitstr = ['TinyFlux', 'BigFlux', 'TinyErr', 'BigErr', 'BigOff']
        frameid = np.zeros(diffphot['MJD'].shape, dtype=np.int32)
        for i in range(len(diffphot)):
            jj = np.where(diffphot['MJD'][i] > 0)[0]
            for j in jj:
                dt = diffphot['MJD'][i, j] - tmpObsDb['mjd_mid']
                _j = np.abs(dt).argmin()
                if np.abs(dt[_j]) > 5e-4:
                    raise ValueError("no match for ", i, diffphot['MJD'][i, j])
                else:
                    frameid[i, j] = tmpObsDb['frameIndex'][_j]
        diffphot['frameIndex'] = frameid
        matched = diffphot['MJD'] == 0  # ignoring these
    plt.ioff()
    for k, obj in zip(objs.groups.keys, objs.groups):
        objId = k['objId']
        _band = k['filter']
        if _band != band:
            continue
        if photCat.name == 'rmqso' and objId >= 850:
            break
        cutfile = outdir + 'bok%s%03d_%s.fits' % (photCat.name,
                                                  obj['objId'][0], band)
        pdffile = cutfile.replace('.fits', '.pdf')
        if os.path.exists(pdffile) or len(obj) == 0:
            continue
        pdf = PdfPages(pdffile)
        cutfits = fits.open(cutfile)
        # number of cutouts matches number of observations
        if len(cutfits)-1 != len(obj):
            errlog.write('[RM%03d]: %d cutouts, %d obs; skipping\n' %
                         (obj['objId'][0], len(cutfits)-1, len(obj)))
        pnum = -1
        for i, (obs, hdu) in enumerate(zip(obj, cutfits[1:])):
            sys.stdout.write('\rRM%03d %4d/%4d' %
                             (obs['objId'], (i+1), len(obj)))
            sys.stdout.flush()
            ccdNum = obs['ccdNum']
            cut = hdu.data
            try:
                z1, z2 = zscl.get_limits(cut[cut > 0])
            except:
                try:
                    z1, z2 = np.percentile(cut[cut > 0], [10, 90])
                except:
                    z1, z2 = cut.min(), cut.max()
            if not old:
                # rotate to N through E
                if ccdNum == 1:
                    cut = cut[:, ::-1]
                elif ccdNum == 2:
                    cut = cut[::-1, ::-1]
                elif ccdNum == 3:
                    pass
                elif ccdNum == 4:
                    cut = cut[::-1, :]
                # except now flip x-axis so east is right direction
                cut = cut[:, ::-1]
            if trim is not None:
                cut = cut[trim:-trim, trim:-trim]
            if nbin is not None:
                cut = block_reduce(cut, nbin, np.mean)
            #
            if pnum == nplot+1 or pnum == -1:
                if pnum != -1:
                    pdf.savefig()
                    plt.close()
                plt.figure(figsize=figsize)
                plt.subplots_adjust(*subplots)
                pnum = 1
            ax = plt.subplot(nrows, ncols, pnum)
            plt.imshow(cut, origin='lower', interpolation='nearest',
                       vmin=z1, vmax=z2, cmap=plt.cm.gray_r, aspect='equal')
            framestr1 = '(%d,%d,%d)' % (obs['ccdNum'], obs['x'], obs['y'])
            framestr2 = '%.3f' % (obs['mjd'])
            utstr = obs['utDate'][2:] + ' ' + obs['utObs'][:5]
            frameclr = ccdcolors[obs['ccdNum']-1]
            ax.set_title(utstr, size=7, color='k', weight='bold')
            t = ax.text(0.01, 0.98, framestr1,
                        size=7, va='top', color=frameclr,
                        transform=ax.transAxes)
            t.set_bbox(dict(color='white', alpha=0.45,
                            boxstyle="square,pad=0"))
            t = ax.text(0.01, 0.02, framestr2,
                        size=7, color='blue',
                        transform=ax.transAxes)
            t.set_bbox(dict(color='white', alpha=0.45,
                            boxstyle="square,pad=0"))
            if obs['flags'][2] > 0:
                t = ax.text(0.03, 0.7, '%d' % obs['flags'][2],
                            size=10, ha='left', va='top', color='red',
                            transform=ax.transAxes)
            if True and photCat.name == 'rmqso':
                _j = np.where(diffphot['frameIndex'][objId] ==
                              obs['frameIndex'])[0]
                if len(_j) > 0:
                    matched[objId, _j] = True
                    flg = diffphot['FLAG'][objId, _j]
                    if flg > 0:
                        flgstr = [s for bit, s in enumerate(bitstr)
                                  if (flg & (1 << bit)) > 0]
                        t = ax.text(0.97, 0.8, '\n'.join(flgstr),
                                    size=10, ha='right', va='top', color='red',
                                    transform=ax.transAxes)
                else:
                    errlog.write('no diff phot for %d %.4f %.4f\n' %
                                 (objId, obs['mjd'], obs['mjd_mid']))
            ax.xaxis.set_visible(False)
            ax.yaxis.set_visible(False)
            pnum += 1
        if True and photCat.name == 'rmqso':
            jj = np.where(~matched[objId])[0]
            if len(jj) > 0:
                errlog.write('unmatched for %d:\n' % objId)
                for j in jj:
                    errlog.write(' %.5f %d\n' % (diffphot['MJD'][objId, j],
                                                 diffphot['FLAG'][objId, j]))
        try:
            pdf.savefig()
            plt.close()
        except:
            pass
        pdf.close()
    plt.ion()
    if True and photCat.name == 'rmqso':
        errlog.close()
def show_image(image,
               percl=99, percu=None, is_mask=False,
               figsize=(10, 10),
               cmap='viridis', log=False, clip=True,
               show_colorbar=True, show_ticks=True,
               fig=None, ax=None, input_ratio=None):
    """
    Show an image in matplotlib with some basic astronomically-appropriate
    stretching.

    Parameters
    ----------
    image
        The image to show
    percl : number
        The percentile for the lower edge of the stretch (or both edges if
        ``percu`` is None)
    percu : number or None
        The percentile for the upper edge of the stretch (or None to use
        ``percl`` for both)
    figsize : 2-tuple
        The size of the matplotlib figure in inches
    """
    if percu is None:
        percu = percl
        percl = 100 - percl

    if (fig is None and ax is not None) or (fig is not None and ax is None):
        raise ValueError('Must provide both "fig" and "ax" '
                         'if you provide one of them')
    elif fig is None and ax is None:
        if figsize is not None:
            # Rescale the fig size to match the image dimensions, roughly
            image_aspect_ratio = image.shape[0] / image.shape[1]
            figsize = (max(figsize) * image_aspect_ratio, max(figsize))
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    # To preserve details we should *really* downsample correctly and
    # not rely on matplotlib to do it correctly for us (it won't).
    # So, calculate the size of the figure in pixels, block_reduce to
    # roughly that, and display the block-reduced image.
    # Thanks, https://stackoverflow.com/questions/29702424/how-to-get-matplotlib-figure-size
    fig_size_pix = fig.get_size_inches() * fig.dpi
    ratio = (image.shape // fig_size_pix).max()
    if ratio < 1:
        ratio = 1
    ratio = input_ratio or ratio
    reduced_data = block_reduce(image, ratio)

    if not is_mask:
        # Divide by the square of the ratio to keep the flux the same in the
        # reduced image. We do *not* want to do this for images which are
        # masks, since their values should be zero or one.
        reduced_data = reduced_data / ratio**2

    # Of course, now that we have downsampled, the axis limits are changed to
    # match the smaller image size. Setting the extent will do the trick to
    # change the axis display back to showing the actual extent of the image.
    extent = [0, image.shape[1], 0, image.shape[0]]

    if log:
        stretch = aviz.LogStretch()
    else:
        stretch = aviz.LinearStretch()

    norm = aviz.ImageNormalize(reduced_data,
                               interval=aviz.AsymmetricPercentileInterval(percl, percu),
                               stretch=stretch, clip=clip)

    if is_mask:
        # The image is a mask in which pixels should be zero or one.
        # block_reduce may have changed some of the values, so reset here.
        reduced_data = reduced_data > 0
        # Set the image scale limits appropriately.
        scale_args = dict(vmin=0, vmax=1)
    else:
        scale_args = dict(norm=norm)

    im = ax.imshow(reduced_data, origin='lower',
                   cmap=cmap, extent=extent, aspect='equal', **scale_args)

    if show_colorbar:
        # I haven't a clue why the fraction and pad arguments below work to
        # make the colorbar the same height as the image, but they do....
        # unless the image is wider than it is tall. Sticking with this for
        # now anyway...
        # Thanks: https://stackoverflow.com/a/26720422/3486425
        fig.colorbar(im, ax=ax, fraction=0.046, pad=0.04)
        # In case someone in the future wants to improve this:
        # https://joseph-long.com/writing/colorbars/
        # https://stackoverflow.com/a/33505522/3486425
        # https://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes

    if not show_ticks:
        ax.tick_params(labelbottom=False, labelleft=False,
                       labelright=False, labeltop=False)
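# --- Added example of the flux bookkeeping show_image relies on: after a
# sum-based block_reduce, dividing by ratio**2 restores the original per-pixel
# scale (values here are illustrative).
import numpy as np
from astropy.nddata import block_reduce

image = np.full((100, 100), 2.0)
ratio = 4
reduced = block_reduce(image, ratio) / ratio**2   # each 4x4 block sums to 32
assert np.allclose(reduced, 2.0)                  # per-pixel level preserved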
def test_2d(self):
    """Test 2D array."""
    data = np.arange(4).reshape(2, 2)
    expected = np.array([[6]])
    result = block_reduce(data, 2)
    assert np.all(result == expected)
def test_block_size_len(self):
    """Test block_size length."""
    data = np.ones((2, 2))
    with pytest.raises(ValueError):
        block_reduce(data, (2, 2, 2))
def test_1d(self):
    """Test 1D array."""
    data = np.arange(4)
    expected = np.array([1, 5])
    result = block_reduce(data, 2)
    assert np.all(result == expected)
def run(self, id):
    gaia_file = parse(self.gaia_table)
    # ---------------- Fix me -------------------
    hsa_file = fits.open(self.hsa_table)
    #hsa_file = fits.open("/data/output/data.fits")
    gaia_list = gaia_file.get_first_table().array
    dec_col = gaia_list['dec']
    ra_col = gaia_list['ra']
    image_data = hsa_file[1].data
    #CompImageHDU(hsa_file[1].data, hsa_file[0].header)
    ##from astropy.visualization.scripts import fits2bitmap
    ##fits2bitmap.fits2bitmap(filename=self.hsa_table, ext=1,
    ##                        stretch='asinh',
    ##                        percent=0.5,
    ##                        asinh_a=10,
    ##                        #min_percent=0.1, max_percent=100,
    ##                        cmap='hot',
    ##                        out_fn='/data/output/test.png')
    tools = "pan,wheel_zoom,box_zoom,reset,save"

    from astropy.nddata import block_reduce, block_replicate
    small = block_reduce(image_data, 8)

    color_mapper = LogColorMapper(palette="Viridis256", low=10, high=80)

    ##from scipy import misc
    ##data = misc.imread('/data/output/test.png')
    ##data = data[:, :, 1]
    image_data = small

    plot = figure(
        title="Gaia and HSA",
        #plot_width=data.shape[0],
        #plot_height=data.shape[1])
        x_range=(0, small.shape[0]),
        y_range=(0, small.shape[1]),
        toolbar_location="right")
    # plot.add_tools(WheelZoomTool())
    plot.image(image=[small], color_mapper=color_mapper,
               dh=[small.shape[0]], dw=[small.shape[1]], x=[0], y=[0])
    #plot.image_url(url=['/data/output/test.png'], x=0, y=1, w=100, h=100)
    #               w=image_data.shape[0], h=image_data.shape[1])
    #plot.axis.visible = False
    #from bokeh.io import export_png
    #export_png(plot, filename="/data/output/plot.png")
    save_plot(id, "example", plot)

    plot = figure(
        title="Gaia and HSA",
        tools=tools,
        x_range=(0, small.shape[0]),
        y_range=(0, small.shape[1]),
        toolbar_location="below")
    plot.image(image=[small], color_mapper=color_mapper,
               dh=[small.shape[0]], dw=[small.shape[1]], x=[0], y=[0])
    save_plot(id, "example", plot)

    plot = figure(plot_width=400, tools=tools,
                  plot_height=400, toolbar_location="right")
    plot.circle(dec_col, ra_col, fill_color="red", size=2)
    save_plot(id, "example gaia", plot)

    my_file = data(id).file("my_results.txt")
    my_file.write("test")
    my_file.close()
def test_block_size_broadcasting(self):
    """Test scalar block_size broadcasting."""
    data = np.arange(16).reshape(4, 4)
    result1 = block_reduce(data, 2)
    result2 = block_reduce(data, (2, 2))
    assert np.all(result1 == result2)
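# --- Added standalone sketch of the broadcast being tested above: a scalar
# block_size applies to every axis, so the two calls below are equivalent.
import numpy as np
from astropy.nddata import block_reduce

data = np.arange(16).reshape(4, 4)
assert np.all(block_reduce(data, 2) == block_reduce(data, (2, 2)))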
def bin_data_2d(data, oversampling_factor):
    """
    Bin a 2d array into square bins determined by the oversampling factor
    """
    # Astropy has a convenient function to do this
    bin_factors = [oversampling_factor, oversampling_factor]
    return nddata.block_reduce(data, bin_factors, np.mean)
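# --- Added usage sketch for bin_data_2d (assumes the helper above is in scope
# as written): mean-binning a 9x9 oversampled stamp by a factor of 3 gives a
# 3x3 stamp with the same mean level.
import numpy as np

oversampled = np.arange(81, dtype=float).reshape(9, 9)
binned = bin_data_2d(oversampled, 3)
assert binned.shape == (3, 3)
assert np.isclose(binned.mean(), oversampled.mean())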
def evaluate(self, x, y, *params, **kwargs):
    # Extract sub-model params as well as `psf_p`
    *sub_model_params, psf_p = params

    # Prepare image indices
    i = (x - x.min()).astype(int)
    j = (y - y.min()).astype(int)

    # Compute image size and oversampling factor
    grid_size = max([i.max(), j.max()]) + 1
    grid_factor = self.oversample if isinstance(self.oversample, int) else 1
    grid_range = (x.min(), x.max(), y.min(), y.max())

    # Make the main grid
    if (grid_size == self._cached_grid_size
            and self._cached_grid_factor == grid_factor
            and grid_range == self._cache_grid_range):
        # Use the cached grid
        main_grid = self._cached_grid
    else:
        # Else make a grid
        main_grid = make_grid(grid_size, origin=(x.min(), y.min()),
                              factor=grid_factor)
        if self.cache_grid:
            self._cached_grid = main_grid
            self._cached_grid_size = grid_size
            self._cached_grid_factor = grid_factor
            self._cache_grid_range = grid_range

    # Split main grid to x and y components
    x_grid, y_grid = main_grid

    # Main Model Image
    # ----------------
    # Construct main model image by sampling sub-model
    model_image = self._model.evaluate(x_grid, y_grid, *sub_model_params)

    # Oversampling
    # ------------
    if isinstance(self.oversample, int):
        # If the oversample factor is an int, block reduce the image
        model_image = block_reduce(model_image, grid_factor) / grid_factor ** 2

    elif isinstance(self.oversample, tuple):
        # If the oversample is a window, compute pixel values for that window

        # Load the window params
        sub_grid_x0, sub_grid_y0, sub_grid_size, sub_grid_factor = self.oversample
        assert isinstance(sub_grid_size, int), "Oversampling window size must be an int"
        assert isinstance(sub_grid_factor, int), "Oversampling factor must be an int"

        # If the center of the window is a parameter name, extract its value
        if isinstance(sub_grid_x0, str):
            assert sub_grid_x0 in self._model.param_names, \
                "oversample param '{}' is not in the wrapped model param list".format(sub_grid_x0)
            idx = self._model.param_names.index(sub_grid_x0)
            sub_grid_x0 = sub_model_params[idx][0]

        if isinstance(sub_grid_y0, str):
            assert sub_grid_y0 in self._model.param_names, \
                "oversample param '{}' is not in the wrapped model param list".format(sub_grid_y0)
            idx = self._model.param_names.index(sub_grid_y0)
            sub_grid_y0 = sub_model_params[idx][0]

        # Compute the corner of the sub-grid
        sub_grid_origin = (np.round(sub_grid_x0) - sub_grid_size // 2,
                           np.round(sub_grid_y0) - sub_grid_size // 2)

        # Make an oversampled sub-grid for window
        x_sub_grid, y_sub_grid = make_grid(sub_grid_size,
                                           origin=sub_grid_origin,
                                           factor=sub_grid_factor)

        # Sample the sub-model onto the sub-grid
        sub_model_oversampled_image = self._model.evaluate(
            x_sub_grid, y_sub_grid, *sub_model_params)

        # Block reduce the window to the main image resolution
        sub_model_image = block_reduce(
            sub_model_oversampled_image, sub_grid_factor) / sub_grid_factor ** 2

        # Compute window indices in main image frame
        i_sub_min = int(np.round(sub_grid_origin[0]))
        j_sub_min = int(np.round(sub_grid_origin[1]))
        i_sub_max = i_sub_min + sub_grid_size
        j_sub_max = j_sub_min + sub_grid_size

        # Clip window indices
        if i_sub_min < 0:
            i_sub_min = 0
        if j_sub_min < 0:
            j_sub_min = 0
        if i_sub_max > i.max():
            i_sub_max = i.max() + 1
        if j_sub_max > j.max():
            j_sub_max = j.max() + 1

        # Add oversampled window to image
        model_image[
            j_sub_min:j_sub_max,
            i_sub_min:i_sub_max
        ] = sub_model_image

    # PSF convolve
    # ------------
    if self.psf is None:
        return model_image[j, i]
    else:
        psf = self.psf
        if psf_p[0] != 0:
            psf = rotate(psf, psf_p[0], reshape=False)
        return convolve(model_image, psf, mode='same')[j, i]
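# --- Added sketch of the oversample-then-reduce pattern the evaluate() wrapper
# above uses: sample on a grid `factor` times finer, block_reduce with the
# default sum, and divide by factor**2 so each output pixel is the mean of its
# subpixels (a flat stand-in image is used here instead of a real model).
import numpy as np
from astropy.nddata import block_reduce

factor = 4
fine = np.ones((8 * factor, 8 * factor))
coarse = block_reduce(fine, factor) / factor**2
assert coarse.shape == (8, 8)
assert np.allclose(coarse, 1.0)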
def downsample(self, factor=2, func=np.sum):
    data0 = self.datos
    data1 = block_reduce(data0, block_size=factor, func=func)
    return Imagen(data1, self.centro,
                  tuple(ti // factor for ti in self.size),
                  self.pixsize * factor)
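# --- Added consistency check (illustrative numbers) for the bookkeeping in
# Imagen.downsample above: halving the pixel grid while doubling the pixel
# size leaves the total field of view unchanged.
import numpy as np
from astropy.nddata import block_reduce

factor, pixsize, npix = 2, 1.5, 8
down = block_reduce(np.ones((npix, npix)), block_size=factor, func=np.sum)
assert down.shape == (npix // factor, npix // factor)
assert np.isclose(npix * pixsize, down.shape[0] * (pixsize * factor))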
def lacosmic(data, contrast, cr_threshold, neighbor_threshold,
             error=None, mask=None, background=None, effective_gain=None,
             readnoise=None, maxiter=4, border_mode='mirror'):
    r"""
    Remove cosmic rays from an astronomical image using the L.A.Cosmic
    algorithm.

    The `L.A.Cosmic algorithm
    <http://www.astro.yale.edu/dokkum/lacosmic/>`_ is based on Laplacian
    edge detection and is described in `van Dokkum (2001; PASP 113,
    1420) <https://ui.adsabs.harvard.edu/abs/2001PASP..113.1420V/abstract>`_.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    contrast : float
        Contrast threshold between the Laplacian image and the
        fine-structure image. If your image is critically sampled, use
        a value around 2. If your image is undersampled (e.g., HST
        data), a value of 4 or 5 (or more) is more appropriate. If your
        image is oversampled, use a value between 1 and 2. For details,
        please see `PASP 113, 1420 (2001)
        <https://ui.adsabs.harvard.edu/abs/2001PASP..113.1420V/abstract>`_,
        which calls this parameter :math:`f_{\mbox{lim}}`. In
        particular, Figure 4 shows the approximate relationship between
        the ``contrast`` parameter and the full-width half-maximum (in
        pixels) of stars in your image.

    cr_threshold : float
        The Laplacian signal-to-noise ratio threshold for cosmic-ray
        detection.

    neighbor_threshold : float
        The Laplacian signal-to-noise ratio threshold for detection of
        cosmic rays in pixels neighboring the initially-identified
        cosmic rays.

    error : array_like, optional
        The 1-sigma errors of the input ``data``. If ``error`` is not
        input, then ``effective_gain`` and ``readnoise`` will be used to
        construct an approximate model of the ``error``. If ``error``
        is input, it will override the ``effective_gain`` and
        ``readnoise`` parameters. ``error`` must have the same shape as
        ``data``.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.
        Masked pixels are ignored when identifying cosmic rays. It is
        highly recommended that saturated stars be included in ``mask``.

    background : float or array_like, optional
        The background level previously subtracted from the input
        ``data``. ``background`` may either be a scalar value or a 2D
        image with the same shape as the input ``data``. If the input
        ``data`` has not been background subtracted, then set
        ``background=None`` (default).

    effective_gain : float, array-like, optional
        Ratio of counts (e.g., electrons or photons) to the units of
        ``data``. For example, if your input ``data`` are in units of
        ADU, then ``effective_gain`` should represent electrons/ADU.
        If your input ``data`` are in units of electrons/s then
        ``effective_gain`` should be the exposure time (or an exposure
        time map). ``effective_gain`` and ``readnoise`` must be
        specified if ``error`` is not input.

    readnoise : float, optional
        The read noise (in electrons) in the input ``data``.
        ``effective_gain`` and ``readnoise`` must be specified if
        ``error`` is not input.

    maxiter : float, optional
        The maximum number of iterations. The default is 4. The
        routine will automatically exit if no additional cosmic rays are
        identified in an iteration. If the routine is still identifying
        cosmic rays after four iterations, then you are likely digging
        into sources (e.g., saturated stars) and/or the noise. In that
        case, try inputting a ``mask`` or increasing the value of
        ``cr_threshold``.

    border_mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode in which the array borders are handled during
        convolution and median filtering. For 'constant', the fill
        value is 0. The default is 'mirror', which matches the
        original L.A.Cosmic algorithm.

    Returns
    -------
    cleaned_image : `~numpy.ndarray`
        The cosmic-ray cleaned image.

    crmask : `~numpy.ndarray` (bool)
        A mask image of the identified cosmic rays. Cosmic-ray pixels
        have a value of `True`.
    """
    block_size = 2.0
    kernel = np.array([[0.0, -1.0, 0.0],
                       [-1.0, 4.0, -1.0],
                       [0.0, -1.0, 0.0]])

    clean_data = data.copy()
    if background is not None:
        clean_data += background
    final_crmask = np.zeros(data.shape, dtype=bool)

    if error is not None:
        if data.shape != error.shape:
            raise ValueError('error and data must have the same shape')
        clean_error_image = error
    else:
        # define the name so the gain/readnoise branch below is reachable
        clean_error_image = None

    ncosmics, ncosmics_tot = 0, 0
    for iteration in range(maxiter):
        sampled_img = block_replicate(clean_data, block_size)
        convolved_img = ndimage.convolve(sampled_img, kernel,
                                         mode=border_mode).clip(min=0.0)
        laplacian_img = block_reduce(convolved_img, block_size)

        if clean_error_image is None:
            if effective_gain is None or readnoise is None:
                raise ValueError('effective_gain and readnoise must be '
                                 'input if error is not input')
            med5_img = ndimage.median_filter(clean_data, size=5,
                                             mode=border_mode).clip(min=1.e-5)
            error_image = (np.sqrt(effective_gain * med5_img + readnoise ** 2)
                           / effective_gain)
        else:
            error_image = clean_error_image

        snr_img = laplacian_img / (block_size * error_image)
        # this is used to remove extended structures (larger than ~5x5)
        snr_img -= ndimage.median_filter(snr_img, size=5, mode=border_mode)

        # used to remove compact bright objects
        med3_img = ndimage.median_filter(clean_data, size=3, mode=border_mode)
        med7_img = ndimage.median_filter(med3_img, size=7, mode=border_mode)

        finestruct_img = ((med3_img - med7_img) / error_image).clip(min=0.01)

        cr_mask1 = snr_img > cr_threshold
        # NOTE: to follow the paper exactly, this condition should be
        # "> contrast * block_size". "lacos_im.cl" uses simply "> contrast"
        cr_mask2 = (snr_img / finestruct_img) > contrast
        cr_mask = cr_mask1 * cr_mask2
        if mask is not None:
            cr_mask = np.logical_and(cr_mask, ~mask)

        # grow cosmic rays by one pixel and check in snr_img
        selem = np.ones((3, 3))
        neigh_mask = ndimage.binary_dilation(cr_mask, selem)
        cr_mask = cr_mask1 * neigh_mask
        # now grow one more pixel and lower the detection threshold
        neigh_mask = ndimage.binary_dilation(cr_mask, selem)
        cr_mask = (snr_img > neighbor_threshold) * neigh_mask

        # previously unknown cosmic rays found in this iteration
        crmask_new = np.logical_and(~final_crmask, cr_mask)
        ncosmics = np.count_nonzero(crmask_new)
        final_crmask = np.logical_or(final_crmask, cr_mask)
        ncosmics_tot += ncosmics
        log.info(f'Iteration {iteration + 1}: Found {ncosmics} cosmic-ray '
                 f'pixels, Total: {ncosmics_tot}')
        if ncosmics == 0:
            if background is not None:
                clean_data -= background
            return clean_data, final_crmask
        clean_data = _clean_masked_pixels(clean_data, final_crmask, size=5,
                                          exclude_mask=mask)

    if background is not None:
        clean_data -= background
    return clean_data, final_crmask
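# --- Added sketch of the subsample/convolve/reduce core of lacosmic above:
# the image is block-replicated 2x, convolved with the Laplacian kernel, and
# block-reduced back to native resolution, which is what makes single-pixel
# spikes stand out (toy data; not a substitute for the full routine).
import numpy as np
from scipy import ndimage
from astropy.nddata import block_replicate, block_reduce

kernel = np.array([[0.0, -1.0, 0.0],
                   [-1.0, 4.0, -1.0],
                   [0.0, -1.0, 0.0]])
data = np.zeros((16, 16))
data[8, 8] = 100.0                               # a cosmic-ray-like spike
sampled = block_replicate(data, 2)
lap = ndimage.convolve(sampled, kernel, mode='mirror').clip(min=0.0)
laplacian_img = block_reduce(lap, 2)
assert laplacian_img.shape == data.shape
assert laplacian_img[8, 8] > 0                   # the spike survives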
def evaluate_sky(img, sigma=1.5, radius=10, pixel_scale=0.168,
                 central_mask_radius=7.0, threshold=0.005,
                 deblend_cont=0.001, deblend_nthresh=20, clean_param=1.0,
                 show_fig=True, show_hist=True, f_factor=None):
    '''Evaluate the mean sky value.

    Parameters:
    ----------
    img: 2-D numpy array, the input image
    show_fig: bool. If True, it will show you the masked sky image.
    show_hist: bool. If True, it will show you the histogram of the sky value.

    Returns:
    -------
    median: median of background pixels, in original unit
    std: standard deviation, in original unit
    '''
    import sep
    import copy
    from slug.imutils import extract_obj, make_binary_mask
    from astropy.convolution import convolve, Gaussian2DKernel
    b = 35  # Box size
    f = 5   # Filter width
    bkg = sep.Background(img, maskthresh=0, bw=b, bh=b, fw=f, fh=f)

    # first time
    objects, segmap = extract_obj(img - bkg.globalback, b=35, f=5,
                                  sigma=sigma, minarea=20,
                                  pixel_scale=pixel_scale,
                                  deblend_nthresh=deblend_nthresh,
                                  deblend_cont=deblend_cont,
                                  clean_param=clean_param, show_fig=False)

    seg_sky = copy.deepcopy(segmap)
    seg_sky[segmap > 0] = 1
    seg_sky = seg_sky.astype(bool)
    # Blow up the mask
    for obj in objects:
        sep.mask_ellipse(seg_sky, obj['x'], obj['y'], obj['a'], obj['b'],
                         obj['theta'], r=radius)
    bkg_mask_1 = seg_sky

    data = copy.deepcopy(img - bkg.globalback)
    data[bkg_mask_1 == 1] = 0

    # Second time
    obj_lthre, seg_lthre = extract_obj(data, b=35, f=5, sigma=sigma + 1,
                                       minarea=5, pixel_scale=pixel_scale,
                                       deblend_nthresh=deblend_nthresh,
                                       deblend_cont=deblend_cont,
                                       clean_param=clean_param,
                                       show_fig=False)
    seg_sky = copy.deepcopy(seg_lthre)
    seg_sky[seg_lthre > 0] = 1
    seg_sky = seg_sky.astype(bool)
    # Blow up the mask
    for obj in obj_lthre:
        sep.mask_ellipse(seg_sky, obj['x'], obj['y'], obj['a'], obj['b'],
                         obj['theta'], r=radius/2)
    bkg_mask_2 = seg_sky

    bkg_mask = (bkg_mask_1 + bkg_mask_2).astype(bool)

    cen_obj = objects[segmap[int(bkg_mask.shape[0] / 2.),
                             int(bkg_mask.shape[1] / 2.)] - 1]
    fraction_radius = sep.flux_radius(img, cen_obj['x'], cen_obj['y'],
                                      10*cen_obj['a'], 0.5)[0]
    ba = np.divide(cen_obj['b'], cen_obj['a'])

    if fraction_radius < int(bkg_mask.shape[0] / 8.):
        sep.mask_ellipse(bkg_mask, cen_obj['x'], cen_obj['y'],
                         fraction_radius, fraction_radius * ba,
                         cen_obj['theta'], r=central_mask_radius)
    elif fraction_radius < int(bkg_mask.shape[0] / 4.):
        sep.mask_ellipse(bkg_mask, cen_obj['x'], cen_obj['y'],
                         fraction_radius, fraction_radius * ba,
                         cen_obj['theta'], r=1.2)

    # Estimate sky from histogram of binned image
    import copy
    from scipy import stats
    from astropy.stats import sigma_clip
    from astropy.nddata import block_reduce
    data = copy.deepcopy(img)
    data[bkg_mask] = np.nan
    if f_factor is None:
        f_factor = round(6 / pixel_scale)
    rebin = block_reduce(data, f_factor)
    sample = rebin.flatten()
    if show_fig:
        display_single(rebin)
        plt.savefig('./{}-bkg.png'.format(np.random.randint(1000)),
                    dpi=100, bbox_inches='tight')
    temp = sigma_clip(sample)
    sample = temp.data[~temp.mask]

    kde = stats.gaussian_kde(sample)
    print(f_factor)
    mean = np.nanmean(sample) / f_factor**2
    median = np.nanmedian(sample) / f_factor**2
    std = np.nanstd(sample, ddof=1) / f_factor / np.sqrt(len(sample))

    xlim = np.std(sample, ddof=1) * 7
    x = np.linspace(-xlim + np.median(sample), xlim + np.median(sample), 100)
    offset = x[np.argmax(kde.evaluate(x))] / f_factor**2

    print('mean', mean)
    print('median', median)
    print('std', std)

    bkg_global = sep.Background(img, mask=bkg_mask, maskthresh=0,
                                bw=f_factor, bh=f_factor,
                                fw=f_factor/2, fh=f_factor/2)
    print("#SEP sky: Mean Sky / RMS Sky = %10.5f / %10.5f" %
          (bkg_global.globalback, bkg_global.globalrms))

    if show_hist:
        fig, ax = plt.subplots(figsize=(8, 6))
        ax.plot(x, kde.evaluate(x), linestyle='dashed', c='black', lw=2,
                label='KDE')
        # `normed` was removed in matplotlib 3.x; `density` is the replacement
        ax.hist(sample, bins=x, density=True)
        ax.legend(loc='best', frameon=False, fontsize=20)

        ax.set_xlabel('Pixel Value', fontsize=20)
        ax.set_ylabel('Normed Number', fontsize=20)
        ax.tick_params(labelsize=20)
        ylim = ax.get_ylim()
        ax.text(-0.1 * f_factor + np.median(sample),
                0.9 * (ylim[1] - ylim[0]) + ylim[0],
                r'$\mathrm{offset}=' + str(round(offset, 6)) + '$',
                fontsize=20)
        ax.text(-0.1 * f_factor + np.median(sample),
                0.8 * (ylim[1] - ylim[0]) + ylim[0],
                r'$\mathrm{median}=' + str(round(median, 6)) + '$',
                fontsize=20)
        ax.text(-0.1 * f_factor + np.median(sample),
                0.7 * (ylim[1] - ylim[0]) + ylim[0],
                r'$\mathrm{std}=' + str(round(std, 6)) + '$',
                fontsize=20)
        plt.vlines(np.median(sample), 0, ylim[1], linestyle='--')

    return median, std, sample
def evaluate(self, *args, **kwargs):
    psf_p = args[-1]
    args = args[:-1]
    x = args[0]
    y = args[1]

    assert not np.any(x < 0), 'negative pixel values not supported at this time'
    assert not np.any(y < 0), 'negative pixel values not supported at this time'

    grid_size = max([i.max() + 1 for i in [x, y]])
    grid_factor = self.oversample if isinstance(self.oversample, int) else 1

    if grid_size == self._cached_grid_size and self._cached_grid_factor == grid_factor:
        main_grid = self._cached_grid
    else:
        main_grid = make_grid(grid_size, factor=grid_factor)
        if self.cache_grid:
            self._cached_grid = main_grid
            self._cached_grid_size = grid_size
            self._cached_grid_factor = grid_factor

    x_grid, y_grid = main_grid

    model_image = self._model.evaluate(x_grid, y_grid, *args[self.n_inputs:])

    if isinstance(self.oversample, int):
        model_image = block_reduce(model_image, grid_factor) / grid_factor ** 2

    elif isinstance(self.oversample, tuple):
        sub_grid_x0, sub_grid_y0, sub_grid_size, sub_grid_factor = self.oversample

        if isinstance(sub_grid_x0, str):
            assert sub_grid_x0 in self._model.param_names, \
                "oversample param '{}' is not in the wrapped model param list".format(sub_grid_x0)
            idx = self._model.param_names.index(sub_grid_x0)
            sub_grid_x0 = args[self.n_inputs:][idx][0]

        if isinstance(sub_grid_y0, str):
            assert sub_grid_y0 in self._model.param_names, \
                "oversample param '{}' is not in the wrapped model param list".format(sub_grid_y0)
            idx = self._model.param_names.index(sub_grid_y0)
            sub_grid_y0 = args[self.n_inputs:][idx][0]

        x_sub_grid, y_sub_grid = make_grid(sub_grid_size, factor=sub_grid_factor)

        x_sub_grid += int(sub_grid_x0) - sub_grid_size // 2
        y_sub_grid += int(sub_grid_y0) - sub_grid_size // 2

        sub_model_oversampled_image = self._model.evaluate(
            x_sub_grid, y_sub_grid, *args[self.n_inputs:])

        # Experimental
        # over_sampled_sub_model_x0 = np.argmin(
        #     np.abs(x_sub_grid[0, :] - 1 / (2 * sub_grid_factor) - (sub_grid_x0 * sub_grid_factor)))
        # over_sampled_sub_model_y0 = np.argmin(
        #     np.abs(y_sub_grid[:, 0] - 1 / (2 * sub_grid_factor) - (sub_grid_y0 * sub_grid_factor)))
        #
        # sub_model_oversampled_image[
        #     over_sampled_sub_model_y0,
        #     over_sampled_sub_model_x0
        # ] = self._model.evaluate(sub_grid_x0, sub_grid_y0, *args[self.n_inputs:])

        sub_model_image = block_reduce(
            sub_model_oversampled_image, sub_grid_factor) / sub_grid_factor ** 2

        x_sub_min = int(x_sub_grid[0][0] - 1 / (2 * sub_grid_factor)) + 1
        y_sub_min = int(y_sub_grid[0][0] - 1 / (2 * sub_grid_factor)) + 1

        model_image[
            y_sub_min: y_sub_min + sub_grid_size,
            x_sub_min: x_sub_min + sub_grid_size
        ] = sub_model_image

    if self.psf is None:
        return model_image[y.astype(int), x.astype(int)]
    else:
        psf = self.psf
        if psf_p[0] != 0:
            psf = rotate(psf, psf_p[0], reshape=False)
        return convolve(model_image, psf, mode='same')[y.astype(int), x.astype(int)]
def rebin(self, binfact):
    if np.isscalar(binfact):
        xbin = binfact
        ybin = binfact
    else:
        xbin = binfact[0]
        ybin = binfact[1]
    xratio = 1. / xbin
    yratio = 1. / ybin
    lamb = yratio / xratio
    pixratio = xratio * yratio

    # trim the image first (to deal with edge effects)
    x = xbin * (self.shape[1] // xbin) - 1
    y = ybin * (self.shape[0] // ybin) - 1
    sub = self.extract(0, x, 0, y)

    # block average the image
    img = nddata.block_reduce(sub.image, (xbin, ybin), func=np.average)

    # get the header
    hdr = sub.header.copy()

    # update the main WCS values
    hdr['CRPIX1'] = (hdr['CRPIX1'] - 1.) * xratio + 1.
    hdr['CRPIX2'] = (hdr['CRPIX2'] - 1.) * yratio + 1.
    if 'CDELT1' in hdr:
        hdr['CDELT1'] = hdr['CDELT1'] / xratio
    if 'CDELT2' in hdr:
        hdr['CDELT2'] = hdr['CDELT2'] / yratio

    print("Need to sort out LTVs")
    #if 'LTV1' in hdr:
    #    hdr['LTV1'] = hdr['LTV1'] * xratio
    #if 'LTV2' in hdr:
    #    hdr['LTV2'] = hdr['LTV2'] * yratio

    hdr['CD1_1'] = hdr['CD1_1'] / xratio
    hdr['CD1_2'] = hdr['CD1_2'] / xratio
    hdr['CD2_1'] = hdr['CD2_1'] / yratio
    hdr['CD2_2'] = hdr['CD2_2'] / yratio

    # update the distortion
    if self.sip is not None:
        print("HELP, must update SIP")

    # update the BSCALE; np.issubdtype is the correct dtype check here,
    # since isinstance(img.dtype, np.unsignedinteger) is always False
    if not np.issubdtype(img.dtype, np.unsignedinteger):
        if 'BSCALE' in hdr and hdr['BSCALE'] != 1 and hdr['BSCALE'] != 0:
            hdr['BSCALE'] = hdr['BSCALE'] / pixratio
        if 'BZERO' in hdr and hdr['BZERO'] != 0:
            hdr['BZERO'] = hdr['BZERO'] / pixratio

    # update the history
    history = 'Block averaged image with factor {}'.format(binfact)
    hdr.add_history(history)

    # create the output
    output = type(self)(img, hdr)

    return output
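# --- Added arithmetic check (hypothetical values) for the CRPIX update in
# rebin() above: a reference pixel at FITS coordinate 101.0 on an axis binned
# by 2 maps to (101 - 1) * 0.5 + 1 = 51.0, consistent with pixels twice as large.
xbin = 2
crpix_old = 101.0
crpix_new = (crpix_old - 1.0) * (1.0 / xbin) + 1.0
assert crpix_new == 51.0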