def invert_ignore_none(vis, model, g):
    """Invert vis onto model; for a None vis return an empty image and zero weight."""
    if vis is None:
        return create_empty_image_like(model), 0.0
    return invert(vis, model, context=context, dopsf=dopsf,
                  normalize=normalize, gcfcf=g, **kwargs)
def smooth_image(model: Image, width=1.0):
    """Smooth an image with a Gaussian kernel.

    :param model: Image to smooth
    :param width: Gaussian kernel width in pixels
    :return: Smoothed copy of the image
    """
    import warnings
    import astropy.convolution

    assert isinstance(model, Image), model

    kernel = astropy.convolution.kernels.Gaussian2DKernel(width)
    cmodel = create_empty_image_like(model)
    nchan, npol, _, _ = model.shape
    # TODO: Remove filter when astropy fixes convolve.
    # Bug fix: scope the filter with catch_warnings so it no longer leaks
    # into the process-wide warning configuration.
    with warnings.catch_warnings():
        warnings.simplefilter(action='ignore', category=FutureWarning)
        for pol in range(npol):
            for chan in range(nchan):
                cmodel.data[chan, pol, :, :] = astropy.convolution.convolve(
                    model.data[chan, pol, :, :], kernel, normalize_kernel=False)
    # convolve ran with normalize_kernel=False, so restore the Gaussian
    # normalisation (integral of a 2D Gaussian of sigma=width).
    if isinstance(kernel, astropy.convolution.kernels.Gaussian2DKernel):
        cmodel.data *= 2 * numpy.pi * width**2
    return cmodel
def deconvolve_channel_list_serial_workflow(dirty_list, psf_list,
                                            model_imagelist, subimages,
                                            **kwargs):
    """Create a graph for deconvolution by channels, adding to the model

    Does deconvolution channel by channel.

    :param dirty_list:
    :param psf_list: Must be the size of a facet
    :param model_imagelist: Current model
    :param subimages: Number of channel subimages
    :param kwargs: Parameters for functions in components
    :return: Deconvolved image with the model added
    """

    def deconvolve_subimage(dirty, psf):
        # Deconvolve one channel subimage and return the component image.
        assert isinstance(dirty, Image)
        assert isinstance(psf, Image)
        comp = deconvolve_cube(dirty, psf, **kwargs)
        return comp[0]

    def add_model(sum_model, model):
        # Add the model into the deconvolution result in place.
        # Bug fix: assert the argument itself, not the outer-scope 'output'.
        assert isinstance(sum_model, Image)
        assert isinstance(model, Image)
        sum_model.data += model.data
        return sum_model

    output = create_empty_image_like(model_imagelist)
    channel_images = image_scatter_channels(dirty_list[0], subimages=subimages)
    # Loop variable renamed so it no longer shadows the dirty_list parameter.
    results = [deconvolve_subimage(channel_image, psf_list[0])
               for channel_image in channel_images]
    result = image_gather_channels(results, output, subimages=subimages)
    return add_model(result, model_imagelist)
def create_window(template, window_type, **kwargs):
    """Create a window (mask) image matching the template.

    :param template: Template image defining the shape
    :param window_type: 'quarter' | 'no_edge' | 'threshold' | None
    :return: Window image, 1.0 inside the retained region
    """
    window = create_empty_image_like(template)
    if window_type == 'quarter':
        # Retain only the inner quarter of each sky plane
        qx = template.shape[3] // 4
        qy = template.shape[2] // 4
        window.data[..., (qy + 1):3 * qy, (qx + 1):3 * qx] = 1.0
        log.info('create_mask: Cleaning inner quarter of each sky plane')
    elif window_type == 'no_edge':
        edge = get_parameter(kwargs, 'window_edge', 16)
        nx = template.shape[3]
        ny = template.shape[2]
        window.data[..., (edge + 1):(ny - edge), (edge + 1):(nx - edge)] = 1.0
        log.info('create_mask: Window omits %d-pixel edge of each sky plane' % (edge))
    elif window_type == 'threshold':
        window_threshold = get_parameter(kwargs, 'window_threshold', None)
        if window_threshold is None:
            window_threshold = 10.0 * numpy.std(template.data)
        # Bug fix: set the pixel array, not the Image object itself.
        window.data[template.data >= window_threshold] = 1.0
        log.info('create_mask: Window omits all points below %g' % (window_threshold))
    elif window_type is None:
        log.info("create_mask: Mask covers entire image")
    else:
        raise ValueError("Window shape %s is not recognized" % window_type)
    return window
def smooth_image(model: Image, width=1.0, normalise=True):
    """Convolve an image with a Gaussian kernel.

    :param model: Image
    :param width: Kernel in pixels
    :param normalise: Normalise kernel peak to unity
    """
    assert isinstance(model, Image), model
    from astropy.convolution.kernels import Gaussian2DKernel
    from astropy.convolution import convolve_fft

    kernel = Gaussian2DKernel(width)
    smoothed = create_empty_image_like(model)
    nchan, npol, _, _ = model.shape
    # Convolve each (channel, polarisation) plane independently.
    for chan in range(nchan):
        for pol in range(npol):
            smoothed.data[chan, pol, :, :] = convolve_fft(
                model.data[chan, pol, :, :], kernel,
                normalize_kernel=False, allow_huge=True)
    # Compensate for normalize_kernel=False by the Gaussian integral.
    if normalise and isinstance(kernel, Gaussian2DKernel):
        smoothed.data *= 2 * numpy.pi * width**2
    return smoothed
def imagerooter(image_list) -> list:
    """Return new images holding sqrt(abs(data)) of each input image.

    :param image_list: Iterable of images
    :return: List of new images
    """
    # Bug fix: annotation was '-> list()' (an empty-list instance),
    # which is not a valid type; annotate with the type itself.
    new_image_list = []
    for im in image_list:
        newim = create_empty_image_like(im)
        newim.data = numpy.sqrt(numpy.abs(im.data))
        new_image_list.append(newim)
    return new_image_list
def image_gradients(im: Image):
    """Calculate image gradients numerically

    Gradient units are (incoming unit)/pixel e.g. Jy/beam/pixel

    :param im: Image
    :return: Gradient images
    """
    assert isinstance(im, Image)
    nchan, npol, ny, nx = im.shape

    # First differences; element 0 along each axis is left at zero.
    gradientx = create_empty_image_like(im)
    gradientx.data[..., :, 1:nx] = numpy.diff(im.data, axis=3)

    gradienty = create_empty_image_like(im)
    gradienty.data[..., 1:ny, :] = numpy.diff(im.data, axis=2)
    return gradientx, gradienty
def invert_ignore_none(vis, model, gg):
    """Invert vis; for a None vis return an empty image and zero weights."""
    if vis is None:
        return (create_empty_image_like(model),
                numpy.zeros([model.nchan, model.npol]))
    return invert(vis, model, context=context, dopsf=dopsf,
                  normalize=normalize, gcfcf=gg, **kwargs)
def invert_ignore_none(vis, model):
    """Invert vis; for a None vis return an empty image and zero weight."""
    if vis is None:
        return create_empty_image_like(model), 0.0
    return invert(vis, model, context=context, dopsf=dopsf,
                  normalize=normalize, facets=facets,
                  vis_slices=vis_slices, **kwargs)
def gather_image_iteration_results(results, template_model):
    """Gather per-facet (image, sumwt) results into one image and total weight."""
    result = create_empty_image_like(template_model)
    sumwt = numpy.zeros([template_model.nchan, template_model.npol])
    # Walk facets of the output in the same order results were produced.
    for i, dpatch in enumerate(image_scatter_facets(result, facets=facets)):
        assert i < len(results), "Too few results in gather_image_iteration_results"
        if results[i] is not None:
            assert len(results[i]) == 2, results[i]
            dpatch.data[...] = results[i][0].data[...]
            sumwt += results[i][1]
    return result, sumwt
def deconvolve_list_channel_mpi_workflow(dirty_list, psf_list, model_imagelist,
                                         subimages, comm=MPI.COMM_WORLD,
                                         **kwargs):
    """Create a graph for deconvolution by channels, adding to the model

    Does deconvolution channel by channel.

    :param subimages: MONTSE: number of subimages (= freqchannels?)
    :param dirty_list: in rank=0
    :param psf_list: Must be the size of a facet in rank=0
    :param model_imagelist: Current model in rank=0
    :param comm: MPI communicator
    :param kwargs: Parameters for functions in components
    :return: Result image on rank 0, None on other ranks
    """

    def deconvolve_subimage(dirty, psf):
        # Deconvolve one channel subimage and return the component image.
        assert isinstance(dirty, Image)
        assert isinstance(psf, Image)
        comp = deconvolve_cube(dirty, psf, **kwargs)
        return comp[0]

    def add_model(sum_model, model):
        # Bug fix: assert the argument itself, not the rank-0-only 'output'.
        assert isinstance(sum_model, Image)
        assert isinstance(model, Image)
        sum_model.data += model.data
        return sum_model

    rank = comm.Get_rank()
    size = comm.Get_size()
    if rank == 0:
        output = create_empty_image_like(model_imagelist)
        dirty_lists = image_scatter_channels(dirty_list[0], subimages=subimages)
        sub_dirty_lists = numpy.array_split(dirty_lists, size)
        psf_im = psf_list[0]
    else:
        # Bug fix: these were previously undefined on non-root ranks,
        # raising NameError at the collective calls below.
        sub_dirty_lists = None
        psf_im = None
    sub_dirty_lists = comm.scatter(sub_dirty_lists, root=0)
    # Bug fix: use lowercase bcast (pickle-based, returns the object) for
    # Python objects; uppercase Bcast is for buffer-likes and returns None.
    psf_list_im = comm.bcast(psf_im, root=0)
    sub_results = [
        deconvolve_subimage(sub_dirty, psf_list_im)
        for sub_dirty in sub_dirty_lists
    ]
    results = comm.gather(sub_results, root=0)
    # NOTE: This is same as in invert, not scalable, we should use a reduction
    # instead but I don't understand image_gather_channels ...
    if rank == 0:
        results = numpy.concatenate(results)
        result = image_gather_channels(results, output, subimages=subimages)
        result = add_model(result, model_imagelist)
    else:
        result = None
    return result
def extract_psf(psf, facets):
    """Extract the central 1/facets-sized window of a PSF image."""
    spsf = create_empty_image_like(psf)
    ny, nx = spsf.shape[2], spsf.shape[3]
    # Image centre and half-extent of the extracted window
    cx, cy = nx // 2, ny // 2
    wx, wy = nx // facets, ny // facets
    xbeg, xend = cx - wx // 2, cx + wx // 2
    ybeg, yend = cy - wy // 2, cy + wy // 2
    spsf.data = psf.data[..., ybeg:yend, xbeg:xend]
    # Shift the WCS reference pixel so sky coordinates stay correct.
    spsf.wcs.wcs.crpix[0] -= xbeg
    spsf.wcs.wcs.crpix[1] -= ybeg
    return spsf
def smooth_image(model: Image, width=1.0):
    """Smooth an image with a Gaussian kernel of the given width in pixels."""
    import astropy.convolution

    assert isinstance(model, Image), model
    kernel = astropy.convolution.kernels.Gaussian2DKernel(width)
    cmodel = create_empty_image_like(model)
    nchan, npol, _, _ = model.shape
    # Smooth each (channel, polarisation) plane separately.
    for chan in range(nchan):
        for pol in range(npol):
            cmodel.data[chan, pol, :, :] = astropy.convolution.convolve(
                model.data[chan, pol, :, :], kernel, normalize_kernel=False)
    # Compensate for normalize_kernel=False by the Gaussian integral.
    if isinstance(kernel, astropy.convolution.kernels.Gaussian2DKernel):
        cmodel.data *= 2 * numpy.pi * width**2
    return cmodel
def make_residual(dcal, tl, it):
    """Accumulate (optionally masked) residual subimages into one residual image."""
    res = create_empty_image_like(dcal[0][0])
    for i, d in enumerate(dcal):
        assert numpy.max(numpy.abs(d[0].data)) > 0.0, "Residual subimage is zero"
        mask = tl[i].mask
        if mask is None:
            res.data += d[0].data
        else:
            assert numpy.max(numpy.abs(mask.data)) > 0.0, "Mask image is zero"
            res.data += d[0].data * mask.data
    assert numpy.max(numpy.abs(res.data)) > 0.0, "Residual image is zero"
    return res
# Form the MPCCAL - ICAL difference image, report its statistics, and save it.
difference_image = copy_image(mpccal_restored)
difference_image.data -= ical_restored.data
print(qa_image(difference_image, context='MPCCAL - ICAL image'))
show_image(difference_image, title='MPCCAL - ICAL image',
           components=ical_components)
plt.show(block=block_plots)
export_image_to_fits(
    difference_image,
    arl_path(
        'test_results/low-sims-mpc-mpccal-ical-restored_%.1frmax.fits' % rmax))

# Grid the per-skymodel gaintables back onto an empty phase screen,
# then save both the screen and its weights.
newscreen = create_empty_image_like(screen)
gaintables = [sm.gaintable for sm in mpccal_skymodel]
newscreen, weights = grid_gaintable_to_screen(block_vis, gaintables, newscreen)
export_image_to_fits(
    newscreen,
    arl_path('test_results/low-sims-mpc-mpccal-screen_%.1frmax.fits' % rmax))
export_image_to_fits(
    weights,
    arl_path(
        'test_results/low-sims-mpc-mpccal-screenweights_%.1frmax.fits' % rmax))
print(qa_image(weights))
print(qa_image(newscreen))
# Create test image frequency = numpy.array([1e8]) phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000') if rank == 0: model = create_test_image(frequency=frequency, phasecentre=phasecentre, cellsize=0.001, polarisation_frame=PolarisationFrame('stokesI')) #f=show_image(model, title='Model image', cm='Greys', vmax=1.0, vmin=-0.1) #print(qa_image(model, context='Model image')) #plt.show() # Rank 0 scatters the test image if rank == 0: subimages = image_scatter_facets(model, facets=facets) subimages = numpy.array_split(subimages, size) else: subimages = list() sublist = comm.scatter(subimages, root=0) root_images = imagerooter(sublist) roots = comm.gather(root_images, root=0) if rank == 0: results = sum(roots, []) root_model = create_empty_image_like(model) result = image_gather_facets(results, root_model, facets=facets) numpy.testing.assert_array_almost_equal_nulp(result.data ** 2, numpy.abs(model.data), 7) print(qa_image(result))
def ingest_visibility(self, freq=None, chan_width=None, times=None,
                      add_errors=False, block=True, bandpass=False):
    """Create a test (block)visibility with a grid of point components.

    Fills in self.comps, self.model and self.empty_model, exports the model
    to FITS, and optionally corrupts the visibility with gain errors.

    :param freq: Frequencies in Hz (default [1e8])
    :param chan_width: Channel bandwidths in Hz (default [1e6])
    :param times: Hour angles in radians (default 5 samples over +/-3h)
    :param add_errors: Apply simulated phase errors via a gaintable
    :param bandpass: Additionally apply simulated bandpass errors
    :param block: Create a BlockVisibility instead of a Visibility
    :return: The (possibly corrupted) visibility
    """
    if freq is None:
        freq = [1e8]
    if chan_width is None:
        chan_width = [1e6]
    if times is None:
        times = (numpy.pi / 12.0) * numpy.linspace(-3.0, 3.0, 5)
    lowcore = create_named_configuration('LOWBD2', rmax=750.0)
    frequency = numpy.array(freq)
    channel_bandwidth = numpy.array(chan_width)
    phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs',
                           equinox='J2000')
    if block:
        vt = create_blockvisibility(
            lowcore, times, frequency,
            channel_bandwidth=channel_bandwidth, weight=1.0,
            phasecentre=phasecentre,
            polarisation_frame=PolarisationFrame("stokesI"))
    else:
        vt = create_visibility(
            lowcore, times, frequency,
            channel_bandwidth=channel_bandwidth, weight=1.0,
            phasecentre=phasecentre,
            polarisation_frame=PolarisationFrame("stokesI"))
    cellsize = 0.001
    model = create_image_from_visibility(
        vt, npixel=self.npixel, cellsize=cellsize, npol=1,
        frequency=frequency, phasecentre=phasecentre,
        polarisation_frame=PolarisationFrame("stokesI"))
    nchan = len(self.frequency)
    flux = numpy.array(nchan * [[100.0]])
    facets = 4
    # Pixel positions of a 4x4 grid of components, offset from the
    # reference pixel in steps of npixel/facets.
    rpix = model.wcs.wcs.crpix - 1.0
    spacing_pixels = self.npixel // facets
    centers = [-1.5, -0.5, 0.5, 1.5]
    comps = list()
    for iy in centers:
        for ix in centers:
            p = int(round(rpix[0] + ix * spacing_pixels * numpy.sign(model.wcs.wcs.cdelt[0]))), \
                int(round(rpix[1] + iy * spacing_pixels * numpy.sign(model.wcs.wcs.cdelt[1])))
            sc = pixel_to_skycoord(p[0], p[1], model.wcs, origin=1)
            comp = create_skycomponent(
                direction=sc, flux=flux, frequency=frequency,
                polarisation_frame=PolarisationFrame("stokesI"))
            comps.append(comp)
    # NOTE(review): both branches are identical — presumably one of them
    # was meant to use a different predict routine; confirm intent.
    if block:
        predict_skycomponent_visibility(vt, comps)
    else:
        predict_skycomponent_visibility(vt, comps)
    insert_skycomponent(model, comps)
    self.comps = comps
    self.model = copy_image(model)
    self.empty_model = create_empty_image_like(model)
    export_image_to_fits(
        model, '%s/test_pipeline_functions_model.fits' % (self.dir))
    if add_errors:
        # These will be the same for all calls
        numpy.random.seed(180555)
        gt = create_gaintable_from_blockvisibility(vt)
        gt = simulate_gaintable(gt, phase_error=1.0, amplitude_error=0.0)
        vt = apply_gaintable(vt, gt)
        # NOTE(review): nesting of the bandpass branch under add_errors
        # reconstructed from a collapsed source line — confirm.
        if bandpass:
            bgt = create_gaintable_from_blockvisibility(vt, timeslice=1e5)
            bgt = simulate_gaintable(bgt, phase_error=0.01,
                                     amplitude_error=0.01, smooth_channels=4)
            vt = apply_gaintable(vt, bgt)
    return vt
def invert_list_serial_workflow(vis_list, template_model_imagelist,
                                dopsf=False, normalize=True, facets=1,
                                vis_slices=1, context='2d', gcfcf=None,
                                **kwargs):
    """ Sum results from invert, iterating over the scattered image and vis_list

    :param vis_list:
    :param template_model_imagelist: Model used to determine image parameters
    :param dopsf: Make the PSF instead of the dirty image
    :param facets: Number of facets
    :param normalize: Normalize by sumwt
    :param vis_slices: Number of slices
    :param context: Imaging context
    :param gcfcf: tuple containing grid correction and convolution function
    :param kwargs: Parameters for functions in components
    :return: List of (image, sumwt) tuple
    """
    # Bug fix: collections.Iterable was removed in Python 3.10;
    # use the collections.abc location instead.
    import collections.abc
    if not isinstance(template_model_imagelist, collections.abc.Iterable):
        template_model_imagelist = [template_model_imagelist]

    c = imaging_context(context)
    vis_iter = c['vis_iterator']
    invert = c['invert']

    def gather_image_iteration_results(results, template_model):
        # Gather per-facet (image, sumwt) results into one image.
        result = create_empty_image_like(template_model)
        i = 0
        sumwt = numpy.zeros([template_model.nchan, template_model.npol])
        for dpatch in image_scatter_facets(result, facets=facets):
            assert i < len(
                results), "Too few results in gather_image_iteration_results"
            if results[i] is not None:
                assert len(results[i]) == 2, results[i]
                dpatch.data[...] = results[i][0].data[...]
                sumwt += results[i][1]
            i += 1
        return result, sumwt

    def invert_ignore_none(vis, model, gg):
        # Invert vis; for a None vis return an empty image and zero weights.
        if vis is not None:
            return invert(vis, model, context=context, dopsf=dopsf,
                          normalize=normalize, gcfcf=gg, **kwargs)
        else:
            return create_empty_image_like(model), numpy.zeros(
                [model.nchan, model.npol])

    # If we are doing facets, we need to create the gcf for each image
    if gcfcf is None and facets == 1:
        gcfcf = [create_pswf_convolutionfunction(template_model_imagelist[0])]

    # Loop over all vis_lists independently
    results_vislist = list()
    if facets == 1:
        for ivis, sub_vis_list in enumerate(vis_list):
            if len(gcfcf) > 1:
                g = gcfcf[ivis]
            else:
                g = gcfcf[0]
            # Iterate within each vis_list, accumulating weighted images
            result_image = create_empty_image_like(
                template_model_imagelist[ivis])
            result_sumwt = numpy.zeros([
                template_model_imagelist[ivis].nchan,
                template_model_imagelist[ivis].npol
            ])
            for rows in vis_iter(sub_vis_list, vis_slices):
                row_vis = create_visibility_from_rows(sub_vis_list, rows)
                result = invert_ignore_none(row_vis,
                                            template_model_imagelist[ivis], g)
                if result is not None:
                    result_image.data += result[1][:, :, numpy.newaxis,
                                                   numpy.newaxis] * result[0].data
                    result_sumwt += result[1]
            result_image = normalize_sumwt(result_image, result_sumwt)
            results_vislist.append((result_image, result_sumwt))
    else:
        for ivis, sub_vis_list in enumerate(vis_list):
            # Create the graph to divide an image into facets. This is by reference.
            facet_lists = image_scatter_facets(template_model_imagelist[ivis],
                                               facets=facets)
            # Create the graph to divide the visibility into slices. This is by copy.
            sub_sub_vis_lists = visibility_scatter(sub_vis_list, vis_iter,
                                                   vis_slices=vis_slices)
            # Iterate within each vis_list
            vis_results = list()
            for sub_sub_vis_list in sub_sub_vis_lists:
                facet_vis_results = list()
                for facet_list in facet_lists:
                    facet_vis_results.append(
                        invert_ignore_none(sub_sub_vis_list, facet_list, None))
                vis_results.append(
                    gather_image_iteration_results(
                        facet_vis_results, template_model_imagelist[ivis]))
            results_vislist.append(sum_invert_results(vis_results))
    return results_vislist
def image_raster_iter(im: Image, facets=1, overlap=0, taper='flat',
                      make_flat=False) -> collections.abc.Iterable:
    """Create an image_raster_iter generator, returning images, optionally with overlaps

    The WCS is adjusted appropriately for each raster element. Hence this is a
    coordinate-aware way to iterate through an image.

    Provided we don't break reference semantics, memory should be conserved.
    However make_flat creates a new set of images and thus reference semantics
    dont hold.

    To update the image in place:
        for r in raster(im, facets=2)::
            r.data[...] = numpy.sqrt(r.data[...])

    If the overlap is greater than zero, we choose to keep all images the same
    size so the other ring of facets are ignored. So if facets=4 and overlap > 0
    then the iterator returns (facets-2)**2 = 4 images.

    A taper is applied in the overlap regions. None implies a constant value,
    linear is a ramp, and quadratic is parabolic at the ends.

    :param im: Image
    :param facets: Number of image partitions on each axis (2)
    :param overlap: overlap in pixels
    :param taper: method of tapering at the edges: 'flat' or 'linear' or
        'quadratic' or 'tukey'
    :param make_flat: Make the flat images
    """
    nchan, npol, ny, nx = im.shape
    assert facets <= ny, "Cannot have more raster elements than pixels"
    assert facets <= nx, "Cannot have more raster elements than pixels"
    assert facets >= 1, "Facets cannot be zero or less"
    assert overlap >= 0, "Overlap must be zero or greater"
    if facets == 1:
        yield im
    else:
        assert overlap < (nx // facets), "Overlap in facets is too large"
        assert overlap < (ny // facets), "Overlap in facets is too large"
        # Step between facets
        sx = nx // facets + overlap
        sy = ny // facets + overlap
        # Size of facet. Bug fix: the original computed these twice (the
        # first result was dead code) and derived dy from nx, giving
        # wrongly sized facets for non-square images.
        dx = nx // facets + 2 * overlap
        dy = ny // facets + 2 * overlap

        def taper_linear(n):
            # 1D linear ramp profile of length n
            t = numpy.ones(n)
            ramp = numpy.arange(0, overlap).astype(float) / float(overlap)
            t[:overlap] = ramp
            t[(n - overlap):n] = 1.0 - ramp
            return t

        def taper_quadratic(n):
            # 1D piecewise-quadratic ramp profile of length n
            t = numpy.ones(n)
            ramp = numpy.arange(0, overlap).astype(float) / float(overlap)
            quadratic_ramp = numpy.ones(overlap)
            quadratic_ramp[0:overlap // 2] = 2.0 * ramp[0:overlap // 2] ** 2
            quadratic_ramp[overlap // 2:] = 1 - 2.0 * ramp[overlap // 2:0:-1] ** 2
            t[:overlap] = quadratic_ramp
            t[(n - overlap):n] = 1.0 - quadratic_ramp
            return t

        def taper_tukey(n):
            # 1D Tukey window profile of length n
            xs = numpy.arange(n) / float(n)
            r = 2 * overlap / n
            return [tukey_filter(x, r) for x in xs]

        for fy in range(facets):
            y = ny // 2 + sy * (fy - facets // 2) - overlap // 2
            for fx in range(facets):
                x = nx // 2 + sx * (fx - facets // 2) - overlap // 2
                # Only facets fully inside the image are yielded
                if (x >= 0) and (x + dx) <= nx and (y >= 0) and (y + dy) <= ny:
                    # Adjust WCS so each facet keeps correct sky coordinates
                    wcs = im.wcs.deepcopy()
                    wcs.wcs.crpix[0] -= x
                    wcs.wcs.crpix[1] -= y
                    # yield image from slice (reference!)
                    subim = create_image_from_array(
                        im.data[..., y:y + dy, x:x + dx], wcs,
                        im.polarisation_frame)
                    if overlap > 0 and make_flat:
                        flat = create_empty_image_like(subim)
                        # Separable taper: outer product of per-axis profiles
                        if taper == 'linear':
                            flat.data[..., :, :] = numpy.outer(
                                taper_linear(dy), taper_linear(dx))
                        elif taper == 'quadratic':
                            flat.data[..., :, :] = numpy.outer(
                                taper_quadratic(dy), taper_quadratic(dx))
                        elif taper == 'tukey':
                            flat.data[..., :, :] = numpy.outer(
                                taper_tukey(dy), taper_tukey(dx))
                        else:
                            flat.data[...] = 1.0
                        yield flat
                    else:
                        yield subim
def sum_images(images):
    """Sum the image parts of a list of (image, sumwt) tuples.

    Returns the summed image together with the first tuple's sumwt.
    """
    total = create_empty_image_like(images[0][0])
    for item in images:
        total.data += item[0].data
    return total, images[0][1]
cellsize=0.001, polarisation_frame=PolarisationFrame('stokesI'))
#print(model)
nchan, npol, ny, nx = model.data.shape
sumwt = numpy.ones([nchan, npol])
print('%d:before Reduce: data = ' % rank)
print(sumwt)
#f=show_image(model, title='Model image', cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(model, context='Model image'))
#plt.show()

# In[5]:

# Accum images into one with weights
result_image = create_empty_image_like(model)
comm.Reduce(model.data, result_image.data, root=0, op=MPI.SUM)
#f=show_image(result_image, title='Result image', cm='Greys', vmax=1.0, vmin=-0.1)
#plt.show()
# NOTE(review): indentation reconstructed from a collapsed source line;
# Reduce only fills the receive buffer on root, so the checks below are
# assumed to run on rank 0 only — confirm.
if rank == 0:
    print('%d:after Reduce: data = ' % rank)
    print(qa_image(result_image, context='Result image'))
    # test correctness
    assert (result_image.data.shape == model.data.shape)
    numpy.testing.assert_array_almost_equal_nulp(result_image.data,
                                                 (model.data) * size, 7)

# In[6]:

result_sumwt = numpy.zeros([nchan, npol])
comm.Reduce(sumwt, result_sumwt, root=0, op=MPI.SUM)