def accumulate_results(results, **kwargs):
    """Scatter per-patch results back into a fresh copy of the enclosing image.

    Relies on the closed-over ``im`` and ``iterator``: the iterator must yield
    patches in the same order that was used to produce ``results``.

    :param results: Sequence of patch-shaped results, one per iterator patch
    :param kwargs: Passed through to ``iterator``
    :return: new image with all patches filled in
    """
    newim = copy_image(im)
    for idx, dpatch in enumerate(iterator(newim, **kwargs)):
        dpatch.data[...] = results[idx].data[...]
    return newim
def copy_skymodel(sm):
    """Return a deep copy of a sky model.

    Components and images are individually copied; the ``fixed`` flag is
    carried over unchanged.

    :param sm: SkyModel to copy
    :return: new SkyModel
    """
    copied_components = [copy_skycomponent(comp) for comp in sm.components]
    copied_images = [copy_image(image) for image in sm.images]
    return SkyModel(components=copied_components,
                    images=copied_images,
                    fixed=sm.fixed)
def solve_skymodel(vis, skymodel, gain=0.1, **kwargs):
    """Fit a single skymodel to a visibility

    If the skymodel is marked fixed it is returned unchanged. Otherwise each
    component is re-fitted to the visibility with a damped update, and each
    image is solved against the visibility.

    (Fixed docstring: the previous version documented ``evis``/``calskymodel``,
    which do not exist in the signature.)

    :param vis: Visibility to fit against
    :param skymodel: SkyModel to be fitted (returned untouched if ``skymodel.fixed``)
    :param gain: Gain in step — damping applied to the newly fitted component flux
    :param kwargs: Passed through to :func:`solve_image`
    :return: new SkyModel with updated components and images
    """
    if skymodel.fixed:
        return skymodel

    new_comps = list()
    for comp in skymodel.components:
        new_comp = copy_skycomponent(comp)
        new_comp, _ = fit_visibility(vis, new_comp)
        # Damped update: blend the freshly fitted flux with the previous flux
        new_comp.flux = gain * new_comp.flux + (1.0 - gain) * comp.flux
        new_comps.append(new_comp)

    new_images = list()
    for im in skymodel.images:
        new_image = copy_image(im)
        new_image = solve_image(vis, new_image, **kwargs)
        new_images.append(new_image)

    return SkyModel(components=new_comps, images=new_images)
def sum_invert_results(image_list):
    """ Sum a set of invert results with appropriate weighting

    :param image_list: List of [image, sum weights] pairs; entries may be None
    :return: image, sum of weights (normalised by the summed weights)
    """
    if len(image_list) == 1:
        # NOTE(review): this shortcut returns the pair unchanged, skipping the
        # normalize_sumwt applied on the multi-entry path below — confirm that
        # single results arrive already normalised.
        return image_list[0]

    first = True
    sumwt = 0.0
    im = None
    for arg in image_list:
        if arg is not None:
            if isinstance(arg[1], numpy.ndarray):
                # Broadcast per-(chan, pol) weights over the image y, x axes
                scale = arg[1][..., numpy.newaxis, numpy.newaxis]
            else:
                scale = arg[1]
            if first:
                im = copy_image(arg[0])
                im.data *= scale
                # Copy ndarray weights: the in-place += below would otherwise
                # mutate the caller's first weight array (aliasing bug).
                if isinstance(arg[1], numpy.ndarray):
                    sumwt = arg[1].copy()
                else:
                    sumwt = arg[1]
                first = False
            else:
                im.data += scale * arg[0].data
                sumwt += arg[1]
    assert not first, "No invert results"
    im = normalize_sumwt(im, sumwt)
    return im, sumwt
def generic_image_iterator_arlexecute(imagefunction, im: Image, iterator, **kwargs):
    """ Definition of interface for generic_image_arlexecute

    This generates a graph for imagefunction. Note that im cannot be a graph itself.

    :func imagefunction: Function to be applied to all pixels
    :param im: Image to be processed
    :param iterator: iterator e.g. image_raster_iter
    :param kwargs: Parameters for functions in components
    :return: graph
    """
    def accumulate_results(results, **kwargs):
        # Gather: write each processed patch back into a copy of the input
        # image, relying on the iterator yielding patches in the same order
        # as in the scatter step below.
        newim = copy_image(im)
        i = 0
        for dpatch in iterator(newim, **kwargs):
            dpatch.data[...] = results[i].data[...]
            i += 1
        return newim

    # Scatter: wrap imagefunction for lazy execution, then apply to each patch.
    # (Previously imagefunction(...) was invoked eagerly and its *result* was
    # wrapped, defeating graph construction; this now matches the
    # execute(f)(args) pattern used for accumulate_results below.)
    results = [
        arlexecute.execute(imagefunction)(copy_image(dpatch))
        for dpatch in iterator(im, **kwargs)
    ]
    return arlexecute.execute(accumulate_results, pure=True)(results, **kwargs)
def calculate_image_from_frequency_moments(im: Image, moment_image: Image,
                                           reference_frequency=None) -> Image:
    """Reconstruct an image cube from frequency-weighted moments

    Each channel is a sum over moments of the moment planes weighted by
    ((freq - reference_frequency) / reference_frequency) ** moment.
    A new image is returned; the inputs are not modified.

    For example, to find the moments and then reconstruct from just the moments::

        moment_cube = calculate_image_frequency_moments(model_multichannel, nmoments=5)
        reconstructed_cube = calculate_image_from_frequency_moments(model_multichannel, moment_cube)

    :param im: Image cube to be reconstructed
    :param moment_image: Moment cube (constructed using calculate_image_frequency_moments)
    :param reference_frequency: Reference frequency (default None uses average)
    :return: reconstructed image
    """
    assert isinstance(im, Image)

    nchan, npol, ny, nx = im.shape
    nmoments, mnpol, mny, mnx = moment_image.shape
    assert npol == mnpol
    assert ny == mny
    assert nx == mnx
    assert moment_image.wcs.wcs.ctype[
        3] == 'MOMENT', "Second image should be a moment image"

    # Channel frequencies from the spectral axis of the WCS
    channel_index = numpy.arange(nchan)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        freq = im.wcs.sub(['spectral']).wcs_pix2world(channel_index, 0)[0]

    if reference_frequency is None:
        reference_frequency = numpy.average(freq)
    log.debug(
        "calculate_image_from_frequency_moments: Reference frequency = %.3f (MHz)"
        % (reference_frequency))

    newim = copy_image(im)
    newim.data[...] = 0.0
    for chan in range(nchan):
        # Fractional frequency offset for this channel
        offset = (freq[chan] - reference_frequency) / reference_frequency
        for moment in range(nmoments):
            newim.data[chan, ...] += \
                moment_image.data[moment, ...] * numpy.power(offset, moment)

    return newim
def restore_cube(model: Image, psf: Image, residual=None, **kwargs) -> Image:
    """ Restore the model image to the residuals

    The clean beam width comes from the "psfwidth" kwarg when given, otherwise
    from an isotropic 2D Gaussian fit to the central cutout of the PSF.

    :params psf: Input PSF
    :return: restored image
    """
    from scipy.optimize import minpack
    assert isinstance(model, Image), model
    assert isinstance(psf, Image), psf
    assert residual is None or isinstance(residual, Image), residual

    restored = copy_image(model)

    # 15x15 pixel cutout around the PSF centre used for the beam fit
    npixel = psf.data.shape[3]
    centre = slice(npixel // 2 - 7, npixel // 2 + 8)

    size = get_parameter(kwargs, "psfwidth", None)

    if size is not None:
        log.debug('restore_cube: Using specified psfwidth = %s' % (size))
    else:
        # isotropic at the moment!
        try:
            fit = fit_2dgaussian(psf.data[0, 0, centre, centre])
            if fit.x_stddev <= 0.0 or fit.y_stddev <= 0.0:
                log.debug(
                    'restore_cube: error in fitting to psf, using 1 pixel stddev'
                )
                size = 1.0
            else:
                size = max(fit.x_stddev, fit.y_stddev)
                log.debug('restore_cube: psfwidth = %s' % (size))
        except minpack.error:
            log.debug('restore_cube: minpack error, using 1 pixel stddev')
            size = 1.0
        except ValueError:
            log.debug(
                'restore_cube: warning in fit to psf, using 1 pixel stddev')
            size = 1.0

    # By convention, we normalise the peak not the integral so this is the volume of the Gaussian
    norm = 2.0 * numpy.pi * size**2
    kernel = Gaussian2DKernel(size)
    for chan in range(model.shape[0]):
        for pol in range(model.shape[1]):
            restored.data[chan, pol, :, :] = norm * convolve(
                model.data[chan, pol, :, :], kernel, normalize_kernel=False)
    if residual is not None:
        restored.data += residual.data
    return restored
def deconvolve(dirty, psf, model, facet, gthreshold):
    """Deconvolve one facet, cleaning only if its peak exceeds 1.1 * threshold.

    NOTE(review): ``prefix`` and ``kwargs`` are free variables resolved from an
    enclosing scope — this looks like a nested worker function; confirm both
    are defined wherever this is used. ``kwargs`` is also mutated in place
    below.

    :param dirty: Dirty image for this facet
    :param psf: PSF for this facet
    :param model: Current model image for this facet
    :param facet: Facet index (used only in log messages)
    :param gthreshold: Global clean threshold
    :return: model + clean result, or a copy of the model when the facet peak
        is at or below 1.1 * gthreshold
    """
    import time
    starttime = time.time()
    # Log prefix identifies this facet (and the outer prefix if one was set)
    if prefix == '':
        lprefix = "facet %d" % facet
    else:
        lprefix = "%s, facet %d" % (prefix, facet)

    nmoments = get_parameter(kwargs, "nmoments", 0)

    if nmoments > 0:
        # Peak taken from the zeroth frequency moment, scaled by channel count
        moment0 = calculate_image_frequency_moments(dirty)
        this_peak = numpy.max(numpy.abs(
            moment0.data[0, ...])) / dirty.data.shape[0]
    else:
        this_peak = numpy.max(numpy.abs(dirty.data[0, ...]))

    if this_peak > 1.1 * gthreshold:
        log.info(
            "deconvolve_component %s: cleaning - peak %.6f > 1.1 * threshold %.6f"
            % (lprefix, this_peak, gthreshold))
        # NOTE(review): in-place mutation of the enclosing kwargs dict —
        # later calls will see threshold=gthreshold too.
        kwargs['threshold'] = gthreshold
        result, _ = deconvolve_cube(dirty, psf, prefix=lprefix, **kwargs)

        # Add the clean result onto the existing model when channel counts match
        if result.data.shape[0] == model.data.shape[0]:
            result.data += model.data
        else:
            log.warning(
                "deconvolve_component %s: Initial model %s and clean result %s do not have the same shape"
                % (lprefix, str(
                    model.data.shape[0]), str(result.data.shape[0])))

        flux = numpy.sum(result.data[0, 0, ...])
        log.info(
            '### %s, %.6f, %.6f, True, %.3f # cycle, facet, peak, cleaned flux, clean, time?'
            % (lprefix, this_peak, flux, time.time() - starttime))
        return result
    else:
        log.info(
            "deconvolve_component %s: Not cleaning - peak %.6f <= 1.1 * threshold %.6f"
            % (lprefix, this_peak, gthreshold))
        log.info(
            '### %s, %.6f, %.6f, False, %.3f # cycle, facet, peak, cleaned flux, clean, time?'
            % (lprefix, this_peak, 0.0, time.time() - starttime))
        return copy_image(model)