def gaintable_timeslice_iter(gt: GainTable, **kwargs) -> numpy.ndarray:
    """ Iterate a GainTable in time, yielding boolean row-selection masks.

    This is a generator: each yielded value is a boolean array over gt.time
    with True for the rows falling inside the current time box.

    :param gt: GainTable
    :param timeslice: 'auto' or time in seconds
    :param gaintable_slices: Number of slices (second in precedence to timeslice)
    :return: Boolean array with selected rows=True
    """
    assert isinstance(gt, GainTable)

    t_min = numpy.min(gt.time)
    t_max = numpy.max(gt.time)

    requested = get_parameter(kwargs, "timeslice", 'auto')

    if requested == 'auto':
        # One box per distinct timestamp, with a tight 0.1 s matching window
        centres = numpy.unique(gt.time)
        width = 0.1
    elif requested is None:
        # A single box spanning the full time range
        centres = [0.5 * (t_max + t_min)]
        width = t_max - t_min
    elif isinstance(requested, (float, int)):
        # Regular grid with the requested spacing
        centres = numpy.arange(t_min, t_max, requested)
        width = requested
    else:
        # Fall back to an explicit number of slices
        n_slices = get_parameter(kwargs, "gaintable_slices", None)
        assert n_slices is not None, "Time slicing not specified: set either timeslice or gaintable_slices"
        centres = numpy.linspace(t_min, t_max, n_slices)
        width = (centres[1] - centres[0]) if n_slices > 1 else (t_max - t_min)

    for centre in centres:
        yield numpy.abs(gt.time - centre) <= 0.5 * width
def invert_2d(vis: Visibility, im: Image, dopsf: bool = False, normalize: bool = True,
              gcfcf=None, **kwargs) -> (Image, numpy.ndarray):
    """ Invert using 2D convolution function, using the specified convolution function

    Use the image im as a template. Do PSF in a separate call.

    This is at the bottom of the layering i.e. all transforms are eventually
    expressed in terms of this function. Any shifting needed is performed here.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :param gcfcf: (Grid correction function i.e. in image space, Convolution function i.e. in uv space)
    :return: resulting image (and sumwt; plus imaginary image when imaginary=True)
    """
    assert isinstance(vis, Visibility), vis

    workvis = copy_visibility(vis)
    if dopsf:
        # The PSF is the transform of unit visibilities
        workvis.data['vis'][...] = 1.0 + 0.0j

    workvis = shift_vis_to_image(workvis, im, tangent=True, inverse=False)

    if gcfcf is not None:
        gcf, cf = gcfcf
    else:
        gcf, cf = create_pswf_convolutionfunction(
            im,
            support=get_parameter(kwargs, "support", 6),
            oversampling=get_parameter(kwargs, "oversampling", 128))

    gd = create_griddata_from_image(im)
    gd, sumwt = grid_visibility_to_griddata(workvis, griddata=gd, cf=cf)

    keep_imaginary = get_parameter(kwargs, "imaginary", False)
    if not keep_imaginary:
        dirty = fft_griddata_to_image(gd, gcf)
        if normalize:
            dirty = normalize_sumwt(dirty, sumwt)
        return dirty, sumwt

    # Caller asked for the imaginary part as well: a three-element return
    real_image, imag_image = fft_griddata_to_image(gd, gcf, imaginary=keep_imaginary)
    log.debug("invert_2d: retaining imaginary part of dirty image")
    if normalize:
        real_image = normalize_sumwt(real_image, sumwt)
        imag_image = normalize_sumwt(imag_image, sumwt)
    return real_image, sumwt, imag_image
def coalesce_visibility(vis: BlockVisibility, **kwargs) -> Visibility:
    """ Coalesce the BlockVisibility data_models. The output format is a Visibility, as needed for imaging

    Coalesce by baseline-dependent averaging (optional). The number of integrations averaged goes as the
    ratio of the maximum possible baseline length to that for this baseline. This number can be scaled by
    coalescence_factor and limited by max_coalescence.

    When faceting, the coalescence factors should be roughly the same as the number of facets on one axis.

    If coalescence_factor=0.0 then just a format conversion is done

    :param vis: BlockVisibility to be coalesced
    :return: Coalesced visibility with cindex and blockvis filled in
    """
    assert isinstance(vis, BlockVisibility), "vis is not a BlockVisibility: %r" % vis

    time_coal = get_parameter(kwargs, 'time_coal', 0.0)
    max_time_coal = get_parameter(kwargs, 'max_time_coal', 100)
    frequency_coal = get_parameter(kwargs, 'frequency_coal', 0.0)
    max_frequency_coal = get_parameter(kwargs, 'max_frequency_coal', 100)

    # Nothing to average: a plain format conversion is sufficient
    if time_coal == 0.0 and frequency_coal == 0.0:
        return convert_blockvisibility_to_visibility(vis)

    (cvis, cuvw, cwts, cimwt, ctime, cfrequency, cchannel_bandwidth,
     ca1, ca2, cintegration_time, cindex) = average_in_blocks(
        vis.data['vis'], vis.data['uvw'], vis.data['weight'], vis.data['imaging_weight'],
        vis.time, vis.integration_time, vis.frequency, vis.channel_bandwidth,
        time_coal, max_time_coal, frequency_coal, max_frequency_coal)

    coalesced_vis = Visibility(uvw=cuvw, time=ctime, frequency=cfrequency,
                               channel_bandwidth=cchannel_bandwidth,
                               phasecentre=vis.phasecentre, antenna1=ca1, antenna2=ca2,
                               vis=cvis, weight=cwts, imaging_weight=cimwt,
                               configuration=vis.configuration,
                               integration_time=cintegration_time,
                               polarisation_frame=vis.polarisation_frame,
                               cindex=cindex, blockvis=vis, meta=vis.meta)

    log.debug(
        'coalesce_visibility: Created new Visibility for coalesced data_models, coalescence factors (t,f) = (%.3f,%.3f)'
        % (time_coal, frequency_coal))
    log.debug('coalesce_visibility: Maximum coalescence (t,f) = (%d, %d)' % (max_time_coal, max_frequency_coal))
    log.debug('coalesce_visibility: Original %s, coalesced %s' % (vis_summary(vis), vis_summary(coalesced_vis)))

    return coalesced_vis
def create_window(template, window_type, **kwargs):
    """Create a window image using one of a number of methods

    The window is 1.0 or 0.0

    window types:
        'quarter': Inner quarter of the image
        'no_edge': 'window_edge' pixels around edge set to zero
        'threshold': template image pixels < 'window_threshold' absolute value set to zero
        None: window covers the entire image

    :param template: Template image
    :param window_type: 'quarter' | 'no_edge' | 'threshold' | None
    :param window_edge: Edge width in pixels for 'no_edge' (default 16)
    :param window_threshold: Threshold for 'threshold' (default 10 * std of template data)
    :return: New image containing window

    See also
        :py:func:`rascil.processing_components.image.deconvolution.deconvolve_cube`
    """
    assert image_is_canonical(template)

    window = create_empty_image_like(template)
    if window_type == 'quarter':
        # Select the inner quarter (in both axes) of each sky plane
        qx = template.shape[3] // 4
        qy = template.shape[2] // 4
        window.data[..., (qy + 1):3 * qy, (qx + 1):3 * qx] = 1.0
        # Fixed: log messages previously said 'create_mask' although this function is create_window
        log.info('create_window: Cleaning inner quarter of each sky plane')
    elif window_type == 'no_edge':
        edge = get_parameter(kwargs, 'window_edge', 16)
        nx = template.shape[3]
        ny = template.shape[2]
        window.data[..., (edge + 1):(ny - edge), (edge + 1):(nx - edge)] = 1.0
        log.info('create_window: Window omits %d-pixel edge of each sky plane' % (edge))
    elif window_type == 'threshold':
        window_threshold = get_parameter(kwargs, 'window_threshold', None)
        if window_threshold is None:
            # Default threshold: ten times the standard deviation of the template
            window_threshold = 10.0 * numpy.std(template.data)
        window.data[template.data >= window_threshold] = 1.0
        log.info('create_window: Window omits all points below %g' % (window_threshold))
    elif window_type is None:
        # All-zero window is interpreted downstream as "no windowing"
        log.info("create_window: Window covers entire image")
    else:
        raise ValueError("Window shape %s is not recognized" % window_type)

    return window
def restore_cube(model: Image, psf: Image, residual=None, **kwargs) -> Image:
    """ Restore the model image to the residuals

    The model is convolved with a Gaussian restoring beam, fitted to the PSF
    (or specified via psfwidth), and the residual image is added if supplied.

    :param model: Model image (e.g. clean components)
    :param psf: Input PSF, used to determine the restoring beam
    :param residual: Residual image to add after convolution (optional)
    :param psfwidth: Standard deviation in pixels of the restoring Gaussian;
        fitted from the central PSF patch when not given
    :return: restored image
    """
    assert isinstance(model, Image), model
    assert image_is_canonical(model)
    assert isinstance(psf, Image), psf
    assert image_is_canonical(psf)
    assert residual is None or isinstance(residual, Image), residual
    # Fixed: only check canonical form when a residual is actually supplied;
    # previously image_is_canonical was called with None
    if residual is not None:
        assert image_is_canonical(residual)

    restored = copy_image(model)

    npixel = psf.data.shape[3]
    # Fit to the central 15x15-pixel patch of the PSF only
    sl = slice(npixel // 2 - 7, npixel // 2 + 8)

    size = get_parameter(kwargs, "psfwidth", None)

    if size is None:
        # isotropic at the moment!
        # NOTE(review): scipy.optimize.minpack is a private module; this import
        # may break on newer scipy releases — confirm against pinned scipy version
        from scipy.optimize import minpack
        try:
            fit = fit_2dgaussian(psf.data[0, 0, sl, sl])
            if fit.x_stddev <= 0.0 or fit.y_stddev <= 0.0:
                log.debug(
                    'restore_cube: error in fitting to psf, using 1 pixel stddev'
                )
                size = 1.0
            else:
                size = max(fit.x_stddev, fit.y_stddev)
                log.debug('restore_cube: psfwidth = %s' % (size))
        except minpack.error:
            log.debug('restore_cube: minpack error, using 1 pixel stddev')
            size = 1.0
        except ValueError:
            log.debug(
                'restore_cube: warning in fit to psf, using 1 pixel stddev')
            size = 1.0
    else:
        log.debug('restore_cube: Using specified psfwidth = %s' % (size))

    # By convention, we normalise the peak not the integral so this is the volume of the Gaussian
    norm = 2.0 * numpy.pi * size ** 2
    gk = Gaussian2DKernel(size)
    for chan in range(model.shape[0]):
        for pol in range(model.shape[1]):
            restored.data[chan, pol, :, :] = norm * convolve_fft(
                model.data[chan, pol, :, :], gk,
                normalize_kernel=False, allow_huge=True)
    if residual is not None:
        restored.data += residual.data
    return restored
def predict_2d(vis: Union[BlockVisibility, Visibility], model: Image, gcfcf=None,
               **kwargs) -> Union[BlockVisibility, Visibility]:
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually
    expressed in terms of this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :param gcfcf: (Grid correction function i.e. in image space, Convolution function i.e. in uv space)
    :return: resulting visibility (in place works)
    """
    # No model means nothing to predict
    if model is None:
        return vis

    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), vis

    _, _, ny, nx = model.data.shape

    if gcfcf is not None:
        gcf, cf = gcfcf
    else:
        gcf, cf = create_pswf_convolutionfunction(
            model,
            support=get_parameter(kwargs, "support", 8),
            oversampling=get_parameter(kwargs, "oversampling", 127))

    grid = create_griddata_from_image(model, vis)
    polmodel = convert_stokes_to_polimage(model, vis.polarisation_frame)
    grid = fft_image_to_griddata(polmodel, grid, gcf)

    if isinstance(vis, Visibility):
        degridded = degrid_visibility_from_griddata(vis, griddata=grid, cf=cf)
    else:
        degridded = degrid_blockvisibility_from_griddata(vis, griddata=grid, cf=cf)

    # Now we can shift the visibility from the image frame to the original visibility frame
    return shift_vis_to_image(degridded, model, tangent=True, inverse=True)
def t1(**kwargs):
    """Exercise get_parameter lookup semantics: explicit value, default fallback,
    missing key, and a None parameter dict."""
    cases = [
        ((kwargs, 'cellsize'), 0.1),
        ((kwargs, 'spectral_mode', 'channels'), 'mfs'),
        ((kwargs, 'null_mode', 'mfs'), 'mfs'),
        ((kwargs, 'foo', 'bar'), 'bar'),
        ((kwargs, 'foo'), None),
        ((None, 'foo', 'bar'), 'bar'),
    ]
    for call_args, expected in cases:
        assert get_parameter(*call_args) == expected
def continuum_imaging_list_rsexecute_workflow(vis_list, model_imagelist, context, gcfcf=None,
                                              vis_slices=1, facets=1, **kwargs):
    """ Create graph for the continuum imaging pipeline.

    Same as ICAL but with no selfcal.

    Builds (does not execute) the rsexecute/dask graph: PSF, initial residual,
    cycle-0 deconvolution, then nmajor major cycles of residual + deconvolve,
    and a final residual + restore.

    :param vis_list: List of visibilities (or graph)
    :param model_imagelist: List of model images (or graph)
    :param context: Imaging context
    :param gcfcf: Grid correction / convolution function pair; built from the first model if None
    :param vis_slices: Number of visibility slices
    :param facets: Number of facets
    :param kwargs: Parameters for functions in components
    :return: (deconvolved model list, residual list, restored list), all graphs
    """
    if gcfcf is None:
        # Default gcf/cf from the first model image; deferred execution
        gcfcf = [rsexecute.execute(create_pswf_convolutionfunction)(model_imagelist[0])]

    psf_imagelist = invert_list_rsexecute_workflow(vis_list, model_imagelist, context=context, dopsf=True,
                                                   vis_slices=vis_slices, facets=facets, gcfcf=gcfcf, **kwargs)

    residual_imagelist = residual_list_rsexecute_workflow(vis_list, model_imagelist, context=context, gcfcf=gcfcf,
                                                          vis_slices=vis_slices, facets=facets, **kwargs)

    # Initial ("cycle 0") deconvolution starting from the input model
    deconvolve_model_imagelist = deconvolve_list_rsexecute_workflow(residual_imagelist, psf_imagelist,
                                                                    model_imagelist,
                                                                    prefix='cycle 0',
                                                                    **kwargs)

    nmajor = get_parameter(kwargs, "nmajor", 5)
    # NOTE(review): with nmajor == 1 no extra major cycles run (only cycle 0);
    # with nmajor > 1 a full nmajor cycles run — confirm this off-by-one is intended
    if nmajor > 1:
        for cycle in range(nmajor):
            prefix = "cycle %d" % (cycle + 1)
            residual_imagelist = residual_list_rsexecute_workflow(vis_list, deconvolve_model_imagelist,
                                                                  context=context, vis_slices=vis_slices,
                                                                  facets=facets, gcfcf=gcfcf, **kwargs)
            deconvolve_model_imagelist = deconvolve_list_rsexecute_workflow(residual_imagelist, psf_imagelist,
                                                                            deconvolve_model_imagelist,
                                                                            prefix=prefix,
                                                                            **kwargs)

    # Final residual against the last deconvolved model, then restore
    residual_imagelist = residual_list_rsexecute_workflow(vis_list, deconvolve_model_imagelist, context=context,
                                                          vis_slices=vis_slices, facets=facets, gcfcf=gcfcf,
                                                          **kwargs)
    restore_imagelist = restore_list_rsexecute_workflow(deconvolve_model_imagelist, psf_imagelist,
                                                        residual_imagelist)

    return (deconvolve_model_imagelist, residual_imagelist, restore_imagelist)
def deconvolve_cube(dirty: Image, psf: Image, prefix='', **kwargs) -> (Image, Image):
    """ Clean using a variety of algorithms

    The algorithms available are:

    hogbom: Hogbom CLEAN See: Hogbom CLEAN A&A Suppl, 15, 417, (1974)

    hogbom-complex: Complex Hogbom CLEAN of stokesIQUV image

    msclean: MultiScale CLEAN See: Cornwell, T.J., Multiscale CLEAN
    (IEEE Journal of Selected Topics in Sig Proc, 2008 vol. 2 pp. 793-801)

    mfsmsclean, msmfsclean, mmclean: MultiScale Multi-Frequency
    See: U. Rau and T. J. Cornwell, "A multi-scale multi-frequency deconvolution algorithm
    for synthesis imaging in radio interferometry," A&A 532, A71 (2011).

    For example::

        comp, residual = deconvolve_cube(dirty, psf, niter=1000, gain=0.7, algorithm='msclean',
                                         scales=[0, 3, 10, 30], threshold=0.01)

    For the MFS clean, the psf must have number of channels >= 2 * nmoment

    :param dirty: Image dirty image
    :param psf: Image Point Spread Function
    :param prefix: Informational prefix used in log messages
    :param window_shape: Window image (Bool) - clean where True
    :param mask: Window in the form of an image, overrides window_shape
    :param algorithm: Cleaning algorithm: 'msclean'|'hogbom'|'hogbom-complex'|'mfsmsclean'
    :param gain: loop gain (float) 0.7
    :param threshold: Clean threshold (0.0)
    :param fractional_threshold: Fractional threshold (0.01)
    :param scales: Scales (in pixels) for multiscale ([0, 3, 10, 30])
    :param nmoment: Number of frequency moments (default 3)
    :param findpeak: Method of finding peak in mfsclean: 'Algorithm1'|'ASKAPSoft'|'CASA'|'RASCIL',
        Default is RASCIL.
    :return: component image, residual image

    See also
        :py:func:`rascil.processing_components.arrays.cleaners.hogbom`
        :py:func:`rascil.processing_components.arrays.cleaners.hogbom_complex`
        :py:func:`rascil.processing_components.arrays.cleaners.msclean`
        :py:func:`rascil.processing_components.arrays.cleaners.msmfsclean`
    """
    assert isinstance(dirty, Image), dirty
    assert image_is_canonical(dirty)
    assert isinstance(psf, Image), psf
    assert image_is_canonical(psf)

    # Build the clean window (1.0 where cleaning is allowed) from window_shape
    window_shape = get_parameter(kwargs, 'window_shape', None)
    if window_shape == 'quarter':
        log.info("deconvolve_cube %s: window is inner quarter" % prefix)
        qx = dirty.shape[3] // 4
        qy = dirty.shape[2] // 4
        window = numpy.zeros_like(dirty.data)
        window[..., (qy + 1):3 * qy, (qx + 1):3 * qx] = 1.0
        log.info(
            'deconvolve_cube %s: Cleaning inner quarter of each sky plane' %
            prefix)
    elif window_shape == 'no_edge':
        edge = get_parameter(kwargs, 'window_edge', 16)
        nx = dirty.shape[3]
        ny = dirty.shape[2]
        window = numpy.zeros_like(dirty.data)
        window[..., (edge + 1):(ny - edge), (edge + 1):(nx - edge)] = 1.0
        log.info(
            'deconvolve_cube %s: Window omits %d-pixel edge of each sky plane'
            % (prefix, edge))
    elif window_shape is None:
        log.info("deconvolve_cube %s: Cleaning entire image" % prefix)
        window = None
    else:
        raise ValueError("Window shape %s is not recognized" % window_shape)

    # An explicit mask image takes precedence over window_shape
    mask = get_parameter(kwargs, 'mask', None)
    if isinstance(mask, Image):
        if window is not None:
            log.warning(
                'deconvolve_cube %s: Overriding window_shape with mask image' %
                (prefix))
        window = mask.data

    # Optionally trim the PSF to +/- psf_support pixels around its centre.
    # NOTE(review): this assigns psf.data in place, so the caller's psf is
    # modified by this function — confirm callers do not reuse the full PSF.
    psf_support = get_parameter(kwargs, 'psf_support',
                                max(dirty.shape[2] // 2, dirty.shape[3] // 2))
    if (psf_support <= psf.shape[2] // 2) and (
        (psf_support <= psf.shape[3] // 2)):
        centre = [psf.shape[2] // 2, psf.shape[3] // 2]
        psf.data = psf.data[..., (centre[0] - psf_support):(centre[0] + psf_support),
                            (centre[1] - psf_support):(centre[1] + psf_support)]
        log.info('deconvolve_cube %s: PSF support = +/- %d pixels' %
                 (prefix, psf_support))
        log.info('deconvolve_cube %s: PSF shape %s' % (prefix, str(psf.data.shape)))

    algorithm = get_parameter(kwargs, 'algorithm', 'msclean')
    if algorithm == 'msclean':
        # Multi-scale CLEAN applied independently per polarisation and channel
        log.info(
            "deconvolve_cube %s: Multi-scale clean of each polarisation and channel separately"
            % prefix)
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        scales = get_parameter(kwargs, 'scales', [0, 3, 10, 30])
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.01)
        assert 0.0 < fracthresh < 1.0

        comp_array = numpy.zeros_like(dirty.data)
        residual_array = numpy.zeros_like(dirty.data)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                # Skip planes with an empty PSF (nothing to deconvolve against)
                if psf.data[channel, pol, :, :].max():
                    log.info(
                        "deconvolve_cube %s: Processing pol %d, channel %d" %
                        (prefix, pol, channel))
                    if window is None:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            msclean(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                    None, gain, thresh, niter, scales, fracthresh, prefix)
                    else:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            msclean(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                    window[channel, pol, :, :], gain, thresh, niter, scales,
                                    fracthresh, prefix)
                else:
                    log.info(
                        "deconvolve_cube %s: Skipping pol %d, channel %d" %
                        (prefix, pol, channel))

        comp_image = create_image_from_array(comp_array, dirty.wcs,
                                             dirty.polarisation_frame)
        residual_image = create_image_from_array(residual_array, dirty.wcs,
                                                 dirty.polarisation_frame)

    elif algorithm == 'msmfsclean' or algorithm == 'mfsmsclean' or algorithm == 'mmclean':
        # Multi-scale multi-frequency CLEAN: work in frequency-moment (Taylor) space
        findpeak = get_parameter(kwargs, "findpeak", 'RASCIL')
        log.info(
            "deconvolve_cube %s: Multi-scale multi-frequency clean of each polarisation separately"
            % prefix)
        nmoment = get_parameter(kwargs, "nmoment", 3)
        assert nmoment >= 1, "Number of frequency moments must be greater than or equal to one"
        nchan = dirty.shape[0]
        assert nchan > 2 * (nmoment - 1), "Require nchan %d > 2 * (nmoment %d - 1)" % (
            nchan, 2 * (nmoment - 1))
        dirty_taylor = calculate_image_frequency_moments(dirty, nmoment=nmoment)
        # The PSF needs twice the number of moments of the dirty image
        if nmoment > 1:
            psf_taylor = calculate_image_frequency_moments(psf, nmoment=2 * nmoment)
        else:
            psf_taylor = calculate_image_frequency_moments(psf, nmoment=1)
        # Normalise both cubes by the PSF peak
        psf_peak = numpy.max(psf_taylor.data)
        dirty_taylor.data /= psf_peak
        psf_taylor.data /= psf_peak
        log.info("deconvolve_cube %s: Shape of Dirty moments image %s" %
                 (prefix, str(dirty_taylor.shape)))
        log.info("deconvolve_cube %s: Shape of PSF moments image %s" %
                 (prefix, str(psf_taylor.shape)))
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        scales = get_parameter(kwargs, 'scales', [0, 3, 10, 30])
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 < fracthresh < 1.0

        comp_array = numpy.zeros(dirty_taylor.data.shape)
        residual_array = numpy.zeros(dirty_taylor.data.shape)
        for pol in range(dirty_taylor.data.shape[1]):
            # Always use the Stokes I PSF
            if psf_taylor.data[0, 0, :, :].max():
                log.info("deconvolve_cube %s: Processing pol %d" % (prefix, pol))
                if window is None:
                    comp_array[:, pol, :, :], residual_array[:, pol, :, :] = \
                        msmfsclean(dirty_taylor.data[:, pol, :, :], psf_taylor.data[:, 0, :, :],
                                   None, gain, thresh, niter, scales, fracthresh, findpeak, prefix)
                else:
                    log.info(
                        'deconvolve_cube %s: Clean window has %d valid pixels' %
                        (prefix, int(numpy.sum(window[0, pol]))))
                    comp_array[:, pol, :, :], residual_array[:, pol, :, :] = \
                        msmfsclean(dirty_taylor.data[:, pol, :, :], psf_taylor.data[:, 0, :, :],
                                   window[0, pol, :, :], gain, thresh, niter, scales, fracthresh,
                                   findpeak, prefix)
            else:
                log.info("deconvolve_cube %s: Skipping pol %d" % (prefix, pol))

        comp_image = create_image_from_array(comp_array, dirty_taylor.wcs,
                                             dirty.polarisation_frame)
        residual_image = create_image_from_array(residual_array, dirty_taylor.wcs,
                                                 dirty.polarisation_frame)

        # By default convert moment images back to spectral cubes
        return_moments = get_parameter(kwargs, "return_moments", False)
        if not return_moments:
            log.info("deconvolve_cube %s: calculating spectral cubes" % prefix)
            comp_image = calculate_image_from_frequency_moments(
                dirty, comp_image)
            residual_image = calculate_image_from_frequency_moments(
                dirty, residual_image)
        else:
            log.info("deconvolve_cube %s: constructed moment cubes" % prefix)

    elif algorithm == 'hogbom':
        # Classic Hogbom CLEAN per polarisation and channel
        log.info(
            "deconvolve_cube %s: Hogbom clean of each polarisation and channel separately"
            % prefix)
        gain = get_parameter(kwargs, 'gain', 0.1)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 < fracthresh < 1.0

        comp_array = numpy.zeros(dirty.data.shape)
        residual_array = numpy.zeros(dirty.data.shape)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if psf.data[channel, pol, :, :].max():
                    log.info(
                        "deconvolve_cube %s: Processing pol %d, channel %d" %
                        (prefix, pol, channel))
                    if window is None:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                   None, gain, thresh, niter, fracthresh, prefix)
                    else:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                   window[channel, pol, :, :], gain, thresh, niter, fracthresh,
                                   prefix)
                else:
                    log.info(
                        "deconvolve_cube %s: Skipping pol %d, channel %d" %
                        (prefix, pol, channel))

        comp_image = create_image_from_array(comp_array, dirty.wcs,
                                             dirty.polarisation_frame)
        residual_image = create_image_from_array(residual_array, dirty.wcs,
                                                 dirty.polarisation_frame)

    elif algorithm == 'hogbom-complex':
        # Hogbom CLEAN for stokesIQUV: I and V cleaned individually,
        # Q and U cleaned jointly as a complex pair
        log.info(
            "deconvolve_cube_complex: Hogbom-complex clean of each polarisation and channel separately"
        )
        gain = get_parameter(kwargs, 'gain', 0.1)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 <= fracthresh < 1.0

        comp_array = numpy.zeros(dirty.data.shape)
        residual_array = numpy.zeros(dirty.data.shape)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if pol == 0 or pol == 3:
                    # Stokes I and V: ordinary real-valued Hogbom clean
                    if psf.data[channel, pol, :, :].max():
                        log.info(
                            "deconvolve_cube_complex: Processing pol %d, channel %d"
                            % (pol, channel))
                        if window is None:
                            comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                                hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                       None, gain, thresh, niter, fracthresh)
                        else:
                            comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                                hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                       window[channel, pol, :, :], gain, thresh, niter, fracthresh)
                    else:
                        log.info(
                            "deconvolve_cube_complex: Skipping pol %d, channel %d"
                            % (pol, channel))
                if pol == 1:
                    # Stokes Q and U cleaned together as Q + iU
                    if psf.data[channel, 1:2, :, :].max():
                        log.info(
                            "deconvolve_cube_complex: Processing pol 1 and 2, channel %d"
                            % (channel))
                        if window is None:
                            comp_array[channel, 1, :, :], comp_array[
                                channel, 2, :, :], residual_array[
                                    channel, 1, :, :], residual_array[
                                        channel, 2, :, :] = hogbom_complex(
                                            dirty.data[channel, 1, :, :],
                                            dirty.data[channel, 2, :, :],
                                            psf.data[channel, 1, :, :],
                                            psf.data[channel, 2, :, :], None,
                                            gain, thresh, niter, fracthresh)
                        else:
                            comp_array[channel, 1, :, :], comp_array[
                                channel, 2, :, :], residual_array[
                                    channel, 1, :, :], residual_array[
                                        channel, 2, :, :] = hogbom_complex(
                                            dirty.data[channel, 1, :, :],
                                            dirty.data[channel, 2, :, :],
                                            psf.data[channel, 1, :, :],
                                            psf.data[channel, 2, :, :],
                                            window[channel, pol, :, :], gain,
                                            thresh, niter, fracthresh)
                    else:
                        log.info(
                            "deconvolve_cube_complex: Skipping pol 1 and 2, channel %d"
                            % (channel))
                if pol == 2:
                    # U was already handled together with Q above
                    continue

        comp_image = create_image_from_array(
            comp_array, dirty.wcs,
            polarisation_frame=PolarisationFrame('stokesIQUV'))
        residual_image = create_image_from_array(
            residual_array, dirty.wcs,
            polarisation_frame=PolarisationFrame('stokesIQUV'))

    else:
        raise ValueError('deconvolve_cube %s: Unknown algorithm %s' %
                         (prefix, algorithm))

    return comp_image, residual_image
def invert_list_rsexecute_workflow(vis_list, template_model_imagelist, context, dopsf=False, normalize=True,
                                   facets=1, vis_slices=1, gcfcf=None, **kwargs):
    """ Sum results from invert, iterating over the scattered image and vis_list

    Note that this call can be converted to a set of rsexecute calls to the serial version,
    using argument use_serial_invert=True

    :param vis_list: list of vis (or graph)
    :param template_model_imagelist: list of template models (or graph)
    :param dopsf: Make the PSF instead of the dirty image
    :param facets: Number of facets
    :param normalize: Normalize by sumwt
    :param vis_slices: Number of slices
    :param context: Imaging context
    :param gcfcf: tuple containing grid correction and convolution function
    :param kwargs: Parameters for functions in components
    :return: List of (image, sumwt) tuples, one per vis in vis_list

    For example::

        model_list = [rsexecute.execute(create_image_from_visibility)
            (v, npixel=npixel, cellsize=cellsize, polarisation_frame=pol_frame)
            for v in vis_list]
        model_list = rsexecute.persist(model_list)
        dirty_list = invert_list_rsexecute_workflow(vis_list, template_model_imagelist=model_list,
                                                    context='wstack', vis_slices=51)
        dirty_sumwt_list = rsexecute.compute(dirty_list, sync=True)
        dirty, sumwt = dirty_sumwt_list[centre]
    """
    # Fixed: the collections.Iterable alias was removed in Python 3.10;
    # use the collections.abc module explicitly.
    from collections.abc import Iterable

    # Use serial invert for each element of the visibility list. This means that e.g. iteration
    # through w-planes or timeslices is done sequentially thus not incurring the memory cost
    # of doing all at once.
    if get_parameter(kwargs, "use_serial_invert", False):
        from rascil.workflows.serial.imaging.imaging_serial import invert_list_serial_workflow
        return [rsexecute.execute(invert_list_serial_workflow, nout=1)
                (vis_list=[vis_list[i]],
                 template_model_imagelist=[template_model_imagelist[i]],
                 context=context, dopsf=dopsf, normalize=normalize,
                 vis_slices=vis_slices, facets=facets, gcfcf=gcfcf, **kwargs)[0]
                for i, _ in enumerate(vis_list)]

    if not isinstance(template_model_imagelist, Iterable):
        template_model_imagelist = [template_model_imagelist]

    c = imaging_context(context)
    vis_iter = c['vis_iterator']
    invert = c['invert']

    # Odd facet counts (other than 1) are reduced by one
    if facets % 2 == 0 or facets == 1:
        actual_number_facets = facets
    else:
        actual_number_facets = max(1, (facets - 1))

    def gather_image_iteration_results(results, template_model):
        # Reassemble facet patches into one image and accumulate the sum of weights
        result = create_empty_image_like(template_model)
        i = 0
        sumwt = numpy.zeros([template_model.nchan, template_model.npol])
        for dpatch in image_scatter_facets(result, facets=facets):
            assert i < len(results), "Too few results in gather_image_iteration_results"
            if results[i] is not None:
                assert len(results[i]) == 2, results[i]
                dpatch.data[...] = results[i][0].data[...]
                sumwt += results[i][1]
                i += 1
        return result, sumwt

    def invert_ignore_none(vis, model, gg):
        # A visibility scatter may yield empty slices; return a zero image for those
        if vis is not None:
            return invert(vis, model, context=context, dopsf=dopsf, normalize=normalize,
                          gcfcf=gg, **kwargs)
        else:
            return create_empty_image_like(model), numpy.zeros([model.nchan, model.npol])

    # If we are doing facets, we need to create the gcf for each image
    if gcfcf is None and facets == 1:
        gcfcf = [rsexecute.execute(create_pswf_convolutionfunction)(template_model_imagelist[0])]

    # Loop over all vis_lists independently
    results_vislist = list()
    if facets == 1:
        for ivis, sub_vis_list in enumerate(vis_list):
            if len(gcfcf) > 1:
                g = gcfcf[ivis]
            else:
                g = gcfcf[0]
            # Create the graph to divide the visibility into slices. This is by copy.
            sub_sub_vis_lists = rsexecute.execute(visibility_scatter, nout=vis_slices)\
                (sub_vis_list, vis_iter, vis_slices=vis_slices)

            # Iterate within each sub_sub_vis_list
            vis_results = list()
            for sub_sub_vis_list in sub_sub_vis_lists:
                vis_results.append(rsexecute.execute(invert_ignore_none, pure=True)
                                   (sub_sub_vis_list, template_model_imagelist[ivis], g))
            results_vislist.append(sum_invert_results_rsexecute(vis_results))

        result = results_vislist
    else:
        for ivis, sub_vis_list in enumerate(vis_list):
            # Create the graph to divide an image into facets. This is by reference.
            facet_lists = rsexecute.execute(image_scatter_facets, nout=actual_number_facets ** 2)(
                template_model_imagelist[ivis], facets=facets)
            # Create the graph to divide the visibility into slices. This is by copy.
            sub_sub_vis_lists = rsexecute.execute(visibility_scatter, nout=vis_slices)\
                (sub_vis_list, vis_iter, vis_slices=vis_slices)

            # Iterate within each vis_list
            vis_results = list()
            for sub_sub_vis_list in sub_sub_vis_lists:
                facet_vis_results = list()
                for facet_list in facet_lists:
                    facet_vis_results.append(
                        rsexecute.execute(invert_ignore_none, pure=True)(sub_sub_vis_list, facet_list, None))
                vis_results.append(rsexecute.execute(gather_image_iteration_results, nout=1)
                                   (facet_vis_results, template_model_imagelist[ivis]))
            results_vislist.append(sum_invert_results_rsexecute(vis_results))

        result = results_vislist
    return rsexecute.optimize(result)
def create_named_configuration(name: str = 'LOWBD2', **kwargs) -> Configuration:
    """ Create standard configurations e.g. LOWBD2, MIDBD2

    Possible configurations are::

        LOWBD1
        LOWBD2
        LOWBD2-core
        LOW == LOWR3
        MID == MIDR5
        ASKAP
        LOFAR
        VLAA
        VLAA_north

    :param name: name of Configuration LOWBD2, LOWBD1, LOFAR, VLAA, ASKAP
    :param rmax: Maximum distance of station from the average (m)
    :return:

    For LOWBD2, setting rmax gives the following number of stations
    100.0       13
    300.0       94
    1000.0      251
    3000.0      314
    10000.0     398
    30000.0     476
    100000.0    512
    """
    def _announce(location):
        # Every branch reports the chosen site in the same format
        log.info("create_named_configuration: %s\n\t%s\n\t%s" % (name, location.geocentric,
                                                                 location.geodetic))

    if name == 'LOWBD2':
        site = EarthLocation(lon="116.76444824", lat="-26.824722084", height=300.0)
        _announce(site)
        config = create_configuration_from_file(antfile=rascil_path("data/configurations/LOWBD2.csv"),
                                                location=site, mount='xy', names='LOWBD2_%d',
                                                diameter=35.0, name=name, **kwargs)
    elif name == 'LOWBD1':
        site = EarthLocation(lon="116.76444824", lat="-26.824722084", height=300.0)
        _announce(site)
        config = create_configuration_from_file(antfile=rascil_path("data/configurations/LOWBD1.csv"),
                                                location=site, mount='xy', names='LOWBD1_%d',
                                                diameter=35.0, name=name, **kwargs)
    elif name == 'LOWBD2-CORE':
        site = EarthLocation(lon="116.76444824", lat="-26.824722084", height=300.0)
        _announce(site)
        config = create_configuration_from_file(antfile=rascil_path("data/configurations/LOWBD2-CORE.csv"),
                                                location=site, mount='xy', names='LOWBD2_%d',
                                                diameter=35.0, name=name, **kwargs)
    elif (name == 'LOW') or (name == 'LOWR3'):
        site = EarthLocation(lon="116.76444824", lat="-26.824722084", height=300.0)
        _announce(site)
        config = create_configuration_from_MIDfile(antfile=rascil_path("data/configurations/ska1low_local.cfg"),
                                                   mount='xy', name=name, location=site, **kwargs)
    elif (name == 'MID') or (name == "MIDR5"):
        site = EarthLocation(lon="21.443803", lat="-30.712925", height=0.0)
        _announce(site)
        config = create_configuration_from_MIDfile(antfile=rascil_path("data/configurations/ska1mid_local.cfg"),
                                                   mount='azel', name=name, location=site, **kwargs)
    elif name == 'ASKAP':
        site = EarthLocation(lon="+116.6356824", lat="-26.7013006", height=377.0)
        _announce(site)
        config = create_configuration_from_file(antfile=rascil_path("data/configurations/A27CR3P6B.in.csv"),
                                                mount='equatorial', names='ASKAP_%d',
                                                diameter=12.0, name=name, location=site, **kwargs)
    elif name == 'LOFAR':
        site = EarthLocation(x=[3826923.9] * u.m, y=[460915.1] * u.m, z=[5064643.2] * u.m)
        _announce(site)
        assert get_parameter(kwargs, "meta", False) is False
        config = create_LOFAR_configuration(antfile=rascil_path("data/configurations/LOFAR.csv"),
                                            location=site)
    elif name == 'VLAA':
        site = EarthLocation(lon="-107.6184", lat="34.0784", height=2124.0)
        _announce(site)
        config = create_configuration_from_file(antfile=rascil_path("data/configurations/VLA_A_hor_xyz.csv"),
                                                location=site, mount='azel', names='VLA_%d',
                                                diameter=25.0, name=name, **kwargs)
    elif name == 'VLAA_north':
        site = EarthLocation(lon="-107.6184", lat="90.000", height=0.0)
        _announce(site)
        config = create_configuration_from_file(antfile=rascil_path("data/configurations/VLA_A_hor_xyz.csv"),
                                                location=site, mount='azel', names='VLA_%d',
                                                diameter=25.0, name=name, **kwargs)
    else:
        raise ValueError("No such Configuration %s" % name)
    return config
def ical_list_serial_workflow(vis_list, model_imagelist, context, vis_slices=1, facets=1,
                              gcfcf=None, calibration_context='TG', do_selfcal=True, **kwargs):
    """Run ICAL pipeline

    Performs up to nmajor cycles of: predict model visibilities, (optionally)
    selfcalibrate the gains against them, image the residual visibilities, and
    deconvolve the residual image into the accumulating model.

    :param vis_list: List of visibilities
    :param model_imagelist: List of model images, one per entry in vis_list
    :param context: imaging context e.g. '2d'
    :param vis_slices: Number of visibility slices (e.g. for timeslice or wstack)
    :param facets: Number of facets (per axis)
    :param gcfcf: Optional list of (grid correction function, convolution function) tuples
    :param calibration_context: Sequence of calibration steps e.g. TGB
    :param do_selfcal: Do the selfcalibration?
    :param kwargs: Parameters for functions in components
    :return: (deconvolved model images, residual images, restored images, gaintable list)
    """
    gt_list = list()

    if gcfcf is None:
        gcfcf = [create_pswf_convolutionfunction(model_imagelist[0])]

    # The PSF is made once and reused by every deconvolution below.
    psf_imagelist = invert_list_serial_workflow(vis_list, model_imagelist, dopsf=True, context=context,
                                                vis_slices=vis_slices, facets=facets, gcfcf=gcfcf, **kwargs)

    model_vislist = [copy_visibility(v, zero=True) for v in vis_list]

    if do_selfcal:
        # Work on copies so calibration does not modify the caller's visibilities.
        cal_vis_list = [copy_visibility(v) for v in vis_list]
    else:
        cal_vis_list = vis_list

    if do_selfcal:
        # Make the predicted visibilities, selfcalibrate against it correcting the gains, then
        # form the residual visibility, then make the residual image
        model_vislist = predict_list_serial_workflow(model_vislist, model_imagelist,
                                                     context=context, vis_slices=vis_slices,
                                                     facets=facets, gcfcf=gcfcf, **kwargs)
        cal_vis_list, gt_list = calibrate_list_serial_workflow(
            cal_vis_list, model_vislist,
            calibration_context=calibration_context, **kwargs)
        residual_vislist = subtract_list_serial_workflow(
            cal_vis_list, model_vislist)
        residual_imagelist = invert_list_serial_workflow(residual_vislist, model_imagelist,
                                                         context=context, dopsf=False,
                                                         vis_slices=vis_slices, facets=facets,
                                                         gcfcf=gcfcf, iteration=0, **kwargs)
    else:
        # If we are not selfcalibrating it's much easier and we can avoid an unnecessary round of gather/scatter
        # for visibility partitioning such as timeslices and wstack.
        residual_imagelist = residual_list_serial_workflow(
            cal_vis_list, model_imagelist, context=context,
            vis_slices=vis_slices, facets=facets, gcfcf=gcfcf, **kwargs)

    # Initial (cycle 0) deconvolution seeds the model for the major-cycle loop.
    deconvolve_model_imagelist = deconvolve_list_serial_workflow(
        residual_imagelist, psf_imagelist, model_imagelist,
        prefix='cycle 0', **kwargs)

    nmajor = get_parameter(kwargs, "nmajor", 5)
    if nmajor > 1:
        for cycle in range(nmajor):
            if do_selfcal:
                model_vislist = predict_list_serial_workflow(
                    model_vislist, deconvolve_model_imagelist,
                    context=context, vis_slices=vis_slices,
                    facets=facets, gcfcf=gcfcf, **kwargs)
                # Recalibrate against the current model each major cycle.
                cal_vis_list = [copy_visibility(v) for v in vis_list]
                cal_vis_list, gt_list = calibrate_list_serial_workflow(
                    cal_vis_list, model_vislist,
                    calibration_context=calibration_context,
                    iteration=cycle, **kwargs)
                residual_vislist = subtract_list_serial_workflow(
                    cal_vis_list, model_vislist)
                residual_imagelist = invert_list_serial_workflow(
                    residual_vislist, model_imagelist, context=context,
                    vis_slices=vis_slices, facets=facets, gcfcf=gcfcf,
                    **kwargs)
            else:
                residual_imagelist = residual_list_serial_workflow(
                    cal_vis_list, deconvolve_model_imagelist,
                    context=context, vis_slices=vis_slices,
                    facets=facets, gcfcf=gcfcf, **kwargs)

            prefix = "cycle %d" % (cycle + 1)
            deconvolve_model_imagelist = deconvolve_list_serial_workflow(
                residual_imagelist, psf_imagelist,
                deconvolve_model_imagelist, prefix=prefix, **kwargs)

    # Final residual and restored images from the converged model.
    residual_imagelist = residual_list_serial_workflow(
        cal_vis_list, deconvolve_model_imagelist, context=context,
        vis_slices=vis_slices, facets=facets, gcfcf=gcfcf, **kwargs)
    restore_imagelist = restore_list_serial_workflow(
        deconvolve_model_imagelist, psf_imagelist, residual_imagelist)
    return deconvolve_model_imagelist, residual_imagelist, restore_imagelist, gt_list
def invert_ng(bvis: BlockVisibility, model: Image, dopsf: bool = False, normalize: bool = True,
              **kwargs) -> (Image, numpy.ndarray):
    """ Invert using nifty-gridder module

    https://gitlab.mpcdf.mpg.de/ift/nifty_gridder

    Use the image im as a template. Do PSF in a separate call.

    In the imaging and pipeline workflows, this may be invoked using context='ng'.

    :param dopsf: Make the PSF instead of the dirty image
    :param bvis: BlockVisibility to be inverted
    :param im: image template (not changed)
    :param normalize: Normalize by the sum of weights (True)
    :param kwargs: threads (4), epsilon (1e-12), do_wstacking (True), verbosity (0)
    :return: (resulting image, sum of the weights for each frequency and polarization)

    """
    assert image_is_canonical(model)

    assert isinstance(bvis, BlockVisibility), bvis

    im = copy_image(model)

    nthreads = get_parameter(kwargs, "threads", 4)
    epsilon = get_parameter(kwargs, "epsilon", 1e-12)
    do_wstacking = get_parameter(kwargs, "do_wstacking", True)
    verbosity = get_parameter(kwargs, "verbosity", 0)

    # Work on a phase-shifted copy so the caller's data is untouched.
    sbvis = copy_visibility(bvis)
    sbvis = shift_vis_to_image(sbvis, im, tangent=True, inverse=False)

    freq = sbvis.frequency  # frequency, Hz

    nrows, nants, _, vnchan, vnpol = sbvis.vis.shape
    # if dopsf:
    #     sbvis = fill_vis_for_psf(sbvis)

    # Flatten the (rows, ant, ant) axes into a single visibility-sample axis.
    ms = sbvis.vis.reshape([nrows * nants * nants, vnchan, vnpol])
    ms = convert_pol_frame(ms, bvis.polarisation_frame, im.polarisation_frame, polaxis=2)
    uvw = sbvis.uvw.reshape([nrows * nants * nants, 3])
    wgt = sbvis.flagged_imaging_weight.reshape(
        [nrows * nants * nants, vnchan, vnpol])

    if epsilon > 5.0e-6:
        # Modest accuracy requested: single precision is sufficient.
        ms = ms.astype("c8")
        wgt = wgt.astype("f4")

    # Find out the image size/resolution
    npixdirty = im.nwidth
    pixsize = numpy.abs(numpy.radians(im.wcs.wcs.cdelt[0]))

    fuvw = uvw.copy()
    # We need to flip the u and w axes.
    fuvw[:, 0] *= -1.0
    fuvw[:, 2] *= -1.0

    nchan, npol, ny, nx = im.shape
    im.data[...] = 0.0
    sumwt = numpy.zeros([nchan, npol])

    # There's a latent problem here with the weights.
    # wgt = numpy.real(convert_pol_frame(wgt, bvis.polarisation_frame, im.polarisation_frame, polaxis=2))

    # Set up the conversion from visibility channels to image channels
    vis_to_im = numpy.round(model.wcs.sub([4]).wcs_world2pix(
        freq, 0)[0]).astype('int')

    # Nifty gridder likes to receive contiguous arrays so we transpose
    # at the beginning

    mfs = nchan == 1

    if dopsf:
        # PSF: replace the data with unit amplitude in polarisation 0 only.
        mst = ms.T
        mst[...] = 0.0
        mst[0, ...] = 1.0
        wgtt = wgt.T
        if mfs:
            dirty = ng.ms2dirty(fuvw.astype(numpy.float64),
                                bvis.frequency.astype(numpy.float64),
                                numpy.ascontiguousarray(mst[0, :, :].T),
                                numpy.ascontiguousarray(wgtt[0, :, :].T),
                                npixdirty, npixdirty, pixsize, pixsize, epsilon,
                                do_wstacking=do_wstacking,
                                nthreads=nthreads, verbosity=verbosity)
            sumwt[0, :] += numpy.sum(wgtt[0, 0, :].T, axis=0)
            # NOTE(review): the polarisation-0 PSF is broadcast to all image
            # polarisations here — confirm that is intended.
            im.data[0, :] += dirty.T
        else:
            for vchan in range(vnchan):
                ichan = vis_to_im[vchan]
                frequency = numpy.array(freq[vchan:vchan + 1]).astype(
                    numpy.float64)
                dirty = ng.ms2dirty(
                    fuvw.astype(numpy.float64),
                    frequency.astype(numpy.float64),
                    numpy.ascontiguousarray(mst[0, vchan, :][..., numpy.newaxis]),
                    numpy.ascontiguousarray(wgtt[0, vchan, :][..., numpy.newaxis]),
                    npixdirty, npixdirty, pixsize, pixsize, epsilon,
                    do_wstacking=do_wstacking, nthreads=nthreads,
                    verbosity=verbosity)
                # NOTE(review): wgtt is indexed with ichan (image channel) here
                # but with vchan in the gridding call above — confirm these agree
                # when visibility and image channels are not mapped 1:1.
                sumwt[ichan, :] += numpy.sum(wgtt[0, ichan, :].T, axis=0)
                im.data[ichan, :] += dirty.T
    else:
        mst = ms.T
        wgtt = wgt.T
        for pol in range(npol):
            if mfs:
                dirty = ng.ms2dirty(
                    fuvw.astype(numpy.float64),
                    bvis.frequency.astype(numpy.float64),
                    numpy.ascontiguousarray(mst[pol, :, :].T),
                    numpy.ascontiguousarray(wgtt[pol, :, :].T),
                    npixdirty, npixdirty, pixsize, pixsize, epsilon,
                    do_wstacking=do_wstacking, nthreads=nthreads,
                    verbosity=verbosity)
                sumwt[0, pol] += numpy.sum(wgtt[pol, 0, :].T, axis=0)
                im.data[0, pol] += dirty.T
            else:
                for vchan in range(vnchan):
                    ichan = vis_to_im[vchan]
                    frequency = numpy.array(freq[vchan:vchan + 1]).astype(
                        numpy.float64)
                    dirty = ng.ms2dirty(fuvw.astype(numpy.float64),
                                        frequency.astype(numpy.float64),
                                        numpy.ascontiguousarray(
                                            mst[pol, vchan, :][..., numpy.newaxis]),
                                        numpy.ascontiguousarray(
                                            wgtt[pol, vchan, :][..., numpy.newaxis]),
                                        npixdirty, npixdirty, pixsize, pixsize,
                                        epsilon,
                                        do_wstacking=do_wstacking,
                                        nthreads=nthreads,
                                        verbosity=verbosity)
                    # NOTE(review): wgtt indexed with ichan here but vchan in the
                    # gridding call — see note in the PSF branch above.
                    sumwt[ichan, pol] += numpy.sum(wgtt[pol, ichan, :].T, axis=0)
                    im.data[ichan, pol] += dirty.T

    if normalize:
        im = normalize_sumwt(im, sumwt)

    return im, sumwt
def create_image_from_visibility(vis: Union[BlockVisibility, Visibility], **kwargs) -> Image:
    """Make an empty image from params and Visibility

    This makes an empty, template image consistent with the visibility, allowing optional overriding of select
    parameters. This is a convenience function and does not transform the visibilities.

    :param vis:
    :param phasecentre: Phasecentre (Skycoord)
    :param channel_bandwidth: Channel width (Hz)
    :param cellsize: Cellsize (radians)
    :param npixel: Number of pixels on each axis (512)
    :param frame: Coordinate frame for WCS (ICRS)
    :param equinox: Equinox for WCS (2000.0)
    :param nchan: Number of image channels (Default is 1 -> MFS)
    :return: image

    See also
        :py:func:`rascil.processing_components.image.operations.create_image`
    """
    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), \
        "vis is not a Visibility or a BlockVisibility: %r" % (vis)

    log.debug(
        "create_image_from_visibility: Parsing parameters to get definition of WCS"
    )

    imagecentre = get_parameter(kwargs, "imagecentre", vis.phasecentre)
    phasecentre = get_parameter(kwargs, "phasecentre", vis.phasecentre)

    # Spectral processing options
    ufrequency = numpy.unique(vis.frequency)
    vnchan = len(ufrequency)

    frequency = get_parameter(kwargs, "frequency", vis.frequency)
    inchan = get_parameter(kwargs, "nchan", vnchan)
    reffrequency = frequency[0] * units.Hz
    # The 0.99999... factor keeps the default bandwidth fractionally inside the
    # visibility channel width.
    channel_bandwidth = get_parameter(
        kwargs, "channel_bandwidth",
        0.99999999999 * vis.channel_bandwidth[0]) * units.Hz

    if (inchan == vnchan) and vnchan > 1:
        log.debug(
            "create_image_from_visibility: Defining %d channel Image at %s, starting frequency %s, and bandwidth %s"
            % (inchan, imagecentre, reffrequency, channel_bandwidth))
    elif (inchan == 1) and vnchan > 1:
        assert numpy.abs(channel_bandwidth.value
                         ) > 0.0, "Channel width must be non-zero for mfs mode"
        log.debug(
            "create_image_from_visibility: Defining single channel MFS Image at %s, starting frequency %s, "
            "and bandwidth %s" % (imagecentre, reffrequency, channel_bandwidth))
    elif inchan > 1 and vnchan > 1:
        assert numpy.abs(channel_bandwidth.value
                         ) > 0.0, "Channel width must be non-zero for mfs mode"
        log.debug(
            "create_image_from_visibility: Defining multi-channel MFS Image at %s, starting frequency %s, "
            "and bandwidth %s" % (imagecentre, reffrequency, channel_bandwidth))
    elif (inchan == 1) and (vnchan == 1):
        assert numpy.abs(channel_bandwidth.value
                         ) > 0.0, "Channel width must be non-zero for mfs mode"
        log.debug(
            "create_image_from_visibility: Defining single channel Image at %s, starting frequency %s, "
            "and bandwidth %s" % (imagecentre, reffrequency, channel_bandwidth))
    else:
        raise ValueError(
            "create_image_from_visibility: unknown spectral mode ")

    # Image sampling options
    npixel = get_parameter(kwargs, "npixel", 512)
    # NOTE(review): only the u coordinate ([..., 0:1]) enters uvmax; the v
    # coordinate is ignored — confirm this is intended.
    uvmax = numpy.max((numpy.abs(vis.data['uvw'][..., 0:1])))
    if isinstance(vis, BlockVisibility):
        # BlockVisibility uvw is in metres: convert to wavelengths.
        uvmax *= numpy.max(frequency) / constants.c.to('m s^-1').value
    log.debug("create_image_from_visibility: uvmax = %f wavelengths" % uvmax)
    criticalcellsize = 1.0 / (uvmax * 2.0)
    log.debug(
        "create_image_from_visibility: Critical cellsize = %f radians, %f degrees"
        % (criticalcellsize, criticalcellsize * 180.0 / numpy.pi))
    cellsize = get_parameter(kwargs, "cellsize", 0.5 * criticalcellsize)
    log.debug(
        "create_image_from_visibility: Cellsize = %g radians, %g degrees" %
        (cellsize, cellsize * 180.0 / numpy.pi))
    override_cellsize = get_parameter(kwargs, "override_cellsize", True)
    if (override_cellsize
            and cellsize > criticalcellsize) or (cellsize == 0.0):
        log.debug(
            "create_image_from_visibility: Resetting cellsize %g radians to criticalcellsize %g radians"
            % (cellsize, criticalcellsize))
        cellsize = criticalcellsize
    pol_frame = get_parameter(kwargs, "polarisation_frame",
                              PolarisationFrame("stokesI"))
    inpol = pol_frame.npol

    # Now we can define the WCS, which is a convenient place to hold the info above
    # Beware of python indexing order! wcs and the array have opposite ordering
    shape = [inchan, inpol, npixel, npixel]
    log.debug("create_image_from_visibility: image shape is %s" % str(shape))
    w = wcs.WCS(naxis=4)
    # The negation in the longitude is needed by definition of RA, DEC
    w.wcs.cdelt = [
        -cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0,
        channel_bandwidth.to(units.Hz).value
    ]
    # The numpy definition of the phase centre of an FFT is n // 2 (0 - rel) so that's what we use for
    # the reference pixel. We have to use 0 rel everywhere.
    w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
    w.wcs.crval = [
        phasecentre.ra.deg, phasecentre.dec.deg, 1.0,
        reffrequency.to(units.Hz).value
    ]
    w.naxis = 4
    w.wcs.radesys = get_parameter(kwargs, 'frame', 'ICRS')
    w.wcs.equinox = get_parameter(kwargs, 'equinox', 2000.0)

    return create_image_from_array(numpy.zeros(shape),
                                   wcs=w,
                                   polarisation_frame=pol_frame)
def predict_ng(bvis: BlockVisibility, model: Image, **kwargs) -> BlockVisibility:
    """ Predict using convolutional degridding.

    Nifty-gridder version. https://gitlab.mpcdf.mpg.de/ift/nifty_gridder

    In the imaging and pipeline workflows, this may be invoked using context='ng'.

    :param bvis: BlockVisibility to be predicted
    :param model: model image
    :param kwargs: threads (4), epsilon (1e-12), do_wstacking (True), verbosity (0)
    :return: resulting BlockVisibility (in place works)
    """
    assert isinstance(bvis, BlockVisibility), bvis
    assert image_is_canonical(model)

    # NOTE(review): the asserts above dereference model, so this guard can
    # never be reached with model=None — confirm the intended ordering.
    if model is None:
        return bvis

    nthreads = get_parameter(kwargs, "threads", 4)
    epsilon = get_parameter(kwargs, "epsilon", 1e-12)
    do_wstacking = get_parameter(kwargs, "do_wstacking", True)
    verbosity = get_parameter(kwargs, "verbosity", 0)

    newbvis = copy_visibility(bvis, zero=True)

    # Extracting data from BlockVisibility
    freq = bvis.frequency  # frequency, Hz
    nrows, nants, _, vnchan, vnpol = bvis.vis.shape

    uvw = newbvis.data['uvw'].reshape([nrows * nants * nants, 3])

    # Degridded visibilities accumulate here as (pol, chan, sample).
    vist = numpy.zeros([vnpol, vnchan, nants * nants * nrows], dtype='complex')

    # Get the image properties
    m_nchan, m_npol, ny, nx = model.data.shape
    # Check if the number of frequency channels matches in bvis and a model
    #        assert (m_nchan == v_nchan)
    assert (m_npol == vnpol)

    fuvw = uvw.copy()
    # We need to flip the u and w axes. The flip in w is equivalent to the conjugation of the
    # convolution function grid_visibility to griddata
    fuvw[:, 0] *= -1.0
    fuvw[:, 2] *= -1.0

    # Find out the image size/resolution
    pixsize = numpy.abs(numpy.radians(model.wcs.wcs.cdelt[0]))

    # Make de-gridding over a frequency range and pol fields
    vis_to_im = numpy.round(model.wcs.sub([4]).wcs_world2pix(
        freq, 0)[0]).astype('int')

    mfs = m_nchan == 1

    if mfs:
        # Single-plane model: degrid every visibility channel from plane 0.
        for vpol in range(vnpol):
            vist[vpol, :, :] = ng.dirty2ms(
                fuvw.astype(numpy.float64),
                bvis.frequency.astype(numpy.float64),
                model.data[0, vpol, :, :].T.astype(numpy.float64),
                pixsize_x=pixsize,
                pixsize_y=pixsize,
                epsilon=epsilon,
                do_wstacking=do_wstacking,
                nthreads=nthreads,
                verbosity=verbosity).T
    else:
        # Cube model: degrid each visibility channel from its image channel.
        for vpol in range(vnpol):
            for vchan in range(vnchan):
                imchan = vis_to_im[vchan]
                vist[vpol, vchan, :] = ng.dirty2ms(
                    fuvw.astype(numpy.float64),
                    numpy.array(freq[vchan:vchan + 1]).astype(
                        numpy.float64),
                    model.data[imchan, vpol, :, :].T.astype(numpy.float64),
                    pixsize_x=pixsize,
                    pixsize_y=pixsize,
                    epsilon=epsilon,
                    do_wstacking=do_wstacking,
                    nthreads=nthreads,
                    verbosity=verbosity)[:, 0]

    vis = convert_pol_frame(vist.T,
                            model.polarisation_frame,
                            bvis.polarisation_frame,
                            polaxis=2)

    newbvis.data['vis'] = vis.reshape([nrows, nants, nants, vnchan, vnpol])

    # Now we can shift the visibility from the image frame to the original visibility frame
    return shift_vis_to_image(newbvis, model, tangent=True, inverse=True)
def invert_ng(bvis: BlockVisibility, model: Image, dopsf: bool = False, normalize: bool = True,
              **kwargs) -> (Image, numpy.ndarray):
    """ Invert using nifty-gridder module

    https://gitlab.mpcdf.mpg.de/ift/nifty_gridder

    Use the image im as a template. Do PSF in a separate call.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms
    of this function. Any shifting needed is performed here.

    :param bvis: BlockVisibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the PSF instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :param kwargs: threads (4), epsilon (1e-12), do_wstacking (True), verbosity (0)
    :return: (resulting image, sum of the weights for each frequency and polarization)
    """
    assert image_is_canonical(model)
    assert isinstance(bvis, BlockVisibility), bvis

    im = copy_image(model)

    nthreads = get_parameter(kwargs, "threads", 4)
    epsilon = get_parameter(kwargs, "epsilon", 1e-12)
    do_wstacking = get_parameter(kwargs, "do_wstacking", True)
    verbosity = get_parameter(kwargs, "verbosity", 0)

    sbvis = copy_visibility(bvis)
    sbvis = shift_vis_to_image(sbvis, im, tangent=True, inverse=False)

    # BUG FIX: grid the phase-shifted copy, not the caller's original data.
    # Previously this read bvis.vis, which (a) discarded the shift applied just
    # above and (b) let the dopsf overwrite below clobber the caller's
    # visibilities, since ms is a reshaped view of this array.
    vis = sbvis.vis

    freq = sbvis.frequency  # frequency, Hz

    nrows, nants, _, vnchan, vnpol = vis.shape
    uvw = sbvis.uvw.reshape([nrows * nants * nants, 3])
    ms = vis.reshape([nrows * nants * nants, vnchan, vnpol])
    wgt = sbvis.imaging_weight.reshape(
        [nrows * nants * nants, vnchan, vnpol])

    if dopsf:
        # Unit amplitude, zero phase: the transform of this is the PSF.
        # Safe to do in place now because ms views the local copy, not bvis.
        ms[...] = 1.0 + 0.0j

    if epsilon > 5.0e-6:
        # Modest accuracy requested: single precision is sufficient.
        ms = ms.astype("c8")
        wgt = wgt.astype("f4")

    # Find out the image size/resolution
    npixdirty = im.nwidth
    pixsize = numpy.abs(numpy.radians(im.wcs.wcs.cdelt[0]))

    fuvw = uvw.copy()
    # We need to flip the u and w axes.
    fuvw[:, 0] *= -1.0
    fuvw[:, 2] *= -1.0

    nchan, npol, ny, nx = im.shape
    im.data[...] = 0.0
    sumwt = numpy.zeros([nchan, npol])
    ms = convert_pol_frame(ms, bvis.polarisation_frame, im.polarisation_frame, polaxis=2)
    # There's a latent problem here with the weights.
    # wgt = numpy.real(convert_pol_frame(wgt, bvis.polarisation_frame, im.polarisation_frame, polaxis=2))

    # Set up the conversion from visibility channels to image channels
    vis_to_im = numpy.round(model.wcs.sub([4]).wcs_world2pix(
        freq, 0)[0]).astype('int')

    for vchan in range(vnchan):
        ichan = vis_to_im[vchan]
        for pol in range(npol):
            # Nifty gridder likes to receive contiguous arrays. Slice out the
            # (nvis, 1) column directly; the previous per-row Python list
            # comprehension incurred O(nvis) interpreter overhead per
            # channel/pol (and its trailing .reshape result was discarded).
            # astype/ascontiguousarray yield C-contiguous copies with the same
            # values and dtypes as before.
            ms_1d = ms[:, vchan:vchan + 1, pol].astype('complex')
            wgt_1d = numpy.ascontiguousarray(wgt[:, vchan:vchan + 1, pol])
            dirty = ng.ms2dirty(fuvw, freq[vchan:vchan + 1], ms_1d, wgt_1d,
                                npixdirty, npixdirty, pixsize, pixsize,
                                epsilon,
                                do_wstacking=do_wstacking,
                                nthreads=nthreads,
                                verbosity=verbosity)
            sumwt[ichan, pol] += numpy.sum(wgt[:, vchan, pol])
            im.data[ichan, pol] += dirty.T

    if normalize:
        im = normalize_sumwt(im, sumwt)

    return im, sumwt
def deconvolve_list_rsexecute_workflow(dirty_list, psf_list, model_imagelist, prefix='', mask=None,
                                       **kwargs):
    """Create a graph for deconvolution, adding to the model

    Note that this call can be converted to a set of rsexecute calls to the serial version, using
    argument use_serial_clean=True

    :param dirty_list: list of dirty images (or graph)
    :param psf_list: list of psfs (or graph)
    :param model_imagelist: list of models (or graph)
    :param prefix: Informative prefix to log messages
    :param mask: Mask for deconvolution
    :param kwargs: Parameters for functions
    :return: graph for the deconvolution

    For example::

        dirty_imagelist = invert_list_rsexecute_workflow(vis_list, model_imagelist, context='2d',
                                                          dopsf=False, normalize=True)
        psf_imagelist = invert_list_rsexecute_workflow(vis_list, model_imagelist, context='2d',
                                                        dopsf=True, normalize=True)
        dirty_imagelist = rsexecute.persist(dirty_imagelist)
        psf_imagelist = rsexecute.persist(psf_imagelist)
        dec_imagelist = deconvolve_list_rsexecute_workflow(dirty_imagelist, psf_imagelist,
                model_imagelist, niter=1000, fractional_threshold=0.01,
                scales=[0, 3, 10], algorithm='mmclean', nmoment=3, nchan=freqwin,
                threshold=0.1, gain=0.7)
        dec_imagelist = rsexecute.persist(dec_imagelist)

    """
    nchan = len(dirty_list)
    # Number of moments. 1 is the sum.
    nmoment = get_parameter(kwargs, "nmoment", 1)

    if get_parameter(kwargs, "use_serial_clean", False):
        # Delegate the whole clean to the serial implementation as one task.
        from rascil.workflows.serial.imaging.imaging_serial import deconvolve_list_serial_workflow
        return rsexecute.execute(deconvolve_list_serial_workflow, nout=nchan) \
            (dirty_list, psf_list, model_imagelist, prefix=prefix, mask=mask, **kwargs)

    def deconvolve(dirty, psf, model, facet, gthreshold, msk=None):
        # Deconvolve one facet, adding the model back in; skipped entirely if
        # the facet peak is already below the global threshold.
        if prefix == '':
            lprefix = "facet %d" % facet
        else:
            lprefix = "%s, facet %d" % (prefix, facet)

        if nmoment > 0:
            moment0 = calculate_image_frequency_moments(dirty)
            this_peak = numpy.max(numpy.abs(moment0.data[0, ...])) / dirty.data.shape[0]
        else:
            ref_chan = dirty.data.shape[0] // 2
            this_peak = numpy.max(numpy.abs(dirty.data[ref_chan, ...]))

        if this_peak > 1.1 * gthreshold:
            kwargs['threshold'] = gthreshold
            result, _ = deconvolve_cube(dirty, psf, prefix=lprefix, mask=msk, **kwargs)

            if result.data.shape[0] == model.data.shape[0]:
                result.data += model.data
            return result
        else:
            return copy_image(model)

    deconvolve_facets = get_parameter(kwargs, 'deconvolve_facets', 1)
    deconvolve_overlap = get_parameter(kwargs, 'deconvolve_overlap', 0)
    deconvolve_taper = get_parameter(kwargs, 'deconvolve_taper', None)

    if deconvolve_facets > 1 and deconvolve_overlap > 0:
        deconvolve_number_facets = (deconvolve_facets - 2) ** 2
    else:
        deconvolve_number_facets = deconvolve_facets ** 2

    scattered_channels_facets_model_list = \
        [rsexecute.execute(image_scatter_facets, nout=deconvolve_number_facets)(m, facets=deconvolve_facets,
                                                                                overlap=deconvolve_overlap,
                                                                                taper=deconvolve_taper)
         for m in model_imagelist]
    scattered_facets_model_list = [
        rsexecute.execute(image_gather_channels, nout=1)([scattered_channels_facets_model_list[chan][facet]
                                                          for chan in range(nchan)])
        for facet in range(deconvolve_number_facets)]

    # Scatter the separate channel images into deconvolve facets and then gather channels for each facet.
    # This avoids constructing the entire spectral cube.
    # i.e. SCATTER BY FACET then SCATTER BY CHANNEL
    dirty_list_trimmed = rsexecute.execute(remove_sumwt, nout=nchan)(dirty_list)
    scattered_channels_facets_dirty_list = \
        [rsexecute.execute(image_scatter_facets, nout=deconvolve_number_facets)(d, facets=deconvolve_facets,
                                                                                overlap=deconvolve_overlap,
                                                                                taper=deconvolve_taper)
         for d in dirty_list_trimmed]
    scattered_facets_dirty_list = [
        rsexecute.execute(image_gather_channels, nout=1)([scattered_channels_facets_dirty_list[chan][facet]
                                                          for chan in range(nchan)])
        for facet in range(deconvolve_number_facets)]

    psf_list_trimmed = rsexecute.execute(remove_sumwt, nout=nchan)(psf_list)

    def extract_psf(psf, facets):
        # Cut out the central 1/facets portion of the PSF, adjusting the WCS
        # reference pixel to match.
        spsf = create_empty_image_like(psf)
        cx = spsf.shape[3] // 2
        cy = spsf.shape[2] // 2
        wx = spsf.shape[3] // facets
        wy = spsf.shape[2] // facets
        xbeg = cx - wx // 2
        xend = cx + wx // 2
        ybeg = cy - wy // 2
        yend = cy + wy // 2
        spsf.data = psf.data[..., ybeg:yend, xbeg:xend]
        spsf.wcs.wcs.crpix[0] -= xbeg
        spsf.wcs.wcs.crpix[1] -= ybeg
        return spsf

    psf_list_trimmed = [rsexecute.execute(extract_psf)(p, deconvolve_facets) for p in psf_list_trimmed]
    psf_centre = rsexecute.execute(image_gather_channels, nout=1)([psf_list_trimmed[chan]
                                                                   for chan in range(nchan)])

    # Work out the threshold. Need to find global peak over all dirty_list images
    threshold = get_parameter(kwargs, "threshold", 0.0)
    fractional_threshold = get_parameter(kwargs, "fractional_threshold", 0.1)
    nmoment = get_parameter(kwargs, "nmoment", 1)
    use_moment0 = nmoment > 0

    # Find the global threshold. This uses the peak in the average on the frequency axis since we
    # want to use it in a stopping criterion in a moment clean
    global_threshold = rsexecute.execute(threshold_list, nout=1)(scattered_facets_dirty_list, threshold,
                                                                 fractional_threshold,
                                                                 use_moment0=use_moment0, prefix=prefix)

    facet_list = numpy.arange(deconvolve_number_facets).astype('int')
    if mask is None:
        scattered_results_list = [
            rsexecute.execute(deconvolve, nout=1)(d, psf_centre, m, facet, global_threshold)
            for d, m, facet in zip(scattered_facets_dirty_list, scattered_facets_model_list, facet_list)]
    else:
        mask_list = \
            rsexecute.execute(image_scatter_facets, nout=deconvolve_number_facets)(mask,
                                                                                   facets=deconvolve_facets,
                                                                                   overlap=deconvolve_overlap)
        scattered_results_list = [
            rsexecute.execute(deconvolve, nout=1)(d, psf_centre, m, facet, global_threshold, msk)
            for d, m, facet, msk in zip(scattered_facets_dirty_list, scattered_facets_model_list,
                                        facet_list, mask_list)]

    # We want to avoid constructing the entire cube so we do the inverse of how we got here:
    # i.e. SCATTER BY CHANNEL then GATHER BY FACET

    # Gather the results back into one image, correcting for overlaps as necessary. The taper function is is used to
    # feather the facets together
    # gathered_results_list = rsexecute.execute(image_gather_facets, nout=1)(scattered_results_list,
    #                                                                        deconvolve_model_imagelist,
    #                                                                        facets=deconvolve_facets,
    #                                                                        overlap=deconvolve_overlap,
    #                                                                        taper=deconvolve_taper)
    # result_list = rsexecute.execute(image_scatter_channels, nout=nchan)(gathered_results_list, subimages=nchan)

    scattered_channel_results_list = [rsexecute.execute(image_scatter_channels, nout=nchan)(scat, subimages=nchan)
                                      for scat in scattered_results_list]

    # The structure is now [[channels] for facets]. We do the reverse transpose to the one above.
    result_list = [rsexecute.execute(image_gather_facets, nout=1)([scattered_channel_results_list[facet][chan]
                                                                   for facet in range(deconvolve_number_facets)],
                                                                  model_imagelist[chan], facets=deconvolve_facets,
                                                                  overlap=deconvolve_overlap)
                   for chan in range(nchan)]

    return rsexecute.optimize(result_list)
def predict_list_rsexecute_workflow(vis_list, model_imagelist, context, vis_slices=1, facets=1,
                                    gcfcf=None, **kwargs):
    """Predict, iterating over both the scattered vis_list and image

    The visibility and image are scattered, the visibility is predicted on each part, and then the
    parts are assembled.

    Note that this call can be converted to a set of rsexecute calls to the serial version, using
    argument use_serial_predict=True

    :param vis_list: list of vis (or graph)
    :param model_imagelist: list of models (or graph)
    :param vis_slices: Number of vis slices (w stack or timeslice)
    :param facets: Number of facets (per axis)
    :param context: Type of processing e.g. 2d, wstack, timeslice or facets
    :param gcfcf: tuple containing grid correction and convolution function
    :param kwargs: Parameters for functions in components
    :return: List of vis_lists

    For example::

        dprepb_model = [rsexecute.execute(create_low_test_image_from_gleam)
            (npixel=npixel, frequency=[frequency[f]], channel_bandwidth=[channel_bandwidth[f]],
            cellsize=cellsize, phasecentre=phasecentre, polarisation_frame=PolarisationFrame("stokesI"),
            flux_limit=3.0, applybeam=True)
            for f, freq in enumerate(frequency)]

        dprepb_model_list = rsexecute.persist(dprepb_model_list)
        predicted_vis_list = predict_list_rsexecute_workflow(vis_list, model_imagelist=dprepb_model_list,
            context='wstack', vis_slices=51)
        predicted_vis_list = rsexecute.compute(predicted_vis_list , sync=True)

    """
    if get_parameter(kwargs, "use_serial_predict", False):
        # One serial-predict task per visibility.
        from rascil.workflows.serial.imaging.imaging_serial import predict_list_serial_workflow
        return [rsexecute.execute(predict_list_serial_workflow, nout=1) \
                    (vis_list=[vis_list[i]],
                     model_imagelist=[model_imagelist[i]], vis_slices=vis_slices,
                     facets=facets, context=context, gcfcf=gcfcf, **kwargs)[0]
                for i, _ in enumerate(vis_list)]

    # Predict_2d does not clear the vis so we have to do it here.
    vis_list = zero_list_rsexecute_workflow(vis_list)

    c = imaging_context(context)
    vis_iter = c['vis_iterator']
    predict = c['predict']

    # NOTE(review): odd facet counts (other than 1) are reduced by one here —
    # confirm this matches image_scatter_facets' facet layout.
    if facets % 2 == 0 or facets == 1:
        actual_number_facets = facets
    else:
        actual_number_facets = facets - 1

    def predict_ignore_none(vis, model, g):
        # Sub-visibility slices may be None (empty slice): pass None through.
        if vis is not None:
            assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), vis
            assert isinstance(model, Image), model
            return predict(vis, model, context=context, gcfcf=g, **kwargs)
        else:
            return None

    if gcfcf is None:
        gcfcf = [rsexecute.execute(create_pswf_convolutionfunction)(m)
                 for m in model_imagelist]

    # Loop over all frequency windows
    if facets == 1:
        image_results_list = list()
        for ivis, subvis in enumerate(vis_list):
            if len(gcfcf) > 1:
                g = gcfcf[ivis]
            else:
                g = gcfcf[0]
            # Create the graph to divide the visibility into slices. This is by copy.
            sub_vis_lists = rsexecute.execute(visibility_scatter, nout=vis_slices)(subvis, vis_iter,
                                                                                   vis_slices)

            image_vis_lists = list()
            # Loop over sub visibility
            for sub_vis_list in sub_vis_lists:
                # Predict visibility for this sub-visibility from this image
                image_vis_list = rsexecute.execute(predict_ignore_none, pure=True, nout=1) \
                    (sub_vis_list, model_imagelist[ivis], g)
                # Sum all sub-visibilities
                image_vis_lists.append(image_vis_list)
            image_results_list.append(rsexecute.execute(visibility_gather, nout=1)
                                      (image_vis_lists, subvis, vis_iter))

        result = image_results_list
    else:
        image_results_list_list = list()
        for ivis, subvis in enumerate(vis_list):
            # Create the graph to divide an image into facets. This is by reference.
            facet_lists = rsexecute.execute(image_scatter_facets, nout=actual_number_facets ** 2)(
                model_imagelist[ivis], facets=facets)
            # Create the graph to divide the visibility into slices. This is by copy.
            sub_vis_lists = rsexecute.execute(visibility_scatter, nout=vis_slices)\
                (subvis, vis_iter, vis_slices)

            facet_vis_lists = list()
            # Loop over sub visibility
            for sub_vis_list in sub_vis_lists:
                facet_vis_results = list()
                # Loop over facets
                for facet_list in facet_lists:
                    # Predict visibility for this subvisibility from this facet
                    facet_vis_list = rsexecute.execute(predict_ignore_none, pure=True, nout=1)\
                        (sub_vis_list, facet_list, None)
                    facet_vis_results.append(facet_vis_list)
                # Sum the current sub-visibility over all facets
                facet_vis_lists.append(rsexecute.execute(sum_predict_results)(facet_vis_results))
            # Sum all sub-visibilities
            image_results_list_list.append(
                rsexecute.execute(visibility_gather, nout=1)(facet_vis_lists, subvis, vis_iter))

        result = image_results_list_list
    return rsexecute.optimize(result)
def deconvolve_list_serial_workflow(dirty_list, psf_list, model_imagelist, prefix='', mask=None, **kwargs):
    """Create a graph for deconvolution, adding to the model

    Each channel's dirty image is scattered into facets, the facets are
    gathered across channels into per-facet spectral cubes, each facet cube is
    deconvolved against a single channel-gathered PSF, and the results are
    gathered back into facet-assembled images and finally re-scattered into
    per-channel images.  This ordering avoids ever constructing the full
    spectral cube for the dirty images.

    :param dirty_list: list of dirty images (per channel; entries may carry a
        sumwt component, which is stripped via remove_sumwt)
    :param psf_list: list of psfs (same structure as dirty_list)
    :param model_imagelist: list of models (per channel)
    :param prefix: Informative prefix to log messages
    :param mask: Mask for deconvolution
    :param kwargs: Parameters for functions (e.g. deconvolve_facets,
        deconvolve_overlap, deconvolve_taper, threshold, fractional_threshold,
        nmoment, plus whatever deconvolve_cube accepts)
    :return: List of deconvolved images

    For example::

        dirty_imagelist = invert_list_serial_workflow(vis_list, model_imagelist, context='2d',
                                                      dopsf=False, normalize=True)
        psf_imagelist = invert_list_serial_workflow(vis_list, model_imagelist, context='2d',
                                                    dopsf=True, normalize=True)
        dec_imagelist = deconvolve_list_serial_workflow(dirty_imagelist, psf_imagelist,
                model_imagelist, niter=1000, fractional_threshold=0.01,
                scales=[0, 3, 10], algorithm='mmclean', nmoment=3, nchan=freqwin,
                threshold=0.1, gain=0.7)
    """
    nchan = len(dirty_list)
    nmoment = get_parameter(kwargs, "nmoment", 0)
    assert isinstance(dirty_list, list), dirty_list
    assert isinstance(psf_list, list), psf_list
    assert isinstance(model_imagelist, list), model_imagelist

    def deconvolve(dirty, psf, model, facet, gthreshold, msk=None):
        """Deconvolve a single facet cube, adding the model back in.

        Skips the (expensive) deconvolution entirely and returns a copy of the
        model when the facet's peak is not significantly (10%) above the
        global threshold.

        :param dirty: dirty image facet (spectral cube for this facet)
        :param psf: psf (channel-gathered)
        :param model: model image facet
        :param facet: facet index, used only for log messages
        :param gthreshold: global stopping threshold computed over all facets
        :param msk: optional mask facet passed through to deconvolve_cube
        :return: deconvolved facet image, or a copy of the model if skipped
        """
        if prefix == '':
            lprefix = "subimage %d" % facet
        else:
            lprefix = "%s, subimage %d" % (prefix, facet)

        if nmoment > 0:
            # Moment clean: judge the peak on moment 0 (frequency average),
            # normalised by the number of channels.
            moment0 = calculate_image_frequency_moments(dirty)
            this_peak = numpy.max(numpy.abs(moment0.data[0, ...])) / dirty.data.shape[0]
        else:
            # Plain clean: judge the peak on the central channel only.
            ref_chan = dirty.data.shape[0] // 2
            this_peak = numpy.max(numpy.abs(dirty.data[ref_chan, ...]))

        if this_peak > 1.1 * gthreshold:
            # NOTE(review): this mutates the enclosing function's kwargs dict,
            # which is shared across all facet calls; gthreshold is the same
            # for every facet so the value written is consistent.
            kwargs['threshold'] = gthreshold
            result, _ = deconvolve_cube(dirty, psf, prefix=lprefix, mask=msk, **kwargs)
            # Add the model back in only when the channel counts line up;
            # otherwise fall through and return the clean component image as-is.
            if result.data.shape[0] == model.data.shape[0]:
                result.data += model.data
            return result
        else:
            return copy_image(model)

    deconvolve_facets = get_parameter(kwargs, 'deconvolve_facets', 1)
    deconvolve_overlap = get_parameter(kwargs, 'deconvolve_overlap', 0)
    deconvolve_taper = get_parameter(kwargs, 'deconvolve_taper', None)

    # With overlap, the outermost ring of facets is not deconvolved
    # independently, so the effective facet grid shrinks by one on each side.
    if deconvolve_facets > 1 and deconvolve_overlap > 0:
        deconvolve_number_facets = (deconvolve_facets - 2)**2
    else:
        deconvolve_number_facets = deconvolve_facets**2

    # Gather the per-channel models into a single cube for facet scattering.
    model_imagelist = image_gather_channels(model_imagelist)

    # Scatter the separate channel images into deconvolve facets and then gather channels for each facet.
    # This avoids constructing the entire spectral cube.
    dirty_list_trimmed = remove_sumwt(dirty_list)
    scattered_channels_facets_dirty_list = \
        [image_scatter_facets(d, facets=deconvolve_facets,
                              overlap=deconvolve_overlap,
                              taper=deconvolve_taper) for d in dirty_list_trimmed]

    # Now we do a transpose and gather: from [channel][facet] to a list
    # indexed by facet, each entry a channel-gathered cube.
    scattered_facets_list = [
        image_gather_channels([
            scattered_channels_facets_dirty_list[chan][facet]
            for chan in range(nchan)
        ]) for facet in range(deconvolve_number_facets)
    ]

    # One channel-gathered PSF is shared by every facet deconvolution.
    psf_list_trimmed = remove_sumwt(psf_list)
    psf_list_trimmed = image_gather_channels(psf_list_trimmed)

    scattered_model_imagelist = \
        image_scatter_facets(model_imagelist,
                             facets=deconvolve_facets,
                             overlap=deconvolve_overlap)

    # Work out the threshold. Need to find global peak over all dirty_list images
    threshold = get_parameter(kwargs, "threshold", 0.0)
    fractional_threshold = get_parameter(kwargs, "fractional_threshold", 0.1)
    nmoment = get_parameter(kwargs, "nmoment", 0)
    use_moment0 = nmoment > 0

    # Find the global threshold. This uses the peak in the average on the frequency axis since we
    # want to use it in a stopping criterion in a moment clean
    global_threshold = threshold_list(scattered_facets_list, threshold, fractional_threshold,
                                      use_moment0=use_moment0, prefix=prefix)

    facet_list = numpy.arange(deconvolve_number_facets).astype('int')
    # Deconvolve each facet against the shared PSF; the mask, when supplied,
    # is scattered with the same geometry so facets line up one-to-one.
    if mask is None:
        scattered_results_list = [
            deconvolve(d, psf_list_trimmed, m, facet, global_threshold)
            for d, m, facet in zip(scattered_facets_list, scattered_model_imagelist, facet_list)
        ]
    else:
        mask_list = \
            image_scatter_facets(mask,
                                 facets=deconvolve_facets,
                                 overlap=deconvolve_overlap)
        scattered_results_list = [
            deconvolve(d, psf_list_trimmed, m, facet, global_threshold, msk)
            for d, m, facet, msk in zip(
                scattered_facets_list, scattered_model_imagelist, facet_list, mask_list)
        ]

    # Gather the results back into one image, correcting for overlaps as necessary. The taper function is used to
    # feather the facets together
    gathered_results_list = image_gather_facets(scattered_results_list,
                                                model_imagelist,
                                                facets=deconvolve_facets,
                                                overlap=deconvolve_overlap,
                                                taper=deconvolve_taper)

    # Finally split the assembled cube back into the per-channel list the
    # caller expects.
    return image_scatter_channels(gathered_results_list, subimages=nchan)