def create_window(template, window_type, **kwargs):
    """Create a window (mask) image for cleaning, based on a template image.

    :param template: Template image defining the shape of the window
    :param window_type: One of 'quarter', 'no_edge', 'threshold', or None
    :param kwargs: window_edge (int, default 16) for 'no_edge';
        window_threshold (float, default 10 * std of template) for 'threshold'
    :return: Image with window pixels set to 1.0 and all others 0.0
    :raises ValueError: if window_type is not recognized
    """
    window = create_empty_image_like(template)
    if window_type == 'quarter':
        # Central half of each axis, i.e. the "inner quarter" by area
        qx = template.shape[3] // 4
        qy = template.shape[2] // 4
        window.data[..., (qy + 1):3 * qy, (qx + 1):3 * qx] = 1.0
        log.info('create_mask: Cleaning inner quarter of each sky plane')
    elif window_type == 'no_edge':
        edge = get_parameter(kwargs, 'window_edge', 16)
        nx = template.shape[3]
        ny = template.shape[2]
        window.data[..., (edge + 1):(ny - edge), (edge + 1):(nx - edge)] = 1.0
        log.info('create_mask: Window omits %d-pixel edge of each sky plane' % (edge))
    elif window_type == 'threshold':
        window_threshold = get_parameter(kwargs, 'window_threshold', None)
        if window_threshold is None:
            window_threshold = 10.0 * numpy.std(template.data)
        # Bug fix: set the mask on the underlying array (window.data), not on
        # the Image object itself, consistent with the other branches.
        window.data[template.data >= window_threshold] = 1.0
        log.info('create_mask: Window omits all points below %g' % (window_threshold))
    elif window_type is None:
        log.info("create_mask: Mask covers entire image")
    else:
        raise ValueError("Window shape %s is not recognized" % window_type)

    return window
def gaintable_timeslice_iter(gt: GainTable, **kwargs) -> numpy.ndarray:
    """Time-slice iterator over a GainTable.

    Yields, for each time box, a boolean array selecting the rows of gt whose
    time lies within half a slice width of the box centre.

    :param gt: GainTable to iterate over
    :param kwargs: timeslice ('auto' | None | numeric width in the units of
        gt.time); gaintable_slices (number of slices — second in precedence
        to timeslice)
    :return: yields boolean arrays with selected rows=True
    """
    assert isinstance(gt, GainTable)
    timemin = numpy.min(gt.time)
    timemax = numpy.max(gt.time)

    timeslice = get_parameter(kwargs, "timeslice", 'auto')
    if timeslice == 'auto':
        # One box per unique time stamp; 0.1 acts as a small half-width
        # tolerance to match rows to their own stamp — units assumed to be
        # those of gt.time, TODO confirm
        boxes = numpy.unique(gt.time)
        timeslice = 0.1
    elif timeslice is None:
        # Single slice spanning the whole time range
        timeslice = timemax - timemin
        boxes = [0.5 * (timemax + timemin)]
    elif isinstance(timeslice, float) or isinstance(timeslice, int):
        # Fixed-width slices starting at timemin
        boxes = numpy.arange(timemin, timemax, timeslice)
    else:
        # Fall back to an explicit number of slices
        gt_slices = get_parameter(kwargs, "gaintable_slices", None)
        assert gt_slices is not None, "Time slicing not specified: set either timeslice or gaintable_slices"
        boxes = numpy.linspace(timemin, timemax, gt_slices)
        if gt_slices > 1:
            timeslice = boxes[1] - boxes[0]
        else:
            timeslice = timemax - timemin

    for box in boxes:
        rows = numpy.abs(gt.time - box) <= 0.5 * timeslice
        yield rows
def predict_2d(vis: Union[BlockVisibility, Visibility], model: Image, gcfcf=None,
               **kwargs) -> Union[BlockVisibility, Visibility]:
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually expressed
    in terms of this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :param gcfcf: (Grid correction function i.e. in image space,
        Convolution function i.e. in uv space)
    :param kwargs: support (default 6) and oversampling (default 128) for the
        default PSWF convolution function, used only when gcfcf is None
    :return: resulting visibility (in place works)
    """
    # Nothing to predict from: return the visibility unchanged
    if model is None:
        return vis

    # NOTE(review): the signature advertises BlockVisibility as well, but this
    # assert accepts only Visibility — confirm whether callers coalesce first.
    assert isinstance(vis, Visibility), vis

    _, _, ny, nx = model.data.shape

    if gcfcf is None:
        # Default anti-aliasing (PSWF) grid-correction and convolution functions
        gcf, cf = create_pswf_convolutionfunction(model,
                                                  support=get_parameter(kwargs, "support", 6),
                                                  oversampling=get_parameter(kwargs, "oversampling", 128))
    else:
        gcf, cf = gcfcf

    # FFT the grid-corrected model into uv space, then degrid the visibilities
    griddata = create_griddata_from_image(model)
    griddata = fft_image_to_griddata(model, griddata, gcf)
    vis = degrid_visibility_from_griddata(vis, griddata=griddata, cf=cf)

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(vis, model, tangent=True, inverse=True)

    return svis
def weight_visibility(vis: Visibility, im: Image, **kwargs) -> (Visibility, numpy.ndarray, numpy.ndarray):
    """Reweight the visibility data using a selected algorithm.

    Imaging uses the column "imaging_weight" when imaging. This function sets
    that column using a variety of algorithms.

    Options are:
    - Natural: by visibility weight (optimum for noise in final image)
    - Uniform: weight of sample divided by sum of weights in cell (optimum for sidelobes)
    - Super-uniform: As uniform, but sum of weights is over an extended box region
    - Briggs: Compromise between natural and uniform
    - Super-briggs: As Briggs, but sum of weights is over an extended box region

    :param vis: Visibility to be reweighted
    :param im: Image defining the grid onto which the weights are gridded
    :param kwargs: weighting (default 'uniform'); padding must not be set
    :return: (visibility with imaging_weight column filled, density, densitygrid)
    """
    # Bug fix: the return annotation previously claimed `-> Visibility`, but
    # the function has always returned a 3-tuple; the annotation now matches.
    assert isinstance(vis, Visibility), "vis is not a Visibility: %r" % vis
    # Padding is not supported here; fail early if a caller requests it.
    assert get_parameter(kwargs, "padding", False) is False

    spectral_mode, vfrequencymap = get_frequency_map(vis, im)
    polarisation_mode, vpolarisationmap = get_polarisation_map(vis, im)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(vis, im)

    weighting = get_parameter(kwargs, "weighting", "uniform")
    vis.data['imaging_weight'], density, densitygrid = weight_gridding(
        im.data.shape, vis.data['weight'], vuvwmap, vfrequencymap,
        vpolarisationmap, weighting)

    return vis, density, densitygrid
def deconvolve_list_mpi_workflow(dirty_list, psf_list, model_imagelist, prefix='',
                                 mask=None, comm=MPI.COMM_WORLD, **kwargs):
    """Create a graph for deconvolution, adding to the model.

    Only rank 0 holds the input lists and performs the (serial) deconvolution;
    every other rank returns an empty list.

    :param dirty_list: in rank0
    :param psf_list: in rank0
    :param model_imagelist: in rank0
    :param prefix: Informative prefix to log messages
    :param mask: Mask for deconvolution
    :param comm: MPI communicator
    :param kwargs: Parameters for functions in components
    :return: graph for the deconvolution (empty list on ranks other than 0)
    """
    rank = comm.Get_rank()
    size = comm.Get_size()
    nchan = len(dirty_list)
    log.info('%d: deconvolve_list_mpi_workflow: dirty_list len %d psf_list len %d model_imagelist len %d' %
             (rank, len(dirty_list), len(psf_list), len(model_imagelist)))
    # NOTE(review): nmoment is read here but never used locally; presumably it
    # reaches the serial workflow through **kwargs — confirm.
    nmoment = get_parameter(kwargs, "nmoment", 0)

    # Only the serial clean path is supported; fail fast otherwise.
    assert get_parameter(kwargs, "use_serial_clean", True), "Only serial deconvolution implemented"
    if get_parameter(kwargs, "use_serial_clean", True):
        from workflows.serial.imaging.imaging_serial import deconvolve_list_serial_workflow
        if rank == 0:
            assert isinstance(model_imagelist, list), model_imagelist
            result_list = deconvolve_list_serial_workflow(dirty_list, psf_list, model_imagelist,
                                                          prefix=prefix, mask=mask, **kwargs)
        else:
            result_list = list()
    return result_list
def invert_2d(vis: Visibility, im: Image, dopsf: bool = False, normalize: bool = True,
              gcfcf=None, **kwargs) -> (Image, numpy.ndarray):
    """Invert using a 2D convolution function.

    Uses the image im as a template. Do PSF in a separate call.

    This is at the bottom of the layering i.e. all transforms are eventually
    expressed in terms of this function. Any shifting needed is performed here.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :param gcfcf: (Grid correction function i.e. in image space,
        Convolution function i.e. in uv space)
    :return: (image, sum of weights), or (image, sum of weights, imaginary
        image) when the 'imaginary' kwarg is set
    """
    # Work on a Visibility copy so the caller's data is untouched; coalesce
    # anything that is not already a Visibility.
    if isinstance(vis, Visibility):
        workvis = copy_visibility(vis)
    else:
        workvis = coalesce_visibility(vis, **kwargs)

    if dopsf:
        # Unit visibilities yield the point spread function
        workvis.data['vis'] = numpy.ones_like(workvis.data['vis'])

    workvis = shift_vis_to_image(workvis, im, tangent=True, inverse=False)

    if gcfcf is not None:
        gcf, cf = gcfcf
    else:
        gcf, cf = create_pswf_convolutionfunction(
            im,
            support=get_parameter(kwargs, "support", 6),
            oversampling=get_parameter(kwargs, "oversampling", 128))

    griddata = create_griddata_from_image(im)
    griddata, sumwt = grid_visibility_to_griddata(workvis, griddata=griddata, cf=cf)

    imaginary = get_parameter(kwargs, "imaginary", False)
    if imaginary:
        real_part, imag_part = fft_griddata_to_image(griddata, gcf, imaginary=imaginary)
        log.debug("invert_2d: retaining imaginary part of dirty image")
        if normalize:
            real_part = normalize_sumwt(real_part, sumwt)
            imag_part = normalize_sumwt(imag_part, sumwt)
        return real_part, sumwt, imag_part

    out_image = fft_griddata_to_image(griddata, gcf)
    if normalize:
        out_image = normalize_sumwt(out_image, sumwt)
    return out_image, sumwt
def get_kernel_list(vis: Visibility, im: Image, **kwargs):
    """Get the list of kernels, one per visibility.

    Chooses w-projection kernels when a positive wstep is requested and the
    data contain non-zero |w|; otherwise falls back to the standard 2D kernel.
    """
    npixel = im.data.shape[3]
    cellsize = numpy.pi * im.wcs.wcs.cdelt[1] / 180.0

    wstep = get_parameter(kwargs, "wstep", 0.0)
    oversampling = get_parameter(kwargs, "oversampling", 8)
    padding = get_parameter(kwargs, "padding", 2)

    gcf, _ = anti_aliasing_calculate((padding * npixel, padding * npixel), oversampling)

    wabsmax = numpy.max(numpy.abs(vis.w))
    if wstep > 0.0 and wabsmax > 0.0:
        # wprojection needs a lot of commentary!
        kernelname = 'wprojection'
        log.debug("get_kernel_list: Using w projection with wstep = %f" % wstep)

        # The field of view must be as padded! R_F is for reporting only so that
        # need not be padded.
        fov = cellsize * npixel * padding
        r_f = (cellsize * npixel / 2) ** 2 / abs(cellsize)
        log.debug("get_kernel_list: Fresnel number = %f" % r_f)

        # Now calculate the maximum support for the w kernel
        default_width = 2 * int(round(numpy.sin(0.5 * fov) * npixel * wabsmax * cellsize))
        kernelwidth = get_parameter(kwargs, "kernelwidth", default_width)
        kernelwidth = max(kernelwidth, 8)
        assert kernelwidth % 2 == 0
        log.debug("get_kernel_list: Maximum w kernel full width = %d pixels" % kernelwidth)

        padded_shape = [im.shape[0], im.shape[1],
                        im.shape[2] * padding, im.shape[3] * padding]
        remove_shift = get_parameter(kwargs, "remove_shift", True)
        padded_image = pad_image(im, padded_shape)
        kernel_list = w_kernel_list(vis, padded_image,
                                    oversampling=oversampling, wstep=wstep,
                                    kernelwidth=kernelwidth, remove_shift=remove_shift)
    else:
        kernelname = '2d'
        kernel_list = standard_kernel_list(vis, (padding * npixel, padding * npixel),
                                           oversampling=oversampling)

    return kernelname, gcf, kernel_list
def coalesce_visibility(vis: BlockVisibility, **kwargs) -> Visibility:
    """Convert a BlockVisibility into a Visibility, optionally averaging.

    Baseline-dependent averaging is applied when time_coal or frequency_coal
    is non-zero: the number of integrations averaged goes as the ratio of the
    maximum possible baseline length to that of each baseline, scaled by the
    coalescence factors and capped by the max_* limits. With both factors at
    zero only a format conversion is performed.

    When faceting, the coalescence factors should be roughly the same as the
    number of facets on one axis.

    :param vis: BlockVisibility to be coalesced
    :param kwargs: time_coal, max_time_coal, frequency_coal, max_frequency_coal
    :return: Coalesced visibility with cindex and blockvis filled in
    """
    assert isinstance(vis, BlockVisibility), "vis is not a BlockVisibility: %r" % vis

    time_coal = get_parameter(kwargs, 'time_coal', 0.0)
    max_time_coal = get_parameter(kwargs, 'max_time_coal', 100)
    frequency_coal = get_parameter(kwargs, 'frequency_coal', 0.0)
    max_frequency_coal = get_parameter(kwargs, 'max_frequency_coal', 100)

    if time_coal == 0.0 and frequency_coal == 0.0:
        # No averaging requested: just reformat.
        return convert_blockvisibility_to_visibility(vis)

    (cvis, cuvw, cwts, cimwt, ctime, cfrequency, cchannel_bandwidth, ca1, ca2,
     cintegration_time, cindex) = average_in_blocks(
        vis.data['vis'], vis.data['uvw'], vis.data['weight'], vis.data['imaging_weight'],
        vis.time, vis.integration_time, vis.frequency, vis.channel_bandwidth,
        time_coal, max_time_coal, frequency_coal, max_frequency_coal)

    coalesced_vis = Visibility(uvw=cuvw, time=ctime, frequency=cfrequency,
                               channel_bandwidth=cchannel_bandwidth,
                               phasecentre=vis.phasecentre, antenna1=ca1, antenna2=ca2,
                               vis=cvis, weight=cwts, imaging_weight=cimwt,
                               configuration=vis.configuration,
                               integration_time=cintegration_time,
                               polarisation_frame=vis.polarisation_frame,
                               cindex=cindex, blockvis=vis, meta=vis.meta)

    log.debug(
        'coalesce_visibility: Created new Visibility for coalesced data_models, coalescence factors (t,f) = (%.3f,%.3f)'
        % (time_coal, frequency_coal))
    log.debug('coalesce_visibility: Maximum coalescence (t,f) = (%d, %d)' % (max_time_coal, max_frequency_coal))
    log.debug('coalesce_visibility: Original %s, coalesced %s' % (vis_summary(vis), vis_summary(coalesced_vis)))

    return coalesced_vis
def predict_list_mpi_workflow(vis_list, model_imagelist, context, vis_slices=1, facets=1,
                              gcfcf=None, comm=MPI.COMM_WORLD, **kwargs):
    """Predict, iterating over both the scattered vis_list and image.

    The visibility and model lists are scattered by frequency, the visibility
    is predicted on each rank, and the results are gathered back on rank 0.

    About data distribution: vis_list and model_imagelist live in rank 0;
    vis_slices, facets, context are replicated in all nodes. gcfcf if exists
    lives in rank 0; if not every mpi proc will create its own for the
    corresponding subset of the image.

    :param vis_list:
    :param model_imagelist: Model used to determine image parameters
    :param vis_slices: Number of vis slices (w stack or timeslice)
    :param facets: Number of facets (per axis)
    :param context: Type of processing e.g. 2d, wstack, timeslice or facets
    :param gcfcf: tuple containing grid correction and convolution function
    :param comm: MPI communicator
    :param kwargs: Parameters for functions in components
    :return: List of vis_lists (gathered on rank 0; empty list elsewhere)
    """
    rank = comm.Get_rank()
    size = comm.Get_size()
    log.info('%d: In predict_list_mpi_workflow: %d elements in vis_list' % (rank, len(vis_list)))

    # the assert only makes sense in proc 0 as for the others both lists are empty
    assert len(vis_list) == len(model_imagelist), "Model must be the same length as the vis_list"

    # The use_serial_predict version paralelizes by freq (my opt version)
    assert get_parameter(kwargs, "use_serial_predict", True), "Only freq paralellization implemented"
    if get_parameter(kwargs, "use_serial_predict", True):
        from workflows.serial.imaging.imaging_serial import predict_list_serial_workflow

        # Distribute visibilities and model by freq
        sub_vis_list = numpy.array_split(vis_list, size)
        sub_vis_list = comm.scatter(sub_vis_list, root=0)
        sub_model_imagelist = numpy.array_split(model_imagelist, size)
        sub_model_imagelist = comm.scatter(sub_model_imagelist, root=0)
        # NOTE(review): comm.scatter is collective — this deadlocks if gcfcf is
        # non-None on rank 0 but None on other ranks; all ranks must agree on
        # whether gcfcf is supplied.
        if gcfcf is not None:
            sub_gcfcf = numpy.array_split(gcfcf, size)
            sub_gcfcf = comm.scatter(sub_gcfcf, root=0)
        # Bug fix: this isinstance() was a bare, no-op expression; it was
        # clearly intended to be an assertion.
        assert isinstance(sub_vis_list[0], Visibility)
        # Bug fix: pass each rank's scattered piece (sub_gcfcf) to the serial
        # workflow rather than the full rank-0 gcfcf list.
        image_results_list = [predict_list_serial_workflow(
            vis_list=[sub_vis_list[i]],
            model_imagelist=[sub_model_imagelist[i]],
            vis_slices=vis_slices, facets=facets, context=context,
            gcfcf=([sub_gcfcf[i]] if gcfcf is not None else None),
            **kwargs)[0]
            for i, _ in enumerate(sub_vis_list)]

        image_results_list = comm.gather(image_results_list, root=0)
        if rank == 0:
            image_results_list = numpy.concatenate(image_results_list)
        else:
            image_results_list = list()

    return image_results_list
def solve_image_arlexecute(vis: Visibility, model: Image, components=None, context='2d', **kwargs) -> \
        (Visibility, Image, Image):
    """Solve for image using deconvolve_cube and specified predict, invert.

    This is the same as a majorcycle/minorcycle algorithm. The components are
    removed prior to deconvolution.

    See also arguments for predict, invert, deconvolve_cube functions.

    :param vis: Visibility
    :param model: Model image — updated in place with the clean components
    :param components: Optional skycomponents removed before deconvolution
    :param context: Imaging context e.g. '2d'
    :param kwargs: nmajor (major cycles, default 5), threshold (stopping
        threshold, default 0.0), plus parameters for predict/invert/deconvolve
    :return: (residual visibility, model image, final dirty image)
    """
    nmajor = get_parameter(kwargs, 'nmajor', 5)
    thresh = get_parameter(kwargs, "threshold", 0.0)
    log.info("solve_image_arlexecute: Performing %d major cycles" % nmajor)

    # The model is added to each major cycle and then the visibilities are
    # calculated from the full model
    vispred = copy_visibility(vis, zero=True)
    visres = copy_visibility(vis, zero=True)

    vispred = predict_arlexecute(vispred, model, context=context, **kwargs)

    if components is not None:
        vispred = predict_skycomponent_visibility(vispred, components)

    visres.data['vis'] = vis.data['vis'] - vispred.data['vis']
    dirty, sumwt = invert_arlexecute(visres, model, context=context, dopsf=False, **kwargs)
    # NOTE(review): sumwt.any() > 0.0 compares a bool against 0.0; it fails
    # only when every weight is zero — plain `assert sumwt.any()` is equivalent
    # and clearer.
    assert sumwt.any() > 0.0, "Sum of weights is zero"
    psf, sumwt = invert_arlexecute(visres, model, context=context, dopsf=True, **kwargs)
    assert sumwt.any() > 0.0, "Sum of weights is zero"

    for i in range(nmajor):
        log.info("solve_image_arlexecute: Start of major cycle %d" % i)
        cc, res = deconvolve_cube(dirty, psf, **kwargs)
        # Accumulate the clean components into the model (in place)
        model.data += cc.data
        # Re-predict from the updated model and form the new residual image
        vispred.data['vis'][...] = 0.0
        vispred = predict_arlexecute(vispred, model, context=context, **kwargs)
        visres.data['vis'] = vis.data['vis'] - vispred.data['vis']
        dirty, sumwt = invert_arlexecute(visres, model, context=context, dopsf=False, **kwargs)
        if numpy.abs(dirty.data).max() < 1.1 * thresh:
            log.info("Reached stopping threshold %.6f Jy" % thresh)
            break
    # NOTE(review): two consecutive "End of ..." messages; the first one
    # ("minor cycles") looks like a leftover — confirm intent.
    log.info("solve_image_arlexecute: End of minor cycles")
    log.info("solve_image_arlexecute: End of major cycles")
    return visres, model, dirty
def continuum_imaging_list_serial_workflow(vis_list, model_imagelist, context='2d', **kwargs):
    """Serial continuum-imaging pipeline: ICAL but with no selfcal.

    Builds the PSF once, then alternates residual computation and
    deconvolution for nmajor cycles, and finally restores the result.

    :param vis_list:
    :param model_imagelist:
    :param context: Imaging context
    :param kwargs: Parameters for functions in components (notably nmajor)
    :return: (deconvolved model list, residual list, restored list)
    """
    psf_list = invert_list_serial_workflow(vis_list, model_imagelist, dopsf=True,
                                           context=context, **kwargs)
    residual_list = residual_list_serial_workflow(vis_list, model_imagelist,
                                                  context=context, **kwargs)
    deconvolved_list, _ = deconvolve_list_serial_workflow(residual_list, psf_list,
                                                          model_imagelist,
                                                          prefix='cycle 0', **kwargs)
    nmajor = get_parameter(kwargs, "nmajor", 5)
    if nmajor > 1:
        for cycle in range(nmajor):
            residual_list = residual_list_serial_workflow(vis_list, deconvolved_list,
                                                          context=context, **kwargs)
            deconvolved_list, _ = deconvolve_list_serial_workflow(residual_list, psf_list,
                                                                  deconvolved_list,
                                                                  prefix="cycle %d" % (cycle + 1),
                                                                  **kwargs)
    residual_list = residual_list_serial_workflow(vis_list, deconvolved_list,
                                                  context=context, **kwargs)
    restored_list = restore_list_serial_workflow(deconvolved_list, psf_list, residual_list)
    return (deconvolved_list, residual_list, restored_list)
def restore_cube(model: Image, psf: Image, residual=None, **kwargs) -> Image:
    """ Restore the model image to the residuals

    The model is convolved with a Gaussian whose width is either supplied
    (psfwidth) or fitted to the central core of the PSF, and the residual
    image (if any) is then added.

    :params model: Model image with clean components
    :params psf: Input PSF used to determine the restoring beam width
    :params residual: Optional residual image, added after convolution
    :params kwargs: psfwidth — Gaussian stddev in pixels; fitted from the PSF
        core when not supplied
    :return: restored image
    """
    assert isinstance(model, Image), model
    assert isinstance(psf, Image), psf
    assert residual is None or isinstance(residual, Image), residual

    restored = copy_image(model)

    npixel = psf.data.shape[3]
    # Central 15x15 pixel cutout of the PSF used for the Gaussian fit
    sl = slice(npixel // 2 - 7, npixel // 2 + 8)

    size = get_parameter(kwargs, "psfwidth", None)

    if size is None:
        # isotropic at the moment!
        # NOTE(review): scipy.optimize.minpack is a private module; imported
        # only to catch its fit-failure exception below — confirm it still
        # exists in the pinned scipy version.
        from scipy.optimize import minpack
        try:
            fit = fit_2dgaussian(psf.data[0, 0, sl, sl])
            if fit.x_stddev <= 0.0 or fit.y_stddev <= 0.0:
                log.debug('restore_cube: error in fitting to psf, using 1 pixel stddev')
                size = 1.0
            else:
                # Use the larger axis of the fitted Gaussian
                size = max(fit.x_stddev, fit.y_stddev)
                log.debug('restore_cube: psfwidth = %s' % (size))
        except minpack.error as err:
            log.debug('restore_cube: minpack error, using 1 pixel stddev')
            size = 1.0
        except ValueError as err:
            log.debug('restore_cube: warning in fit to psf, using 1 pixel stddev')
            size = 1.0
    else:
        log.debug('restore_cube: Using specified psfwidth = %s' % (size))

    # TODO: Remove filter when astropy fixes convolve
    import warnings
    warnings.simplefilter(action='ignore', category=FutureWarning)
    from astropy.convolution import Gaussian2DKernel, convolve_fft

    # By convention, we normalise the peak not the integral so this is the volume of the Gaussian
    norm = 2.0 * numpy.pi * size ** 2
    gk = Gaussian2DKernel(size)
    for chan in range(model.shape[0]):
        for pol in range(model.shape[1]):
            restored.data[chan, pol, :, :] = norm * convolve_fft(
                model.data[chan, pol, :, :], gk,
                normalize_kernel=False, allow_huge=True)
    if residual is not None:
        restored.data += residual.data

    return restored
def create_named_configuration(name: str = 'LOWBD2', **kwargs) -> Configuration:
    """ Standard configurations e.g. LOWBD2, MIDBD2

    :param name: name of Configuration LOWBD2, LOWBD1, LOFAR, VLAA, ASKAP
    :param rmax: Maximum distance of station from the average (m)
    :return:

    For LOWBD2, setting rmax gives the following number of stations
    100.0       13
    300.0       94
    1000.0      251
    3000.0      314
    10000.0     398
    30000.0     476
    100000.0    512
    """
    # Table of file-backed configurations:
    # name -> (antenna file, (lon, lat, height), mount, name template, dish diameter)
    table = {
        'LOWBD2': ("data/configurations/LOWBD2.csv",
                   ("116.4999", "-26.7000", 300.0), 'xy', 'LOWBD2_%d', 35.0),
        'LOWBD1': ("data/configurations/LOWBD1.csv",
                   ("116.4999", "-26.7000", 300.0), 'xy', 'LOWBD1_%d', 35.0),
        'LOWBD2-CORE': ("data/configurations/LOWBD2-CORE.csv",
                        ("116.4999", "-26.7000", 300.0), 'xy', 'LOWBD2_%d', 35.0),
        'ASKAP': ("data/configurations/A27CR3P6B.in.csv",
                  ("+116.6356824", "-26.7013006", 377.0), 'equatorial', 'ASKAP_%d', 12.0),
        'VLAA': ("data/configurations/VLA_A_hor_xyz.csv",
                 ("-107.6184", "34.0784", 2124.0), 'altaz', 'VLA_%d', 25.0),
        'VLAA_north': ("data/configurations/VLA_A_hor_xyz.csv",
                       ("-107.6184", "90.000", 2124.0), 'altaz', 'VLA_%d', 25.0),
    }
    if name in table:
        antfile, (lon, lat, height), mount, names, diameter = table[name]
        location = EarthLocation(lon=lon, lat=lat, height=height)
        fc = create_configuration_from_file(antfile=arl_path(antfile), location=location,
                                            mount=mount, names=names, diameter=diameter,
                                            name=name, **kwargs)
    elif name == 'LOFAR':
        # LOFAR has its own loader and accepts no extra metadata
        assert get_parameter(kwargs, "meta", False) is False
        fc = create_LOFAR_configuration(antfile=arl_path("data/configurations/LOFAR.csv"))
    else:
        raise ValueError("No such Configuration %s" % name)
    return fc
def ical_workflow(vis_list, model_imagelist, context='2d', calibration_context='TG', do_selfcal=True, **kwargs):
    """Create graph for ICAL pipeline.

    :param vis_list:
    :param model_imagelist:
    :param context: imaging context e.g. '2d'
    :param calibration_context: Sequence of calibration steps e.g. TGB
    :param do_selfcal: Do the selfcalibration?
    :param kwargs: Parameters for functions in components (notably nmajor)
    :return: graph yielding (deconvolved model list, residual list, restored list)
    """
    psf_imagelist = invert_workflow(vis_list, model_imagelist, dopsf=True, context=context, **kwargs)

    model_vislist = zero_vislist_workflow(vis_list)
    model_vislist = predict_workflow(model_vislist, model_imagelist, context=context, **kwargs)

    if do_selfcal:
        # Make the predicted visibilities, selfcalibrate against it correcting the gains, then
        # form the residual visibility, then make the residual image
        vis_list = calibrate_workflow(vis_list, model_vislist,
                                      calibration_context=calibration_context, **kwargs)
        residual_vislist = subtract_vislist_workflow(vis_list, model_vislist)
        # Bug fix: this invert produces the residual image, not the PSF, so
        # dopsf must be False (it was True), consistent with the in-loop
        # invert below.
        residual_imagelist = invert_workflow(residual_vislist, model_imagelist, dopsf=False,
                                             context=context, iteration=0, **kwargs)
    else:
        # If we are not selfcalibrating it's much easier and we can avoid an unnecessary round
        # of gather/scatter for visibility partitioning such as timeslices and wstack.
        residual_imagelist = residual_workflow(vis_list, model_imagelist, context=context, **kwargs)

    deconvolve_model_imagelist, _ = deconvolve_workflow(residual_imagelist, psf_imagelist,
                                                        model_imagelist, prefix='cycle 0', **kwargs)

    nmajor = get_parameter(kwargs, "nmajor", 5)
    if nmajor > 1:
        for cycle in range(nmajor):
            if do_selfcal:
                model_vislist = zero_vislist_workflow(vis_list)
                model_vislist = predict_workflow(model_vislist, deconvolve_model_imagelist,
                                                 context=context, **kwargs)
                vis_list = calibrate_workflow(vis_list, model_vislist,
                                              calibration_context=calibration_context,
                                              iteration=cycle, **kwargs)
                residual_vislist = subtract_vislist_workflow(vis_list, model_vislist)
                residual_imagelist = invert_workflow(residual_vislist, model_imagelist,
                                                     dopsf=False, context=context, **kwargs)
            else:
                residual_imagelist = residual_workflow(vis_list, deconvolve_model_imagelist,
                                                       context=context, **kwargs)
            prefix = "cycle %d" % (cycle + 1)
            deconvolve_model_imagelist, _ = deconvolve_workflow(residual_imagelist, psf_imagelist,
                                                                deconvolve_model_imagelist,
                                                                prefix=prefix, **kwargs)
    residual_imagelist = residual_workflow(vis_list, deconvolve_model_imagelist, context=context, **kwargs)
    restore_imagelist = restore_workflow(deconvolve_model_imagelist, psf_imagelist, residual_imagelist)
    return arlexecute.execute((deconvolve_model_imagelist, residual_imagelist, restore_imagelist))
def predict_2d(vis: Union[BlockVisibility, Visibility], model: Image, **kwargs) -> \
        Union[BlockVisibility, Visibility]:
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually expressed
    in terms of this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted (a BlockVisibility is coalesced
        first and decoalesced on return)
    :param model: model image
    :return: resulting visibility (in place works)
    """
    if isinstance(vis, BlockVisibility):
        log.debug("imaging.predict: coalescing prior to prediction")
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    assert isinstance(avis, Visibility), avis

    _, _, ny, nx = model.data.shape

    # Bug fix (readability): the kwargs dict used to be named `padding` and was
    # then shadowed by the numeric padding returned from get_uvw_map — two
    # different types under one name. Rename the dict to keep them distinct.
    padding_kwargs = {}
    if get_parameter(kwargs, "padding", False):
        padding_kwargs = {'padding': get_parameter(kwargs, "padding", False)}

    spectral_mode, vfrequencymap = get_frequency_map(avis, model)
    polarisation_mode, vpolarisationmap = get_polarisation_map(avis, model)
    # get_uvw_map returns the (numeric) padding actually applied
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model, **padding_kwargs)
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)

    # Grid-correct and pad the model, then FFT it to the uv grid
    uvgrid = fft((pad_mid(model.data, int(round(padding * nx))) * gcf).astype(dtype=complex))

    avis.data['vis'] = convolutional_degrid(vkernellist, avis.data['vis'].shape, uvgrid,
                                            vuvwmap, vfrequencymap)

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if isinstance(vis, BlockVisibility) and isinstance(svis, Visibility):
        log.debug("imaging.predict decoalescing post prediction")
        return decoalesce_visibility(svis)
    else:
        return svis
def t1(**kwargs):
    """Exercise get_parameter lookup semantics on a kwargs dict.

    Expects kwargs to contain cellsize=0.1 and spectral_mode='mfs', and no
    'null_mode' or 'foo' keys.
    """
    cases = [
        ((kwargs, 'cellsize'), 0.1),
        ((kwargs, 'spectral_mode', 'channels'), 'mfs'),
        ((kwargs, 'null_mode', 'mfs'), 'mfs'),
        ((kwargs, 'foo', 'bar'), 'bar'),
        ((kwargs, 'foo'), None),
        ((None, 'foo', 'bar'), 'bar'),
    ]
    for args, expected in cases:
        result = get_parameter(*args)
        if expected is None:
            assert result is None
        else:
            assert result == expected
def deconvolve(dirty, psf, model, facet, gthreshold):
    """Deconvolve a single facet, adding the result to the input model.

    NOTE(review): this is a closure — `prefix` and `kwargs` are free variables
    captured from the enclosing (serial deconvolution workflow) scope, which
    is not visible here.

    :param dirty: Dirty image for this facet
    :param psf: PSF image
    :param model: Current model image for this facet
    :param facet: Facet index (used for logging only)
    :param gthreshold: Global clean threshold
    :return: deconvolved image (clean result plus model), or a copy of the
        model when the peak is already at or below 1.1 * gthreshold
    """
    import time
    starttime = time.time()
    # Build a log prefix identifying this facet
    if prefix == '':
        lprefix = "facet %d" % facet
    else:
        lprefix = "%s, facet %d" % (prefix, facet)

    # NOTE(review): the key here is "nmoments" while other functions in this
    # file read "nmoment" — confirm which spelling callers actually pass.
    nmoments = get_parameter(kwargs, "nmoments", 0)

    if nmoments > 0:
        # Peak of the zeroth frequency moment, averaged per channel
        moment0 = calculate_image_frequency_moments(dirty)
        this_peak = numpy.max(numpy.abs(moment0.data[0, ...])) / dirty.data.shape[0]
    else:
        this_peak = numpy.max(numpy.abs(dirty.data[0, ...]))

    if this_peak > 1.1 * gthreshold:
        log.info(
            "deconvolve_list_serial_workflow %s: cleaning - peak %.6f > 1.1 * threshold %.6f"
            % (lprefix, this_peak, gthreshold))
        # Side effect: the captured kwargs dict is mutated, so subsequent
        # facets see this threshold too.
        kwargs['threshold'] = gthreshold
        result, _ = deconvolve_cube(dirty, psf, prefix=lprefix, **kwargs)

        if result.data.shape[0] == model.data.shape[0]:
            result.data += model.data
        else:
            log.warning(
                "deconvolve_list_serial_workflow %s: Initial model %s and clean result %s do not have the same shape"
                % (lprefix, str(model.data.shape[0]), str(result.data.shape[0])))

        flux = numpy.sum(result.data[0, 0, ...])
        log.info(
            '### %s, %.6f, %.6f, True, %.3f # cycle, facet, peak, cleaned flux, clean, time?'
            % (lprefix, this_peak, flux, time.time() - starttime))
        return result
    else:
        log.info(
            "deconvolve_list_serial_workflow %s: Not cleaning - peak %.6f <= 1.1 * threshold %.6f"
            % (lprefix, this_peak, gthreshold))
        log.info(
            '### %s, %.6f, %.6f, False, %.3f # cycle, facet, peak, cleaned flux, clean, time?'
            % (lprefix, this_peak, 0.0, time.time() - starttime))
        return copy_image(model)
def continuum_imaging_list_serial_workflow(vis_list, model_imagelist, context, gcfcf=None,
                                           vis_slices=1, facets=1, **kwargs):
    """Serial continuum-imaging pipeline: ICAL but with no selfcal.

    :param vis_list:
    :param model_imagelist:
    :param context: Imaging context
    :param gcfcf: grid correction / convolution function; a default PSWF pair
        is created from the first model when not supplied
    :param vis_slices: Number of vis slices
    :param facets: Number of facets (per axis)
    :param kwargs: Parameters for functions in components (notably nmajor)
    :return: (deconvolved model list, residual list, restored list)
    """
    if gcfcf is None:
        # Default grid-correction/convolution functions from the first model
        gcfcf = [create_pswf_convolutionfunction(model_imagelist[0])]

    psf_list = invert_list_serial_workflow(vis_list, model_imagelist, context=context,
                                           dopsf=True, vis_slices=vis_slices, facets=facets,
                                           gcfcf=gcfcf, **kwargs)
    residual_list = residual_list_serial_workflow(vis_list, model_imagelist, context=context,
                                                  gcfcf=gcfcf, vis_slices=vis_slices,
                                                  facets=facets, **kwargs)
    deconvolved_list, _ = deconvolve_list_serial_workflow(residual_list, psf_list,
                                                          model_imagelist,
                                                          prefix='cycle 0', **kwargs)

    nmajor = get_parameter(kwargs, "nmajor", 5)
    if nmajor > 1:
        for cycle in range(nmajor):
            residual_list = residual_list_serial_workflow(vis_list, deconvolved_list,
                                                          context=context, vis_slices=vis_slices,
                                                          facets=facets, gcfcf=gcfcf, **kwargs)
            deconvolved_list, _ = deconvolve_list_serial_workflow(residual_list, psf_list,
                                                                  deconvolved_list,
                                                                  prefix="cycle %d" % (cycle + 1),
                                                                  **kwargs)

    residual_list = residual_list_serial_workflow(vis_list, deconvolved_list, context=context,
                                                  vis_slices=vis_slices, facets=facets,
                                                  gcfcf=gcfcf, **kwargs)
    restored_list = restore_list_serial_workflow(deconvolved_list, psf_list, residual_list)
    return (deconvolved_list, residual_list, restored_list)
def deconvolve_cube(dirty: Image, psf: Image, prefix='', **kwargs) -> (Image, Image):
    """ Clean using a variety of algorithms

    Functions that clean a dirty image using a point spread function. The algorithms available are:

    hogbom: Hogbom CLEAN See: Hogbom CLEAN A&A Suppl, 15, 417, (1974)

    msclean: MultiScale CLEAN See: Cornwell, T.J., Multiscale CLEAN
    (IEEE Journal of Selected Topics in Sig Proc, 2008 vol. 2 pp. 793-801)

    mfsmsclean, msmfsclean, mmclean: MultiScale Multi-Frequency
    See: U. Rau and T. J. Cornwell, "A multi-scale multi-frequency deconvolution algorithm for synthesis imaging
    in radio interferometry," A&A 532, A71 (2011).

    For example::

        comp, residual = deconvolve_cube(dirty, psf, niter=1000, gain=0.7, algorithm='msclean',
                                         scales=[0, 3, 10, 30], threshold=0.01)

    For the MFS clean, the psf must have number of channels >= 2 * nmoment

    :param dirty: Image dirty image
    :param psf: Image Point Spread Function
    :param prefix: Informational prefix used in all log messages
    :param window_shape: Window strategy: 'quarter'|'no_edge'|None
    :param mask: Window in the form of an image, overrides window_shape
    :param algorithm: Cleaning algorithm: 'msclean'|'hogbom'|'hogbom-complex'|'mfsmsclean'
    :param gain: loop gain (float) 0.7
    :param threshold: Clean threshold (0.0)
    :param fractional_threshold: Fractional threshold (0.01)
    :param scales: Scales (in pixels) for multiscale ([0, 3, 10, 30])
    :param nmoment: Number of frequency moments (default 3)
    :param findpeak: Method of finding peak in mfsclean: 'Algorithm1'|'ASKAPSoft'|'CASA'|'ARL', Default is ARL.
    :return: componentimage, residual
    """
    assert isinstance(dirty, Image), dirty
    assert isinstance(psf, Image), psf

    # ---- Build the clean window (1.0 where cleaning is allowed) ----
    window_shape = get_parameter(kwargs, 'window_shape', None)
    if window_shape == 'quarter':
        log.info("deconvolve_cube %s: window is inner quarter" % prefix)
        qx = dirty.shape[3] // 4
        qy = dirty.shape[2] // 4
        window = numpy.zeros_like(dirty.data)
        window[..., (qy + 1):3 * qy, (qx + 1):3 * qx] = 1.0
        log.info(
            'deconvolve_cube %s: Cleaning inner quarter of each sky plane' %
            prefix)
    elif window_shape == 'no_edge':
        # Clean everywhere except a border of window_edge pixels
        edge = get_parameter(kwargs, 'window_edge', 16)
        nx = dirty.shape[3]
        ny = dirty.shape[2]
        window = numpy.zeros_like(dirty.data)
        window[..., (edge + 1):(ny - edge), (edge + 1):(nx - edge)] = 1.0
        log.info(
            'deconvolve_cube %s: Window omits %d-pixel edge of each sky plane'
            % (prefix, edge))
    elif window_shape is None:
        log.info("deconvolve_cube %s: Cleaning entire image" % prefix)
        window = None
    else:
        raise ValueError("Window shape %s is not recognized" % window_shape)

    # An explicit mask image takes precedence over window_shape
    mask = get_parameter(kwargs, 'mask', None)
    if isinstance(mask, Image):
        if window is not None:
            log.warning(
                'deconvolve_cube %s: Overriding window_shape with mask image'
                % (prefix))
        window = mask.data

    # ---- Optionally crop the PSF to +/- psf_support pixels about its centre ----
    # NOTE(review): this mutates psf.data in place, so the caller's psf is
    # cropped after this call — confirm callers do not reuse the full PSF.
    psf_support = get_parameter(kwargs, 'psf_support',
                                max(dirty.shape[2] // 2, dirty.shape[3] // 2))
    if (psf_support <= psf.shape[2] // 2) and (
        (psf_support <= psf.shape[3] // 2)):
        centre = [psf.shape[2] // 2, psf.shape[3] // 2]
        psf.data = psf.data[..., (centre[0] - psf_support):(centre[0] + psf_support),
                            (centre[1] - psf_support):(centre[1] + psf_support)]
        log.info('deconvolve_cube %s: PSF support = +/- %d pixels' %
                 (prefix, psf_support))
        log.info('deconvolve_cube %s: PSF shape %s' %
                 (prefix, str(psf.data.shape)))

    algorithm = get_parameter(kwargs, 'algorithm', 'msclean')
    if algorithm == 'msclean':
        # ---- Multi-scale clean: each (channel, pol) plane independently ----
        log.info(
            "deconvolve_cube %s: Multi-scale clean of each polarisation and channel separately"
            % prefix)
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        scales = get_parameter(kwargs, 'scales', [0, 3, 10, 30])
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.01)
        assert 0.0 < fracthresh < 1.0
        comp_array = numpy.zeros_like(dirty.data)
        residual_array = numpy.zeros_like(dirty.data)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                # Skip planes with an identically-zero PSF
                if psf.data[channel, pol, :, :].max():
                    log.info(
                        "deconvolve_cube %s: Processing pol %d, channel %d" %
                        (prefix, pol, channel))
                    if window is None:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            msclean(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                    None, gain, thresh, niter, scales, fracthresh, prefix)
                    else:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            msclean(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                    window[channel, pol, :, :], gain, thresh, niter, scales,
                                    fracthresh, prefix)
                else:
                    log.info(
                        "deconvolve_cube %s: Skipping pol %d, channel %d" %
                        (prefix, pol, channel))
        comp_image = create_image_from_array(comp_array, dirty.wcs,
                                             dirty.polarisation_frame)
        residual_image = create_image_from_array(residual_array, dirty.wcs,
                                                 dirty.polarisation_frame)
    elif algorithm == 'msmfsclean' or algorithm == 'mfsmsclean' or algorithm == 'mmclean':
        # ---- Multi-scale multi-frequency clean on Taylor-moment images ----
        findpeak = get_parameter(kwargs, "findpeak", 'ARL')
        log.info(
            "deconvolve_cube %s: Multi-scale multi-frequency clean of each polarisation separately"
            % prefix)
        nmoment = get_parameter(kwargs, "nmoment", 3)
        assert nmoment >= 1, "Number of frequency moments must be greater than or equal to one"
        nchan = dirty.shape[0]
        assert nchan > 2 * (nmoment - 1), "Require nchan %d > 2 * (nmoment %d - 1)" % (
            nchan, 2 * (nmoment - 1))
        dirty_taylor = calculate_image_frequency_moments(dirty, nmoment=nmoment)
        # PSF needs twice the number of moments of the dirty image
        if nmoment > 1:
            psf_taylor = calculate_image_frequency_moments(psf, nmoment=2 * nmoment)
        else:
            psf_taylor = calculate_image_frequency_moments(psf, nmoment=1)
        # Normalise both moment images by the PSF moment peak
        psf_peak = numpy.max(psf_taylor.data)
        dirty_taylor.data /= psf_peak
        psf_taylor.data /= psf_peak
        log.info("deconvolve_cube %s: Shape of Dirty moments image %s" %
                 (prefix, str(dirty_taylor.shape)))
        log.info("deconvolve_cube %s: Shape of PSF moments image %s" %
                 (prefix, str(psf_taylor.shape)))
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        scales = get_parameter(kwargs, 'scales', [0, 3, 10, 30])
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 < fracthresh < 1.0
        comp_array = numpy.zeros(dirty_taylor.data.shape)
        residual_array = numpy.zeros(dirty_taylor.data.shape)
        # Channels collapse into moments, so only loop over polarisation
        for pol in range(dirty_taylor.data.shape[1]):
            if psf_taylor.data[0, pol, :, :].max():
                log.info("deconvolve_cube %s: Processing pol %d" % (prefix, pol))
                if window is None:
                    comp_array[:, pol, :, :], residual_array[:, pol, :, :] = \
                        msmfsclean(dirty_taylor.data[:, pol, :, :],
                                   psf_taylor.data[:, pol, :, :], None, gain, thresh,
                                   niter, scales, fracthresh, findpeak, prefix)
                else:
                    log.info(
                        'deconvolve_cube %s: Clean window has %d valid pixels' %
                        (prefix, int(numpy.sum(window[0, pol]))))
                    comp_array[:, pol, :, :], residual_array[:, pol, :, :] = \
                        msmfsclean(dirty_taylor.data[:, pol, :, :],
                                   psf_taylor.data[:, pol, :, :], window[0, pol, :, :],
                                   gain, thresh, niter, scales, fracthresh, findpeak, prefix)
            else:
                log.info("deconvolve_cube %s: Skipping pol %d" % (prefix, pol))
        comp_image = create_image_from_array(comp_array, dirty_taylor.wcs,
                                             dirty.polarisation_frame)
        residual_image = create_image_from_array(residual_array, dirty_taylor.wcs,
                                                 dirty.polarisation_frame)
        # Unless moments are requested, convert back to a spectral cube
        return_moments = get_parameter(kwargs, "return_moments", False)
        if not return_moments:
            log.info("deconvolve_cube %s: calculating spectral cubes" % prefix)
            comp_image = calculate_image_from_frequency_moments(
                dirty, comp_image)
            residual_image = calculate_image_from_frequency_moments(
                dirty, residual_image)
        else:
            log.info("deconvolve_cube %s: constructed moment cubes" % prefix)
    elif algorithm == 'hogbom':
        # ---- Classic Hogbom clean: each (channel, pol) plane independently ----
        log.info(
            "deconvolve_cube %s: Hogbom clean of each polarisation and channel separately"
            % prefix)
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 < fracthresh < 1.0
        comp_array = numpy.zeros(dirty.data.shape)
        residual_array = numpy.zeros(dirty.data.shape)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if psf.data[channel, pol, :, :].max():
                    log.info(
                        "deconvolve_cube %s: Processing pol %d, channel %d" %
                        (prefix, pol, channel))
                    if window is None:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                   None, gain, thresh, niter, fracthresh, prefix)
                    else:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                   window[channel, pol, :, :], gain, thresh, niter, fracthresh,
                                   prefix)
                else:
                    log.info(
                        "deconvolve_cube %s: Skipping pol %d, channel %d" %
                        (prefix, pol, channel))
        comp_image = create_image_from_array(comp_array, dirty.wcs,
                                             dirty.polarisation_frame)
        residual_image = create_image_from_array(residual_array, dirty.wcs,
                                                 dirty.polarisation_frame)
    elif algorithm == 'hogbom-complex':
        # ---- Hogbom clean with pols 1 and 2 treated as a complex pair ----
        log.info(
            "deconvolve_cube_complex: Hogbom-complex clean of each polarisation and channel separately"
        )
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 <= fracthresh < 1.0
        comp_array = numpy.zeros(dirty.data.shape)
        residual_array = numpy.zeros(dirty.data.shape)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if pol == 0 or pol == 3:
                    # Pols 0 and 3 are cleaned independently with real Hogbom
                    if psf.data[channel, pol, :, :].max():
                        log.info(
                            "deconvolve_cube_complex: Processing pol %d, channel %d"
                            % (pol, channel))
                        if window is None:
                            comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                                hogbom(dirty.data[channel, pol, :, :],
                                       psf.data[channel, pol, :, :], None, gain, thresh,
                                       niter, fracthresh)
                        else:
                            comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                                hogbom(dirty.data[channel, pol, :, :],
                                       psf.data[channel, pol, :, :],
                                       window[channel, pol, :, :], gain, thresh, niter,
                                       fracthresh)
                    else:
                        log.info(
                            "deconvolve_cube_complex: Skipping pol %d, channel %d"
                            % (pol, channel))
                if pol == 1:
                    # Pols 1 and 2 are cleaned jointly as one complex image
                    if psf.data[channel, 1:2, :, :].max():
                        log.info(
                            "deconvolve_cube_complex: Processing pol 1 and 2, channel %d"
                            % (channel))
                        if window is None:
                            comp_array[channel, 1, :, :], comp_array[
                                channel, 2, :, :], residual_array[
                                    channel, 1, :, :], residual_array[
                                        channel, 2, :, :] = hogbom_complex(
                                            dirty.data[channel, 1, :, :],
                                            dirty.data[channel, 2, :, :],
                                            psf.data[channel, 1, :, :],
                                            psf.data[channel, 2, :, :], None, gain,
                                            thresh, niter, fracthresh)
                        else:
                            comp_array[channel, 1, :, :], comp_array[
                                channel, 2, :, :], residual_array[
                                    channel, 1, :, :], residual_array[
                                        channel, 2, :, :] = hogbom_complex(
                                            dirty.data[channel, 1, :, :],
                                            dirty.data[channel, 2, :, :],
                                            psf.data[channel, 1, :, :],
                                            psf.data[channel, 2, :, :],
                                            window[channel, pol, :, :], gain, thresh,
                                            niter, fracthresh)
                    else:
                        log.info(
                            "deconvolve_cube_complex: Skipping pol 1 and 2, channel %d"
                            % (channel))
                if pol == 2:
                    # Pol 2 was already handled together with pol 1
                    continue
        comp_image = create_image_from_array(
            comp_array,
            dirty.wcs,
            polarisation_frame=PolarisationFrame('stokesIQUV'))
        residual_image = create_image_from_array(
            residual_array,
            dirty.wcs,
            polarisation_frame=PolarisationFrame('stokesIQUV'))
    else:
        raise ValueError('deconvolve_cube %s: Unknown algorithm %s' %
                         (prefix, algorithm))

    return comp_image, residual_image
def create_image_from_visibility(vis, **kwargs) -> Image:
    """Make an empty image from params and Visibility

    :param vis: Visibility or BlockVisibility used to derive defaults
    :param phasecentre: Phasecentre (Skycoord)
    :param channel_bandwidth: Channel width (Hz)
    :param cellsize: Cellsize (radians)
    :param npixel: Number of pixels on each axis (512)
    :param frame: Coordinate frame for WCS (ICRS)
    :param equinox: Equinox for WCS (2000.0)
    :param nchan: Number of image channels (Default is 1 -> MFS)
    :return: image
    """
    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), \
        "vis is not a Visibility or a BlockVisibility: %r" % (vis)
    log.info(
        "create_image_from_visibility: Parsing parameters to get definition of WCS"
    )
    imagecentre = get_parameter(kwargs, "imagecentre", vis.phasecentre)
    phasecentre = get_parameter(kwargs, "phasecentre", vis.phasecentre)
    # Spectral processing options: decide how visibility channels map to
    # image channels (cube, single-channel MFS, or multi-channel MFS)
    ufrequency = numpy.unique(vis.frequency)
    vnchan = len(ufrequency)
    frequency = get_parameter(kwargs, "frequency", vis.frequency)
    inchan = get_parameter(kwargs, "nchan", vnchan)
    reffrequency = frequency[0] * units.Hz
    # The 0.99999999999 factor presumably avoids a rounding issue at exact
    # channel boundaries — TODO confirm
    channel_bandwidth = get_parameter(
        kwargs, "channel_bandwidth",
        0.99999999999 * vis.channel_bandwidth[0]) * units.Hz
    if (inchan == vnchan) and vnchan > 1:
        log.info(
            "create_image_from_visibility: Defining %d channel Image at %s, starting frequency %s, and bandwidth %s"
            % (inchan, imagecentre, reffrequency, channel_bandwidth))
    elif (inchan == 1) and vnchan > 1:
        assert numpy.abs(channel_bandwidth.value
                         ) > 0.0, "Channel width must be non-zero for mfs mode"
        log.info(
            "create_image_from_visibility: Defining single channel MFS Image at %s, starting frequency %s, "
            "and bandwidth %s" %
            (imagecentre, reffrequency, channel_bandwidth))
    elif inchan > 1 and vnchan > 1:
        assert numpy.abs(channel_bandwidth.value
                         ) > 0.0, "Channel width must be non-zero for mfs mode"
        log.info(
            "create_image_from_visibility: Defining multi-channel MFS Image at %s, starting frequency %s, "
            "and bandwidth %s" %
            (imagecentre, reffrequency, channel_bandwidth))
    elif (inchan == 1) and (vnchan == 1):
        assert numpy.abs(channel_bandwidth.value
                         ) > 0.0, "Channel width must be non-zero for mfs mode"
        log.info(
            "create_image_from_visibility: Defining single channel Image at %s, starting frequency %s, "
            "and bandwidth %s" %
            (imagecentre, reffrequency, channel_bandwidth))
    else:
        raise ValueError(
            "create_image_from_visibility: unknown spectral mode ")

    # Image sampling options: derive the cellsize from the longest baseline
    npixel = get_parameter(kwargs, "npixel", 512)
    uvmax = numpy.max((numpy.abs(vis.data['uvw'][:, 0:1])))
    if isinstance(vis, BlockVisibility):
        # BlockVisibility uvw is in metres; convert to wavelengths
        uvmax *= numpy.max(frequency) / constants.c.to('m s^-1').value
    log.info("create_image_from_visibility: uvmax = %f wavelengths" % uvmax)
    # Nyquist sampling: two pixels across the finest fringe
    criticalcellsize = 1.0 / (uvmax * 2.0)
    log.info(
        "create_image_from_visibility: Critical cellsize = %f radians, %f degrees"
        % (criticalcellsize, criticalcellsize * 180.0 / numpy.pi))
    cellsize = get_parameter(kwargs, "cellsize", 0.5 * criticalcellsize)
    log.info(
        "create_image_from_visibility: Cellsize = %f radians, %f degrees" %
        (cellsize, cellsize * 180.0 / numpy.pi))
    override_cellsize = get_parameter(kwargs, "override_cellsize", True)
    if override_cellsize and cellsize > criticalcellsize:
        log.info(
            "create_image_from_visibility: Resetting cellsize %f radians to criticalcellsize %f radians"
            % (cellsize, criticalcellsize))
        cellsize = criticalcellsize
    pol_frame = get_parameter(kwargs, "polarisation_frame",
                              PolarisationFrame("stokesI"))
    inpol = pol_frame.npol

    # Now we can define the WCS, which is a convenient place to hold the info above
    # Beware of python indexing order! wcs and the array have opposite ordering
    shape = [inchan, inpol, npixel, npixel]
    w = wcs.WCS(naxis=4)
    # The negation in the longitude is needed by definition of RA, DEC
    w.wcs.cdelt = [
        -cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0,
        channel_bandwidth.to(units.Hz).value
    ]
    # The numpy definition of the phase centre of an FFT is n // 2 (0 - rel) so that's what we use for
    # the reference pixel. We have to use 0 rel everywhere.
    w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
    w.wcs.crval = [
        phasecentre.ra.deg, phasecentre.dec.deg, 1.0,
        reffrequency.to(units.Hz).value
    ]
    w.naxis = 4
    # Sanity check: the reference pixel must map back to the image centre
    direction_centre = pixel_to_skycoord(npixel // 2 + 1,
                                         npixel // 2 + 1,
                                         wcs=w,
                                         origin=1)
    assert direction_centre.separation(imagecentre).value < 1e-7, \
        "Image phase centre [npixel//2, npixel//2] should be %s, actually is %s" % \
        (str(imagecentre), str(direction_centre))
    w.wcs.radesys = get_parameter(kwargs, 'frame', 'ICRS')
    w.wcs.equinox = get_parameter(kwargs, 'equinox', 2000.0)
    return create_image_from_array(numpy.zeros(shape),
                                   wcs=w,
                                   polarisation_frame=pol_frame)
def invert_2d(vis: Visibility, im: Image, dopsf: bool = False, normalize: bool = True, **kwargs) \
        -> (Image, numpy.ndarray):
    """ Invert using 2D convolution function, including w projection optionally

    Use the image im as a template. Do PSF in a separate call.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms
    of this function. Any shifting needed is performed here.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :return: resulting image, sum of weights (and imaginary image if 'imaginary' is set)
    """
    # Coalesce BlockVisibility to Visibility; otherwise copy so the caller's
    # data is not mutated by the PSF substitution below
    if not isinstance(vis, Visibility):
        svis = coalesce_visibility(vis, **kwargs)
    else:
        svis = copy_visibility(vis)

    if dopsf:
        # PSF = transform of unit visibilities with the same weights
        svis.data['vis'] = numpy.ones_like(svis.data['vis'])

    svis = shift_vis_to_image(svis, im, tangent=True, inverse=False)

    nchan, npol, ny, nx = im.data.shape

    # 'padding' starts life as a kwargs dict for get_uvw_map ...
    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}

    spectral_mode, vfrequencymap = get_frequency_map(svis, im)
    polarisation_mode, vpolarisationmap = get_polarisation_map(svis, im)
    # ... and is then REBOUND here to the numeric padding factor returned by
    # get_uvw_map — presumably an int/float scale factor; confirm against
    # get_uvw_map before touching this
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(svis, im, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(svis, im, **kwargs)

    # Optionally pad to control aliasing
    imgridpad = numpy.zeros(
        [nchan, npol, int(round(padding * ny)), int(round(padding * nx))],
        dtype='complex')
    imgridpad, sumwt = convolutional_grid(vkernellist, imgridpad,
                                          svis.data['vis'],
                                          svis.data['imaging_weight'],
                                          vuvwmap, vfrequencymap)

    # Fourier transform the padded grid to image, multiply by the gridding correction
    # function, and extract the unpadded inner part.

    # Normalise weights for consistency with transform
    sumwt /= float(padding * int(round(padding * nx)) * ny)

    imaginary = get_parameter(kwargs, "imaginary", False)
    if imaginary:
        # Return real and imaginary parts as separate images (3-tuple!)
        log.debug("invert_2d: retaining imaginary part of dirty image")
        result = extract_mid(ifft(imgridpad) * gcf, npixel=nx)
        resultreal = create_image_from_array(result.real, im.wcs,
                                             im.polarisation_frame)
        resultimag = create_image_from_array(result.imag, im.wcs,
                                             im.polarisation_frame)
        if normalize:
            resultreal = normalize_sumwt(resultreal, sumwt)
            resultimag = normalize_sumwt(resultimag, sumwt)
        return resultreal, sumwt, resultimag
    else:
        result = extract_mid(numpy.real(ifft(imgridpad)) * gcf, npixel=nx)
        resultimage = create_image_from_array(result, im.wcs,
                                              im.polarisation_frame)
        if normalize:
            resultimage = normalize_sumwt(resultimage, sumwt)
        return resultimage, sumwt
def ical(block_vis: BlockVisibility, model: Image, components=None, context='2d', controls=None, **kwargs):
    """ Post observation image, deconvolve, and self-calibrate

    NOTE: mutates ``model`` in place (clean components are added each cycle).

    :param block_vis: Observed block visibility
    :param model: Model image (updated in place)
    :param components: Initial components
    :param context: Imaging context
    :param controls: calibration controls dictionary
    :return: model, residual, restored
    """
    nmajor = get_parameter(kwargs, 'nmajor', 5)
    log.info("ical: Performing %d major cycles" % nmajor)

    do_selfcal = get_parameter(kwargs, "do_selfcal", False)

    if controls is None:
        controls = create_calibration_controls(**kwargs)

    # The model is added to each major cycle and then the visibilities are
    # calculated from the full model
    vis = convert_blockvisibility_to_visibility(block_vis)
    block_vispred = copy_visibility(block_vis, zero=True)
    vispred = convert_blockvisibility_to_visibility(block_vispred)
    vispred.data['vis'][...] = 0.0
    visres = copy_visibility(vispred)

    # Predict model visibilities from the initial model (plus any components)
    vispred = predict_function(vispred, model, context=context, **kwargs)
    if components is not None:
        vispred = predict_skycomponent_visibility(vispred, components)

    if do_selfcal:
        # iteration=-1 marks the pre-cycle calibration pass
        vis, gaintables = calibrate_function(vis,
                                             vispred,
                                             'TGB',
                                             controls,
                                             iteration=-1)
    # Residual visibilities = observed - model
    visres.data['vis'] = vis.data['vis'] - vispred.data['vis']
    dirty, sumwt = invert_function(visres, model, context=context, **kwargs)
    log.info("Maximum in residual image is %.6f" %
             (numpy.max(numpy.abs(dirty.data))))

    # PSF is computed once and reused in every major cycle
    psf, sumwt = invert_function(visres,
                                 model,
                                 dopsf=True,
                                 context=context,
                                 **kwargs)

    thresh = get_parameter(kwargs, "threshold", 0.0)

    for i in range(nmajor):
        log.info("ical: Start of major cycle %d of %d" % (i, nmajor))
        cc, res = deconvolve_cube(dirty, psf, **kwargs)
        # Accumulate clean components into the model (in-place update)
        model.data += cc.data
        vispred.data['vis'][...] = 0.0
        vispred = predict_function(vispred, model, context=context, **kwargs)
        if do_selfcal:
            vis, gaintables = calibrate_function(vis,
                                                 vispred,
                                                 'TGB',
                                                 controls,
                                                 iteration=i)
        visres.data['vis'] = vis.data['vis'] - vispred.data['vis']
        dirty, sumwt = invert_function(visres, model, context=context, **kwargs)
        log.info("Maximum in residual image is %s" %
                 (numpy.max(numpy.abs(dirty.data))))
        # Stop early once the residual peak drops below the threshold
        if numpy.abs(dirty.data).max() < 1.1 * thresh:
            log.info("ical: Reached stopping threshold %.6f Jy" % thresh)
            break
        log.info("ical: End of major cycle")

    log.info("ical: End of major cycles")
    restored = restore_cube(model, psf, dirty, **kwargs)

    return model, dirty, restored
def deconvolve_list_serial_workflow(dirty_list, psf_list, model_imagelist, prefix='', **kwargs):
    """Create a graph for deconvolution, adding to the model

    The dirty images are scattered into facets, each facet is cleaned against
    a globally-determined threshold, and the results are gathered back.

    :param dirty_list: List of (dirty image, sumwt) per channel
    :param psf_list: List of (psf, sumwt) per channel
    :param model_imagelist: List of model images per channel
    :param prefix: Informational prefix for log messages
    :param kwargs: Parameters for functions in components
    :return: (graph for the deconvolution, graph for the flat)
    """
    nchan = len(dirty_list)

    def deconvolve(dirty, psf, model, facet, gthreshold):
        # Clean one facet if its peak exceeds the global threshold, adding
        # the result to the model; otherwise return a copy of the model.
        import time

        starttime = time.time()
        if prefix == '':
            lprefix = "facet %d" % facet
        else:
            lprefix = "%s, facet %d" % (prefix, facet)

        nmoments = get_parameter(kwargs, "nmoments", 0)

        if nmoments > 0:
            # Peak on the frequency-averaged (moment 0) image
            moment0 = calculate_image_frequency_moments(dirty)
            this_peak = numpy.max(numpy.abs(
                moment0.data[0, ...])) / dirty.data.shape[0]
        else:
            this_peak = numpy.max(numpy.abs(dirty.data[0, ...]))

        if this_peak > 1.1 * gthreshold:
            log.info(
                "deconvolve_list_serial_workflow %s: cleaning - peak %.6f > 1.1 * threshold %.6f"
                % (lprefix, this_peak, gthreshold))
            kwargs['threshold'] = gthreshold
            result, _ = deconvolve_cube(dirty, psf, prefix=lprefix, **kwargs)

            if result.data.shape[0] == model.data.shape[0]:
                result.data += model.data
            else:
                log.warning(
                    "deconvolve_list_serial_workflow %s: Initial model %s and clean result %s do not have the same shape"
                    % (lprefix, str(
                        model.data.shape[0]), str(result.data.shape[0])))

            flux = numpy.sum(result.data[0, 0, ...])
            log.info(
                '### %s, %.6f, %.6f, True, %.3f # cycle, facet, peak, cleaned flux, clean, time?'
                % (lprefix, this_peak, flux, time.time() - starttime))

            return result
        else:
            log.info(
                "deconvolve_list_serial_workflow %s: Not cleaning - peak %.6f <= 1.1 * threshold %.6f"
                % (lprefix, this_peak, gthreshold))
            log.info(
                '### %s, %.6f, %.6f, False, %.3f # cycle, facet, peak, cleaned flux, clean, time?'
                % (lprefix, this_peak, 0.0, time.time() - starttime))

            return copy_image(model)

    deconvolve_facets = get_parameter(kwargs, 'deconvolve_facets', 1)
    deconvolve_overlap = get_parameter(kwargs, 'deconvolve_overlap', 0)
    deconvolve_taper = get_parameter(kwargs, 'deconvolve_taper', None)
    # With overlap, the outermost ring of facets is dropped
    if deconvolve_overlap > 0:
        deconvolve_number_facets = (deconvolve_facets - 2)**2
    else:
        deconvolve_number_facets = deconvolve_facets**2

    model_imagelist = image_gather_channels(model_imagelist)

    # Scatter the separate channel images into deconvolve facets and then gather channels for each facet.
    # This avoids constructing the entire spectral cube.
    scattered_channels_facets_dirty_list = \
        [image_scatter_facets(d[0], facets=deconvolve_facets,
                              overlap=deconvolve_overlap,
                              taper=deconvolve_taper) for d in dirty_list]

    # Now we do a transpose and gather: one multi-channel image per facet
    scattered_facets_list = [
        image_gather_channels([
            scattered_channels_facets_dirty_list[chan][facet]
            for chan in range(nchan)
        ]) for facet in range(deconvolve_number_facets)
    ]

    psf_list = remove_sumwt(psf_list)
    psf_list = image_gather_channels(psf_list)

    scattered_model_imagelist = \
        image_scatter_facets(model_imagelist,
                             facets=deconvolve_facets,
                             overlap=deconvolve_overlap)

    # Work out the threshold. Need to find global peak over all dirty_list images
    threshold = get_parameter(kwargs, "threshold", 0.0)
    fractional_threshold = get_parameter(kwargs, "fractional_threshold", 0.1)
    nmoments = get_parameter(kwargs, "nmoments", 0)
    use_moment0 = nmoments > 0

    # Find the global threshold. This uses the peak in the average on the frequency axis since we
    # want to use it in a stopping criterion in a moment clean
    global_threshold = threshold_list(scattered_facets_list,
                                      threshold,
                                      fractional_threshold,
                                      use_moment0=use_moment0,
                                      prefix=prefix)

    facet_list = numpy.arange(deconvolve_number_facets).astype('int')
    scattered_results_list = [
        deconvolve(d, psf_list, m, facet, global_threshold)
        for d, m, facet in zip(scattered_facets_list,
                               scattered_model_imagelist, facet_list)
    ]

    # Gather the results back into one image, correcting for overlaps as necessary. The taper function is is used to
    # feather the facets together
    gathered_results_list = image_gather_facets(scattered_results_list,
                                                model_imagelist,
                                                facets=deconvolve_facets,
                                                overlap=deconvolve_overlap,
                                                taper=deconvolve_taper)
    flat_list = image_gather_facets(scattered_results_list,
                                    model_imagelist,
                                    facets=deconvolve_facets,
                                    overlap=deconvolve_overlap,
                                    taper=deconvolve_taper,
                                    return_flat=True)

    return image_scatter_channels(gathered_results_list,
                                  subimages=nchan), flat_list
def predict_list_arlexecute_workflow(vis_list, model_imagelist, context, vis_slices=1, facets=1,
                                     gcfcf=None, **kwargs):
    """Predict, iterating over both the scattered vis_list and image

    The visibility and image are scattered, the visibility is predicted on each part, and then the
    parts are assembled.

    :param vis_list: List of visibilities (or graph nodes producing them)
    :param model_imagelist: Model used to determine image parameters
    :param vis_slices: Number of vis slices (w stack or timeslice)
    :param facets: Number of facets (per axis)
    :param context: Type of processing e.g. 2d, wstack, timeslice or facets
    :param gcfcf: tuple containing grid correction and convolution function
    :param kwargs: Parameters for functions in components
    :return: List of vis_lists
    """
    # Optionally delegate each element to the serial predict workflow,
    # wrapped as a single graph node per visibility
    if get_parameter(kwargs, "use_serial_predict", False):
        from workflows.serial.imaging.imaging_serial import predict_list_serial_workflow
        return [arlexecute.execute(predict_list_serial_workflow, nout=1) \
                    (vis_list=[vis_list[i]],
                     model_imagelist=[model_imagelist[i]], vis_slices=vis_slices,
                     facets=facets, context=context, gcfcf=gcfcf, **kwargs)[0]
                for i, _ in enumerate(vis_list)]

    # Predict_2d does not clear the vis so we have to do it here.
    vis_list = zero_list_arlexecute_workflow(vis_list)

    c = imaging_context(context)
    vis_iter = c['vis_iterator']
    predict = c['predict']

    # Odd facet counts > 1 are reduced by one (even counts pass through)
    if facets % 2 == 0 or facets == 1:
        actual_number_facets = facets
    else:
        actual_number_facets = facets - 1

    def predict_ignore_none(vis, model, g):
        # Graph leaf: predict one sub-visibility, passing None through
        # (empty slices produce None and must not crash the graph)
        if vis is not None:
            assert isinstance(vis, Visibility), vis
            assert isinstance(model, Image), model
            return predict(vis, model, context=context, gcfcf=g, **kwargs)
        else:
            return None

    if gcfcf is None:
        gcfcf = [arlexecute.execute(create_pswf_convolutionfunction)(m)
                 for m in model_imagelist]

    # Loop over all frequency windows
    if facets == 1:
        image_results_list = list()
        for ivis, subvis in enumerate(vis_list):
            # One gcfcf per visibility, or a single shared one
            if len(gcfcf) > 1:
                g = gcfcf[ivis]
            else:
                g = gcfcf[0]
            # Create the graph to divide the visibility into slices. This is by copy.
            sub_vis_lists = arlexecute.execute(visibility_scatter,
                                               nout=vis_slices)(subvis,
                                                                vis_iter,
                                                                vis_slices)

            image_vis_lists = list()
            # Loop over sub visibility
            for sub_vis_list in sub_vis_lists:
                # Predict visibility for this sub-visibility from this image
                image_vis_list = arlexecute.execute(predict_ignore_none,
                                                    pure=True, nout=1) \
                    (sub_vis_list, model_imagelist[ivis], g)
                # Sum all sub-visibilities
                image_vis_lists.append(image_vis_list)
            image_results_list.append(arlexecute.execute(visibility_gather, nout=1)
                                      (image_vis_lists, subvis, vis_iter))

        result = image_results_list
    else:
        image_results_list_list = list()
        for ivis, subvis in enumerate(vis_list):
            # Create the graph to divide an image into facets. This is by reference.
            facet_lists = arlexecute.execute(image_scatter_facets,
                                             nout=actual_number_facets ** 2)(
                model_imagelist[ivis], facets=facets)
            # Create the graph to divide the visibility into slices. This is by copy.
            sub_vis_lists = arlexecute.execute(visibility_scatter, nout=vis_slices)\
                (subvis, vis_iter, vis_slices)

            facet_vis_lists = list()
            # Loop over sub visibility
            for sub_vis_list in sub_vis_lists:
                facet_vis_results = list()
                # Loop over facets
                for facet_list in facet_lists:
                    # Predict visibility for this subvisibility from this facet
                    facet_vis_list = arlexecute.execute(predict_ignore_none,
                                                        pure=True, nout=1)\
                        (sub_vis_list, facet_list, None)
                    facet_vis_results.append(facet_vis_list)
                # Sum the current sub-visibility over all facets
                facet_vis_lists.append(arlexecute.execute(sum_predict_results)(facet_vis_results))
            # Sum all sub-visibilities
            image_results_list_list.append(
                arlexecute.execute(visibility_gather, nout=1)(facet_vis_lists, subvis, vis_iter))

        result = image_results_list_list
    return arlexecute.optimize(result)
def ical_list_serial_workflow(vis_list, model_imagelist, context, vis_slices=1, facets=1,
                              gcfcf=None, calibration_context='TG', do_selfcal=True, **kwargs):
    """Run ICAL pipeline: iterative imaging, deconvolution and self-calibration.

    :param vis_list: List of visibilities
    :param model_imagelist: List of model images, one per visibility
    :param context: imaging context e.g. '2d'
    :param vis_slices: Number of visibility slices (w stack or timeslice)
    :param facets: Number of facets (per axis)
    :param gcfcf: Grid correction / convolution function; computed from the
        first model image if not supplied
    :param calibration_context: Sequence of calibration steps e.g. TGB
    :param do_selfcal: Do the selfcalibration?
    :param kwargs: Parameters for functions in components
    :return: (deconvolved model list, residual list, restored list, gaintable list)
    """
    gt_list = list()

    if gcfcf is None:
        gcfcf = [create_pswf_convolutionfunction(model_imagelist[0])]

    # BUGFIX: the keyword is gcfcf, not gcgcf. The misspelt keyword was
    # silently swallowed by **kwargs in the invert/predict/residual workflows
    # (compare continuum_imaging_list_serial_workflow), so the supplied
    # convolution function was ignored and recomputed on every call.
    psf_imagelist = invert_list_serial_workflow(vis_list, model_imagelist, dopsf=True,
                                                context=context, vis_slices=vis_slices,
                                                facets=facets, gcfcf=gcfcf, **kwargs)

    model_vislist = [copy_visibility(v, zero=True) for v in vis_list]

    if do_selfcal:
        cal_vis_list = [copy_visibility(v) for v in vis_list]
    else:
        cal_vis_list = vis_list

    if do_selfcal:
        # Make the predicted visibilities, selfcalibrate against it correcting the gains, then
        # form the residual visibility, then make the residual image
        model_vislist = predict_list_serial_workflow(model_vislist, model_imagelist,
                                                     context=context, vis_slices=vis_slices,
                                                     facets=facets, gcfcf=gcfcf, **kwargs)
        cal_vis_list, gt_list = calibrate_list_serial_workflow(
            cal_vis_list, model_vislist,
            calibration_context=calibration_context, **kwargs)
        residual_vislist = subtract_list_serial_workflow(cal_vis_list, model_vislist)
        residual_imagelist = invert_list_serial_workflow(residual_vislist, model_imagelist,
                                                         context=context, dopsf=False,
                                                         vis_slices=vis_slices, facets=facets,
                                                         gcfcf=gcfcf, iteration=0, **kwargs)
    else:
        # If we are not selfcalibrating it's much easier and we can avoid an unnecessary round
        # of gather/scatter for visibility partitioning such as timeslices and wstack.
        residual_imagelist = residual_list_serial_workflow(cal_vis_list, model_imagelist,
                                                           context=context,
                                                           vis_slices=vis_slices,
                                                           facets=facets, gcfcf=gcfcf, **kwargs)

    deconvolve_model_imagelist, _ = deconvolve_list_serial_workflow(residual_imagelist,
                                                                    psf_imagelist,
                                                                    model_imagelist,
                                                                    prefix='cycle 0',
                                                                    **kwargs)

    nmajor = get_parameter(kwargs, "nmajor", 5)
    if nmajor > 1:
        for cycle in range(nmajor):
            if do_selfcal:
                model_vislist = predict_list_serial_workflow(model_vislist,
                                                             deconvolve_model_imagelist,
                                                             context=context,
                                                             vis_slices=vis_slices,
                                                             facets=facets, gcfcf=gcfcf,
                                                             **kwargs)
                cal_vis_list = [copy_visibility(v) for v in vis_list]
                cal_vis_list, gt_list = calibrate_list_serial_workflow(
                    cal_vis_list, model_vislist,
                    calibration_context=calibration_context,
                    iteration=cycle, **kwargs)
                residual_vislist = subtract_list_serial_workflow(cal_vis_list, model_vislist)
                residual_imagelist = invert_list_serial_workflow(residual_vislist,
                                                                 model_imagelist,
                                                                 context=context,
                                                                 vis_slices=vis_slices,
                                                                 facets=facets, gcfcf=gcfcf,
                                                                 **kwargs)
            else:
                residual_imagelist = residual_list_serial_workflow(cal_vis_list,
                                                                   deconvolve_model_imagelist,
                                                                   context=context,
                                                                   vis_slices=vis_slices,
                                                                   facets=facets, gcfcf=gcfcf,
                                                                   **kwargs)

            prefix = "cycle %d" % (cycle + 1)
            deconvolve_model_imagelist, _ = deconvolve_list_serial_workflow(
                residual_imagelist, psf_imagelist, deconvolve_model_imagelist,
                prefix=prefix, **kwargs)

    # Final residual and restored images from the converged model
    residual_imagelist = residual_list_serial_workflow(cal_vis_list,
                                                       deconvolve_model_imagelist,
                                                       context=context,
                                                       vis_slices=vis_slices,
                                                       facets=facets, gcfcf=gcfcf, **kwargs)
    restore_imagelist = restore_list_serial_workflow(deconvolve_model_imagelist,
                                                     psf_imagelist, residual_imagelist)

    return deconvolve_model_imagelist, residual_imagelist, restore_imagelist, gt_list
def invert_list_arlexecute_workflow(vis_list, template_model_imagelist, context, dopsf=False,
                                    normalize=True, facets=1, vis_slices=1, gcfcf=None, **kwargs):
    """ Sum results from invert, iterating over the scattered image and vis_list

    :param vis_list:
    :param template_model_imagelist: Model used to determine image parameters
    :param dopsf: Make the PSF instead of the dirty image
    :param facets: Number of facets
    :param normalize: Normalize by sumwt
    :param vis_slices: Number of slices
    :param context: Imaging context
    :param gcfcf: tuple containing grid correction and convolution function
    :param kwargs: Parameters for functions in components
    :return: List of (image, sumwt) tuple
    """
    # Use serial invert for each element of the visibility list. This means that e.g. iteration
    # through w-planes or timeslices is done sequentially thus not incurring the memory cost
    # of doing all at once.
    if get_parameter(kwargs, "use_serial_invert", False):
        from workflows.serial.imaging.imaging_serial import invert_list_serial_workflow
        return [arlexecute.execute(invert_list_serial_workflow, nout=1) \
                (vis_list=[vis_list[i]], template_model_imagelist=[template_model_imagelist[i]],
                 context=context, dopsf=dopsf, normalize=normalize, vis_slices=vis_slices,
                 facets=facets, gcfcf=gcfcf, **kwargs)[0]
                for i, _ in enumerate(vis_list)]

    # collections.Iterable was a deprecated alias removed in Python 3.10; use collections.abc.
    import collections.abc
    if not isinstance(template_model_imagelist, collections.abc.Iterable):
        template_model_imagelist = [template_model_imagelist]

    c = imaging_context(context)
    vis_iter = c['vis_iterator']
    invert = c['invert']

    # For odd facet counts greater than one, one facet row/column is dropped.
    if facets % 2 == 0 or facets == 1:
        actual_number_facets = facets
    else:
        actual_number_facets = max(1, (facets - 1))

    def gather_image_iteration_results(results, template_model):
        # Reassemble facet results into a single image and accumulate the sum of weights.
        result = create_empty_image_like(template_model)
        i = 0
        sumwt = numpy.zeros([template_model.nchan, template_model.npol])
        for dpatch in image_scatter_facets(result, facets=facets):
            assert i < len(results), "Too few results in gather_image_iteration_results"
            if results[i] is not None:
                assert len(results[i]) == 2, results[i]
                dpatch.data[...] = results[i][0].data[...]
                sumwt += results[i][1]
                i += 1
        return result, sumwt

    def invert_ignore_none(vis, model, gg):
        # A None visibility slice contributes an empty image and zero weight.
        if vis is not None:
            return invert(vis, model, context=context, dopsf=dopsf, normalize=normalize,
                          gcfcf=gg, **kwargs)
        else:
            return create_empty_image_like(model), numpy.zeros([model.nchan, model.npol])

    # If we are doing facets, we need to create the gcf for each image
    if gcfcf is None and facets == 1:
        gcfcf = [arlexecute.execute(create_pswf_convolutionfunction)(template_model_imagelist[0])]

    # Loop over all vis_lists independently
    results_vislist = list()
    if facets == 1:
        for ivis, sub_vis_list in enumerate(vis_list):
            if len(gcfcf) > 1:
                g = gcfcf[ivis]
            else:
                g = gcfcf[0]
            # Create the graph to divide the visibility into slices. This is by copy.
            sub_sub_vis_lists = arlexecute.execute(visibility_scatter, nout=vis_slices)\
                (sub_vis_list, vis_iter, vis_slices=vis_slices)

            # Iterate within each sub_sub_vis_list
            vis_results = list()
            for sub_sub_vis_list in sub_sub_vis_lists:
                vis_results.append(arlexecute.execute(invert_ignore_none, pure=True)
                                   (sub_sub_vis_list, template_model_imagelist[ivis], g))
            results_vislist.append(sum_invert_results_arlexecute(vis_results))
        result = results_vislist
    else:
        for ivis, sub_vis_list in enumerate(vis_list):
            # Create the graph to divide an image into facets. This is by reference.
            facet_lists = arlexecute.execute(image_scatter_facets, nout=actual_number_facets ** 2)(
                template_model_imagelist[ivis], facets=facets)
            # Create the graph to divide the visibility into slices. This is by copy.
            sub_sub_vis_lists = arlexecute.execute(visibility_scatter, nout=vis_slices)\
                (sub_vis_list, vis_iter, vis_slices=vis_slices)

            # Iterate within each vis_list
            vis_results = list()
            for sub_sub_vis_list in sub_sub_vis_lists:
                facet_vis_results = list()
                for facet_list in facet_lists:
                    facet_vis_results.append(
                        arlexecute.execute(invert_ignore_none, pure=True)(sub_sub_vis_list, facet_list, None))
                vis_results.append(arlexecute.execute(gather_image_iteration_results, nout=1)
                                   (facet_vis_results, template_model_imagelist[ivis]))
            results_vislist.append(sum_invert_results_arlexecute(vis_results))
        result = results_vislist
    return arlexecute.optimize(result)
def deconvolve_list_arlexecute_workflow(dirty_list, psf_list, model_imagelist, prefix='', mask=None, **kwargs):
    """Create a graph for deconvolution, adding to the model

    NOTE(review): a second function with this exact name appears later in this module and
    shadows this definition — confirm whether this version is intentionally kept.

    :param dirty_list: List of (dirty image, sumwt), one per channel
    :param psf_list: List of (psf image, sumwt), one per channel
    :param model_imagelist: List of model images, one per channel
    :param prefix: Informative prefix to log messages
    :param mask: Mask for deconvolution
    :param kwargs: Parameters for functions in components
    :return: graph for the deconvolution
    """
    nchan = len(dirty_list)
    # Number of moments. 1 is the sum.
    nmoment = get_parameter(kwargs, "nmoment", 1)

    # Optionally delegate the whole clean to the serial implementation as one task.
    if get_parameter(kwargs, "use_serial_clean", False):
        from workflows.serial.imaging.imaging_serial import deconvolve_list_serial_workflow
        return arlexecute.execute(deconvolve_list_serial_workflow, nout=nchan) \
            (dirty_list, psf_list, model_imagelist, prefix=prefix, mask=mask, **kwargs)

    def deconvolve(dirty, psf, model, facet, gthreshold, msk=None):
        # Clean a single facet; skipped (model returned unchanged) if its peak is
        # already within 10% of the global threshold.
        if prefix == '':
            lprefix = "facet %d" % facet
        else:
            lprefix = "%s, facet %d" % (prefix, facet)

        # Peak estimate: moment 0 average over frequency, or the central channel.
        if nmoment > 0:
            moment0 = calculate_image_frequency_moments(dirty)
            this_peak = numpy.max(numpy.abs(moment0.data[0, ...])) / dirty.data.shape[0]
        else:
            ref_chan = dirty.data.shape[0] // 2
            this_peak = numpy.max(numpy.abs(dirty.data[ref_chan, ...]))

        if this_peak > 1.1 * gthreshold:
            # NOTE(review): this mutates the enclosing kwargs dict, so the threshold
            # persists across facet tasks — confirm this is intended.
            kwargs['threshold'] = gthreshold
            result, _ = deconvolve_cube(dirty, psf, prefix=lprefix, mask=msk, **kwargs)
            # Only add the model back if the channel counts agree.
            if result.data.shape[0] == model.data.shape[0]:
                result.data += model.data
            return result
        else:
            return copy_image(model)

    deconvolve_facets = get_parameter(kwargs, 'deconvolve_facets', 1)
    deconvolve_overlap = get_parameter(kwargs, 'deconvolve_overlap', 0)
    deconvolve_taper = get_parameter(kwargs, 'deconvolve_taper', None)

    # With overlap, the outermost ring of facets is not cleaned independently.
    if deconvolve_facets > 1 and deconvolve_overlap > 0:
        deconvolve_number_facets = (deconvolve_facets - 2) ** 2
    else:
        deconvolve_number_facets = deconvolve_facets ** 2

    deconvolve_model_imagelist = arlexecute.execute(image_gather_channels, nout=1)(model_imagelist)

    # Scatter the separate channel images into deconvolve facets and then gather channels for
    # each facet. This avoids constructing the entire spectral cube.
    dirty_list_trimmed = arlexecute.execute(remove_sumwt, nout=nchan)(dirty_list)
    scattered_channels_facets_dirty_list = \
        [arlexecute.execute(image_scatter_facets, nout=deconvolve_number_facets)(d, facets=deconvolve_facets,
                                                                                overlap=deconvolve_overlap,
                                                                                taper=deconvolve_taper)
         for d in dirty_list_trimmed]
    # Now we do a transpose and gather
    scattered_facets_list = [
        arlexecute.execute(image_gather_channels, nout=1)([scattered_channels_facets_dirty_list[chan][facet]
                                                           for chan in range(nchan)])
        for facet in range(deconvolve_number_facets)]

    psf_list_trimmed = arlexecute.execute(remove_sumwt, nout=nchan)(psf_list)
    psf_list_trimmed = arlexecute.execute(image_gather_channels, nout=1)(psf_list_trimmed)

    scattered_model_imagelist = \
        arlexecute.execute(image_scatter_facets, nout=deconvolve_number_facets)(deconvolve_model_imagelist,
                                                                               facets=deconvolve_facets,
                                                                               overlap=deconvolve_overlap)

    # Work out the threshold. Need to find global peak over all dirty_list images
    threshold = get_parameter(kwargs, "threshold", 0.0)
    fractional_threshold = get_parameter(kwargs, "fractional_threshold", 0.1)
    nmoment = get_parameter(kwargs, "nmoment", 1)
    use_moment0 = nmoment > 0

    # Find the global threshold. This uses the peak in the average on the frequency axis since we
    # want to use it in a stopping criterion in a moment clean
    global_threshold = arlexecute.execute(threshold_list, nout=1)(scattered_facets_list, threshold,
                                                                  fractional_threshold,
                                                                  use_moment0=use_moment0, prefix=prefix)

    facet_list = numpy.arange(deconvolve_number_facets).astype('int')
    if mask is None:
        scattered_results_list = [
            arlexecute.execute(deconvolve, nout=1)(d, psf_list_trimmed, m, facet, global_threshold)
            for d, m, facet in zip(scattered_facets_list, scattered_model_imagelist, facet_list)]
    else:
        # Scatter the mask the same way as the model so each facet gets its own mask patch.
        mask_list = \
            arlexecute.execute(image_scatter_facets, nout=deconvolve_number_facets)(mask,
                                                                                   facets=deconvolve_facets,
                                                                                   overlap=deconvolve_overlap)
        scattered_results_list = [
            arlexecute.execute(deconvolve, nout=1)(d, psf_list_trimmed, m, facet, global_threshold, msk)
            for d, m, facet, msk in zip(scattered_facets_list, scattered_model_imagelist, facet_list,
                                        mask_list)]

    # Gather the results back into one image, correcting for overlaps as necessary. The taper
    # function is used to feather the facets together
    gathered_results_list = arlexecute.execute(image_gather_facets, nout=1)(scattered_results_list,
                                                                            deconvolve_model_imagelist,
                                                                            facets=deconvolve_facets,
                                                                            overlap=deconvolve_overlap,
                                                                            taper=deconvolve_taper)
    result_list = arlexecute.execute(image_scatter_channels, nout=nchan)(gathered_results_list,
                                                                         subimages=nchan)
    return arlexecute.optimize(result_list)
def deconvolve_list_arlexecute_workflow(dirty_list, psf_list, model_imagelist, prefix='', mask=None, **kwargs):
    """Create a graph for deconvolution, adding to the model

    NOTE(review): this redefinition shadows the earlier function of the same name in this
    module — confirm the duplicate is intentional. This version never gathers the full
    spectral cube: it extracts a central PSF patch and gathers results per channel.

    :param dirty_list: List of (dirty image, sumwt), one per channel
    :param psf_list: List of (psf image, sumwt), one per channel
    :param model_imagelist: List of model images, one per channel
    :param prefix: Informative prefix to log messages
    :param mask: Mask for deconvolution
    :param kwargs: Parameters for functions in components
    :return: graph for the deconvolution
    """
    nchan = len(dirty_list)
    # Number of moments. 1 is the sum.
    nmoment = get_parameter(kwargs, "nmoment", 1)

    # Optionally delegate the whole clean to the serial implementation as one task.
    if get_parameter(kwargs, "use_serial_clean", False):
        from workflows.serial.imaging.imaging_serial import deconvolve_list_serial_workflow
        return arlexecute.execute(deconvolve_list_serial_workflow, nout=nchan) \
            (dirty_list, psf_list, model_imagelist, prefix=prefix, mask=mask, **kwargs)

    def deconvolve(dirty, psf, model, facet, gthreshold, msk=None):
        # Clean a single facet; skipped (model returned unchanged) if its peak is
        # already within 10% of the global threshold.
        if prefix == '':
            lprefix = "facet %d" % facet
        else:
            lprefix = "%s, facet %d" % (prefix, facet)

        # Peak estimate: moment 0 average over frequency, or the central channel.
        if nmoment > 0:
            moment0 = calculate_image_frequency_moments(dirty)
            this_peak = numpy.max(numpy.abs(moment0.data[0, ...])) / dirty.data.shape[0]
        else:
            ref_chan = dirty.data.shape[0] // 2
            this_peak = numpy.max(numpy.abs(dirty.data[ref_chan, ...]))

        if this_peak > 1.1 * gthreshold:
            # NOTE(review): this mutates the enclosing kwargs dict, so the threshold
            # persists across facet tasks — confirm this is intended.
            kwargs['threshold'] = gthreshold
            result, _ = deconvolve_cube(dirty, psf, prefix=lprefix, mask=msk, **kwargs)
            # Only add the model back if the channel counts agree.
            if result.data.shape[0] == model.data.shape[0]:
                result.data += model.data
            return result
        else:
            return copy_image(model)

    deconvolve_facets = get_parameter(kwargs, 'deconvolve_facets', 1)
    deconvolve_overlap = get_parameter(kwargs, 'deconvolve_overlap', 0)
    deconvolve_taper = get_parameter(kwargs, 'deconvolve_taper', None)

    # With overlap, the outermost ring of facets is not cleaned independently.
    if deconvolve_facets > 1 and deconvolve_overlap > 0:
        deconvolve_number_facets = (deconvolve_facets - 2) ** 2
    else:
        deconvolve_number_facets = deconvolve_facets ** 2

    # Scatter each channel model into facets, then gather channels for each facet.
    scattered_channels_facets_model_list = \
        [arlexecute.execute(image_scatter_facets, nout=deconvolve_number_facets)(m, facets=deconvolve_facets,
                                                                                overlap=deconvolve_overlap,
                                                                                taper=deconvolve_taper)
         for m in model_imagelist]
    scattered_facets_model_list = [
        arlexecute.execute(image_gather_channels, nout=1)([scattered_channels_facets_model_list[chan][facet]
                                                           for chan in range(nchan)])
        for facet in range(deconvolve_number_facets)]

    # Scatter the separate channel images into deconvolve facets and then gather channels for each facet.
    # This avoids constructing the entire spectral cube.
    # i.e. SCATTER BY FACET then SCATTER BY CHANNEL
    dirty_list_trimmed = arlexecute.execute(remove_sumwt, nout=nchan)(dirty_list)
    scattered_channels_facets_dirty_list = \
        [arlexecute.execute(image_scatter_facets, nout=deconvolve_number_facets)(d, facets=deconvolve_facets,
                                                                                overlap=deconvolve_overlap,
                                                                                taper=deconvolve_taper)
         for d in dirty_list_trimmed]
    scattered_facets_dirty_list = [
        arlexecute.execute(image_gather_channels, nout=1)([scattered_channels_facets_dirty_list[chan][facet]
                                                           for chan in range(nchan)])
        for facet in range(deconvolve_number_facets)]

    psf_list_trimmed = arlexecute.execute(remove_sumwt, nout=nchan)(psf_list)

    def extract_psf(psf, facets):
        # Cut a central patch of the PSF, 1/facets of the image in each direction,
        # and shift the WCS reference pixel to match the cutout.
        spsf = create_empty_image_like(psf)
        cx = spsf.shape[3] // 2
        cy = spsf.shape[2] // 2
        wx = spsf.shape[3] // facets
        wy = spsf.shape[2] // facets
        xbeg = cx - wx // 2
        xend = cx + wx // 2
        ybeg = cy - wy // 2
        yend = cy + wy // 2
        spsf.data = psf.data[..., ybeg:yend, xbeg:xend]
        spsf.wcs.wcs.crpix[0] -= xbeg
        spsf.wcs.wcs.crpix[1] -= ybeg
        return spsf

    # One central PSF patch per channel, gathered into a single multi-channel PSF.
    psf_list_trimmed = [arlexecute.execute(extract_psf)(p, deconvolve_facets) for p in psf_list_trimmed]
    psf_centre = arlexecute.execute(image_gather_channels, nout=1)([psf_list_trimmed[chan]
                                                                    for chan in range(nchan)])

    # Work out the threshold. Need to find global peak over all dirty_list images
    threshold = get_parameter(kwargs, "threshold", 0.0)
    fractional_threshold = get_parameter(kwargs, "fractional_threshold", 0.1)
    nmoment = get_parameter(kwargs, "nmoment", 1)
    use_moment0 = nmoment > 0

    # Find the global threshold. This uses the peak in the average on the frequency axis since we
    # want to use it in a stopping criterion in a moment clean
    global_threshold = arlexecute.execute(threshold_list, nout=1)(scattered_facets_dirty_list, threshold,
                                                                  fractional_threshold,
                                                                  use_moment0=use_moment0, prefix=prefix)

    facet_list = numpy.arange(deconvolve_number_facets).astype('int')
    if mask is None:
        scattered_results_list = [
            arlexecute.execute(deconvolve, nout=1)(d, psf_centre, m, facet, global_threshold)
            for d, m, facet in zip(scattered_facets_dirty_list, scattered_facets_model_list, facet_list)]
    else:
        # Scatter the mask the same way as the model so each facet gets its own mask patch.
        mask_list = \
            arlexecute.execute(image_scatter_facets, nout=deconvolve_number_facets)(mask,
                                                                                   facets=deconvolve_facets,
                                                                                   overlap=deconvolve_overlap)
        scattered_results_list = [
            arlexecute.execute(deconvolve, nout=1)(d, psf_centre, m, facet, global_threshold, msk)
            for d, m, facet, msk in zip(scattered_facets_dirty_list, scattered_facets_model_list,
                                        facet_list, mask_list)]

    # We want to avoid constructing the entire cube so we do the inverse of how we got here:
    # i.e. SCATTER BY CHANNEL then GATHER BY FACET
    scattered_channel_results_list = [arlexecute.execute(image_scatter_channels, nout=nchan)(scat,
                                                                                            subimages=nchan)
                                      for scat in scattered_results_list]

    # The structure is now [[channels] for facets]. We do the reverse transpose to the one above.
    result_list = [arlexecute.execute(image_gather_facets, nout=1)([scattered_channel_results_list[facet][chan]
                                                                    for facet in range(deconvolve_number_facets)],
                                                                   model_imagelist[chan],
                                                                   facets=deconvolve_facets,
                                                                   overlap=deconvolve_overlap)
                   for chan in range(nchan)]

    return arlexecute.optimize(result_list)
def invert_list_mpi_workflow(vis_list, template_model_imagelist, context, dopsf=False,
                             normalize=True, facets=1, vis_slices=1, gcfcf=None,
                             comm=MPI.COMM_WORLD, **kwargs):
    """ Sum results from invert, iterating over the scattered image and vis_list

    MPI version: the frequency windows are distributed over ranks with scatter, each rank
    runs the serial invert on its share, and the results are gathered on rank 0.

    :param vis_list: List of visibilities (only full for rank==0)
    :param template_model_imagelist: Model used to determine image parameters (in rank=0)
    :param dopsf: Make the PSF instead of the dirty image
    :param facets: Number of facets
    :param normalize: Normalize by sumwt
    :param vis_slices: Number of slices
    :param context: Imaging context
    :param gcfcf: tuple containing grid correction and convolution function (in rank=0)
    :param comm: MPI Communicator
    :param kwargs: Parameters for functions in components
    :return: List of (image, sumwt) tuple (on rank 0; empty list on other ranks)
    """
    # NOTE: Be careful with normalization as normalizing parts is not the
    # same as normalizing the whole, normalization happens for each image in a
    # frequency window (in this version we only parallelize at freqwindows)
    def concat_tuples(list_of_tuples):
        # Flatten the per-rank result lists into one list; numpy.concatenate is not
        # used because it would turn a list of tuples into a 2-D array.
        if len(list_of_tuples) < 2:
            result_list = list_of_tuples
        else:
            result_list = list_of_tuples[0]
            for l in list_of_tuples[1:]:
                result_list += l
        return result_list

    rank = comm.Get_rank()
    size = comm.Get_size()
    log.info(
        '%d: In invert_list_mpi_workflow: %d elements in vis_list %d in model' %
        (rank, len(vis_list), len(template_model_imagelist)))

    # Only the serial-invert (frequency-parallel) path is implemented.
    assert get_parameter(kwargs, "use_serial_invert", True), "Only freq paralellization implemented"
    if get_parameter(kwargs, "use_serial_invert", True):
        from workflows.serial.imaging.imaging_serial import invert_list_serial_workflow
        results_vislist = list()
        # Distribute visibilities and model by freq
        sub_vis_list = numpy.array_split(vis_list, size)
        sub_vis_list = comm.scatter(sub_vis_list, root=0)
        sub_template_model_imagelist = numpy.array_split(
            template_model_imagelist, size)
        sub_template_model_imagelist = comm.scatter(
            sub_template_model_imagelist, root=0)
        if gcfcf is not None:
            # NOTE(review): sub_gcfcf is scattered but never used below — the full gcfcf
            # is passed to invert_list_serial_workflow instead; looks like a bug, confirm.
            sub_gcfcf = numpy.array_split(gcfcf, size)
            sub_gcfcf = comm.scatter(sub_gcfcf, root=0)
        # NOTE(review): bare isinstance(...) has no effect — presumably an assert was intended.
        isinstance(sub_vis_list[0], Visibility)
        # Each rank inverts its own frequency windows one at a time.
        sub_results_vislist = [
            invert_list_serial_workflow(
                vis_list=[sub_vis_list[i]],
                template_model_imagelist=[sub_template_model_imagelist[i]],
                context=context, dopsf=dopsf, normalize=normalize,
                vis_slices=vis_slices, facets=facets, gcfcf=gcfcf,
                **kwargs)[0]
            for i, _ in enumerate(sub_vis_list)
        ]
        results_vislist = comm.gather(sub_results_vislist, root=0)
        if rank == 0:
            # TODO: concatenate does not concatenate well a list of tuples
            # it returns a 2d array instead of a concatenated list of tuples
            results_vislist = concat_tuples(results_vislist)
        else:
            results_vislist = list()
    return results_vislist
def create_named_configuration(name: str = 'LOWBD2', **kwargs) -> Configuration:
    """ Create one of the standard telescope configurations by name.

    :param name: name of Configuration LOWBD2, LOWBD1, LOFAR, VLAA, ASKAP
    :param rmax: Maximum distance of station from the average (m)
    :return: Configuration

    For LOWBD2, setting rmax gives the following number of stations
    100.0       13
    300.0       94
    1000.0      251
    3000.0      314
    10000.0     398
    30000.0     476
    100000.0    512
    """
    def report(location):
        # Identical log line for every telescope family.
        log.info("create_named_configuration: %s\n\t%s\n\t%s"
                 % (name, location.geocentric, location.geodetic))

    if name == 'LOWBD2':
        location = EarthLocation(lon="116.76444824", lat="-26.824722084", height=300.0)
        report(location)
        config = create_configuration_from_file(
            antfile=arl_path("data/configurations/LOWBD2.csv"),
            location=location, mount='xy', names='LOWBD2_%d',
            diameter=35.0, name=name, **kwargs)
    elif name == 'LOWBD1':
        location = EarthLocation(lon="116.76444824", lat="-26.824722084", height=300.0)
        report(location)
        config = create_configuration_from_file(
            antfile=arl_path("data/configurations/LOWBD1.csv"),
            location=location, mount='xy', names='LOWBD1_%d',
            diameter=35.0, name=name, **kwargs)
    elif name == 'LOWBD2-CORE':
        location = EarthLocation(lon="116.76444824", lat="-26.824722084", height=300.0)
        report(location)
        config = create_configuration_from_file(
            antfile=arl_path("data/configurations/LOWBD2-CORE.csv"),
            location=location, mount='xy', names='LOWBD2_%d',
            diameter=35.0, name=name, **kwargs)
    elif name in ('LOW', 'LOWR3'):
        location = EarthLocation(lon="116.76444824", lat="-26.824722084", height=300.0)
        report(location)
        config = create_configuration_from_MIDfile(
            antfile=arl_path("data/configurations/ska1low_local.cfg"),
            mount='xy', name=name, location=location, **kwargs)
    elif name in ('MID', 'MIDR5'):
        location = EarthLocation(lon="21.443803", lat="-30.712925", height=0.0)
        report(location)
        config = create_configuration_from_MIDfile(
            antfile=arl_path("data/configurations/ska1mid_local.cfg"),
            mount='azel', name=name, location=location, **kwargs)
    elif name == 'ASKAP':
        location = EarthLocation(lon="+116.6356824", lat="-26.7013006", height=377.0)
        report(location)
        config = create_configuration_from_file(
            antfile=arl_path("data/configurations/A27CR3P6B.in.csv"),
            mount='equatorial', names='ASKAP_%d',
            diameter=12.0, name=name, location=location, **kwargs)
    elif name == 'LOFAR':
        location = EarthLocation(x=[3826923.9] * u.m, y=[460915.1] * u.m, z=[5064643.2] * u.m)
        report(location)
        assert get_parameter(kwargs, "meta", False) is False
        config = create_LOFAR_configuration(
            antfile=arl_path("data/configurations/LOFAR.csv"), location=location)
    elif name == 'VLAA':
        location = EarthLocation(lon="-107.6184", lat="34.0784", height=2124.0)
        report(location)
        config = create_configuration_from_file(
            antfile=arl_path("data/configurations/VLA_A_hor_xyz.csv"),
            location=location, mount='azel', names='VLA_%d',
            diameter=25.0, name=name, **kwargs)
    elif name == 'VLAA_north':
        location = EarthLocation(lon="-107.6184", lat="90.000", height=0.0)
        report(location)
        config = create_configuration_from_file(
            antfile=arl_path("data/configurations/VLA_A_hor_xyz.csv"),
            location=location, mount='azel', names='VLA_%d',
            diameter=25.0, name=name, **kwargs)
    else:
        raise ValueError("No such Configuration %s" % name)
    return config