def vis_wstack_iter(vis, **kwargs):
    """ W slice iterator

    :param wstack: wstack (wavelengths)
    :param vis_slices: Number of slices (second in precedence to wstack)
    :returns: Boolean array with selected rows=True
    """
    assert type(vis) == Visibility or type(vis) == BlockVisibility
    wmaxabs = numpy.max(numpy.abs(vis.w))

    wstack = get_parameter(kwargs, "wstack", None)
    if wstack is None:
        vis_slices = get_parameter(kwargs, "vis_slices", 1)
        boxes = numpy.linspace(-wmaxabs, +wmaxabs, vis_slices)
        wstack = 2 * wmaxabs / vis_slices
    else:
        vis_slices = 1 + 2 * numpy.round(wmaxabs / wstack).astype('int')
        boxes = numpy.linspace(-wmaxabs, +wmaxabs, vis_slices)

    for box in boxes:
        rows = numpy.abs(vis.w - box) < 0.5 * wstack
        if numpy.sum(rows) > 0:
            yield rows
        else:
            yield None
def vis_timeslice_iter(vis: Visibility, **kwargs) -> numpy.ndarray:
    """ Time slice iterator

    :param timeslice: Timeslice (seconds), 'auto', or None
    :param vis_slices: Number of slices (second in precedence to timeslice)
    :return: Boolean array with selected rows=True
    """
    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), vis

    timemin = numpy.min(vis.time)
    timemax = numpy.max(vis.time)

    timeslice = get_parameter(kwargs, "timeslice", 'auto')
    if timeslice == 'auto':
        boxes = numpy.unique(vis.time)
        timeslice = 0.1
    elif timeslice is None:
        timeslice = timemax - timemin
        boxes = [0.5 * (timemax + timemin)]
    elif isinstance(timeslice, float) or isinstance(timeslice, int):
        boxes = numpy.arange(timemin, timemax, timeslice)
    else:
        vis_slices = get_parameter(kwargs, "vis_slices", None)
        assert vis_slices is not None, "Time slicing not specified: set either timeslice or vis_slices"
        boxes = numpy.linspace(timemin, timemax, vis_slices)
        if vis_slices > 1:
            timeslice = boxes[1] - boxes[0]
        else:
            timeslice = timemax - timemin

    for box in boxes:
        rows = numpy.abs(vis.time - box) <= 0.5 * timeslice
        yield rows
def add_noise_to_visibility(vis, polarisation='stokesI', **kwargs):
    """ Add noise to visibility drawn from the 'standard normal' distribution; optionally the
    distribution mu and sigma can be changed.

    The function should be modified for different polarisation setups (e.g. stokesIQ).

    :param vis:
    :param polarisation: the polarisation to which the noise is added (currently works only for
        stokes polarisation frames)
    :param sigma: tuple of the sigma of the real and imaginary parts (default (1, 1))
    :param mu: tuple of the mu of the real and imaginary parts (default (0, 0))
    :return:
    """
    # Tuple of the sigma of the real and the imaginary parts
    sigma = get_parameter(kwargs, "sigma", None)
    if sigma is None:
        sigma = (1, 1)
    # Tuple of the mu of the real and the imaginary parts
    mu = get_parameter(kwargs, "mu", None)
    if mu is None:
        mu = (0, 0)

    log.info("add_noise_to_visibility: Add noise to visibilities using the normal distribution "
             "(with mu=%.2f, %.2f sigma=%.2f, %.2f to Re and Im respectively)"
             % (mu[0], mu[1], sigma[0], sigma[1]))

    num_of_visibilities = vis.data['vis'].shape[0]
    if polarisation == 'stokesI':
        # No need to check the polarisation frame as this works with stokesI and stokesIQUV as well
        # assert vis.polarisation_frame == PolarisationFrame("stokesI")
        vis.data['vis'][:, 0] += numpy.vectorize(complex)(
            sigma[0] * numpy.random.randn(num_of_visibilities) + mu[0],
            sigma[1] * numpy.random.randn(num_of_visibilities) + mu[1])
    if polarisation == 'stokesIQUV':
        assert vis.polarisation_frame == PolarisationFrame("stokesIQUV")
        for i in range(0, 4):
            vis.data['vis'][:, i] += numpy.vectorize(complex)(
                sigma[0] * numpy.random.randn(num_of_visibilities) + mu[0],
                sigma[1] * numpy.random.randn(num_of_visibilities) + mu[1])
    return vis
def vis_timeslice_iter(vis: Visibility, **kwargs) -> numpy.ndarray:
    """ Time slice iterator

    :param timeslice: Timeslice (seconds), or 'auto'
    :param vis_slices: Number of slices (second in precedence to timeslice)
    :return: Boolean array with selected rows=True
    """
    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility)

    timemin = numpy.min(vis.time)
    timemax = numpy.max(vis.time)

    timeslice = get_parameter(kwargs, "timeslice", None)
    if timeslice is None or timeslice == 'auto':
        vis_slices = get_parameter(kwargs, "vis_slices", None)
        if vis_slices is None or vis_slices == 'auto':
            vis_slices = len(numpy.unique(vis.time))
        boxes = numpy.linspace(timemin, timemax, vis_slices)
        timeslice = (timemax - timemin) / vis_slices
    else:
        vis_slices = 1 + 2 * numpy.ceil((timemax - timemin) / timeslice).astype('int')
        boxes = numpy.linspace(timemin, timemax, vis_slices)
        if vis_slices > 1:
            timeslice = boxes[1] - boxes[0]
        else:
            timeslice = timemax - timemin

    for box in boxes:
        rows = numpy.abs(vis.time - box) <= 0.5 * timeslice
        yield rows
def vis_wstack_iter(vis: Visibility, **kwargs) -> numpy.ndarray:
    """ W slice iterator

    :param wstack: wstack (wavelengths)
    :param vis_slices: Number of slices (second in precedence to wstack)
    :return: Boolean array with selected rows=True
    """
    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility)
    wmaxabs = numpy.max(numpy.abs(vis.w))

    wstack = get_parameter(kwargs, "wstack", None)
    if wstack is None:
        vis_slices = get_parameter(kwargs, "vis_slices", 1)
        boxes = numpy.linspace(-wmaxabs, wmaxabs, vis_slices)
        if vis_slices > 1:
            wstack = boxes[1] - boxes[0]
        else:
            wstack = 2 * wmaxabs
    else:
        vis_slices = 1 + 2 * numpy.round(wmaxabs / wstack).astype('int')
        boxes = numpy.linspace(-wmaxabs, +wmaxabs, vis_slices)
        if vis_slices > 1:
            wstack = boxes[1] - boxes[0]
        else:
            wstack = 2 * wmaxabs

    for box in boxes:
        rows = numpy.abs(vis.w - box) < 0.5 * wstack
        yield rows
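
# Hedged, self-contained sketch of the w-binning rule used by vis_wstack_iter above, with plain
# numpy arrays standing in for a Visibility; the w values and slice count are invented for
# illustration only.
import numpy


def example_wstack_rows():
    """Print which (made-up) w values land in each w slice."""
    w = numpy.array([-95.0, -40.0, -1.0, 3.0, 55.0, 90.0])   # per-row w in wavelengths
    wmaxabs = numpy.max(numpy.abs(w))
    vis_slices = 5
    boxes = numpy.linspace(-wmaxabs, wmaxabs, vis_slices)
    wstack = boxes[1] - boxes[0]
    for box in boxes:
        rows = numpy.abs(w - box) < 0.5 * wstack             # boolean mask, one slice per box
        print(box, w[rows])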
def create_predict_gleam_model_graph(vis_graph_list, frequency, channel_bandwidth, npixel=512,
                                     cellsize=0.001, **kwargs):
    """ Create a graph to fill in a model with the gleam sources and predict into a vis_graph_list

    :param vis_graph_list:
    :param frequency:
    :param channel_bandwidth:
    :param npixel: Number of pixels (512)
    :param cellsize: Cellsize in radians (0.001)
    :param kwargs:
    :return: vis_graph_list
    """
    # Note that each vis_graph has its own model_graph
    predicted_vis_graph_list = list()
    for i, vis_graph in enumerate(vis_graph_list):
        facets = {}
        if get_parameter(kwargs, "facets", False):
            facets = {'facets': get_parameter(kwargs, "facets", False)}
        model_graph = delayed(create_low_test_image_from_gleam)(vis_graph, frequency,
                                                                channel_bandwidth, npixel=npixel,
                                                                cellsize=cellsize, **facets)
        predicted_vis_graph_list.append(create_predict_graph([vis_graph], model_graph, **kwargs)[0])
    return predicted_vis_graph_list
def weight_visibility(vis: Visibility, im: Image, **kwargs) -> Visibility:
    """ Reweight the visibility data using a selected algorithm

    Imaging uses the column "imaging_weight" when imaging. This function sets that column using a
    variety of algorithms. Options are:

    - Natural: by visibility weight (optimum for noise in final image)
    - Uniform: weight of sample divided by sum of weights in cell (optimum for sidelobes)
    - Super-uniform: as uniform, but the sum of weights is over an extended box region
    - Briggs: compromise between natural and uniform
    - Super-briggs: as Briggs, but the sum of weights is over an extended box region

    :param vis:
    :param im:
    :return: visibility with the imaging_weight column filled, plus the density and density grid
    """
    assert isinstance(vis, Visibility), "vis is not a Visibility: %r" % vis
    assert get_parameter(kwargs, "padding", False) is False

    spectral_mode, vfrequencymap = get_frequency_map(vis, im)
    polarisation_mode, vpolarisationmap = get_polarisation_map(vis, im)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(vis, im)

    density = None
    densitygrid = None
    weighting = get_parameter(kwargs, "weighting", "uniform")
    vis.data['imaging_weight'], density, densitygrid = weight_gridding(im.data.shape,
                                                                       vis.data['weight'], vuvwmap,
                                                                       vfrequencymap,
                                                                       vpolarisationmap, weighting)

    return vis, density, densitygrid
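
# Hedged sketch of the idea behind "uniform" weighting described above (the weight_gridding
# function itself is not shown in this section): each sample's weight is divided by the sum of
# the weights falling in its uv cell. The cell indices and grid size below are invented.
import numpy


def example_uniform_weighting():
    """Down-weight visibilities that share a uv cell with many others."""
    nuv = 8                                       # tiny 1d uv grid for illustration
    uv_cell = numpy.array([1, 1, 1, 4, 6, 6])     # uv cell index per visibility
    weight = numpy.ones(6)
    density = numpy.zeros(nuv)
    numpy.add.at(density, uv_cell, weight)        # sum of weights per cell
    imaging_weight = weight / density[uv_cell]    # uniform weighting
    print(imaging_weight)                         # [0.333.. 0.333.. 0.333.. 1. 0.5 0.5]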
def gaintable_timeslice_iter(gt: GainTable, **kwargs) -> numpy.ndarray:
    """ Time slice iterator for a GainTable

    :param timeslice: Timeslice (seconds), 'auto', or None
    :param gaintable_slices: Number of slices (second in precedence to timeslice)
    :return: Boolean array with selected rows=True
    """
    assert isinstance(gt, GainTable)

    timemin = numpy.min(gt.time)
    timemax = numpy.max(gt.time)

    timeslice = get_parameter(kwargs, "timeslice", 'auto')
    if timeslice == 'auto':
        boxes = numpy.unique(gt.time)
        timeslice = 0.1
    elif timeslice is None:
        timeslice = timemax - timemin
        boxes = [0.5 * (timemax + timemin)]
    elif isinstance(timeslice, float) or isinstance(timeslice, int):
        boxes = numpy.arange(timemin, timemax, timeslice)
    else:
        gt_slices = get_parameter(kwargs, "gaintable_slices", None)
        assert gt_slices is not None, "Time slicing not specified: set either timeslice or gaintable_slices"
        boxes = numpy.linspace(timemin, timemax, gt_slices)
        if gt_slices > 1:
            timeslice = boxes[1] - boxes[0]
        else:
            timeslice = timemax - timemin

    for box in boxes:
        rows = numpy.abs(gt.time - box) <= 0.5 * timeslice
        yield rows
def invert_2d(vis: Visibility, im: Image, dopsf=False, normalize=True, **kwargs) -> (Image, numpy.ndarray):
    """ Invert using prolate spheroidal gridding function

    Use the image im as a template. Do PSF in a separate call.

    Note that unless normalize is True the image is not normalised by the sum of the weights;
    the sum of weights is returned as well. This is for ease of use in partitioning.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :return: resulting image[nchan, npol, ny, nx], sum of weights[nchan, npol]
    """
    log.debug("invert_2d: inverting using 2d transform")
    kwargs['kernel'] = get_parameter(kwargs, "kernel", '2d')
    timing = get_parameter(kwargs, "timing", False)
    if timing:
        return invert_2d_base_timing(vis, im, dopsf, normalize=normalize, **kwargs)
    return invert_2d_base(vis, im, dopsf, normalize=normalize, **kwargs)
def coalesce_visibility(vis: BlockVisibility, **kwargs) -> Visibility:
    """ Coalesce the BlockVisibility data. The output format is a Visibility, as needed for imaging

    Coalesce by baseline-dependent averaging (optional). The number of integrations averaged goes
    as the ratio of the maximum possible baseline length to that for this baseline. This number can
    be scaled by coalescence_factor and limited by max_coalescence.

    When faceting, the coalescence factors should be roughly the same as the number of facets on
    one axis.

    If coalescence_factor=0.0 then just a format conversion is done.

    :param vis: BlockVisibility to be coalesced
    :return: Coalesced visibility with cindex and blockvis filled in
    """
    # Coalesce the BlockVisibility into a single Visibility
    assert type(vis) is BlockVisibility, "vis is not a BlockVisibility: %r" % vis

    time_coal = get_parameter(kwargs, 'time_coal', 0.0)
    max_time_coal = get_parameter(kwargs, 'max_time_coal', 100)
    frequency_coal = get_parameter(kwargs, 'frequency_coal', 0.0)
    max_frequency_coal = get_parameter(kwargs, 'max_frequency_coal', 100)

    if time_coal == 0.0 and frequency_coal == 0.0:
        # If both the time and frequency coalescence factors are zero, just do a simple
        # BlockVisibility to Visibility conversion
        return convert_blockvisibility_to_visibility(vis)

    # Otherwise use average_in_blocks to do the more involved averaging transforms
    cvis, cuvw, cwts, ctime, cfrequency, cchannel_bandwidth, ca1, ca2, cintegration_time, cindex \
        = average_in_blocks(vis.data['vis'], vis.data['uvw'], vis.data['weight'], vis.time,
                            vis.integration_time, vis.frequency, vis.channel_bandwidth,
                            time_coal, max_time_coal, frequency_coal, max_frequency_coal)

    cimwt = numpy.ones(cvis.shape)
    coalesced_vis = Visibility(uvw=cuvw, time=ctime, frequency=cfrequency,
                               channel_bandwidth=cchannel_bandwidth, phasecentre=vis.phasecentre,
                               antenna1=ca1, antenna2=ca2, vis=cvis, weight=cwts,
                               imaging_weight=cimwt, configuration=vis.configuration,
                               integration_time=cintegration_time,
                               polarisation_frame=vis.polarisation_frame, cindex=cindex,
                               blockvis=vis)

    log.debug('coalesce_visibility: Created new Visibility for coalesced data, '
              'coalescence factors (t,f) = (%.3f,%.3f)' % (time_coal, frequency_coal))
    log.debug('coalesce_visibility: Maximum coalescence (t,f) = (%d, %d)'
              % (max_time_coal, max_frequency_coal))
    log.debug('coalesce_visibility: Original %s, coalesced %s'
              % (vis_summary(vis), vis_summary(coalesced_vis)))

    return coalesced_vis
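
# Minimal sketch of how a baseline-dependent averaging length could be chosen, following the rule
# described in the docstring above (integrations averaged ~ maximum baseline / this baseline,
# scaled by the coalescence factor and capped). This is an illustration only, not the actual
# average_in_blocks implementation; the baseline lengths are invented.
import numpy


def example_baseline_dependent_averaging():
    """Short baselines are averaged over more integrations than long ones."""
    uvdist = numpy.array([50.0, 200.0, 1000.0, 4000.0])     # baseline lengths in wavelengths
    time_coal = 1.0
    max_time_coal = 8
    nsamples = numpy.ceil(time_coal * uvdist.max() / uvdist).astype('int')
    nsamples = numpy.clip(nsamples, 1, max_time_coal)
    print(nsamples)                                          # [8 8 4 1]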
def solve_image(vis: Visibility, model: Image, components=None, predict=predict_2d,
                invert=invert_2d, **kwargs) -> (Visibility, Image, Image):
    """Solve for image using deconvolve_cube and specified predict, invert

    This is the same as a majorcycle/minorcycle algorithm. The components are removed prior to
    deconvolution. See also the arguments for the predict, invert, and deconvolve_cube functions.

    :param vis:
    :param model: Model image
    :param predict: Predict function e.g. predict_2d, predict_wstack
    :param invert: Invert function e.g. invert_2d, invert_wstack
    :return: Residual visibility, model image, residual image
    """
    nmajor = get_parameter(kwargs, 'nmajor', 5)
    log.info("solve_image: Performing %d major cycles" % nmajor)

    # The model is added to each major cycle and then the visibilities are
    # calculated from the full model
    vispred = copy_visibility(vis)
    visres = copy_visibility(vis)

    vispred = predict(vispred, model, **kwargs)
    if components is not None:
        vispred = predict_skycomponent_visibility(vispred, components)
    visres.data['vis'] = vis.data['vis'] - vispred.data['vis']
    dirty, sumwt = invert(visres, model, **kwargs)
    psf, sumwt = invert(visres, model, dopsf=True, **kwargs)

    thresh = get_parameter(kwargs, "threshold", 0.0)

    for i in range(nmajor):
        log.info("solve_image: Start of major cycle %d" % i)
        cc, res = deconvolve_cube(dirty, psf, **kwargs)
        res = None
        model.data += cc.data
        vispred = predict(vispred, model, **kwargs)
        visres.data['vis'] = vis.data['vis'] - vispred.data['vis']
        dirty, sumwt = invert(visres, model, **kwargs)
        if numpy.abs(dirty.data).max() < 1.1 * thresh:
            log.info("Reached stopping threshold %.6f Jy" % thresh)
            break
        log.info("solve_image: End of major cycle")

    log.info("solve_image: End of major cycles")
    return visres, model, dirty
def vis_timeslice_iter(vis, **kwargs):
    """ Time slice iterator

    If timeslice='auto' then timeslice is taken to be the difference between the first two
    unique elements of the vis time.

    :param timeslice: Timeslice (seconds) ('auto')
    :returns: Boolean array with selected rows=True
    """
    assert type(vis) == Visibility or type(vis) == BlockVisibility
    uniquetimes = numpy.unique(vis.time)

    timeslice = get_parameter(kwargs, "timeslice", 'auto')
    if timeslice == 'auto':
        log.debug('vis_timeslice_iter: Found %d unique times' % len(uniquetimes))
        if len(uniquetimes) > 1:
            timeslice = uniquetimes[1] - uniquetimes[0]
            log.debug('vis_timeslice_auto: Guessing time interval to be %.2f s' % timeslice)
        else:
            # Doesn't matter what we set it to.
            timeslice = vis.integration_time[0]

    boxes = timeslice * numpy.round(uniquetimes / timeslice).astype('int')

    for box in boxes:
        rows = numpy.abs(vis.time - box) < 0.5 * timeslice
        yield rows
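
# Quick illustration of the 'auto' behaviour described above: the timeslice is taken as the gap
# between the first two unique times, and each row is assigned to the nearest slice centre. The
# times are invented for the example.
import numpy


def example_auto_timeslice():
    """Show the boolean row selection produced for each time box."""
    time = numpy.array([0.0, 0.0, 10.0, 10.0, 20.0, 30.0])
    uniquetimes = numpy.unique(time)
    timeslice = uniquetimes[1] - uniquetimes[0]
    boxes = timeslice * numpy.round(uniquetimes / timeslice).astype('int')
    for box in boxes:
        print(box, numpy.abs(time - box) < 0.5 * timeslice)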
def qa_image(im, mask=None, **kwargs) -> QA:
    """Assess the quality of an image

    :param im:
    :param mask: Mask image; statistics are computed where mask.data > 0
    :return: QA
    """
    assert type(im) == Image
    if mask is None:
        data = {'shape': str(im.data.shape),
                'max': numpy.max(im.data),
                'min': numpy.min(im.data),
                'rms': numpy.std(im.data),
                'sum': numpy.sum(im.data),
                'medianabs': numpy.median(numpy.abs(im.data)),
                'median': numpy.median(im.data)}
    else:
        mdata = im.data[mask.data > 0.0]
        data = {'shape': str(im.data.shape),
                'max': numpy.max(mdata),
                'min': numpy.min(mdata),
                'rms': numpy.std(mdata),
                'sum': numpy.sum(mdata),
                'medianabs': numpy.median(numpy.abs(mdata)),
                'median': numpy.median(mdata)}
    qa = QA(origin="qa_image", data=data, context=get_parameter(kwargs, 'context', ""))
    return qa
def invert_2d_from_grid(uv_grid_vis, sumwt, im: Image, normalize: bool = True, **kwargs):
    """ Perform the 2d fourier transform on the gridded uv visibility

    :param uv_grid_vis: the gridded uv visibility
    :param sumwt: the weight for the uv visibility
    :param im: image template (not changed)
    :return: resulting image[nchan, npol, ny, nx], sum of weights[nchan, npol]
    """
    # Calculate gcf -> grid correction function
    # 2D Prolate spheroidal angular function is separable
    npixel = get_parameter(kwargs, "npixel", 512)
    nx = npixel
    ny = npixel
    nu = numpy.abs(2.0 * (numpy.arange(nx) - nx // 2) / nx)
    gcf1d, _ = grdsf(nu)
    gcf = numpy.outer(gcf1d, gcf1d)
    gcf[gcf > 0.0] = gcf.max() / gcf[gcf > 0.0]

    result = numpy.real(ifft(uv_grid_vis)) * gcf

    # Create image array
    resultimage = create_image_from_array(result, im.wcs)

    if normalize:
        resultimage = normalize_sumwt(resultimage, sumwt)

    return resultimage, sumwt
def do_2d_grid_only(vis: Visibility, im: Image, dopsf=False, normalize=True,
                    only_gridding: bool = True, **kwargs):
    """ Do the gridding using prolate spheroidal gridding function

    Use the image im as a template. Do PSF in a separate call.

    Note that the image is not normalised by the sum of the weights; the sum of weights is
    returned as well. This is for ease of use in partitioning.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :return: grid
    """
    log.debug("do_2d_grid_only: create grid only")
    kwargs['kernel'] = get_parameter(kwargs, "kernel", '2d')
    return invert_2d_base(vis, im, dopsf, normalize=normalize, only_gridding=only_gridding, **kwargs)
def calibrate_visibility(vt: Visibility, model: Image = None, components=None, predict=predict_2d,
                         **kwargs) -> Visibility:
    """ Calibrate Visibility with respect to model and optionally components

    :param vt: Visibility
    :param model: Model image
    :param components: Sky components
    :return: Calibrated visibility
    """
    assert model is not None or components is not None, "calibration requires a model or skycomponents"

    vtpred = copy_visibility(vt, zero=True)

    if model is not None:
        vtpred = predict(vtpred, model, **kwargs)
        if components is not None:
            vtpred = predict_skycomponent_visibility(vtpred, components)
    else:
        vtpred = predict_skycomponent_visibility(vtpred, components)

    bvt = decoalesce_visibility(vt)
    bvtpred = decoalesce_visibility(vtpred)
    gt = solve_gaintable(bvt, bvtpred, **kwargs)
    bvt = apply_gaintable(bvt, gt, inverse=get_parameter(kwargs, "inverse", False))
    return convert_blockvisibility_to_visibility(bvt)
def image_raster_iter(im: Image, **kwargs):
    """Create an image_raster_iter generator, returning images, optionally with overlaps

    The WCS is adjusted appropriately for each raster element. Hence this is a coordinate-aware
    way to iterate through an image.

    Provided we don't break reference semantics, memory should be conserved.

    To update the image in place:
        for r in image_raster_iter(im, facets=2):
            r.data[...] = numpy.sqrt(r.data[...])

    :param im: Image
    :param facets: Number of image partitions on each axis (default 1)
    :param overlap: overlap in pixels
    :param kwargs: throw away unwanted parameters
    """
    nchan, npol, ny, nx = im.shape
    facets = get_parameter(kwargs, "facets", 1)
    overlap = get_parameter(kwargs, 'overlap', 0)
    log.debug("raster_overlap: predicting using %d x %d image partitions" % (facets, facets))
    assert facets <= ny, "Cannot have more raster elements than pixels"
    assert facets <= nx, "Cannot have more raster elements than pixels"

    sx = int(nx // facets)
    sy = int(ny // facets)
    dx = int((nx // facets) + 2 * overlap)
    dy = int((ny // facets) + 2 * overlap)

    log.debug('raster_overlap: spacing of raster (%d, %d)' % (dx, dy))

    for fy in range(facets):
        y = ny // 2 + sy * (fy - facets // 2) - overlap
        for fx in range(facets):
            x = nx // 2 + sx * (fx - facets // 2) - overlap
            if (x >= 0) and (x + dx) <= nx and (y >= 0) and (y + dy) <= ny:
                log.debug('raster_overlap: partition (%d, %d) of (%d, %d)' % (fy, fx, facets, facets))

                # Adjust WCS
                wcs = im.wcs.deepcopy()
                wcs.wcs.crpix[0] -= x
                wcs.wcs.crpix[1] -= y

                # yield image from slice (reference!)
                yield create_image_from_array(im.data[..., y:y + dy, x:x + dx], wcs,
                                              im.polarisation_frame)
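
# Self-contained sketch of the facet-origin arithmetic in image_raster_iter above (no Image or
# WCS involved; nx, facets and overlap are invented). Note that with a non-zero overlap, edge
# facets whose padded window falls outside the image are skipped by the bounds check, exactly as
# in the loop above.
def example_raster_origins():
    """Print the (x, x+dx) window of each surviving facet for overlap 0 and 8."""
    nx = 256
    facets = 4
    for overlap in [0, 8]:
        sx = nx // facets
        dx = sx + 2 * overlap
        for fx in range(facets):
            x = nx // 2 + sx * (fx - facets // 2) - overlap
            if x >= 0 and (x + dx) <= nx:
                print(overlap, fx, (x, x + dx))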
def predict_2d_base(vis: Union[BlockVisibility, Visibility], model: Image,
                    **kwargs) -> Union[BlockVisibility, Visibility]:
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms
    of this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (in place works)
    """
    if isinstance(vis, BlockVisibility):
        log.debug("imaging.predict: coalescing prior to prediction")
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    assert isinstance(avis, Visibility), avis

    _, _, ny, nx = model.data.shape

    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}
    spectral_mode, vfrequencymap = get_frequency_map(avis, model)
    polarisation_mode, vpolarisationmap = get_polarisation_map(avis, model)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)

    uvgrid = fft((pad_mid(model.data, int(round(padding * nx))) * gcf).astype(dtype=complex))

    avis.data['vis'] = convolutional_degrid(vkernellist, avis.data['vis'].shape, uvgrid, vuvwmap,
                                            vfrequencymap, vpolarisationmap)

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if isinstance(vis, BlockVisibility) and isinstance(svis, Visibility):
        log.debug("imaging.predict decoalescing post prediction")
        return decoalesce_visibility(svis)
    else:
        return svis
def vis_slice_iter(vis, **kwargs):
    """ Iterates in slices

    :param step: Size of step to be iterated over (in rows)
    :param vis_slices: Number of slices (second in precedence to step)
    :returns: range of rows in each slice
    """
    assert type(vis) == Visibility or type(vis) == BlockVisibility
    step = get_parameter(kwargs, "step", None)
    if step is None:
        vis_slices = get_parameter(kwargs, "vis_slices", 1)
        step = 1 + vis.nvis // vis_slices

    assert step > 0
    for row in range(0, vis.nvis, step):
        yield range(row, min(row + step, vis.nvis))
def predict_2d(vis: Visibility, im: Image, **kwargs) -> Visibility:
    """ Predict using convolutional degridding and w projection

    :param vis: Visibility to be predicted
    :param im: model image
    :return: resulting visibility (in place works)
    """
    log.debug("predict_2d: predict using 2d transform")
    timing = get_parameter(kwargs, "timing", False)
    if timing:
        return predict_2d_base_timing(vis, im, **kwargs)
    return predict_2d_base(vis, im, **kwargs)
def t1(**kwargs):
    assert get_parameter(kwargs, 'cellsize') == 0.1
    assert get_parameter(kwargs, 'spectral_mode', 'channels') == 'mfs'
    assert get_parameter(kwargs, 'null_mode', 'mfs') == 'mfs'
    assert get_parameter(kwargs, 'foo', 'bar') == 'bar'
    assert get_parameter(kwargs, 'foo') is None
    assert get_parameter(None, 'foo', 'bar') == 'bar'
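
# t1 above pins down the behaviour expected of get_parameter. A minimal implementation consistent
# with those assertions might look like the following sketch; it is an assumption, not necessarily
# the library's actual definition.
def get_parameter_sketch(kwargs, key, default=None):
    """Return kwargs[key] if present, otherwise the default; tolerate kwargs=None."""
    if kwargs is None:
        return default
    return kwargs.get(key, default)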
def create_deconvolve_graph(dirty_graph: delayed, psf_graph: delayed, model_graph: delayed,
                            **kwargs) -> delayed:
    """Create a graph for deconvolution, adding to the model

    :param dirty_graph:
    :param psf_graph:
    :param model_graph:
    :param nchan: Number of channels
    :param kwargs: Parameters for functions in graphs
    :return:
    """
    nchan = get_parameter(kwargs, "nchan", 1)

    def remove_sumwt(dirty_list):
        return [d[0] for d in dirty_list]

    def make_cube_and_deconvolve(dirty, psf, model):
        dirty_cube = image_gather_channels(remove_sumwt(dirty), subimages=nchan)
        psf_cube = image_gather_channels(remove_sumwt(psf), subimages=nchan)
        model_cube = image_gather_channels(model, subimages=nchan)
        result = deconvolve_cube(dirty_cube, psf_cube, **kwargs)
        result[0].data += model_cube.data
        return image_scatter_channels(result[0], nchan)

    def deconvolve(dirty, psf, model):
        result = deconvolve_cube(dirty[0], psf[0], **kwargs)
        result[0].data += model.data
        return result[0]

    algorithm = get_parameter(kwargs, "algorithm", 'mmclean')
    if algorithm == "mmclean" and nchan > 1:
        return delayed(make_cube_and_deconvolve, nout=nchan)(dirty_graph, psf_graph, model_graph)
    else:
        return [delayed(deconvolve, pure=True, nout=nchan)(dirty_graph[i], psf_graph[i],
                                                           model_graph[i])
                for i, _ in enumerate(dirty_graph)]
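
# The graph builders in this section only wrap functions with dask.delayed; nothing executes until
# compute() is called. A toy example of the same pattern (assuming dask is available; the functions
# below are placeholders, not part of the imaging library):
from dask import delayed as _delayed


def example_delayed_graph():
    """Build a tiny delayed graph and evaluate it."""
    def make_dirty(i):
        return float(i)

    def add_model(dirty, model):
        return dirty + model

    dirty_graph = [_delayed(make_dirty, pure=True)(i) for i in range(3)]
    model_graph = _delayed(lambda: 0.5)()
    result_graph = [_delayed(add_model, pure=True)(d, model_graph) for d in dirty_graph]
    print([r.compute() for r in result_graph])    # [0.5, 1.5, 2.5]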
def vis_slice_iter(vis: Union[Visibility, BlockVisibility], **kwargs) -> numpy.ndarray:
    """ Iterates in slices

    :param step: Size of step to be iterated over (in rows)
    :param vis_slices: Number of slices (second in precedence to step)
    :return: range of rows in each slice
    """
    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility)
    step = get_parameter(kwargs, "step", None)
    if step is None:
        vis_slices = get_parameter(kwargs, "vis_slices", 1)
        if isinstance(vis_slices, int):
            step = vis.nvis // vis_slices
        else:
            step = vis.nvis

    assert step > 0
    for row in range(0, vis.nvis, step):
        yield range(row, min(row + step, vis.nvis))
def vis_slice_iter(vis: Union[Visibility, BlockVisibility], **kwargs) -> numpy.ndarray:
    """ Iterates in slices

    :param step: Size of step to be iterated over (in rows)
    :param vis_slices: Number of slices (second in precedence to step)
    :return: Boolean array with selected rows=True
    """
    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), vis
    step = get_parameter(kwargs, "step", None)
    if step is None:
        vis_slices = get_parameter(kwargs, "vis_slices", None)
        assert vis_slices is not None, "vis slicing not specified: set either step or vis_slices"
        step = vis.nvis // vis_slices

    assert step > 0
    for row in range(0, vis.nvis, step):
        rows = vis.nvis * [False]
        for r in range(row, min(row + step, vis.nvis)):
            rows[r] = True
        yield rows
def vis_timeslice_iter(vis: Visibility, **kwargs) -> numpy.ndarray:
    """ Time slice iterator

    :param timeslice: Timeslice (seconds)
    :param vis_slices: Number of slices
    :return: Boolean array with selected rows=True
    """
    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility)

    timemin = numpy.min(vis.time)
    timemax = numpy.max(vis.time)

    timeslice = get_parameter(kwargs, "timeslice", None)
    if timeslice is None or timeslice == 'auto':
        vis_slices = get_parameter(kwargs, "vis_slices", None)
        if vis_slices is None or vis_slices == 'auto':
            vis_slices = len(numpy.unique(vis.time))
        # Slice the data equally: place the box centres at the middle of each box rather than
        # spanning linspace from timemin to timemax
        # boxes = numpy.linspace(timemin, timemax, vis_slices)
        half_box_width = (timemax - timemin) / (2 * vis_slices)
        boxes = numpy.linspace(timemin + half_box_width, timemax - half_box_width, vis_slices)
        timeslice = (timemax - timemin) / vis_slices
    else:
        vis_slices = 1 + 2 * numpy.ceil((timemax - timemin) / timeslice).astype('int')
        boxes = numpy.linspace(timemin, timemax, vis_slices)
        if vis_slices > 1:
            timeslice = boxes[1] - boxes[0]
        else:
            timeslice = timemax - timemin

    for box in boxes:
        rows = numpy.abs(vis.time - box) <= 0.5 * timeslice
        yield rows
def get_kernel_list(vis: Visibility, im: Image, **kwargs):
    """Get the list of kernels, one per visibility
    """
    shape = im.data.shape
    npixel = shape[3]
    cellsize = numpy.pi * im.wcs.wcs.cdelt[1] / 180.0

    kernelname = get_parameter(kwargs, "kernel", "2d")
    oversampling = get_parameter(kwargs, "oversampling", 8)
    padding = get_parameter(kwargs, "padding", 2)

    gcf, _ = anti_aliasing_calculate((padding * npixel, padding * npixel), oversampling)

    wabsmax = numpy.max(numpy.abs(vis.w))
    if kernelname == 'wprojection' and wabsmax > 0.0:
        # wprojection needs a lot of commentary!
        log.debug("get_kernel_list: Using wprojection kernel")

        # The field of view must be as padded! R_F is for reporting only so that
        # need not be padded.
        fov = cellsize * npixel * padding
        r_f = (cellsize * npixel / 2) ** 2 / abs(cellsize)
        log.debug("get_kernel_list: Fresnel number = %f" % (r_f))
        delA = get_parameter(kwargs, 'wloss', 0.02)

        advice = advise_wide_field(vis, delA)
        wstep = get_parameter(kwargs, "wstep", advice['w_sampling_primary_beam'])

        log.debug("get_kernel_list: Using w projection with wstep = %f" % (wstep))

        # Now calculate the maximum support for the w kernel
        kernelwidth = get_parameter(kwargs, "kernelwidth",
                                    (2 * int(round(numpy.sin(0.5 * fov) * npixel * wabsmax * cellsize))))
        kernelwidth = max(kernelwidth, 8)
        assert kernelwidth % 2 == 0
        log.debug("get_kernel_list: Maximum w kernel full width = %d pixels" % (kernelwidth))
        padded_shape = [im.shape[0], im.shape[1], im.shape[2] * padding, im.shape[3] * padding]

        remove_shift = get_parameter(kwargs, "remove_shift", True)
        padded_image = pad_image(im, padded_shape)
        kernel_list = w_kernel_list(vis, padded_image, oversampling=oversampling, wstep=wstep,
                                    kernelwidth=kernelwidth, remove_shift=remove_shift)
    else:
        kernelname = '2d'
        kernel_list = standard_kernel_list(vis, (padding * npixel, padding * npixel),
                                           oversampling=oversampling)

    return kernelname, gcf, kernel_list
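
# Rough numeric check of the w-kernel width estimate used in get_kernel_list above. The npixel,
# padding, cellsize and wabsmax values are invented; this just evaluates the same formula, it does
# not call advise_wide_field.
import numpy


def example_w_kernel_width():
    """Evaluate the w-kernel full-width formula for made-up imaging parameters."""
    npixel = 512
    padding = 2
    cellsize = 0.001          # radians per pixel (made up)
    wabsmax = 200.0           # maximum |w| in wavelengths (made up)
    fov = cellsize * npixel * padding
    kernelwidth = 2 * int(round(numpy.sin(0.5 * fov) * npixel * wabsmax * cellsize))
    kernelwidth = max(kernelwidth, 8)
    print(fov, kernelwidth)   # the width grows with field of view and |w|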
def restore_cube(model: Image, psf: Image, residual=None, **kwargs) -> Image:
    """ Restore the model image to the residuals

    :param model: Model image
    :param psf: Input PSF
    :param residual: Residual image to be added (optional)
    :return: restored image
    """
    assert isinstance(model, Image), "Type is %s" % (type(model))
    assert isinstance(psf, Image), "Type is %s" % (type(psf))
    assert residual is None or isinstance(residual, Image), "Type is %s" % (type(residual))

    restored = copy_image(model)

    npixel = psf.data.shape[3]
    sl = slice(npixel // 2 - 7, npixel // 2 + 8)

    size = get_parameter(kwargs, "psfwidth", None)

    if size is None:
        # isotropic at the moment!
        try:
            fit = fit_2dgaussian(psf.data[0, 0, sl, sl])
            if fit.x_stddev <= 0.0 or fit.y_stddev <= 0.0:
                log.debug('restore_cube: error in fitting to psf, using 1 pixel stddev')
                size = 1.0
            else:
                size = max(fit.x_stddev, fit.y_stddev)
                log.debug('restore_cube: psfwidth = %s' % (size))
        except:
            log.debug('restore_cube: warning in fit to psf, using 1 pixel stddev')
            size = 1.0
    else:
        log.debug('restore_cube: Using specified psfwidth = %s' % (size))

    # By convention, we normalise the peak not the integral so this is the volume of the Gaussian
    norm = 2.0 * numpy.pi * size ** 2
    gk = Gaussian2DKernel(size)
    for chan in range(model.shape[0]):
        for pol in range(model.shape[1]):
            restored.data[chan, pol, :, :] = norm * convolve(model.data[chan, pol, :, :], gk,
                                                             normalize_kernel=False)
    if residual is not None:
        restored.data += residual.data
    return restored
def create_ical_pipeline_graph(vis_graph_list, model_graph,
                               c_deconvolve_graph=create_deconvolve_graph,
                               c_invert_graph=create_invert_graph,
                               c_residual_graph=create_residual_graph,
                               first_selfcal=None, **kwargs):
    """Create graph for ICAL pipeline

    :param vis_graph_list:
    :param model_graph:
    :param c_deconvolve_graph: Default: create_deconvolve_graph
    :param c_invert_graph: Default: create_invert_graph
    :param c_residual_graph: Default: create_residual_graph
    :param first_selfcal: First major cycle at which to perform selfcalibration (None)
    :param kwargs:
    :return:
    """
    psf_graph = c_invert_graph(vis_graph_list, model_graph, dopsf=True, **kwargs)

    if first_selfcal is not None and first_selfcal == 0:
        vis_graph_list = create_selfcal_graph_list(vis_graph_list, model_graph, **kwargs)

    residual_graph = c_residual_graph(vis_graph_list, model_graph, **kwargs)
    deconvolve_model_graph = c_deconvolve_graph(residual_graph, psf_graph, model_graph, **kwargs)

    nmajor = get_parameter(kwargs, "nmajor", 5)
    if nmajor > 1:
        for cycle in range(nmajor):
            if first_selfcal is not None and cycle >= first_selfcal:
                vis_graph_list = create_selfcal_graph_list(vis_graph_list, deconvolve_model_graph,
                                                           **kwargs)
            residual_graph = c_residual_graph(vis_graph_list, deconvolve_model_graph, **kwargs)
            deconvolve_model_graph = c_deconvolve_graph(residual_graph, psf_graph,
                                                        deconvolve_model_graph, **kwargs)

    residual_graph = c_residual_graph(vis_graph_list, deconvolve_model_graph, **kwargs)
    restore_graph = delayed(restore_cube, pure=True, nout=1)(deconvolve_model_graph, psf_graph[0],
                                                             residual_graph[0], **kwargs)
    return delayed((deconvolve_model_graph, residual_graph, restore_graph))
def create_continuum_imaging_pipeline_graph(vis_graph_list, model_graph: delayed,
                                            context='2d', **kwargs) -> delayed:
    """ Create graph for the continuum imaging pipeline.

    Same as ICAL but with no selfcal.

    :param vis_graph_list:
    :param model_graph:
    :param context: Imaging context ('2d')
    :param kwargs: Parameters for functions in graphs
    :return:
    """
    psf_graph = create_invert_graph(vis_graph_list, model_graph, dopsf=True, context=context, **kwargs)

    residual_graph = create_residual_graph(vis_graph_list, model_graph, context=context, **kwargs)
    deconvolve_model_graph = create_deconvolve_graph(residual_graph, psf_graph, model_graph, **kwargs)

    nmajor = get_parameter(kwargs, "nmajor", 5)
    if nmajor > 1:
        for cycle in range(nmajor):
            residual_graph = create_residual_graph(vis_graph_list, deconvolve_model_graph,
                                                   context=context, **kwargs)
            deconvolve_model_graph = create_deconvolve_graph(residual_graph, psf_graph,
                                                             deconvolve_model_graph, **kwargs)

    residual_graph = create_residual_graph(vis_graph_list, deconvolve_model_graph, context=context,
                                           **kwargs)
    restore_graph = create_restore_graph(deconvolve_model_graph, psf_graph, residual_graph)
    return delayed((deconvolve_model_graph, residual_graph, restore_graph))
def get_uvw_map(vis, im, **kwargs):
    """ Get the generators that map channels uvw to pixels
    """
    # Transform parameters
    padding = get_parameter(kwargs, "padding", 2)

    # Model image information
    inchan, inpol, ny, nx = im.data.shape
    shape = (1, int(round(padding * ny)), int(round(padding * nx)))

    # UV sampling information
    uvwscale = numpy.zeros([3])
    uvwscale[0:2] = im.wcs.wcs.cdelt[0:2] * numpy.pi / 180.0
    assert uvwscale[0] != 0.0, "Error in uv scaling"

    vuvwmap = uvwscale * vis.uvw
    uvw_mode = "2d"

    return uvw_mode, shape, padding, vuvwmap
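
# Small illustration of the uv scaling performed in get_uvw_map above: cdelt (degrees per pixel)
# is converted to radians and multiplied into uvw (in wavelengths), giving the fractional grid
# coordinates used by the gridding code. The cdelt and uvw values are invented.
import numpy


def example_uvw_scaling():
    """Scale a single uvw sample into fractional uv grid coordinates."""
    cdelt_deg = numpy.array([-2.8e-4, 2.8e-4])     # degrees per pixel (made up)
    uvw = numpy.array([[1000.0, -500.0, 20.0]])    # u, v, w in wavelengths
    uvwscale = numpy.zeros([3])
    uvwscale[0:2] = cdelt_deg * numpy.pi / 180.0
    print(uvwscale * uvw)                           # the w component scales to zero here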