def deconvolve_cube(dirty: Image, psf: Image, prefix='', **kwargs) -> (Image, Image):
    """ Clean using a variety of algorithms

    Functions that clean a dirty image using a point spread function. The algorithms
    available are:

    hogbom: Hogbom CLEAN
    See: Hogbom CLEAN A&A Suppl, 15, 417, (1974)

    msclean: MultiScale CLEAN
    See: Cornwell, T.J., Multiscale CLEAN (IEEE Journal of Selected Topics in Sig Proc,
    2008 vol. 2 pp. 793-801)

    mfsmsclean, msmfsclean, mmclean: MultiScale Multi-Frequency
    See: U. Rau and T. J. Cornwell, "A multi-scale multi-frequency deconvolution algorithm
    for synthesis imaging in radio interferometry," A&A 532, A71 (2011).

    For example::

        comp, residual = deconvolve_cube(dirty, psf, niter=1000, gain=0.7,
                                         algorithm='msclean',
                                         scales=[0, 3, 10, 30], threshold=0.01)

    For the MFS clean, the psf must have number of channels >= 2 * nmoments

    :param dirty: Image dirty image
    :param psf: Image Point Spread Function
    :param prefix: Informational prefix for log messages
    :param window_shape: 'quarter' to clean only the inner quarter of each plane,
        otherwise no window is applied (kwarg)
    :param algorithm: Cleaning algorithm:
        'msclean'|'hogbom'|'hogbom-complex'|'mfsmsclean'|'msmfsclean'|'mmclean'
    :param gain: loop gain (float) 0.7
    :param threshold: Clean threshold (0.0)
    :param fractional_threshold: Fractional threshold (0.01)
    :param scales: Scales (in pixels) for multiscale ([0, 3, 10, 30])
    :param nmoments: Number of frequency moments (default 3)
    :param findpeak: Method of finding peak in mfsclean:
        'Algorithm1'|'ASKAPSoft'|'CASA'|'ARL', Default is ARL.
    :return: componentimage, residual
    """
    assert isinstance(dirty, Image), dirty
    assert isinstance(psf, Image), psf

    # Optional clean window: 'quarter' restricts cleaning to the central half of
    # each axis (i.e. the inner quarter by area) of every sky plane.
    window_shape = get_parameter(kwargs, 'window_shape', None)
    if window_shape == 'quarter':
        qx = dirty.shape[3] // 4
        qy = dirty.shape[2] // 4
        window = numpy.zeros_like(dirty.data)
        window[..., (qy + 1):3 * qy, (qx + 1):3 * qx] = 1.0
        log.info('deconvolve_cube %s: Cleaning inner quarter of each sky plane' % prefix)
    else:
        window = None

    # Optionally trim the PSF to +/- psf_support pixels about its centre.
    # NOTE(review): this reassigns psf.data in place, so the caller's psf object is
    # permanently trimmed as a side effect — confirm callers expect this.
    psf_support = get_parameter(kwargs, 'psf_support', max(dirty.shape[2] // 2, dirty.shape[3] // 2))
    if (psf_support <= psf.shape[2] // 2) and ((psf_support <= psf.shape[3] // 2)):
        centre = [psf.shape[2] // 2, psf.shape[3] // 2]
        psf.data = psf.data[..., (centre[0] - psf_support):(centre[0] + psf_support),
                            (centre[1] - psf_support):(centre[1] + psf_support)]
        log.info('deconvolve_cube %s: PSF support = +/- %d pixels' % (prefix, psf_support))
        log.info('deconvolve_cube %s: PSF shape %s' % (prefix, str(psf.data.shape)))

    algorithm = get_parameter(kwargs, 'algorithm', 'msclean')
    if algorithm == 'msclean':
        # Multi-scale CLEAN: every (channel, polarisation) plane is deconvolved
        # independently.
        log.info("deconvolve_cube %s: Multi-scale clean of each polarisation and channel separately" % prefix)
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        scales = get_parameter(kwargs, 'scales', [0, 3, 10, 30])
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.01)
        assert 0.0 < fracthresh < 1.0

        comp_array = numpy.zeros_like(dirty.data)
        residual_array = numpy.zeros_like(dirty.data)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                # Planes with an all-zero PSF cannot be deconvolved; skip them.
                if psf.data[channel, pol, :, :].max():
                    log.info("deconvolve_cube %s: Processing pol %d, channel %d" % (prefix, pol, channel))
                    if window is None:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            msclean(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                    None, gain, thresh, niter, scales, fracthresh, prefix)
                    else:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            msclean(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                    window[channel, pol, :, :], gain, thresh, niter, scales,
                                    fracthresh, prefix)
                else:
                    log.info("deconvolve_cube %s: Skipping pol %d, channel %d" % (prefix, pol, channel))

        comp_image = create_image_from_array(comp_array, dirty.wcs, dirty.polarisation_frame)
        residual_image = create_image_from_array(residual_array, dirty.wcs, dirty.polarisation_frame)

    elif algorithm == 'msmfsclean' or algorithm == 'mfsmsclean' or algorithm == 'mmclean':
        # Multi-scale multi-frequency CLEAN: the channel axis is first compressed
        # into Taylor (frequency-moment) planes, then each polarisation is
        # deconvolved across all moments at once.
        findpeak = get_parameter(kwargs, "findpeak", 'ARL')
        log.info("deconvolve_cube %s: Multi-scale multi-frequency clean of each polarisation separately" % prefix)
        nmoments = get_parameter(kwargs, "nmoments", 3)
        assert nmoments > 0, "Number of frequency moments must be greater than zero"
        nchan = dirty.shape[0]
        assert nchan > 2 * nmoments, "Require nchan %d > 2 * nmoments %d" % (nchan, 2 * nmoments)
        dirty_taylor = calculate_image_frequency_moments(dirty, nmoments=nmoments)
        # The PSF needs twice as many moments as the dirty image for the MFS
        # deconvolution (see Rau & Cornwell 2011).
        psf_taylor = calculate_image_frequency_moments(psf, nmoments=2 * nmoments)
        # Normalise both moment images by the PSF peak so the zeroth PSF moment
        # peaks at unity.
        psf_peak = numpy.max(psf_taylor.data)
        dirty_taylor.data /= psf_peak
        psf_taylor.data /= psf_peak
        log.info("deconvolve_cube %s: Shape of Dirty moments image %s" % (prefix, str(dirty_taylor.shape)))
        log.info("deconvolve_cube %s: Shape of PSF moments image %s" % (prefix, str(psf_taylor.shape)))
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        scales = get_parameter(kwargs, 'scales', [0, 3, 10, 30])
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 < fracthresh < 1.0

        comp_array = numpy.zeros(dirty_taylor.data.shape)
        residual_array = numpy.zeros(dirty_taylor.data.shape)
        for pol in range(dirty_taylor.data.shape[1]):
            # Skip polarisations whose zeroth-moment PSF is identically zero.
            if psf_taylor.data[0, pol, :, :].max():
                log.info("deconvolve_cube %s: Processing pol %d" % (prefix, pol))
                if window is None:
                    comp_array[:, pol, :, :], residual_array[:, pol, :, :] = \
                        msmfsclean(dirty_taylor.data[:, pol, :, :], psf_taylor.data[:, pol, :, :],
                                   None, gain, thresh, niter, scales, fracthresh, findpeak, prefix)
                else:
                    # NOTE(review): the sky-plane window built above is ignored here;
                    # a fresh inner-quarter window over the moment planes is used
                    # instead — confirm this is intended for any window_shape.
                    qx = dirty.shape[3] // 4
                    qy = dirty.shape[2] // 4
                    window_taylor = numpy.zeros_like(dirty_taylor.data)
                    window_taylor[..., (qy + 1):3 * qy, (qx + 1):3 * qx] = 1.0
                    log.info('deconvolve_cube %s: Cleaning inner quarter of each moment plane' % prefix)
                    comp_array[:, pol, :, :], residual_array[:, pol, :, :] = \
                        msmfsclean(dirty_taylor.data[:, pol, :, :], psf_taylor.data[:, pol, :, :],
                                   window_taylor[0, pol, :, :], gain, thresh, niter, scales,
                                   fracthresh, findpeak, prefix)
            else:
                log.info("deconvolve_cube %s: Skipping pol %d" % (prefix, pol))

        comp_image = create_image_from_array(comp_array, dirty_taylor.wcs, dirty.polarisation_frame)
        residual_image = create_image_from_array(residual_array, dirty_taylor.wcs, dirty.polarisation_frame)

        # By default expand the moment images back into full spectral cubes;
        # return_moments=True returns the Taylor-plane images directly.
        return_moments = get_parameter(kwargs, "return_moments", False)
        if not return_moments:
            log.info("deconvolve_cube %s: calculating spectral cubes" % prefix)
            comp_image = calculate_image_from_frequency_moments(dirty, comp_image)
            residual_image = calculate_image_from_frequency_moments(dirty, residual_image)
        else:
            log.info("deconvolve_cube %s: constructed moment cubes" % prefix)

    elif algorithm == 'hogbom':
        # Classic Hogbom CLEAN: every (channel, polarisation) plane is deconvolved
        # independently.
        log.info("deconvolve_cube %s: Hogbom clean of each polarisation and channel separately" % prefix)
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 < fracthresh < 1.0

        comp_array = numpy.zeros(dirty.data.shape)
        residual_array = numpy.zeros(dirty.data.shape)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if psf.data[channel, pol, :, :].max():
                    log.info("deconvolve_cube %s: Processing pol %d, channel %d" % (prefix, pol, channel))
                    if window is None:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                   None, gain, thresh, niter, fracthresh, prefix)
                    else:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                   window[channel, pol, :, :], gain, thresh, niter, fracthresh,
                                   prefix)
                else:
                    log.info("deconvolve_cube %s: Skipping pol %d, channel %d" % (prefix, pol, channel))

        comp_image = create_image_from_array(comp_array, dirty.wcs, dirty.polarisation_frame)
        residual_image = create_image_from_array(residual_array, dirty.wcs, dirty.polarisation_frame)

    elif algorithm == 'hogbom-complex':
        # Hogbom CLEAN adapted for a stokesIQUV cube: I (pol 0) and V (pol 3) are
        # cleaned independently, while Q and U (pols 1 and 2) are cleaned jointly
        # as a complex pair via hogbom_complex.
        log.info("deconvolve_cube_complex: Hogbom-complex clean of each polarisation and channel separately")
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 <= fracthresh < 1.0

        comp_array = numpy.zeros(dirty.data.shape)
        residual_array = numpy.zeros(dirty.data.shape)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if pol == 0 or pol == 3:
                    # I and V planes: ordinary real-valued Hogbom CLEAN.
                    if psf.data[channel, pol, :, :].max():
                        log.info("deconvolve_cube_complex: Processing pol %d, channel %d" % (pol, channel))
                        if window is None:
                            comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                                hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                       None, gain, thresh, niter, fracthresh)
                        else:
                            comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                                hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                       window[channel, pol, :, :], gain, thresh, niter, fracthresh)
                    else:
                        log.info("deconvolve_cube_complex: Skipping pol %d, channel %d" % (pol, channel))
                if pol == 1:
                    # Q and U planes handled together; results for both pols are
                    # written in this iteration.
                    if psf.data[channel, 1:2, :, :].max():
                        log.info("deconvolve_cube_complex: Processing pol 1 and 2, channel %d" % (channel))
                        if window is None:
                            comp_array[channel, 1, :, :], comp_array[channel, 2, :, :], \
                                residual_array[channel, 1, :, :], residual_array[channel, 2, :, :] = \
                                hogbom_complex(dirty.data[channel, 1, :, :], dirty.data[channel, 2, :, :],
                                               psf.data[channel, 1, :, :], psf.data[channel, 2, :, :],
                                               None, gain, thresh, niter, fracthresh)
                        else:
                            comp_array[channel, 1, :, :], comp_array[channel, 2, :, :], \
                                residual_array[channel, 1, :, :], residual_array[channel, 2, :, :] = \
                                hogbom_complex(dirty.data[channel, 1, :, :], dirty.data[channel, 2, :, :],
                                               psf.data[channel, 1, :, :], psf.data[channel, 2, :, :],
                                               window[channel, pol, :, :], gain, thresh, niter, fracthresh)
                    else:
                        log.info("deconvolve_cube_complex: Skipping pol 1 and 2, channel %d" % (channel))
                if pol == 2:
                    # Pol 2 (U) was already processed with pol 1 above.
                    continue

        comp_image = create_image_from_array(
            comp_array, dirty.wcs, polarisation_frame=PolarisationFrame('stokesIQUV'))
        residual_image = create_image_from_array(
            residual_array, dirty.wcs, polarisation_frame=PolarisationFrame('stokesIQUV'))

    else:
        raise ValueError('deconvolve_cube %s: Unknown algorithm %s' % (prefix, algorithm))

    return comp_image, residual_image
times = numpy.linspace(-numpy.pi / 3.0, numpy.pi / 3.0, ntimes) phasecentre = SkyCoord(ra=+30.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000') lowcore = create_named_configuration('LOWBD2-CORE', rmax=rmax) block_vis = create_blockvisibility( lowcore, times, frequency=frequency, channel_bandwidth=channel_bandwidth, weight=1.0, phasecentre=phasecentre, polarisation_frame=PolarisationFrame("stokesI")) wprojection_planes = 1 advice = advise_wide_field(block_vis, guard_band_image=4.0, delA=0.02, wprojection_planes=wprojection_planes) vis_slices = advice['vis_slices'] npixel = advice['npixels2'] cellsize = advice['cellsize'] gleam_model = create_low_test_image_from_gleam( npixel=npixel, frequency=frequency, channel_bandwidth=channel_bandwidth,
def actualSetUp(self, freqwin=1, block=False, dospectral=True, dopol=False, zerow=False):
    """Prepare the standard unit-test state for imaging tests.

    Populates self.vis, self.model, self.components and self.cmodel, and writes the
    model and smoothed-model images to FITS files under self.dir.
    """
    self.npixel = 512
    self.low = create_named_configuration('LOWBD2', rmax=750.0)
    self.freqwin = freqwin
    self.vis = list()
    self.ntimes = 5
    # Five hour-angle samples spread over +/- pi/4.
    self.times = numpy.linspace(-3.0, +3.0, self.ntimes) * numpy.pi / 12.0

    if freqwin > 1:
        self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
        spacing = self.frequency[1] - self.frequency[0]
        self.channelwidth = numpy.array(freqwin * [spacing])
    else:
        self.frequency = numpy.array([1e8])
        self.channelwidth = numpy.array([1e6])

    if dopol:
        self.vis_pol = PolarisationFrame('linear')
        self.image_pol = PolarisationFrame('stokesIQUV')
        pol_flux = numpy.array([100.0, 20.0, -10.0, 1.0])
    else:
        self.vis_pol = PolarisationFrame('stokesI')
        self.image_pol = PolarisationFrame('stokesI')
        pol_flux = numpy.array([100.0])

    # Optionally apply a nu^-0.7 spectral index across the channels.
    if dospectral:
        flux = numpy.array([pol_flux * numpy.power(freq / 1e8, -0.7)
                            for freq in self.frequency])
    else:
        flux = numpy.array([pol_flux])

    self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg,
                                frame='icrs', equinox='J2000')
    self.vis = ingest_unittest_visibility(self.low, [self.frequency],
                                          [self.channelwidth], self.times,
                                          self.vis_pol, self.phasecentre,
                                          block=block, zerow=zerow)
    self.model = create_unittest_model(self.vis, self.image_pol, npixel=self.npixel)
    self.components = create_unittest_components(self.model, flux)
    self.model = insert_skycomponent(self.model, self.components)
    self.vis = predict_skycomponent_visibility(self.vis, self.components)

    # Model convolved with a Gaussian, used as the comparison image in tests.
    self.cmodel = smooth_image(self.model)
    export_image_to_fits(self.model, '%s/test_imaging_model.fits' % self.dir)
    export_image_to_fits(self.cmodel, '%s/test_imaging_cmodel.fits' % self.dir)
def create_visibility(config: Configuration, times: numpy.array, frequency: numpy.array,
                      channel_bandwidth, phasecentre: SkyCoord, weight: float,
                      polarisation_frame=PolarisationFrame('stokesI'), integration_time=1.0,
                      zerow=False) -> Visibility:
    """ Create a Visibility from Configuration, hour angles, and direction of source

    Note that we keep track of the integration time for BDA purposes

    :param config: Configuration of antennas
    :param times: hour angles in radians
    :param frequency: frequencies (Hz) [nchan]
    :param channel_bandwidth: channel bandwidths (Hz) [nchan]
    :param phasecentre: phasecentre of observation
    :param weight: weight of a single sample
    :param polarisation_frame: PolarisationFrame('stokesI')
    :param integration_time: Integration time ('auto' or value in s)
    :param zerow: if True, zero the w coordinate of all uvw values
    :return: Visibility
    """
    assert phasecentre is not None, "Must specify phase centre"

    # If no polarisation frame is given, derive one from the configuration's
    # receptor frame.
    if polarisation_frame is None:
        polarisation_frame = correlate_polarisation(config.receptor_frame)

    nch = len(frequency)
    ants_xyz = config.data['xyz']
    nants = len(config.data['names'])
    nbaselines = int(nants * (nants - 1) / 2)
    ntimes = len(times)
    npol = polarisation_frame.npol
    # One row per (time, baseline, channel); all rows are pre-allocated and
    # filled sequentially below, with `row` as the running cursor.
    nrows = nbaselines * ntimes * nch
    nrowsperintegration = nbaselines * nch
    row = 0
    rvis = numpy.zeros([nrows, npol], dtype='complex')
    rweight = weight * numpy.ones([nrows, npol])
    rtimes = numpy.zeros([nrows])
    rfrequency = numpy.zeros([nrows])
    rchannel_bandwidth = numpy.zeros([nrows])
    rantenna1 = numpy.zeros([nrows], dtype='int')
    rantenna2 = numpy.zeros([nrows], dtype='int')
    ruvw = numpy.zeros([nrows, 3])

    # Do each hour angle in turn
    for iha, ha in enumerate(times):

        # Calculate the positions of the antennas as seen for this hour angle
        # and declination
        ant_pos = xyz_to_uvw(ants_xyz, ha, phasecentre.dec.rad)
        # Convert the hour angle (radians) to seconds: ha * 86400 / (2*pi).
        rtimes[row:row + nrowsperintegration] = ha * 43200.0 / numpy.pi

        # Loop over all pairs of antennas. Note that a2>a1
        for a1 in range(nants):
            for a2 in range(a1 + 1, nants):
                rantenna1[row:row + nch] = a1
                rantenna2[row:row + nch] = a2

                # Loop over all frequencies and polarisations
                for ch in range(nch):
                    # noinspection PyUnresolvedReferences
                    # Scale the baseline vector to wavelengths at this frequency.
                    k = frequency[ch] / constants.c.value
                    ruvw[row, :] = (ant_pos[a2, :] - ant_pos[a1, :]) * k
                    rfrequency[row] = frequency[ch]
                    rchannel_bandwidth[row] = channel_bandwidth[ch]
                    row += 1

    # Optionally force a zero-w (coplanar) dataset, e.g. for 2D imaging tests.
    if zerow:
        ruvw[..., 2] = 0.0

    assert row == nrows
    rintegration_time = numpy.full_like(rtimes, integration_time)
    vis = Visibility(uvw=ruvw, time=rtimes, antenna1=rantenna1, antenna2=rantenna2,
                     frequency=rfrequency, vis=rvis, weight=rweight,
                     imaging_weight=rweight, integration_time=rintegration_time,
                     channel_bandwidth=rchannel_bandwidth,
                     polarisation_frame=polarisation_frame)
    vis.phasecentre = phasecentre
    vis.configuration = config
    log.info("create_visibility: %s" % (vis_summary(vis)))
    assert isinstance(vis, Visibility), "vis is not a Visibility: %r" % vis
    return vis
def actualSetUp(self, add_errors=False, freqwin=7, block=False, dospectral=True,
                dopol=False, zerow=True):
    """Build per-frequency visibility and model graphs for the arlexecute tests.

    Populates self.vis_list, self.model_imagelist, self.componentlist and
    self.cmodel; the model image lists end up scattered and the visibility list
    persisted on the execution framework.
    """
    self.npixel = 256
    self.low = create_named_configuration('LOWBD2', rmax=750.0)
    self.freqwin = freqwin
    self.vis_list = list()
    self.ntimes = 5
    cellsize = 0.001
    # Five hour-angle samples spread over +/- pi/4.
    self.times = numpy.linspace(-3.0, +3.0, self.ntimes) * numpy.pi / 12.0
    self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
    if freqwin > 1:
        self.channelwidth = numpy.array(
            freqwin * [self.frequency[1] - self.frequency[0]])
    else:
        self.channelwidth = numpy.array([1e6])

    if dopol:
        self.vis_pol = PolarisationFrame('linear')
        self.image_pol = PolarisationFrame('stokesIQUV')
        pol_flux = numpy.array([100.0, 20.0, -10.0, 1.0])
    else:
        self.vis_pol = PolarisationFrame('stokesI')
        self.image_pol = PolarisationFrame('stokesI')
        pol_flux = numpy.array([100.0])

    # Optionally apply a nu^-0.7 spectral index across the channels.
    if dospectral:
        flux = numpy.array([pol_flux * numpy.power(freq / 1e8, -0.7)
                            for freq in self.frequency])
    else:
        flux = numpy.array([pol_flux])

    self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg,
                                frame='icrs', equinox='J2000')

    nchan = len(self.frequency)
    self.vis_list = [
        arlexecute.execute(ingest_unittest_visibility)(
            self.low, [self.frequency[chan]], [self.channelwidth[chan]],
            self.times, self.vis_pol, self.phasecentre, block=block, zerow=zerow)
        for chan in range(nchan)
    ]
    # NOTE(review): nout follows the loop index here, exactly as in the original
    # (where the loop variable shadowed the freqwin argument) — confirm intent.
    self.model_imagelist = [
        arlexecute.execute(create_unittest_model, nout=chan)(
            self.vis_list[chan], self.image_pol,
            cellsize=cellsize, npixel=self.npixel)
        for chan in range(nchan)
    ]
    self.componentlist = [
        arlexecute.execute(create_unittest_components)(
            self.model_imagelist[chan], flux[chan, :][numpy.newaxis, :])
        for chan in range(nchan)
    ]
    self.model_imagelist = [
        arlexecute.execute(insert_skycomponent, nout=1)(
            self.model_imagelist[chan], self.componentlist[chan])
        for chan in range(nchan)
    ]
    self.vis_list = [
        arlexecute.execute(predict_skycomponent_visibility)(
            self.vis_list[chan], self.componentlist[chan])
        for chan in range(nchan)
    ]

    # Materialise the models and keep a Gaussian-smoothed copy of the first one
    # as the comparison image.
    self.model_imagelist = arlexecute.compute(self.model_imagelist, sync=True)
    model = self.model_imagelist[0]
    self.cmodel = smooth_image(model)
    export_image_to_fits(
        model, '%s/test_imaging_arlexecute_deconvolved_model.fits' % self.dir)
    export_image_to_fits(
        self.cmodel, '%s/test_imaging_arlexecute_deconvolved_cmodel.fits' % self.dir)

    if add_errors and block:
        self.vis_list = [
            arlexecute.execute(insert_unittest_errors)(self.vis_list[chan])
            for chan in range(nchan)
        ]

    self.vis_list = arlexecute.persist(self.vis_list)
    self.model_imagelist = arlexecute.scatter(self.model_imagelist)
def actualSetUp(self, nfreqwin=3, dospectral=True, dopol=False, amp_errors=None,
                phase_errors=None, zerow=True):
    """Build unit-amplitude block visibilities plus a gain-corrupted copy.

    Populates self.blockvis_list (all visibilities set to 1+0j) and
    self.error_blockvis_list (the same visibilities with a simulated gain error
    applied), then asserts the corruption actually changed the data.

    :param nfreqwin: Number of frequency windows
    :param dospectral: Apply a nu^-0.7 spectral index to the test flux
    :param dopol: Use full linear/stokesIQUV polarisation instead of stokesI
    :param amp_errors: Amplitude error levels per calibration term (default T/G)
    :param phase_errors: Phase error levels per calibration term (default T/G)
    :param zerow: Zero the w coordinate of the simulated visibilities
    """
    if amp_errors is None:
        amp_errors = {'T': 0.0, 'G': 0.1}
    if phase_errors is None:
        phase_errors = {'T': 1.0, 'G': 0.0}
    self.npixel = 512
    self.low = create_named_configuration('LOWBD2', rmax=750.0)
    self.freqwin = nfreqwin
    self.vis_list = list()
    self.ntimes = 1
    self.times = numpy.linspace(-3.0, +3.0, self.ntimes) * numpy.pi / 12.0
    self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
    if self.freqwin > 1:
        self.channelwidth = numpy.array(
            self.freqwin * [self.frequency[1] - self.frequency[0]])
    else:
        self.channelwidth = numpy.array([1e6])

    if dopol:
        self.vis_pol = PolarisationFrame('linear')
        self.image_pol = PolarisationFrame('stokesIQUV')
        f = numpy.array([100.0, 20.0, -10.0, 1.0])
    else:
        self.vis_pol = PolarisationFrame('stokesI')
        self.image_pol = PolarisationFrame('stokesI')
        f = numpy.array([100.0])

    # Flux model (not used directly below since the visibilities are overwritten
    # with unit amplitude, but kept for parity with the sibling setups).
    if dospectral:
        flux = numpy.array(
            [f * numpy.power(freq / 1e8, -0.7) for freq in self.frequency])
    else:
        flux = numpy.array([f])

    self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg,
                                frame='icrs', equinox='J2000')
    self.blockvis_list = [
        arlexecute.execute(ingest_unittest_visibility, nout=1)(
            self.low, [self.frequency[i]], [self.channelwidth[i]], self.times,
            self.vis_pol, self.phasecentre, block=True, zerow=zerow)
        for i in range(nfreqwin)
    ]
    self.blockvis_list = arlexecute.compute(self.blockvis_list, sync=True)

    # Force every visibility to unit amplitude so gain errors are easy to detect.
    for v in self.blockvis_list:
        v.data['vis'][...] = 1.0 + 0.0j

    # BUGFIX: the original wrapped the *result* of copy_visibility(v) in
    # arlexecute.execute, i.e. executed the copy eagerly instead of deferring
    # the call like every other arlexecute.execute(fn)(args) in this setup.
    self.error_blockvis_list = [
        arlexecute.execute(copy_visibility)(v) for v in self.blockvis_list
    ]
    gt = arlexecute.execute(create_gaintable_from_blockvisibility)(
        self.blockvis_list[0])
    gt = arlexecute.execute(simulate_gaintable)(gt, phase_error=0.1,
                                                amplitude_error=0.0,
                                                smooth_channels=1,
                                                leakage=0.0, seed=180555)
    self.error_blockvis_list = [
        arlexecute.execute(apply_gaintable)(self.error_blockvis_list[i], gt)
        for i in range(self.freqwin)
    ]

    self.error_blockvis_list = arlexecute.compute(self.error_blockvis_list,
                                                  sync=True)

    # The corrupted data must differ from the clean data.
    assert numpy.max(
        numpy.abs(self.error_blockvis_list[0].vis
                  - self.blockvis_list[0].vis)) > 0.0
def create_blockvisibility_from_ms(msname, channum=None, ack=False):
    """ Minimal MS to BlockVisibility converter

    The MS format is much more general than the ARL BlockVisibility so we cut
    many corners. This requires casacore to be installed. If not an exception
    ModuleNotFoundError is raised.

    Creates a list of BlockVisibility's, split by field and spectral window

    :param msname: File name of MS
    :param channum: range of channels e.g. range(17,32), default is None meaning all
    :param ack: casacore table acknowledgement flag
    :return: list of BlockVisibility, one per (data description, field) pair
    :raises ModuleNotFoundError: if casacore is not installed
    """
    try:
        from casacore.tables import table  # pylint: disable=import-error
    except ModuleNotFoundError:
        raise ModuleNotFoundError("casacore is not installed")

    tab = table(msname, ack=ack)
    log.debug("create_blockvisibility_from_ms: %s" % str(tab.info()))

    # One BlockVisibility is produced per (DATA_DESC_ID, FIELD_ID) combination.
    fields = numpy.unique(tab.getcol('FIELD_ID'))
    dds = numpy.unique(tab.getcol('DATA_DESC_ID'))
    log.debug("create_blockvisibility_from_ms: Found unique fields %s, unique data descriptions %s" % (
        str(fields), str(dds)))
    vis_list = list()
    for dd in dds:
        dtab = table(msname, ack=ack).query('DATA_DESC_ID==%d' % dd, style='')
        for field in fields:
            ms = dtab.query('FIELD_ID==%d' % field, style='')
            assert ms.nrows() > 0, "Empty selection for FIELD_ID=%d and DATA_DESC_ID=%d" % (field, dd)
            log.debug("create_blockvisibility_from_ms: Found %d rows" % (ms.nrows()))
            time = ms.getcol('TIME')
            channels = ms.getcol('DATA').shape[-2]
            log.debug("create_visibility_from_ms: Found %d channels" % (channels))
            # NOTE(review): once set here, channum persists for all remaining
            # dd/field iterations — confirm all spectral windows share the same
            # channel count.
            if channum is None:
                channum = range(channels)
            try:
                ms_vis = ms.getcol('DATA')[:, channum, :]
                ms_weight = ms.getcol('WEIGHT')[:, :]
            except IndexError:
                raise IndexError("channel number exceeds max. within ms")
            # Sign flip to match the ARL uvw convention.
            uvw = -1 * ms.getcol('UVW')
            antenna1 = ms.getcol('ANTENNA1')
            antenna2 = ms.getcol('ANTENNA2')
            integration_time = ms.getcol('INTERVAL')

            # Now get info from the subtables
            spwtab = table('%s/SPECTRAL_WINDOW' % msname, ack=False)
            cfrequency = spwtab.getcol('CHAN_FREQ')[dd][channum]
            cchannel_bandwidth = spwtab.getcol('CHAN_WIDTH')[dd][channum]
            nchan = cfrequency.shape[0]

            # Get polarisation info
            poltab = table('%s/POLARIZATION' % msname, ack=False)
            corr_type = poltab.getcol('CORR_TYPE')
            # These correspond to the CASA Stokes enumerations
            if numpy.array_equal(corr_type[0], [1, 2, 3, 4]):
                polarisation_frame = PolarisationFrame('stokesIQUV')
            elif numpy.array_equal(corr_type[0], [5, 6, 7, 8]):
                polarisation_frame = PolarisationFrame('circular')
            elif numpy.array_equal(corr_type[0], [9, 10, 11, 12]):
                polarisation_frame = PolarisationFrame('linear')
            else:
                raise KeyError("Polarisation not understood: %s" % str(corr_type))
            # NOTE(review): npol is fixed at 4 regardless of the detected
            # polarisation frame — confirm for 2- or 1-polarisation MSes.
            npol = 4

            # Get configuration
            anttab = table('%s/ANTENNA' % msname, ack=False)
            nants = anttab.nrows()
            mount = anttab.getcol('MOUNT')
            names = anttab.getcol('NAME')
            diameter = anttab.getcol('DISH_DIAMETER')
            xyz = anttab.getcol('POSITION')
            configuration = Configuration(name='', data=None, location=None,
                                          names=names, xyz=xyz, mount=mount, frame=None,
                                          receptor_frame=ReceptorFrame("linear"),
                                          diameter=diameter)

            # Get phasecentres
            fieldtab = table('%s/FIELD' % msname, ack=False)
            pc = fieldtab.getcol('PHASE_DIR')[field, 0, :]
            # NOTE(review): ra is built from a one-element list, giving an array
            # Quantity while dec is scalar — confirm downstream code expects this.
            phasecentre = SkyCoord(ra=[pc[0]] * u.rad, dec=pc[1] * u.rad, frame='icrs',
                                   equinox='J2000')

            # Re-grid the row-ordered MS data into the dense
            # [ntimes, nants, nants, nchan, npol] BlockVisibility layout.
            bv_times = numpy.unique(time)
            ntimes = len(bv_times)
            bv_vis = numpy.zeros([ntimes, nants, nants, nchan, npol]).astype('complex')
            bv_weight = numpy.zeros([ntimes, nants, nants, nchan, npol])
            bv_imaging_weight = numpy.zeros([ntimes, nants, nants, nchan, npol])
            bv_uvw = numpy.zeros([ntimes, nants, nants, 3])
            time_last = time[0]
            time_index = 0
            for row, _ in enumerate(ms_vis):
                # MS has shape [row, npol, nchan]
                # BV has shape [ntimes, nants, nants, nchan, npol]
                if time[row] != time_last:
                    assert time[row] > time_last, "MS is not time-sorted - cannot convert"
                    time_index += 1
                    time_last = time[row]
                bv_vis[time_index, antenna2[row], antenna1[row], ...] = ms_vis[row, ...]
                # The per-row MS weight is broadcast across all channels.
                bv_weight[time_index, antenna2[row], antenna1[row], :, ...] = ms_weight[row, numpy.newaxis, ...]
                bv_imaging_weight[time_index, antenna2[row], antenna1[row], :, ...] = ms_weight[row, numpy.newaxis, ...]
                bv_uvw[time_index, antenna2[row], antenna1[row], :] = uvw[row, :]
            vis_list.append(BlockVisibility(uvw=bv_uvw,
                                            time=bv_times,
                                            frequency=cfrequency,
                                            channel_bandwidth=cchannel_bandwidth,
                                            vis=bv_vis,
                                            weight=bv_weight,
                                            imaging_weight=bv_imaging_weight,
                                            configuration=configuration,
                                            phasecentre=phasecentre,
                                            polarisation_frame=polarisation_frame))
    tab.close()
    return vis_list
def test_congruent(self):
    """Each receptor frame is congruent with the like-named polarisation frame
    and never with the stokesIQUV frame."""
    for name in ("linear", "circular", "stokesI"):
        receptor = ReceptorFrame(name)
        assert congruent_polarisation(receptor, PolarisationFrame(name))
        assert not congruent_polarisation(receptor, PolarisationFrame("stokesIQUV"))
def trial_case(results, seed=180555, context='wstack', nworkers=8, threads_per_worker=1, memory=8,
               processes=True, order='frequency', nfreqwin=7, ntimes=3, rmax=750.0, facets=1,
               wprojection_planes=1, use_dask=True, use_serial_imaging=True, flux_limit=0.3,
               nmajor=5, dft_threshold=1.0, use_serial_clean=True, write_fits=False):
    """Run one MPI performance-timing trial of the ICAL pipeline.

    Simulates LOWBD2 visibilities from the GLEAM catalogue (including phase
    errors), makes the dirty image, then constructs and executes the ICAL
    pipeline, recording wall-clock timings and image statistics into the
    ``results`` dictionary.

    Keys written into ``results`` include (not exhaustive): 'time overall',
    'time predict', 'time invert', 'time ICAL', 'context', 'nworkers',
    'nfreqwin', 'ntimes', 'rmax', 'facets', 'wprojection_planes',
    'vis_slices', 'npixel', 'cellsize', 'seed', 'dirty_max'/'dirty_min',
    'deconvolved_max'/'min', 'residual_max'/'min', 'restored_max'/'min',
    and 'git_hash' (not definitive if there are local modifications).

    :param results: Initial results dictionary; updated in place and returned
    :param seed: Random number seed (used in gain simulations)
    :param context: Imaging context: '2d'|'timeslice'|'wstack'|'wprojection'
    :param nworkers: Number of workers (recorded only; MPI size governs parallelism)
    :param threads_per_worker: Number of threads per worker (recorded only)
    :param memory: Memory per worker in GB (recorded only)
    :param processes: Use processes instead of threads (recorded only)
    :param order: Data ordering passed to the simulation workflow
    :param nfreqwin: Number of frequency windows in the simulation
    :param ntimes: Number of hour angles in the simulation
    :param rmax: Maximum station radius used in the simulation (m)
    :param facets: Number of facets in deconvolution and imaging
    :param wprojection_planes: Number of w-projection planes
    :param use_dask: Recorded only in this MPI variant
    :param use_serial_imaging: Use the serial invert/predict inside the MPI workflow
    :param flux_limit: Faintest GLEAM flux included in the sky model
    :param nmajor: Number of ICAL major cycles
    :param dft_threshold: Flux above which sources are kept as components (DFT)
    :param use_serial_clean: Use the serial deconvolution inside the MPI workflow
    :param write_fits: If True, export dirty/deconvolved/residual/restored FITS on rank 0
    :return: The updated results dictionary
    """
    # Initialise MPI: every rank runs this whole function; rank 0 does the
    # gather/scatter bookkeeping and owns the exported images.
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    def init_logging():
        # Per-process log file; last writer wins on a shared filesystem.
        logging.basicConfig(
            filename='pipelines_mpi_timings.log',
            filemode='w',
            format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
            datefmt='%H:%M:%S',
            level=logging.INFO)

    init_logging()
    log = logging.getLogger()

    # Initialise logging on the workers. This appears to only work using the process scheduler.
    def lprint(*args):
        # Mirror every message to both the log file and stdout.
        log.info(*args)
        print(*args)

    lprint("Starting pipelines_mpi_timings")

    numpy.random.seed(seed)
    results['seed'] = seed

    start_all = time.time()

    # Record the run configuration so the results dictionary is self-describing.
    results['context'] = context
    results['hostname'] = socket.gethostname()
    results['git_hash'] = git_hash()
    results['epoch'] = time.strftime("%Y-%m-%d %H:%M:%S")

    lprint("Context is %s" % context)

    results['nworkers'] = nworkers
    results['threads_per_worker'] = threads_per_worker
    results['processes'] = processes
    results['memory'] = memory
    results['order'] = order
    results['nfreqwin'] = nfreqwin
    results['ntimes'] = ntimes
    results['rmax'] = rmax
    results['facets'] = facets
    results['wprojection_planes'] = wprojection_planes
    results['dft threshold'] = dft_threshold
    results['use_dask'] = use_dask

    lprint("At start, configuration is:")
    lprint(sort_dict(results))

    # Parameters determining scale of simulation.
    frequency = numpy.linspace(1.0e8, 1.2e8, nfreqwin)
    centre = nfreqwin // 2
    if nfreqwin > 1:
        channel_bandwidth = numpy.array(nfreqwin *
                                        [frequency[1] - frequency[0]])
    else:
        channel_bandwidth = numpy.array([1e6])

    times = numpy.linspace(-numpy.pi / 4.0, numpy.pi / 4.0, ntimes)
    phasecentre = SkyCoord(ra=+0.0 * u.deg,
                           dec=-40.0 * u.deg,
                           frame='icrs',
                           equinox='J2000')

    lprint("****** Visibility creation ******")
    # Create the empty BlockVisibility's on rank 0 only; other ranks start empty
    # and receive their share via comm.scatter below.
    if rank == 0:
        tmp_bvis_list = simulate_list_serial_workflow(
            'LOWBD2',
            frequency=frequency,
            channel_bandwidth=channel_bandwidth,
            times=times,
            phasecentre=phasecentre,
            order=order,
            format='blockvis',
            rmax=rmax)
        vis_list = [
            convert_blockvisibility_to_visibility(bv) for bv in tmp_bvis_list
        ]
    else:
        vis_list = list()

    #if rank==0:
    #    import matplotlib.pyplot as plt
    #    plt.clf()
    #    plt.hist(vis_list[0].w, bins=100)
    #    plt.title('Histogram of w samples: rms=%.1f (wavelengths)' % numpy.std(vis_list[0].w))
    #    plt.xlabel('W (wavelengths)')
    #    plt.show()
    #    plt.clf()
    #    plt.hist(vis_list[0].uvdist, bins=100)
    #    plt.title('Histogram of uvdistance samples')
    #    plt.xlabel('UV Distance (wavelengths)')
    #    plt.show()

    sub_vis_list = numpy.array_split(vis_list, size)
    sub_vis_list = comm.scatter(sub_vis_list, root=0)
    # NOTE: sub_vis_list is scattered (future_vis_list in arlexecute)

    # Find the best imaging parameters but don't bring the vis_list back here
    print("****** Finding wide field parameters ******")
    sub_advice = [
        advise_wide_field(v,
                          guard_band_image=6.0,
                          delA=0.1,
                          facets=facets,
                          wprojection_planes=wprojection_planes,
                          oversampling_synthesised_beam=4.0)
        for v in sub_vis_list
    ]

    # MONTSE: This does not make sense cause they all compute advice but only
    # the last one is used. Confirm that this is what the dask version does!!!
    advice_list = comm.gather(sub_advice, root=0)
    if rank == 0:
        # Only the advice from the final sub-list is kept (see note above).
        advice = numpy.concatenate(advice_list)[-1]
    else:
        advice = None
    advice = comm.bcast(advice, root=0)

    # Deconvolution via sub-images requires 2^n
    npixel = advice['npixels2']
    results['npixel'] = npixel
    cellsize = advice['cellsize']
    results['cellsize'] = cellsize
    lprint("Image will have %d by %d pixels, cellsize = %.6f rad" %
           (npixel, npixel, cellsize))

    # NOTE: frequency and channel_bandwidth are replicated on all ranks, so
    # each rank can index its own slice without communication.
    sub_frequency = numpy.array_split(frequency, size)
    sub_channel_bandwidth = numpy.array_split(channel_bandwidth, size)

    # Create an empty model image per frequency on this rank.
    tmp_model_list = [
        create_image(npixel=npixel,
                     cellsize=cellsize,
                     frequency=[sub_frequency[rank][f]],
                     channel_bandwidth=[sub_channel_bandwidth[rank][f]],
                     phasecentre=phasecentre,
                     polarisation_frame=PolarisationFrame("stokesI"))
        for f, freq in enumerate(sub_frequency[rank])
    ]

    model_list = comm.gather(tmp_model_list, root=0)
    if rank == 0:
        model_list = numpy.concatenate(model_list).tolist()
    else:
        model_list = list()
    print(type(model_list))
    assert isinstance(model_list, list), model_list
    # NOTE: tmp_model_list is the scattered model_list

    # NOTE: template_model and gcfcf are replicated on every rank.
    lprint("****** Setting up imaging parameters ******")
    # Now set up the imaging parameters
    template_model = create_image(
        npixel=npixel,
        cellsize=cellsize,
        frequency=[frequency[centre]],
        phasecentre=phasecentre,
        channel_bandwidth=[channel_bandwidth[centre]],
        polarisation_frame=PolarisationFrame("stokesI"))
    gcfcf = [create_pswf_convolutionfunction(template_model)]

    if context == 'timeslice':
        vis_slices = ntimes
        lprint("Using timeslice with %d slices" % vis_slices)
    elif context == '2d':
        vis_slices = 1
    elif context == "wprojection":
        wstep = advice['wstep']
        nw = advice['wprojection_planes']
        vis_slices = 1
        support = advice['nwpixels']
        results['wprojection_planes'] = nw

        lprint("****** Starting W projection kernel creation ******")
        lprint("Using wprojection with %d planes with wstep %.1f wavelengths" %
               (nw, wstep))
        lprint("Support of wprojection = %d pixels" % support)
        gcfcf = [
            create_awterm_convolutionfunction(template_model,
                                              nw=nw,
                                              wstep=wstep,
                                              oversampling=4,
                                              support=support,
                                              use_aaf=True)
        ]
        lprint("Size of W projection gcf, cf = %.2E bytes" % get_size(gcfcf))
    else:
        # Anything else falls back to wstack.
        context = 'wstack'
        vis_slices = advice['vis_slices']
        lprint("Using wstack with %d slices" % vis_slices)

    results['vis_slices'] = vis_slices

    # Make a skymodel from gleam, with bright sources as components and weak sources in an image
    lprint("****** Starting GLEAM skymodel creation ******")
    future_skymodel_list = [
        create_low_test_skymodel_from_gleam(
            npixel=npixel,
            cellsize=cellsize,
            frequency=[sub_frequency[rank][f]],
            phasecentre=phasecentre,
            polarisation_frame=PolarisationFrame("stokesI"),
            flux_limit=flux_limit,
            flux_threshold=dft_threshold,
            flux_max=5.0) for f, freq in enumerate(sub_frequency[rank])
    ]

    # We use predict_skymodel so that we can use skycomponents as well as images
    lprint("****** Starting GLEAM skymodel prediction ******")
    # NOTE: future_skymodel_list is a subset of skymodel_list which has not
    # been gathered.
    sub_predicted_vis_list = [
        predict_skymodel_list_serial_workflow(sub_vis_list[f],
                                              [future_skymodel_list[f]],
                                              context=context,
                                              vis_slices=vis_slices,
                                              facets=facets,
                                              gcfcf=gcfcf)[0]
        for f, freq in enumerate(sub_frequency[rank])
    ]
    predicted_vis_list = comm.gather(sub_predicted_vis_list, root=0)
    if rank == 0:
        predicted_vis_list = numpy.concatenate(predicted_vis_list)
    else:
        predicted_vis_list = list()
    # NOTE: sub_predicted_vis_list is predicted_vis_list scattered

    # Corrupt the visibility for the GLEAM model
    lprint("****** Visibility corruption ******")
    tmp_corrupted_vis_list = corrupt_list_serial_workflow(
        sub_predicted_vis_list, phase_error=1.0, seed=seed)
    lprint("****** Weighting and tapering ******")
    tmp_corrupted_vis_list = weight_list_serial_workflow(
        tmp_corrupted_vis_list, tmp_model_list)
    tmp_corrupted_vis_list = taper_list_serial_workflow(
        tmp_corrupted_vis_list, 0.003 * 750.0 / rmax)
    corrupted_vis_list = comm.gather(tmp_corrupted_vis_list, root=0)
    if rank == 0:
        corrupted_vis_list = numpy.concatenate(corrupted_vis_list)
    else:
        corrupted_vis_list = list()
    #future_corrupted_vis_list = arlexecute.scatter(corrupted_vis_list)

    # At this point the only futures are of scatter'ed data so no repeated calculations should be
    # incurred.
    lprint("****** Starting dirty image calculation ******")
    start = time.time()
    dirty_list = invert_list_mpi_workflow(corrupted_vis_list,
                                          model_list,
                                          vis_slices=vis_slices,
                                          context=context,
                                          facets=facets,
                                          use_serial_invert=use_serial_imaging,
                                          gcfcf=gcfcf)
    end = time.time()
    results['size invert graph'] = get_size(dirty_list)
    lprint('Size of dirty graph is %.3E bytes' %
           (results['size invert graph']))
    results['time invert graph'] = 0.0
    results['time invert'] = end - start
    lprint("Dirty image invert took %.3f seconds" % (end - start))
    if rank == 0:
        dirty, sumwt = dirty_list[centre]
        lprint("Maximum in dirty image is %f, sumwt is %s" %
               (numpy.max(numpy.abs(dirty.data)), str(sumwt)))
        qa = qa_image(dirty)
        results['dirty_max'] = qa.data['max']
        results['dirty_min'] = qa.data['min']
    if write_fits and rank == 0:
        export_image_to_fits(
            dirty, "pipelines_arlexecute_timings-%s-dirty.fits" % context)

    lprint("****** Starting prediction ******")
    start = time.time()
    result = predict_list_mpi_workflow(corrupted_vis_list,
                                       model_list,
                                       vis_slices=vis_slices,
                                       context=context,
                                       facets=facets,
                                       use_serial_predict=use_serial_imaging,
                                       gcfcf=gcfcf)
    # arlexecute.client.cancel(tmp_vis_list)
    end = time.time()
    results['time predict'] = end - start
    lprint("Predict took %.3f seconds" % (end - start))

    # Create the ICAL pipeline to run major cycles, starting selfcal at cycle 1. A global solution across all
    # frequencies (i.e. Visibilities) is performed.

    print("Using subimage clean")
    deconvolve_facets = 8
    deconvolve_overlap = 16
    deconvolve_taper = 'tukey'

    lprint("****** Starting ICAL graph creation ******")

    controls = create_calibration_controls()

    controls['T']['first_selfcal'] = 1
    controls['T']['timescale'] = 'auto'

    start = time.time()
    ical_list = ical_list_mpi_workflow(
        corrupted_vis_list,
        model_imagelist=model_list,
        context=context,
        vis_slices=vis_slices,
        scales=[0, 3, 10],
        algorithm='mmclean',
        nmoment=3,
        niter=1000,
        fractional_threshold=0.1,
        threshold=0.01,
        nmajor=nmajor,
        gain=0.25,
        psf_support=64,
        deconvolve_facets=deconvolve_facets,
        deconvolve_overlap=deconvolve_overlap,
        deconvolve_taper=deconvolve_taper,
        timeslice='auto',
        global_solution=True,
        do_selfcal=True,
        calibration_context='T',
        controls=controls,
        use_serial_predict=use_serial_imaging,
        use_serial_invert=use_serial_imaging,
        use_serial_clean=use_serial_clean,
        gcfcf=gcfcf)

    end = time.time()
    results['size ICAL graph'] = get_size(ical_list)
    lprint('Size of ICAL graph is %.3E bytes' % results['size ICAL graph'])
    results['time ICAL graph'] = 0.0

    # Execute the graph
    lprint("****** Executing ICAL graph ******")
    # In this MPI variant the workflow has already executed eagerly, so the
    # graph-creation timing above covers execution as well.
    deconvolved, residual, restored, gaintables = ical_list

    results['time ICAL'] = end - start
    lprint("ICAL graph execution took %.3f seconds" % (end - start))
    if rank == 0:
        qa = qa_image(deconvolved[centre])
        results['deconvolved_max'] = qa.data['max']
        results['deconvolved_min'] = qa.data['min']
        deconvolved_cube = image_gather_channels(deconvolved)
        if write_fits:
            export_image_to_fits(
                deconvolved_cube,
                "pipelines_arlexecute_timings-%s-ical_deconvolved.fits" %
                context)

        qa = qa_image(residual[centre][0])
        results['residual_max'] = qa.data['max']
        results['residual_min'] = qa.data['min']
        residual_cube = remove_sumwt(residual)
        residual_cube = image_gather_channels(residual_cube)
        if write_fits:
            export_image_to_fits(
                residual_cube,
                "pipelines_arlexecute_timings-%s-ical_residual.fits" % context)

        qa = qa_image(restored[centre])
        results['restored_max'] = qa.data['max']
        results['restored_min'] = qa.data['min']
        restored_cube = image_gather_channels(restored)
        if write_fits:
            export_image_to_fits(
                restored_cube,
                "pipelines_arlexecute_timings-%s-ical_restored.fits" % context)
    #
    end_all = time.time()
    results['time overall'] = end_all - start_all

    lprint("At end, results are:")
    results = sort_dict(results)
    lprint(results)

    return results
def test_polarisation_frame(self):
    """Check PolarisationFrame construction, npol counts, rejection of
    unknown frame names, and inequality between distinct frames.

    Fixes: the original asserted ``PolarisationFrame("circular").npol == 4``
    twice but never checked 'stokesIQUV', even though it appears in the
    constructor loop; the duplicate is replaced by the missing check.
    """
    # Every supported frame round-trips its type name.
    for frame in [
            'circular', 'circularnp', 'linear', 'linearnp', 'stokesIQUV',
            'stokesIV', 'stokesIQ', 'stokesI'
    ]:
        polarisation_frame = PolarisationFrame(frame)
        assert polarisation_frame.type == frame

    # Number of polarisations per frame.
    assert PolarisationFrame("circular").npol == 4
    assert PolarisationFrame("circularnp").npol == 2
    assert PolarisationFrame("linear").npol == 4
    assert PolarisationFrame("linearnp").npol == 2
    assert PolarisationFrame("stokesIQUV").npol == 4
    assert PolarisationFrame("stokesI").npol == 1

    # Unknown frame names are rejected.
    with self.assertRaises(ValueError):
        polarisation_frame = PolarisationFrame("circuloid")

    # Distinct frames compare unequal.
    assert PolarisationFrame("linear") != PolarisationFrame("stokesI")
    assert PolarisationFrame("linear") != PolarisationFrame("circular")
    assert PolarisationFrame("circular") != PolarisationFrame("stokesI")
def test_correlate(self):
    """Correlating a receptor frame yields the polarisation frame of the
    same name."""
    for name in ("linear", "circular", "stokesI"):
        receptor = ReceptorFrame(name)
        expected = PolarisationFrame(name)
        assert correlate_polarisation(receptor) == expected
def trial_case(results, seed=180555, context='wstack', nworkers=8, threads_per_worker=1, memory=8,
               processes=True, order='frequency', nfreqwin=7, ntimes=3, rmax=750.0, facets=1,
               wprojection_planes=1, use_dask=True, use_serial=False):
    """Run one dask/arlexecute performance-timing trial of the ICAL pipeline.

    Simulates LOWBD2 visibilities from GLEAM (including phase errors), makes
    the PSF and dirty image, then constructs and executes the ICAL pipeline,
    recording wall-clock timings and image statistics into ``results``.

    Keys written into ``results`` include (not exhaustive): 'time overall',
    'time create gleam', 'time predict', 'time corrupt', 'time invert',
    'time psf invert', 'time ICAL graph', 'time ICAL', 'context', 'nworkers',
    'threads_per_worker', 'nnodes', 'processes', 'order', 'nfreqwin',
    'ntimes', 'rmax', 'facets', 'wprojection_planes', 'vis_slices',
    'npixel', 'cellsize', 'seed', 'psf_max'/'psf_min',
    'dirty_max'/'dirty_min', 'deconvolved_max'/'min', 'residual_max'/'min',
    'restored_max'/'min', and 'git_hash' (not definitive if local mods).

    :param results: Initial results dictionary; updated in place and returned
    :param seed: Random number seed (used in gain simulations)
    :param context: Imaging context: '2d'|'timeslice'|'wstack'
    :param nworkers: Number of dask workers to use
    :param threads_per_worker: Number of threads per worker
    :param memory: Memory limit per worker in GB
    :param processes: Use processes instead of threads 'processes'|'threads'
    :param order: Data ordering for the simulation workflow
    :param nfreqwin: Number of frequency windows in the simulation
    :param ntimes: Number of hour angles in the simulation
    :param rmax: Maximum station radius used in the simulation (m)
    :param facets: Number of facets to use
    :param wprojection_planes: Number of w-projection planes to use
    :param use_dask: Use dask or immediate evaluation
    :param use_serial: Unused in the visible body — TODO confirm at caller
    :return: The updated results dictionary
    """
    numpy.random.seed(seed)
    results['seed'] = seed

    start_all = time.time()

    # Record the run configuration so the results dictionary is self-describing.
    results['context'] = context
    results['hostname'] = socket.gethostname()
    results['git_hash'] = git_hash()
    results['epoch'] = time.strftime("%Y-%m-%d %H:%M:%S")

    zerow = False
    print("Context is %s" % context)

    results['nworkers'] = nworkers
    results['threads_per_worker'] = threads_per_worker
    results['processes'] = processes
    results['memory'] = memory
    results['order'] = order
    results['nfreqwin'] = nfreqwin
    results['ntimes'] = ntimes
    results['rmax'] = rmax
    results['facets'] = facets
    results['wprojection_planes'] = wprojection_planes
    results['use_dask'] = use_dask

    print("At start, configuration is {0!r}".format(results))

    # Parameters determining scale
    frequency = numpy.linspace(0.8e8, 1.2e8, nfreqwin)
    centre = nfreqwin // 2
    if nfreqwin > 1:
        channel_bandwidth = numpy.array(nfreqwin *
                                        [frequency[1] - frequency[0]])
    else:
        channel_bandwidth = numpy.array([1e6])
    times = numpy.linspace(-numpy.pi / 3.0, numpy.pi / 3.0, ntimes)

    phasecentre = SkyCoord(ra=+30.0 * u.deg,
                           dec=-60.0 * u.deg,
                           frame='icrs',
                           equinox='J2000')

    if use_dask:
        # Spin up a dask cluster client and discover how many distinct nodes
        # the workers landed on.
        client = get_dask_Client(threads_per_worker=threads_per_worker,
                                 memory_limit=memory * 1024 * 1024 * 1024,
                                 n_workers=nworkers)
        arlexecute.set_client(client)
        nodes = findNodes(arlexecute.client)
        unodes = list(numpy.unique(nodes))
        results['nnodes'] = len(unodes)
        print("Defined %d workers on %d nodes" % (nworkers,
                                                  results['nnodes']))
        print("Workers are: %s" % str(nodes))
    else:
        arlexecute.set_client(use_dask=use_dask)
        results['nnodes'] = 1
        unodes = None

    vis_list = simulate_list_arlexecute_workflow(
        'LOWBD2',
        frequency=frequency,
        channel_bandwidth=channel_bandwidth,
        times=times,
        phasecentre=phasecentre,
        order=order,
        format='blockvis',
        rmax=rmax)

    print("****** Visibility creation ******")
    # Keep the visibilities resident on the cluster rather than pulling them
    # back to the client.
    vis_list = arlexecute.persist(vis_list)

    # Find the best imaging parameters but don't bring the vis_list back here
    def get_wf(bv):
        v = convert_blockvisibility_to_visibility(bv)
        return advise_wide_field(v,
                                 guard_band_image=6.0,
                                 delA=0.02,
                                 facets=facets,
                                 wprojection_planes=wprojection_planes,
                                 oversampling_synthesised_beam=4.0)

    wprojection_planes = 1
    advice = arlexecute.compute(arlexecute.execute(get_wf)(vis_list[0]),
                                sync=True)

    # Deconvolution via sub-images requires a power-of-two image size.
    npixel = advice['npixels2']
    cellsize = advice['cellsize']

    if context == 'timeslice':
        vis_slices = ntimes
        print("Using timeslice with %d slices" % vis_slices)
    elif context == '2d':
        vis_slices = 1
    else:
        # Anything else falls back to wstack with extra slices for safety.
        context = 'wstack'
        vis_slices = 5 * advice['vis_slices']
        print("Using wstack with %d slices" % vis_slices)

    results['vis_slices'] = vis_slices
    results['cellsize'] = cellsize
    results['npixel'] = npixel

    gleam_model_list = [
        arlexecute.execute(create_low_test_image_from_gleam)(
            npixel=npixel,
            frequency=[frequency[f]],
            channel_bandwidth=[channel_bandwidth[f]],
            cellsize=cellsize,
            phasecentre=phasecentre,
            polarisation_frame=PolarisationFrame("stokesI"),
            flux_limit=0.3,
            applybeam=True) for f, freq in enumerate(frequency)
    ]

    start = time.time()
    print("****** Starting GLEAM model creation ******")
    gleam_model_list = arlexecute.compute(gleam_model_list, sync=True)
    # Export a smoothed version of the centre channel for inspection.
    cmodel = smooth_image(gleam_model_list[centre])
    export_image_to_fits(cmodel,
                         "pipelines-timings-arlexecute-gleam_cmodel.fits")
    end = time.time()
    results['time create gleam'] = end - start
    print("Creating GLEAM model took %.2f seconds" % (end - start))

    gleam_model_list = arlexecute.scatter(gleam_model_list)
    vis_list = predict_list_arlexecute_workflow(vis_list,
                                                gleam_model_list,
                                                vis_slices=vis_slices,
                                                context=context)
    start = time.time()
    print("****** Starting GLEAM model visibility prediction ******")
    vis_list = arlexecute.compute(vis_list, sync=True)

    end = time.time()
    results['time predict'] = end - start
    print("GLEAM model Visibility prediction took %.2f seconds" %
          (end - start))

    # Corrupt the visibility for the GLEAM model
    print("****** Visibility corruption ******")
    vis_list = corrupt_list_arlexecute_workflow(vis_list, phase_error=1.0)
    start = time.time()
    vis_list = arlexecute.compute(vis_list, sync=True)
    vis_list = arlexecute.scatter(vis_list)
    end = time.time()
    results['time corrupt'] = end - start
    print("Visibility corruption took %.2f seconds" % (end - start))

    # Create an empty model image
    model_list = [
        arlexecute.execute(create_image_from_visibility)(
            vis_list[f],
            npixel=npixel,
            cellsize=cellsize,
            frequency=[frequency[f]],
            channel_bandwidth=[channel_bandwidth[f]],
            polarisation_frame=PolarisationFrame("stokesI"))
        for f, freq in enumerate(frequency)
    ]
    model_list = arlexecute.compute(model_list, sync=True)
    model_list = arlexecute.scatter(model_list)

    psf_list = invert_list_arlexecute_workflow(vis_list,
                                               model_list,
                                               vis_slices=vis_slices,
                                               context=context,
                                               facets=facets,
                                               dopsf=True)
    start = time.time()
    print("****** Starting PSF calculation ******")
    psf, sumwt = arlexecute.compute(psf_list, sync=True)[centre]
    end = time.time()
    results['time psf invert'] = end - start
    print("PSF invert took %.2f seconds" % (end - start))

    results['psf_max'] = qa_image(psf).data['max']
    results['psf_min'] = qa_image(psf).data['min']

    dirty_list = invert_list_arlexecute_workflow(vis_list,
                                                 model_list,
                                                 vis_slices=vis_slices,
                                                 context=context,
                                                 facets=facets)
    start = time.time()
    print("****** Starting dirty image calculation ******")
    dirty, sumwt = arlexecute.compute(dirty_list, sync=True)[centre]
    end = time.time()
    results['time invert'] = end - start
    print("Dirty image invert took %.2f seconds" % (end - start))
    print("Maximum in dirty image is ", numpy.max(numpy.abs(dirty.data)),
          ", sumwt is ", sumwt)
    qa = qa_image(dirty)
    results['dirty_max'] = qa.data['max']
    results['dirty_min'] = qa.data['min']

    # Create the ICAL pipeline to run 5 major cycles, starting selfcal at cycle 1. A global solution across all
    # frequencies (i.e. Visibilities) is performed.
    start = time.time()
    print("****** Starting ICAL ******")

    controls = create_calibration_controls()

    controls['T']['first_selfcal'] = 1
    controls['G']['first_selfcal'] = 3
    controls['B']['first_selfcal'] = 4

    controls['T']['timescale'] = 'auto'
    controls['G']['timescale'] = 'auto'
    controls['B']['timescale'] = 1e5

    # Choose the deconvolution algorithm from the number of channels:
    # enough channels for frequency moments -> mmclean, else msclean.
    if nfreqwin > 6:
        nmoment = 3
        algorithm = 'mmclean'
    elif nfreqwin > 2:
        nmoment = 2
        algorithm = 'mmclean'
    else:
        nmoment = 1
        algorithm = 'msclean'

    start = time.time()
    ical_list = ical_list_arlexecute_workflow(vis_list,
                                              model_imagelist=model_list,
                                              context='wstack',
                                              calibration_context='TG',
                                              controls=controls,
                                              scales=[0, 3, 10],
                                              algorithm=algorithm,
                                              nmoment=nmoment,
                                              niter=1000,
                                              fractional_threshold=0.1,
                                              threshold=0.1,
                                              nmajor=5,
                                              gain=0.25,
                                              vis_slices=vis_slices,
                                              timeslice='auto',
                                              global_solution=False,
                                              psf_support=64,
                                              do_selfcal=True)

    end = time.time()
    results['time ICAL graph'] = end - start
    print("Construction of ICAL graph took %.2f seconds" % (end - start))

    # Execute the graph
    start = time.time()
    result = arlexecute.compute(ical_list, sync=True)
    deconvolved, residual, restored = result
    end = time.time()

    results['time ICAL'] = end - start
    print("ICAL graph execution took %.2f seconds" % (end - start))
    qa = qa_image(deconvolved[centre])
    results['deconvolved_max'] = qa.data['max']
    results['deconvolved_min'] = qa.data['min']
    export_image_to_fits(deconvolved[centre],
                         "pipelines-timings-arlexecute-ical_deconvolved.fits")

    qa = qa_image(residual[centre][0])
    results['residual_max'] = qa.data['max']
    results['residual_min'] = qa.data['min']
    export_image_to_fits(residual[centre][0],
                         "pipelines-timings-arlexecute-ical_residual.fits")

    qa = qa_image(restored[centre])
    results['restored_max'] = qa.data['max']
    results['restored_min'] = qa.data['min']
    export_image_to_fits(restored[centre],
                         "pipelines-timings-arlexecute-ical_restored.fits")
    # arlexecute.close()

    end_all = time.time()
    results['time overall'] = end_all - start_all

    print("At end, results are {0!r}".format(results))

    return results
def actualSetup(self, vnchan=1, doiso=True, ntimes=5, flux_limit=2.0,
                zerow=True, fixed=False):
    """Build the simulated dataset used by the modelpartition tests.

    Simulates LOWBD2 block visibilities of GLEAM sky components, applies a
    per-component corrupting gain table, optionally performs an isoplanatic
    (phase-only) selfcal, images the model and corrupted data to FITS, and
    stores beam, components, visibilities and skymodels on ``self``.

    :param vnchan: Number of frequency channels to simulate
    :param doiso: If True, solve and remove an isoplanatic phase term
    :param ntimes: Number of hour angles to simulate
    :param flux_limit: Faintest GLEAM component flux to include (Jy)
    :param zerow: Passed to create_blockvisibility; w is also zeroed below
    :param fixed: Whether the resulting SkyModel components are held fixed
    """
    # Simulation scale parameters.
    nfreqwin = vnchan
    rmax = 300.0
    npixel = 512
    cellsize = 0.001
    frequency = numpy.linspace(0.8e8, 1.2e8, nfreqwin)
    if nfreqwin > 1:
        channel_bandwidth = numpy.array(nfreqwin *
                                        [frequency[1] - frequency[0]])
    else:
        channel_bandwidth = [0.4e8]
    times = numpy.linspace(-numpy.pi / 3.0, numpy.pi / 3.0, ntimes)

    phasecentre = SkyCoord(ra=-60.0 * u.deg,
                           dec=-60.0 * u.deg,
                           frame='icrs',
                           equinox='J2000')

    lowcore = create_named_configuration('LOWBD2', rmax=rmax)

    block_vis = create_blockvisibility(
        lowcore,
        times,
        frequency=frequency,
        channel_bandwidth=channel_bandwidth,
        weight=1.0,
        phasecentre=phasecentre,
        polarisation_frame=PolarisationFrame("stokesI"),
        zerow=zerow)

    # Force w to zero regardless of the zerow flag above.
    block_vis.data['uvw'][..., 2] = 0.0
    self.beam = create_image_from_visibility(
        block_vis,
        npixel=npixel,
        frequency=[numpy.average(frequency)],
        nchan=nfreqwin,
        channel_bandwidth=[numpy.sum(channel_bandwidth)],
        cellsize=cellsize,
        phasecentre=phasecentre)

    self.components = create_low_test_skycomponents_from_gleam(
        flux_limit=flux_limit,
        phasecentre=phasecentre,
        frequency=frequency,
        polarisation_frame=PolarisationFrame('stokesI'),
        radius=npixel * cellsize)
    self.beam = create_low_test_beam(self.beam)
    self.components = apply_beam_to_skycomponent(self.components,
                                                 self.beam,
                                                 flux_limit=flux_limit)

    self.vis = copy_visibility(block_vis, zero=True)
    gt = create_gaintable_from_blockvisibility(block_vis, timeslice='auto')
    # Accumulate each component's visibility with its own (re-simulated)
    # corrupting gain table; bright components are scaled down first.
    for i, sc in enumerate(self.components):
        if sc.flux[0, 0] > 10:
            sc.flux[...] /= 10.0
        component_vis = copy_visibility(block_vis, zero=True)
        gt = simulate_gaintable(gt,
                                amplitude_error=0.0,
                                phase_error=0.1,
                                seed=None)
        component_vis = predict_skycomponent_visibility(component_vis, sc)
        component_vis = apply_gaintable(component_vis, gt)
        self.vis.data['vis'][...] += component_vis.data['vis'][...]

    # Do an isoplanatic selfcal
    self.model_vis = copy_visibility(self.vis, zero=True)
    self.model_vis = predict_skycomponent_visibility(
        self.model_vis, self.components)
    if doiso:
        # Phase-only solve against the uncorrupted model, then remove the
        # solved (isoplanatic) term from the corrupted data.
        gt = solve_gaintable(self.vis,
                             self.model_vis,
                             phase_only=True,
                             timeslice='auto')
        self.vis = apply_gaintable(self.vis, gt, inverse=True)

    self.model_vis = convert_blockvisibility_to_visibility(self.model_vis)
    self.model_vis, _, _ = weight_visibility(self.model_vis, self.beam)
    self.dirty_model, sumwt = invert_list_arlexecute_workflow(
        self.model_vis, self.beam, context='2d')
    export_image_to_fits(
        self.dirty_model,
        "%s/test_modelpartition-model_dirty.fits" % self.dir)

    lvis = convert_blockvisibility_to_visibility(self.vis)
    lvis, _, _ = weight_visibility(lvis, self.beam)
    dirty, sumwt = invert_list_arlexecute_workflow(lvis,
                                                   self.beam,
                                                   context='2d')
    if doiso:
        export_image_to_fits(
            dirty,
            "%s/test_modelpartition-initial-iso-residual.fits" % self.dir)
    else:
        export_image_to_fits(
            dirty,
            "%s/test_modelpartition-initial-noiso-residual.fits" % self.dir)

    # One single-component SkyModel per GLEAM component.
    self.skymodels = [
        SkyModel(components=[cm], fixed=fixed) for cm in self.components
    ]
# Generate a time series of LOW ionospheric phase screens with ArScreens and
# export them as a FITS cube with axes [XX, YY, TIME, FREQ].
# NOTE(review): r0, speed, direction, hiono, rate, n, m, pscale, alpha_mag and
# npixel are defined earlier in this script (not visible here) — presumably
# the screen physical parameters; confirm against the full file.
ntimes = 61
# One atmospheric layer: (r0, wind speed, wind direction, ionosphere height).
lowparamcube = numpy.array([(r0, speed, direction, hiono)])
filename = 'low_screen_%.1fr0_%.3frate.fits' % (r0, rate)
my_screens = ArScreens.ArScreens(n, m, pscale, rate, lowparamcube, alpha_mag)
my_screens.run(ntimes, verbose=True)

from astropy.wcs import WCS

nfreqwin = 1
npol = 1
frequency = [1e8]
channel_bandwidth = [0.1e8]
# Build a 4-axis WCS describing the screen cube; TIME spacing is the
# reciprocal of the screen update rate.
w = WCS(naxis=4)
cellsize = pscale
w.wcs.cdelt = [cellsize, cellsize, 1.0 / rate, channel_bandwidth[0]]
w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, ntimes // 2 + 1, 1.0]
w.wcs.ctype = ['XX', 'YY', 'TIME', 'FREQ']
w.wcs.crval = [0.0, 0.0, 0.0, frequency[0]]
w.naxis = 4
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0

# Copy each generated screen (first layer only) into the time axis.
data = numpy.zeros([nfreqwin, ntimes, npixel, npixel])
for i, screen in enumerate(my_screens.screens[0]):
    data[:, i, ...] = screen[numpy.newaxis, ...]

im = create_image_from_array(
    data, wcs=w, polarisation_frame=PolarisationFrame("stokesI"))
print(im)
export_image_to_fits(im, filename)
def actualSetUp(self, add_errors=False, nfreqwin=7, dospectral=True,
                dopol=False, zerow=True):
    """Build the simulated dataset used by the serial imaging tests.

    Creates LOWBD2 block visibilities per frequency window, predicts unit-test
    sky components into them, builds matching model images, optionally applies
    simulated gain errors, and stores everything on ``self``.

    :param add_errors: If True, corrupt the visibilities with a simulated gain table
    :param nfreqwin: Number of frequency windows
    :param dospectral: If True, give the test flux a nu^-0.7 spectral slope
    :param dopol: If True, use linear/stokesIQUV frames; otherwise stokesI
    :param zerow: Passed through to the visibility ingest (zero w terms)
    """
    self.npixel = 512
    self.low = create_named_configuration('LOWBD2', rmax=750.0)
    self.freqwin = nfreqwin
    self.vis_list = list()
    self.ntimes = 5
    self.times = numpy.linspace(-3.0, +3.0, self.ntimes) * numpy.pi / 12.0
    self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)

    if self.freqwin > 1:
        self.channelwidth = numpy.array(
            self.freqwin * [self.frequency[1] - self.frequency[0]])
    else:
        self.channelwidth = numpy.array([1e6])

    # Polarisation setup: full polarisation or Stokes I only.
    if dopol:
        self.vis_pol = PolarisationFrame('linear')
        self.image_pol = PolarisationFrame('stokesIQUV')
        f = numpy.array([100.0, 20.0, -10.0, 1.0])
    else:
        self.vis_pol = PolarisationFrame('stokesI')
        self.image_pol = PolarisationFrame('stokesI')
        f = numpy.array([100.0])

    if dospectral:
        flux = numpy.array(
            [f * numpy.power(freq / 1e8, -0.7) for freq in self.frequency])
    else:
        flux = numpy.array([f])

    self.phasecentre = SkyCoord(ra=+180.0 * u.deg,
                                dec=-60.0 * u.deg,
                                frame='icrs',
                                equinox='J2000')
    # One single-channel BlockVisibility per frequency window.
    self.blockvis_list = [ingest_unittest_visibility(self.low,
                                                     [self.frequency[i]],
                                                     [self.channelwidth[i]],
                                                     self.times,
                                                     self.vis_pol,
                                                     self.phasecentre,
                                                     block=True,
                                                     zerow=zerow)
                          for i in range(nfreqwin)]
    self.vis_list = [convert_blockvisibility_to_visibility(bv)
                     for bv in self.blockvis_list]

    self.model_imagelist = [
        create_unittest_model(self.vis_list[i],
                              self.image_pol,
                              npixel=self.npixel,
                              cellsize=0.0005)
        for i in range(nfreqwin)]

    self.components_list = [create_unittest_components(
        self.model_imagelist[freqwin],
        flux[freqwin, :][numpy.newaxis, :])
        for freqwin, m in enumerate(self.model_imagelist)]

    # Predict the components into the visibilities and insert them into the
    # model images so model and data agree.
    self.blockvis_list = [
        predict_skycomponent_visibility(self.blockvis_list[freqwin],
                                        self.components_list[freqwin])
        for freqwin, _ in enumerate(self.blockvis_list)]

    self.model_imagelist = [insert_skycomponent(
        self.model_imagelist[freqwin],
        self.components_list[freqwin])
        for freqwin in range(nfreqwin)]

    model = self.model_imagelist[0]
    self.cmodel = smooth_image(model)
    if self.persist:
        export_image_to_fits(model,
                             '%s/test_imaging_serial_model.fits' % self.dir)
        export_image_to_fits(self.cmodel,
                             '%s/test_imaging_serial_cmodel.fits' % self.dir)

    if add_errors:
        # Corrupt all frequency windows with the same simulated gain table.
        gt = create_gaintable_from_blockvisibility(self.blockvis_list[0])
        gt = simulate_gaintable(gt,
                                phase_error=0.1,
                                amplitude_error=0.0,
                                smooth_channels=1,
                                leakage=0.0,
                                seed=180555)
        self.blockvis_list = [apply_gaintable(self.blockvis_list[i], gt)
                              for i in range(self.freqwin)]
        self.vis_list = [convert_blockvisibility_to_visibility(bv)
                         for bv in self.blockvis_list]

    # Rebuild empty model images (without the inserted components) for the
    # imaging tests to deconvolve into.
    self.model_imagelist = [
        create_unittest_model(self.vis_list[i],
                              self.image_pol,
                              npixel=self.npixel,
                              cellsize=0.0005)
        for i in range(nfreqwin)]
# In[37]: # Create test images num_images = 4 image_list = list() for i in range(num_images): frequency = numpy.array([1e8]) phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000') model = create_test_image(frequency=frequency, phasecentre=phasecentre, cellsize=0.001, polarisation_frame=PolarisationFrame('stokesI')) #print(model) nchan, npol, ny, nx = model.data.shape sumwt = numpy.ones([nchan, npol]) print(sumwt) image_list.append((model, sumwt)) if i == num_images - 1: # f=show_image(model, title='Model image', cm='Greys', vmax=1.0, vmin=-0.1) print(qa_image(model, context='Model image')) # plt.show() # In[38]: # Accum images into one with weights (result_image, result_sumwt) = sum_invert_results_local(image_list)
def actualSetUp(self, add_errors=False, freqwin=3, block=False, dospectral=True, dopol=False, zerow=False,
                makegcfcf=False):
    """Build the arlexecute unit-test fixture: visibility graphs, models and components.

    :param add_errors: If True (and block=True), corrupt the visibilities with unit-test errors
    :param freqwin: Number of frequency windows
    :param block: If True, build BlockVisibilities rather than Visibilities
    :param dospectral: If True, components get a power-law spectrum (index -0.7)
    :param dopol: If True, use linear vis polarisation / stokesIQUV images instead of stokesI
    :param zerow: If True, simulate with the w coordinate zeroed
    :param makegcfcf: If True, precompute AW-term convolution functions (full, clipped, joint)
    """
    self.npixel = 256
    self.low = create_named_configuration('LOWBD2', rmax=750.0)
    self.freqwin = freqwin
    self.vis_list = list()
    self.ntimes = 5
    self.cellsize = 0.0005
    # Choose the interval so that the maximum change in w is smallish
    integration_time = numpy.pi * (24 / (12 * 60))
    self.times = numpy.linspace(-integration_time * (self.ntimes // 2),
                                integration_time * (self.ntimes // 2),
                                self.ntimes)
    if freqwin > 1:
        self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
        self.channelwidth = numpy.array(freqwin * [self.frequency[1] - self.frequency[0]])
    else:
        self.frequency = numpy.array([1.0e8])
        self.channelwidth = numpy.array([4e7])
    if dopol:
        self.vis_pol = PolarisationFrame('linear')
        self.image_pol = PolarisationFrame('stokesIQUV')
        f = numpy.array([100.0, 20.0, -10.0, 1.0])
    else:
        self.vis_pol = PolarisationFrame('stokesI')
        self.image_pol = PolarisationFrame('stokesI')
        f = numpy.array([100.0])
    if dospectral:
        # Power-law spectrum referenced to 100 MHz
        flux = numpy.array([f * numpy.power(freq / 1e8, -0.7) for freq in self.frequency])
    else:
        flux = numpy.array([f])
    self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
    # Each step below is wrapped by arlexecute.execute, so these build a lazy graph
    # until arlexecute.compute(..., sync=True) forces evaluation.
    self.vis_list = [arlexecute.execute(ingest_unittest_visibility)(self.low,
                                                                    [self.frequency[freqwin]],
                                                                    [self.channelwidth[freqwin]],
                                                                    self.times,
                                                                    self.vis_pol,
                                                                    self.phasecentre, block=block,
                                                                    zerow=zerow)
                     for freqwin, _ in enumerate(self.frequency)]
    self.model_list = [arlexecute.execute(create_unittest_model, nout=freqwin)(self.vis_list[freqwin],
                                                                               self.image_pol,
                                                                               cellsize=self.cellsize,
                                                                               npixel=self.npixel)
                       for freqwin, _ in enumerate(self.frequency)]
    self.components_list = [arlexecute.execute(create_unittest_components)(self.model_list[freqwin],
                                                                           flux[freqwin, :][numpy.newaxis, :],
                                                                           single=True)
                            for freqwin, _ in enumerate(self.frequency)]
    # Materialise components eagerly: they are needed below as concrete values
    self.components_list = arlexecute.compute(self.components_list, sync=True)
    self.model_list = [arlexecute.execute(insert_skycomponent, nout=1)(self.model_list[freqwin],
                                                                       self.components_list[freqwin])
                       for freqwin, _ in enumerate(self.frequency)]
    self.model_list = arlexecute.compute(self.model_list, sync=True)
    self.vis_list = [arlexecute.execute(predict_skycomponent_visibility)(self.vis_list[freqwin],
                                                                         self.components_list[freqwin])
                     for freqwin, _ in enumerate(self.frequency)]
    # Use the centre frequency window as the reference model
    centre = self.freqwin // 2
    # Calculate the model convolved with a Gaussian.
    self.model = self.model_list[centre]
    self.cmodel = smooth_image(self.model)
    export_image_to_fits(self.model, '%s/test_imaging_model.fits' % self.dir)
    export_image_to_fits(self.cmodel, '%s/test_imaging_cmodel.fits' % self.dir)
    if add_errors and block:
        self.vis_list = [arlexecute.execute(insert_unittest_errors)(self.vis_list[i])
                         for i, _ in enumerate(self.frequency)]
    self.components = self.components_list[centre]
    if makegcfcf:
        # Full-resolution AW-term convolution function (61 w-planes)
        self.gcfcf = [create_awterm_convolutionfunction(self.model, nw=61, wstep=16.0,
                                                        oversampling=8, support=64,
                                                        use_aaf=True)]
        # Same gridding function with the convolution function clipped to a bounding box
        self.gcfcf_clipped = [(self.gcfcf[0][0],
                               apply_bounding_box_convolutionfunction(self.gcfcf[0][1],
                                                                      fractional_level=1e-3))]
        # Coarser w-sampling (11 w-planes) for joint deconvolution tests
        self.gcfcf_joint = [create_awterm_convolutionfunction(self.model, nw=11, wstep=16.0,
                                                              oversampling=8, support=64,
                                                              use_aaf=True)]
    else:
        self.gcfcf = None
        self.gcfcf_clipped = None
        self.gcfcf_joint = None
def deconvolve_cube_complex(dirty: Image, psf: Image, **kwargs) -> (Image, Image):
    """ Clean using the complex Hogbom algorithm for polarised data (2016MNRAS.462.3483P)

    The algorithm available is:
    hogbom-complex: See: Pratley L. & Johnston-Hollitt M., (2016), MNRAS, 462, 3483.

    This code is based upon the deconvolve_cube code for standard Hogbom clean available in ARL.

    Stokes I and V (pol indices 0 and 3) are cleaned independently with the real
    Hogbom algorithm; Stokes Q and U (pol indices 1 and 2) are cleaned jointly as
    a single complex image Q + iU with hogbom_complex.

    Args:
    dirty (numpy array): The dirty image, i.e., the image to be deconvolved.
    psf (numpy array): The point spread-function.
    window (float): Regions where clean components are allowed. If True, entire dirty Image is allowed.
    algorithm (str): Cleaning algorithm: 'hogbom-complex' only.
    gain (float): The "loop gain", i.e., the fraction of the brightest pixel that is removed in each iteration.
    threshold (float): Cleaning stops when the maximum of the absolute deviation of the residual is less than this
        value.
    niter (int): Maximum number of components to make if the threshold `thresh` is not hit.
    fractional_threshold (float): The predefined fractional threshold at which to stop cleaning.

    Returns:
    comp_image: clean component image.
    residual_image: residual image.
    """
    assert isinstance(dirty, Image), "Type is %s" % (type(dirty))
    assert isinstance(psf, Image), "Type is %s" % (type(psf))

    window_shape = get_parameter(kwargs, 'window_shape', None)
    if window_shape == 'quarter':
        # Restrict cleaning to the inner quarter of each sky plane
        qx = dirty.shape[3] // 4
        qy = dirty.shape[2] // 4
        window = np.zeros_like(dirty.data)
        window[..., (qy + 1):3 * qy, (qx + 1):3 * qx] = 1.0
        log.info('deconvolve_cube_complex: Cleaning inner quarter of each sky plane')
    else:
        window = None

    # Optionally trim the PSF to +/- psf_support pixels about its centre
    psf_support = get_parameter(kwargs, 'psf_support', None)
    if isinstance(psf_support, int):
        if (psf_support < psf.shape[2] // 2) and ((psf_support < psf.shape[3] // 2)):
            centre = [psf.shape[2] // 2, psf.shape[3] // 2]
            psf.data = psf.data[..., (centre[0] - psf_support):(centre[0] + psf_support),
                                (centre[1] - psf_support):(centre[1] + psf_support)]
            log.info('deconvolve_cube_complex: PSF support = +/- %d pixels' % (psf_support))

    algorithm = get_parameter(kwargs, 'algorithm', 'msclean')
    if algorithm == 'hogbom-complex':
        log.info("deconvolve_cube_complex: Hogbom-complex clean of each polarisation and channel separately")
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 <= fracthresh < 1.0

        comp_array = np.zeros(dirty.data.shape)
        residual_array = np.zeros(dirty.data.shape)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if pol == 0 or pol == 3:
                    # Stokes I and V: independent real-valued Hogbom clean
                    if psf.data[channel, pol, :, :].max():
                        log.info("deconvolve_cube_complex: Processing pol %d, channel %d" % (pol, channel))
                        if window is None:
                            comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                                hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                       None, gain, thresh, niter, fracthresh)
                        else:
                            comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                                hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                       window[channel, pol, :, :], gain, thresh, niter, fracthresh)
                    else:
                        log.info("deconvolve_cube_complex: Skipping pol %d, channel %d" % (pol, channel))
                if pol == 1:
                    # Stokes Q and U: cleaned together as the complex image Q + iU
                    if psf.data[channel, 1:2, :, :].max():
                        log.info("deconvolve_cube_complex: Processing pol 1 and 2, channel %d" % (channel))
                        if window is None:
                            comp_array[channel, 1, :, :], comp_array[channel, 2, :, :], \
                                residual_array[channel, 1, :, :], residual_array[channel, 2, :, :] = \
                                hogbom_complex(dirty.data[channel, 1, :, :], dirty.data[channel, 2, :, :],
                                               psf.data[channel, 1, :, :], psf.data[channel, 2, :, :],
                                               None, gain, thresh, niter, fracthresh)
                        else:
                            # NOTE(review): pol == 1 here, so the Q-plane window is used for
                            # the joint Q/U clean — presumably intentional; confirm.
                            comp_array[channel, 1, :, :], comp_array[channel, 2, :, :], \
                                residual_array[channel, 1, :, :], residual_array[channel, 2, :, :] = \
                                hogbom_complex(dirty.data[channel, 1, :, :], dirty.data[channel, 2, :, :],
                                               psf.data[channel, 1, :, :], psf.data[channel, 2, :, :],
                                               window[channel, pol, :, :], gain, thresh, niter, fracthresh)
                    else:
                        log.info("deconvolve_cube_complex: Skipping pol 1 and 2, channel %d" % (channel))
                if pol == 2:
                    # Stokes U was already handled together with Q above
                    continue

        comp_image = create_image_from_array(comp_array, dirty.wcs,
                                             polarisation_frame=PolarisationFrame('stokesIQUV'))
        residual_image = create_image_from_array(residual_array, dirty.wcs,
                                                 polarisation_frame=PolarisationFrame('stokesIQUV'))
    else:
        raise ValueError('deconvolve_cube_complex: Unknown algorithm %s' % algorithm)

    return comp_image, residual_image
def simulate_list_arlexecute_workflow(config='LOWBD2',
                                      phasecentre=SkyCoord(ra=+15.0 * u.deg, dec=-60.0 * u.deg,
                                                           frame='icrs', equinox='J2000'),
                                      frequency=None, channel_bandwidth=None, times=None,
                                      polarisation_frame=PolarisationFrame("stokesI"), order='frequency',
                                      format='blockvis', rmax=1000.0, zerow=False):
    """ A component to simulate an observation

    The simulation step can generate a single BlockVisibility or a list of BlockVisibility's.
    The parameter keyword determines the way that the list is constructed.
    If order='frequency' then len(frequency) BlockVisibility's with all times are created.
    If order='time' then len(times) BlockVisibility's with all frequencies are created.
    If order = 'both' then len(times) * len(frequency) BlockVisibility's are created each with
    a single time and frequency. If order = None then all data are created in one BlockVisibility.

    The output format can be either 'blockvis' (for calibration) or 'vis' (for imaging)

    :param config: Name of configuration: def LOWBDS-CORE
    :param phasecentre: Phase centre def: SkyCoord(ra=+15.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
    :param frequency: def [1e8]
    :param channel_bandwidth: def [1e6]
    :param times: Observing times in radians: def [0.0]
    :param polarisation_frame: def PolarisationFrame("stokesI")
    :param order: 'time' or 'frequency' or 'both' or None: def 'frequency'
    :param format: 'blockvis' or 'vis': def 'blockvis'
    :param rmax: Maximum distance of station from array centre (m): def 1000.0
    :param zerow: If True, simulate with the w coordinate zeroed
    :raises NotImplementedError: if order is not one of the documented values
    :return: vis_list with different frequencies in different elements
    """
    # Choose the constructor matching the requested output format
    if format == 'vis':
        create_vis = create_visibility
    else:
        create_vis = create_blockvisibility

    # Fill in the documented defaults for the mutable parameters
    if times is None:
        times = [0.0]
    if channel_bandwidth is None:
        channel_bandwidth = [1e6]
    if frequency is None:
        frequency = [1e8]
    conf = create_named_configuration(config, rmax=rmax)

    if order == 'time':
        # One visibility per time, each with all frequencies
        log.debug("simulate_list_arlexecute_workflow: Simulating distribution in %s" % order)
        vis_list = list()
        for i, _ in enumerate(times):
            vis_list.append(arlexecute.execute(create_vis, nout=1)(conf, numpy.array([times[i]]),
                                                                   frequency=frequency,
                                                                   channel_bandwidth=channel_bandwidth,
                                                                   weight=1.0, phasecentre=phasecentre,
                                                                   polarisation_frame=polarisation_frame,
                                                                   zerow=zerow))
    elif order == 'frequency':
        # One visibility per frequency, each with all times
        log.debug("simulate_list_arlexecute_workflow: Simulating distribution in %s" % order)
        vis_list = list()
        for j, _ in enumerate(frequency):
            vis_list.append(arlexecute.execute(create_vis, nout=1)(conf, times,
                                                                   frequency=numpy.array([frequency[j]]),
                                                                   channel_bandwidth=numpy.array(
                                                                       [channel_bandwidth[j]]),
                                                                   weight=1.0, phasecentre=phasecentre,
                                                                   polarisation_frame=polarisation_frame,
                                                                   zerow=zerow))
    elif order == 'both':
        # One visibility per (time, frequency) pair
        log.debug("simulate_list_arlexecute_workflow: Simulating distribution in time and frequency")
        vis_list = list()
        for i, _ in enumerate(times):
            for j, _ in enumerate(frequency):
                vis_list.append(arlexecute.execute(create_vis, nout=1)(conf, numpy.array([times[i]]),
                                                                       frequency=numpy.array([frequency[j]]),
                                                                       channel_bandwidth=numpy.array(
                                                                           [channel_bandwidth[j]]),
                                                                       weight=1.0, phasecentre=phasecentre,
                                                                       polarisation_frame=polarisation_frame,
                                                                       zerow=zerow))
    elif order is None:
        # Everything in a single visibility
        log.debug("simulate_list_arlexecute_workflow: Simulating into single %s" % format)
        vis_list = list()
        vis_list.append(arlexecute.execute(create_vis, nout=1)(conf, times, frequency=frequency,
                                                               channel_bandwidth=channel_bandwidth,
                                                               weight=1.0, phasecentre=phasecentre,
                                                               polarisation_frame=polarisation_frame,
                                                               zerow=zerow))
    else:
        # BUG FIX: the original message used "$s" (not a %-format placeholder), so the
        # % operation raised "TypeError: not all arguments converted" instead of the
        # intended NotImplementedError message.
        raise NotImplementedError("order %s not known" % order)
    return vis_list
def create_test_image_from_s3(npixel=16384, polarisation_frame=PolarisationFrame("stokesI"), cellsize=0.000015,
                              frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]),
                              phasecentre=None, fov=20, flux_limit=1e-3) -> Image:
    """Create LOW test image from S3

    The input catalog was generated at http://s-cubed.physics.ox.ac.uk/s3_sex using the following query::
        Database: s3_sex
        SQL: select * from Galaxies where (pow(10,itot_151)*1000 > 1.0) and (right_ascension between -5 and 5) and (declination between -5 and 5);;

    Number of rows returned: 29966

    For frequencies < 610MHz, there are three tables to use::

        data/models/S3_151MHz_10deg.csv, use fov=10
        data/models/S3_151MHz_20deg.csv, use fov=20
        data/models/S3_151MHz_40deg.csv, use fov=40

    For frequencies > 610MHz, there are three tables:

        data/models/S3_1400MHz_1mJy_10deg.csv, use flux_limit>= 1e-3
        data/models/S3_1400MHz_100uJy_10deg.csv, use flux_limit < 1e-3
        data/models/S3_1400MHz_1mJy_18deg.csv, use flux_limit>= 1e-3
        data/models/S3_1400MHz_100uJy_18deg.csv, use flux_limit < 1e-3

    The component spectral index is calculated from the 610MHz and 151MHz or 1400MHz and 610MHz, and then calculated
    for the specified frequencies.

    If polarisation_frame is not stokesI then the image will a polarised axis but the values will be zero.

    :param npixel: Number of pixels
    :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
    :param cellsize: cellsize in radians
    :param frequency:
    :param channel_bandwidth: Channel width (Hz)
    :param phasecentre: phasecentre (SkyCoord)
    :param fov: fov 10 | 20 | 40
    :param flux_limit: Minimum flux (Jy)
    :return: Image
    """
    ras = []
    decs = []
    fluxes = []

    if phasecentre is None:
        phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')

    if polarisation_frame is None:
        polarisation_frame = PolarisationFrame("stokesI")

    npol = polarisation_frame.npol
    nchan = len(frequency)
    shape = [nchan, npol, npixel, npixel]

    w = WCS(naxis=4)
    # The negation in the longitude is needed by definition of RA, DEC
    w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth[0]]
    w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
    w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]]
    w.naxis = 4
    w.wcs.radesys = 'ICRS'
    w.wcs.equinox = 2000.0

    model = create_image_from_array(numpy.zeros(shape), w, polarisation_frame=polarisation_frame)

    # 610 MHz (6.1E8 Hz) is the boundary between the two catalogue families
    if numpy.max(frequency) > 6.1E8:
        if fov > 10:
            fovstr = '18'
        else:
            fovstr = '10'
        if flux_limit >= 1e-3:
            csvfilename = arl_path('data/models/S3_1400MHz_1mJy_%sdeg.csv' % fovstr)
        else:
            csvfilename = arl_path('data/models/S3_1400MHz_100uJy_%sdeg.csv' % fovstr)
        log.info('create_test_image_from_s3: Reading S3 sources from %s ' % csvfilename)
    else:
        assert fov in [10, 20, 40], "Field of view invalid: use one of %s" % ([10, 20, 40])
        csvfilename = arl_path('data/models/S3_151MHz_%ddeg.csv' % (fov))
        log.info('create_test_image_from_s3: Reading S3 sources from %s ' % csvfilename)

    # The with-block closes the file; the explicit close() the original carried was redundant.
    with open(csvfilename) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        r = 0
        for row in readCSV:
            # Skip first row (column headers)
            if r > 0:
                ra = float(row[4]) + phasecentre.ra.deg
                dec = float(row[5]) + phasecentre.dec.deg
                # BUG FIX: this threshold was 6.1E9, inconsistent with the 6.1E8 (610 MHz)
                # boundary used above to select the catalogue, so for 0.61-6.1 GHz requests
                # the wrong columns and reference frequencies were applied.
                if numpy.max(frequency) > 6.1E8:
                    # Spectral index from the 610 MHz and 1400 MHz catalogue fluxes
                    alpha = (float(row[11]) - float(row[10])) / numpy.log10(1400.0 / 610.0)
                    flux = numpy.power(10, float(row[10])) * numpy.power(frequency / 1.4e9, alpha)
                else:
                    # Spectral index from the 151 MHz and 610 MHz catalogue fluxes
                    alpha = (float(row[10]) - float(row[9])) / numpy.log10(610.0 / 151.0)
                    flux = numpy.power(10, float(row[9])) * numpy.power(frequency / 1.51e8, alpha)
                # BUG FIX: the original tested "flux.any() > flux_limit", which compares a
                # boolean to the flux limit and kept every nonzero source; keep only
                # sources whose peak channel flux exceeds the limit.
                if numpy.max(flux) > flux_limit:
                    ras.append(ra)
                    decs.append(dec)
                    fluxes.append(flux)
            r += 1

    assert len(fluxes) > 0, "No sources found above flux limit %s" % flux_limit

    log.info('create_test_image_from_s3: %d sources read' % (len(fluxes)))

    p = w.sub(2).wcs_world2pix(numpy.array(ras), numpy.array(decs), 1)
    total_flux = numpy.sum(fluxes)
    fluxes = numpy.array(fluxes)
    ip = numpy.round(p).astype('int')
    # Keep only the sources whose pixel position lies inside the image
    ok = numpy.where((0 <= ip[0, :]) & (npixel > ip[0, :]) & (0 <= ip[1, :]) & (npixel > ip[1, :]))[0]
    ps = ip[:, ok]
    fluxes = fluxes[ok]
    actual_flux = numpy.sum(fluxes)

    log.info('create_test_image_from_s3: %d sources inside the image' % (ps.shape[1]))

    log.info('create_test_image_from_s3: average channel flux in S3 model = %.3f, actual average channel flux in '
             'image = %.3f' % (total_flux / float(nchan), actual_flux / float(nchan)))
    # Deposit each source at its nearest pixel, per channel, Stokes I only
    for chan in range(nchan):
        for iflux, flux in enumerate(fluxes):
            model.data[chan, 0, ps[1, iflux], ps[0, iflux]] = flux[chan]

    return model
def create_blockvisibility_from_uvfits(fitsname, channum=None, ack=False, antnum=None):
    """ Minimal UVFIT to BlockVisibility converter

    The UVFITS format is much more general than the ARL BlockVisibility so we cut many corners.

    Creates a list of BlockVisibility's, split by field and spectral window

    :param fitsname: File name of UVFITS
    :param channum: range of channels e.g. range(17,32), default is None meaning all
    :param ack: unused here; kept for call-compatibility with the other readers
    :param antnum: the number of antenna
    :return: List of BlockVisibility, one per spectral window
    """
    def ParamDict(hdul):
        "Return the dictionary of the random parameters"

        """
        The keys of the dictionary are the parameter names uppercased for
        consistency. The values are the column numbers.

        If multiple parameters have the same name (e.g., DATE) their
        columns are entered as a list.
        """
        pre = re.compile(r"PTYPE(?P<i>\d+)")
        res = {}
        for k, v in hdul.header.items():
            m = pre.match(k)
            if m:
                vu = v.upper()
                if vu in res:
                    # Duplicate parameter name: keep all column numbers as a list
                    res[vu] = [res[vu], int(m.group("i"))]
                else:
                    res[vu] = int(m.group("i"))
        return res

    # Open the file
    with fits.open(fitsname) as hdul:

        # Read Spectral Window
        nspw = hdul[0].header['NAXIS5']
        # Read Channel and Frequency Interval
        freq_ref = hdul[0].header['CRVAL4']
        mid_chan_freq = hdul[0].header['CRPIX4']
        delt_freq = hdul[0].header['CDELT4']
        # Real the number of channels in one spectral window
        channels = hdul[0].header['NAXIS4']
        freq = numpy.zeros([nspw, channels])
        # Read Frequency or IF
        freqhdulname = "AIPS FQ"
        sdhu = hdul.index_of(freqhdulname)
        if_freq = hdul[sdhu].data['IF FREQ'].ravel()
        for i in range(nspw):
            # Per-window channel frequencies: IF offset + reference + channel increment
            temp = numpy.array([if_freq[i] + freq_ref + delt_freq * ff for ff in range(channels)])
            freq[i, :] = temp[:]
        freq_delt = numpy.ones(channels) * delt_freq
        if channum is None:
            channum = range(channels)

        primary = hdul[0].data
        # Read time: DATE holds the integer JD, _DATE the fractional part
        bvtimes = Time(hdul[0].data['DATE'], hdul[0].data['_DATE'], format='jd')
        bv_times = numpy.unique(bvtimes.jd)
        ntimes = len(bv_times)

        # # Get Antenna
        # blin = hdul[0].data['BASELINE']
        antennahdulname = "AIPS AN"
        adhu = hdul.index_of(antennahdulname)
        try:
            antenna_name = hdul[adhu].data['ANNAME']
            antenna_name = antenna_name.encode('ascii', 'ignore')
        except:
            antenna_name = None

        antenna_xyz = hdul[adhu].data['STABXYZ']
        antenna_mount = hdul[adhu].data['MNTSTA']
        try:
            antenna_diameter = hdul[adhu].data['DIAMETER']
        except:
            antenna_diameter = None
        # To reading some UVFITS with wrong numbers of antenna
        if antnum is not None:
            if antenna_name is not None:
                antenna_name = antenna_name[:antnum]
                antenna_xyz = antenna_xyz[:antnum]
                antenna_mount = antenna_mount[:antnum]
                if antenna_diameter is not None:
                    antenna_diameter = antenna_diameter[:antnum]
        nants = len(antenna_xyz)

        # res= {}
        # for i,row in enumerate(fin[ahdul].data):
        #     res[row.field("ANNAME") ] = i +1

        # Get polarisation info: reconstruct the correlation-type axis from the WCS keywords
        npol = hdul[0].header['NAXIS3']
        corr_type = numpy.arange(hdul[0].header['NAXIS3']) - (hdul[0].header['CRPIX3'] - 1)
        corr_type *= hdul[0].header['CDELT3']
        corr_type += hdul[0].header['CRVAL3']
        # xx yy xy yx
        # These correspond to the CASA Stokes enumerations
        if numpy.array_equal(corr_type, [1, 2, 3, 4]):
            polarisation_frame = PolarisationFrame('stokesIQUV')
        elif numpy.array_equal(corr_type, [-1, -2, -3, -4]):
            polarisation_frame = PolarisationFrame('circular')
        elif numpy.array_equal(corr_type, [-5, -6, -7, -8]):
            polarisation_frame = PolarisationFrame('linear')
        else:
            raise KeyError("Polarisation not understood: %s" % str(corr_type))

        configuration = Configuration(name='', data=None, location=None,
                                      names=antenna_name, xyz=antenna_xyz, mount=antenna_mount, frame=None,
                                      receptor_frame=polarisation_frame,
                                      diameter=antenna_diameter)

        # Get RA and DEC
        phase_center_ra_degrees = numpy.float(hdul[0].header['CRVAL6'])
        phase_center_dec_degrees = numpy.float(hdul[0].header['CRVAL7'])

        # Get phasecentres
        phasecentre = SkyCoord(ra=phase_center_ra_degrees * u.deg, dec=phase_center_dec_degrees * u.deg,
                               frame='icrs', equinox='J2000')

        # Get UVW: the random parameter names vary between writers
        d = ParamDict(hdul[0])
        if "UU" in d:
            uu = hdul[0].data['UU']
            vv = hdul[0].data['VV']
            ww = hdul[0].data['WW']
        else:
            uu = hdul[0].data['UU---SIN']
            vv = hdul[0].data['VV---SIN']
            ww = hdul[0].data['WW---SIN']
        _vis = hdul[0].data['DATA']

        # _vis.shape = (nchan, ntimes, (nants*(nants-1)//2 ), npol, -1)
        # self.vis = -(_vis[...,0] * 1.j + _vis[...,1])
        row = 0
        nchan = len(channum)
        vis_list = list()
        for spw_index in range(nspw):
            bv_vis = numpy.zeros([ntimes, nants, nants, nchan, npol]).astype('complex')
            bv_weight = numpy.zeros([ntimes, nants, nants, nchan, npol])
            bv_uvw = numpy.zeros([ntimes, nants, nants, 3])
            for time_index, time in enumerate(bv_times):
                # restfreq = freq[channel_index]
                # NOTE(review): rows are assumed ordered as time-major then baseline
                # (antenna1 < antenna2) — the row counter relies on this; confirm
                # against the writer of the UVFITS file.
                for antenna1 in range(nants - 1):
                    for antenna2 in range(antenna1 + 1, nants):
                        for channel_no, channel_index in enumerate(channum):
                            for pol_index in range(npol):
                                # DATA trailing axis holds (real, imag, weight)
                                bv_vis[time_index, antenna2, antenna1, channel_no, pol_index] = \
                                    complex(_vis[row, :, :, spw_index, channel_index, pol_index, 0],
                                            _vis[row, :, :, spw_index, channel_index, pol_index, 1])
                                bv_weight[time_index, antenna2, antenna1, channel_no, pol_index] = \
                                    _vis[row, :, :, spw_index, channel_index, pol_index, 2]
                        # UVFITS stores u,v,w in seconds; convert to metres
                        bv_uvw[time_index, antenna2, antenna1, 0] = uu[row] * constants.c.value
                        bv_uvw[time_index, antenna2, antenna1, 1] = vv[row] * constants.c.value
                        bv_uvw[time_index, antenna2, antenna1, 2] = ww[row] * constants.c.value
                        row += 1
            vis_list.append(BlockVisibility(uvw=bv_uvw,
                                            time=bv_times,
                                            frequency=freq[spw_index][channum],
                                            channel_bandwidth=freq_delt[channum],
                                            vis=bv_vis,
                                            weight=bv_weight,
                                            imaging_weight=bv_weight,
                                            configuration=configuration,
                                            phasecentre=phasecentre,
                                            polarisation_frame=polarisation_frame))
    return vis_list
def create_low_test_image_from_gleam(npixel=512, polarisation_frame=PolarisationFrame("stokesI"), cellsize=0.000015,
                                     frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]),
                                     phasecentre=None, kind='cubic', applybeam=False, flux_limit=0.1,
                                     radius=None, insert_method='Nearest') -> Image:
    """Create LOW test image from the GLEAM survey

    Stokes I is estimated from a cubic spline fit to the measured fluxes. The polarised flux is always zero.

    See http://www.mwatelescope.org/science/gleam-survey The catalog is available from Vizier.

    VIII/100 GaLactic and Extragalactic All-sky MWA survey (Hurley-Walker+, 2016)

    GaLactic and Extragalactic All-sky Murchison Wide Field Array (GLEAM) survey. I: A low-frequency extragalactic
    catalogue. Hurley-Walker N., et al., Mon. Not. R. Astron. Soc., 464, 1146-1167 (2017), 2017MNRAS.464.1146H

    :param npixel: Number of pixels
    :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
    :param cellsize: cellsize in radians
    :param frequency: Frequencies of the image channels
    :param channel_bandwidth: Channel width (Hz)
    :param phasecentre: phasecentre (SkyCoord); default is (15 deg, -35 deg) ICRS
    :param kind: Kind of interpolation (see scipy.interpolate.interp1d) Default: cubic
    :param applybeam: If True, multiply the image by the LOW primary beam
    :param flux_limit: Only include sources brighter than this (Jy)
    :param radius: Selection radius about the phase centre (rad); default covers the image
    :param insert_method: Method used to insert components into the image grid
    :return: Image
    """
    if phasecentre is None:
        phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')

    # Default selection radius: half the image diagonal
    if radius is None:
        radius = npixel * cellsize / numpy.sqrt(2.0)

    # Fetch the GLEAM components; note the caller-supplied polarisation_frame
    # (possibly None) is forwarded unchanged, matching the original behaviour.
    components = create_low_test_skycomponents_from_gleam(flux_limit=flux_limit,
                                                          polarisation_frame=polarisation_frame,
                                                          frequency=frequency, phasecentre=phasecentre,
                                                          kind=kind, radius=radius)

    if polarisation_frame is None:
        polarisation_frame = PolarisationFrame("stokesI")

    nchan, npol = len(frequency), polarisation_frame.npol
    image_shape = [nchan, npol, npixel, npixel]

    image_wcs = WCS(naxis=4)
    # The negation in the longitude is needed by definition of RA, DEC
    image_wcs.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi,
                           1.0, channel_bandwidth[0]]
    image_wcs.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]
    image_wcs.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
    image_wcs.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]]
    image_wcs.naxis = 4
    image_wcs.wcs.radesys = 'ICRS'
    image_wcs.wcs.equinox = 2000.0

    model = create_image_from_array(numpy.zeros(image_shape), image_wcs,
                                    polarisation_frame=polarisation_frame)
    model = insert_skycomponent(model, components, insert_method=insert_method)

    if applybeam:
        # Attenuate by the LOW primary beam
        beam = create_low_test_beam(model)
        model.data[...] *= beam.data[...]

    log.info(qa_image(model, context='create_low_test_image_from_gleam'))

    return model
def create_image_from_visibility(vis: Union[BlockVisibility, Visibility], **kwargs) -> Image:
    """Make an empty image from params and Visibility

    This makes an empty, template image consistent with the visibility, allowing optional overriding of select
    parameters. This is a convenience function and does not transform the visibilities.

    :param vis:
    :param phasecentre: Phasecentre (Skycoord)
    :param channel_bandwidth: Channel width (Hz)
    :param cellsize: Cellsize (radians)
    :param npixel: Number of pixels on each axis (512)
    :param frame: Coordinate frame for WCS (ICRS)
    :param equinox: Equinox for WCS (2000.0)
    :param nchan: Number of image channels (Default is 1 -> MFS)
    :return: image
    """
    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), \
        "vis is not a Visibility or a BlockVisibility: %r" % (vis)

    log.debug("create_image_from_visibility: Parsing parameters to get definition of WCS")

    imagecentre = get_parameter(kwargs, "imagecentre", vis.phasecentre)
    phasecentre = get_parameter(kwargs, "phasecentre", vis.phasecentre)

    # Spectral processing options
    ufrequency = numpy.unique(vis.frequency)
    vnchan = len(ufrequency)

    frequency = get_parameter(kwargs, "frequency", vis.frequency)
    inchan = get_parameter(kwargs, "nchan", vnchan)
    reffrequency = frequency[0] * units.Hz
    # The small factor below the full width avoids degenerate rounding at the band edge
    channel_bandwidth = get_parameter(kwargs, "channel_bandwidth", 0.99999999999 * vis.channel_bandwidth[0]) * units.Hz

    # Choose the spectral mode from the requested vs available channel counts
    if (inchan == vnchan) and vnchan > 1:
        # Cube image: one image channel per visibility channel
        log.debug(
            "create_image_from_visibility: Defining %d channel Image at %s, starting frequency %s, and bandwidth %s"
            % (inchan, imagecentre, reffrequency, channel_bandwidth))
    elif (inchan == 1) and vnchan > 1:
        # Multi-frequency synthesis: all visibility channels into one image channel
        assert numpy.abs(channel_bandwidth.value) > 0.0, "Channel width must be non-zero for mfs mode"
        log.debug("create_image_from_visibility: Defining single channel MFS Image at %s, starting frequency %s, "
                  "and bandwidth %s" % (imagecentre, reffrequency, channel_bandwidth))
    elif inchan > 1 and vnchan > 1:
        # Multi-channel MFS
        assert numpy.abs(channel_bandwidth.value) > 0.0, "Channel width must be non-zero for mfs mode"
        log.debug("create_image_from_visibility: Defining multi-channel MFS Image at %s, starting frequency %s, "
                  "and bandwidth %s" % (imagecentre, reffrequency, channel_bandwidth))
    elif (inchan == 1) and (vnchan == 1):
        # Single channel in, single channel out
        assert numpy.abs(channel_bandwidth.value) > 0.0, "Channel width must be non-zero for mfs mode"
        log.debug("create_image_from_visibility: Defining single channel Image at %s, starting frequency %s, "
                  "and bandwidth %s" % (imagecentre, reffrequency, channel_bandwidth))
    else:
        raise ValueError("create_image_from_visibility: unknown spectral mode ")

    # Image sampling options
    npixel = get_parameter(kwargs, "npixel", 512)
    uvmax = numpy.max((numpy.abs(vis.data['uvw'][..., 0:1])))
    if isinstance(vis, BlockVisibility):
        # BlockVisibility uvw is in metres; convert to wavelengths at the highest frequency
        uvmax *= numpy.max(frequency) / constants.c.to('m s^-1').value
    log.debug("create_image_from_visibility: uvmax = %f wavelengths" % uvmax)
    # Nyquist sampling of the longest baseline
    criticalcellsize = 1.0 / (uvmax * 2.0)
    log.debug("create_image_from_visibility: Critical cellsize = %f radians, %f degrees" % (
        criticalcellsize, criticalcellsize * 180.0 / numpy.pi))
    cellsize = get_parameter(kwargs, "cellsize", 0.5 * criticalcellsize)
    log.debug("create_image_from_visibility: Cellsize = %g radians, %g degrees" % (cellsize,
                                                                                   cellsize * 180.0 / numpy.pi))
    override_cellsize = get_parameter(kwargs, "override_cellsize", True)
    if override_cellsize and cellsize > criticalcellsize:
        # Never allow an undersampled image unless explicitly requested
        log.debug("create_image_from_visibility: Resetting cellsize %g radians to criticalcellsize %g radians" % (
            cellsize, criticalcellsize))
        cellsize = criticalcellsize
    pol_frame = get_parameter(kwargs, "polarisation_frame", PolarisationFrame("stokesI"))
    inpol = pol_frame.npol

    # Now we can define the WCS, which is a convenient place to hold the info above
    # Beware of python indexing order! wcs and the array have opposite ordering
    shape = [inchan, inpol, npixel, npixel]
    log.debug("create_image_from_visibility: image shape is %s" % str(shape))
    w = wcs.WCS(naxis=4)
    # The negation in the longitude is needed by definition of RA, DEC
    w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0,
                   channel_bandwidth.to(units.Hz).value]
    # The numpy definition of the phase centre of an FFT is n // 2 (0 - rel) so that's what we use for
    # the reference pixel. We have to use 0 rel everywhere.
    w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
    w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, reffrequency.to(units.Hz).value]
    w.naxis = 4

    w.wcs.radesys = get_parameter(kwargs, 'frame', 'ICRS')
    w.wcs.equinox = get_parameter(kwargs, 'equinox', 2000.0)

    return create_image_from_array(numpy.zeros(shape), wcs=w, polarisation_frame=pol_frame)
def create_low_test_skycomponents_from_gleam(flux_limit=0.1, polarisation_frame=PolarisationFrame("stokesI"),
                                             frequency=numpy.array([1e8]), kind='cubic', phasecentre=None,
                                             radius=1.0) \
        -> List[Skycomponent]:
    """Create sky components from the GLEAM survey

    Stokes I is estimated from a cubic spline fit to the measured fluxes. The polarised flux is always zero.

    See http://www.mwatelescope.org/science/gleam-survey The catalog is available from Vizier.

    VIII/100 GaLactic and Extragalactic All-sky MWA survey (Hurley-Walker+, 2016)

    GaLactic and Extragalactic All-sky Murchison Wide Field Array (GLEAM) survey. I: A low-frequency extragalactic
    catalogue. Hurley-Walker N., et al., Mon. Not. R. Astron. Soc., 464, 1146-1167 (2017), 2017MNRAS.464.1146H

    :rtype: Union[None, List[processing_library.data_models.data_models.Skycomponent], List]
    :param flux_limit: Only write components brighter than this (Jy)
    :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
    :param frequency: Frequencies at which the flux will be estimated
    :param kind: Kind of interpolation (see scipy.interpolate.interp1d) Default: linear
    :param phasecentre: Desired phase centre (SkyCoord) default None implies all sources
    :param radius: Radius of sources selected around phasecentre (default 1.0 rad)
    :return: List of Skycomponents
    """
    fitsfile = arl_path("data/models/GLEAM_EGC.fits")

    rad2deg = 180.0 / numpy.pi
    # Coarse declination window (radius/2 either side) used for the fast pre-filter;
    # the exact radius cut is applied per-source below
    decmin = phasecentre.dec.to('deg').value - rad2deg * radius / 2.0
    decmax = phasecentre.dec.to('deg').value + rad2deg * radius / 2.0

    hdulist = fits.open(fitsfile, lazy_load_hdus=False)
    recs = hdulist[1].data[0].array

    # Do the simple forms of filtering in pyfits. Filtering on radius is done below.
    fluxes = recs['peak_flux_wide']

    mask = fluxes > flux_limit
    filtered_recs = recs[mask]

    decs = filtered_recs['DEJ2000']
    mask = decs > decmin
    filtered_recs = filtered_recs[mask]

    decs = filtered_recs['DEJ2000']
    mask = decs < decmax
    filtered_recs = filtered_recs[mask]

    ras = filtered_recs['RAJ2000']
    decs = filtered_recs['DEJ2000']
    names = filtered_recs['Name']

    if polarisation_frame is None:
        polarisation_frame = PolarisationFrame("stokesI")

    npol = polarisation_frame.npol

    nchan = len(frequency)

    # For every source, we read all measured fluxes and interpolate to the
    # required frequencies
    gleam_freqs = numpy.array([76, 84, 92, 99, 107, 115, 122, 130, 143, 151, 158, 166, 174, 181, 189, 197, 204,
                               212, 220, 227])
    gleam_flux_freq = numpy.zeros([len(names), len(gleam_freqs)])
    for i, f in enumerate(gleam_freqs):
        # Integrated flux column per GLEAM sub-band, e.g. int_flux_076
        gleam_flux_freq[:, i] = filtered_recs['int_flux_%03d' % (f)][:]

    skycomps = []

    for isource, name in enumerate(names):
        direction = SkyCoord(ra=ras[isource] * u.deg, dec=decs[isource] * u.deg)
        # Exact angular-distance cut about the phase centre
        if phasecentre is None or direction.separation(phasecentre).to('rad').value < radius:
            fint = interpolate.interp1d(gleam_freqs * 1.0e6, gleam_flux_freq[isource, :], kind=kind)
            flux = numpy.zeros([nchan, npol])
            flux[:, 0] = fint(frequency)
            # Sources with any NaN in the interpolated spectrum are dropped
            if not numpy.isnan(flux).any():
                skycomps.append(Skycomponent(direction=direction, flux=flux, frequency=frequency,
                                             name=name, shape='Point',
                                             polarisation_frame=polarisation_frame))

    log.info('create_low_test_skycomponents_from_gleam: %d sources above flux limit %.3f' % (len(skycomps), flux_limit))

    hdulist.close()

    return skycomps
def __init__(self, data=None, frequency=None, channel_bandwidth=None, phasecentre=None, configuration=None,
             uvw=None, time=None, antenna1=None, antenna2=None, vis=None, weight=None, imaging_weight=None,
             integration_time=None, polarisation_frame=PolarisationFrame('stokesI'), cindex=None, blockvis=None):
    """Construct a Visibility, either from a prebuilt structured array or from columns.

    When ``data`` is None and ``vis`` is supplied, a numpy structured array is
    assembled from the per-row columns; otherwise ``data`` is stored as given.

    :param data: Prebuilt structured array (takes precedence when not None)
    :param frequency: Frequency per row [nvis]
    :param channel_bandwidth: Channel bandwidth per row [nvis]
    :param phasecentre: Phase centre of the observation
    :param configuration: Antenna/station configuration
    :param uvw: (u, v, w) coordinates per row [nvis, 3]
    :param time: Time per row [nvis]
    :param antenna1: First antenna index per row [nvis]
    :param antenna2: Second antenna index per row [nvis]
    :param vis: Complex visibility per row [nvis, npol]
    :param weight: Weight per row [nvis, npol]
    :param imaging_weight: Imaging weight per row [nvis, npol]; defaults to ``weight``
    :param integration_time: Integration time per row [nvis]
    :param polarisation_frame: Polarisation frame (default PolarisationFrame('stokesI'))
    :param cindex: Index linking rows back to the originating BlockVisibility
    :param blockvis: Originating BlockVisibility
    """
    if data is None and vis is not None:
        if imaging_weight is None:
            imaging_weight = weight

        nrows = vis.shape[0]
        # Every per-row column must match the number of visibility rows.
        for column in (time, frequency, channel_bandwidth, antenna1, antenna2):
            assert len(column) == nrows

        npol = polarisation_frame.npol
        dtype_desc = [('index', '>i8'),
                      ('uvw', '>f8', (3,)),
                      ('time', '>f8'),
                      ('frequency', '>f8'),
                      ('channel_bandwidth', '>f8'),
                      ('integration_time', '>f8'),
                      ('antenna1', '>i8'),
                      ('antenna2', '>i8'),
                      ('vis', '>c16', (npol,)),
                      ('weight', '>f8', (npol,)),
                      ('imaging_weight', '>f8', (npol,))]
        data = numpy.zeros(shape=[nrows], dtype=dtype_desc)
        data['index'] = list(range(nrows))

        # Fill the remaining columns from the supplied arrays.
        column_values = {'uvw': uvw,
                         'time': time,
                         'frequency': frequency,
                         'channel_bandwidth': channel_bandwidth,
                         'integration_time': integration_time,
                         'antenna1': antenna1,
                         'antenna2': antenna2,
                         'vis': vis,
                         'weight': weight,
                         'imaging_weight': imaging_weight}
        for field, value in column_values.items():
            data[field] = value

    self.data = data  # numpy structured array
    self.cindex = cindex
    self.blockvis = blockvis
    self.phasecentre = phasecentre  # Phase centre of observation
    self.configuration = configuration  # Antenna/station configuration
    self.polarisation_frame = polarisation_frame
    self.frequency_map = None
plt.xlabel('uvdist') plt.ylabel('Amp Visibility') plt.title('Field %d' % (field)) plt.show() cellsize = 0.00001 model = create_image_from_visibility( vis_list[0], cellsize=cellsize, npixel=512, nchan=1, frequency=[0.5 * (8435100000.0 + 8.4851e+09)], channel_bandwidth=[1e8], imagecentre=vis_list[0].phasecentre, polarisation_frame=PolarisationFrame('stokesIQUV')) mosaic = copy_image(model) mosaicsens = copy_image(model) work = copy_image(model) for vt in vis_list: channel_model = create_image_from_visibility( vt, cellsize=cellsize, npixel=512, nchan=1, imagecentre=vis_list[0].phasecentre, polarisation_frame=PolarisationFrame('stokesIQUV')) beam = create_pb(channel_model, telescope='VLA',
def actualSetUp(self, freqwin=1, block=True, dopol=False, zerow=False):
    """Set up the common test fixtures.

    Builds a LOWBD2 configuration, simulated (block) visibilities, and per-channel
    GLEAM sky models, then expands the sky models into one skymodel per component.

    :param freqwin: Number of frequency windows (channels)
    :param block: Ingest as BlockVisibility if True
    :param dopol: Use full polarisation if True (stokesI otherwise)
    :param zerow: Zero the w coordinates of the simulated visibilities
    """
    self.npixel = 1024
    self.low = create_named_configuration('LOWBD2', rmax=550.0)
    self.freqwin = freqwin
    self.ntimes = 5
    self.cellsize = 0.0005

    # Choose the interval so that the maximum change in w is smallish
    integration_time = numpy.pi * (24 / (12 * 60))
    self.times = numpy.linspace(-integration_time * (self.ntimes // 2),
                                integration_time * (self.ntimes // 2), self.ntimes)

    if freqwin > 1:
        self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
        self.channelwidth = numpy.array(freqwin * [self.frequency[1] - self.frequency[0]])
    else:
        self.frequency = numpy.array([1.0e8])
        self.channelwidth = numpy.array([4e7])

    # dopol selects the polarisation frames. (The per-polarisation flux vector
    # assigned here in the original was never used and has been removed.)
    if dopol:
        self.vis_pol = PolarisationFrame('linear')
        self.image_pol = PolarisationFrame('stokesIQUV')
    else:
        self.vis_pol = PolarisationFrame('stokesI')
        self.image_pol = PolarisationFrame('stokesI')

    self.phasecentre = SkyCoord(ra=+0.0 * u.deg, dec=-40.0 * u.deg,
                                frame='icrs', equinox='J2000')

    # Loop variables renamed to 'chan' so they no longer shadow the freqwin
    # parameter (or each other).
    self.blockvis_list = [
        ingest_unittest_visibility(self.low,
                                   [self.frequency[chan]],
                                   [self.channelwidth[chan]],
                                   self.times,
                                   self.vis_pol,
                                   self.phasecentre,
                                   block=block,
                                   zerow=zerow) for chan, _ in enumerate(self.frequency)
    ]

    self.vis_list = [
        convert_blockvisibility_to_visibility(bv) for bv in self.blockvis_list
    ]

    self.skymodel_list = [
        create_low_test_skymodel_from_gleam(npixel=self.npixel,
                                            cellsize=self.cellsize,
                                            frequency=[self.frequency[chan]],
                                            phasecentre=self.phasecentre,
                                            polarisation_frame=PolarisationFrame("stokesI"),
                                            flux_limit=0.6,
                                            flux_threshold=1.0,
                                            flux_max=5.0) for chan, _ in enumerate(self.frequency)
    ]

    assert isinstance(self.skymodel_list[0].image, Image), self.skymodel_list[0].image
    assert isinstance(self.skymodel_list[0].components[0], Skycomponent), \
        self.skymodel_list[0].components[0]
    # The fixture relies on exactly 35 components surviving the flux cuts.
    assert len(self.skymodel_list[0].components) == 35, len(self.skymodel_list[0].components)

    # Expand into one skymodel per component plus the image: 36 in total.
    self.skymodel_list = expand_skymodel_by_skycomponents(self.skymodel_list[0])
    assert len(self.skymodel_list) == 36, len(self.skymodel_list)
    assert numpy.max(numpy.abs(self.skymodel_list[-1].image.data)) > 0.0, "Image is empty"

    # One zeroed working visibility per skymodel.
    self.vis_list = [
        copy_visibility(self.vis_list[0], zero=True) for _ in self.skymodel_list
    ]
def create_skymodel_wrapper(conf):
    """ Wrapper to create skymodel

    :param conf: Configuration from JSON file
    :return: Graph (arlexecute) producing the skymodel written to the buffer
    """
    cellsize = json_to_quantity(conf["image"]["cellsize"]).to("rad").value
    npixel = conf["image"]["npixel"]
    pol_frame = PolarisationFrame(conf["image"]["polarisation_frame"])
    phasecentre = json_to_skycoord(conf['image']['phasecentre'])
    frequency = json_to_linspace(conf['image']['frequency'])
    if conf['image']['frequency']['steps'] > 1:
        channel_bandwidth = numpy.array(conf['image']['frequency']['steps'] *
                                        [frequency[1] - frequency[0]])
    else:
        # BUG FIX: numpy.array(scalar) builds a 0-d array, and the
        # channel_bandwidth[f] indexing below raises IndexError on a 0-d array.
        # Wrap the scalar in a list so the single-channel case is a 1-d array.
        # NOTE(review): using the start frequency as the bandwidth looks
        # suspicious — TODO confirm the intended single-channel bandwidth.
        channel_bandwidth = numpy.array([conf['image']['frequency']['start']])

    flux_limit = json_to_quantity(
        conf['create_skymodel']['flux_limit']).to("Jy").value
    radius = json_to_quantity(
        conf['create_skymodel']['radius']).to('rad').value
    kind = conf['create_skymodel']['kind']

    # One (deferred) empty image per channel.
    models = [
        arlexecute.execute(create_image)(
            npixel=npixel,
            frequency=[frequency[f]],
            channel_bandwidth=[channel_bandwidth[f]],
            cellsize=cellsize,
            phasecentre=phasecentre,
            polarisation_frame=pol_frame) for f, freq in enumerate(frequency)
    ]

    catalog = conf['create_skymodel']["catalog"]
    if catalog == "gleam":
        components = arlexecute.execute(
            create_low_test_skycomponents_from_gleam)(
                phasecentre=phasecentre,
                polarisation_frame=pol_frame,
                flux_limit=flux_limit,
                frequency=frequency,
                kind=kind,
                radius=radius)
        # Optionally rasterise the components into the model images.
        if conf['create_skymodel']["fill_image"]:
            models = [
                arlexecute.execute(insert_skycomponent)(m, components)
                for m in models
            ]
    elif catalog == "empty":
        components = []
    else:
        raise RuntimeError("Catalog %s is not supported" % catalog)

    def output_skymodel(model_list, comp_list):
        # When the image is filled, the components are already in the image;
        # otherwise the skymodel carries the components alone.
        if conf['create_skymodel']["fill_image"]:
            skymodel = SkyModel(images=model_list, components=[])
        else:
            skymodel = SkyModel(images=[], components=comp_list)
        return memory_data_model_to_buffer(skymodel, conf["buffer"],
                                           conf["outputs"]["skymodel"])

    return arlexecute.execute(output_skymodel)(models, components)
if rank == 0: print('timeslice processing') vis_slices = 2 else: if rank == 0: print('2d processing') context = '2d' vis_slices = 1 input_vis = [arl_path('data/vis/sim-1.ms'), arl_path('data/vis/sim-2.ms')] import time start = time.time() pol_frame = PolarisationFrame("stokesIQUV") def load_invert_and_deconvolve(c): v1 = create_visibility_from_ms(input_vis[0], channum=[c])[0] v2 = create_visibility_from_ms(input_vis[1], channum=[c])[0] vf = append_visibility(v1, v2) vf = convert_visibility_to_stokes(vf) vf.configuration.diameter[...] = 35.0 rows = vis_select_uvrange(vf, 0.0, uvmax=uvmax) v = create_visibility_from_rows(vf, rows) m = create_image_from_visibility(v, npixel=npixel, cellsize=cellsize, polarisation_frame=pol_frame) if context == '2d':
def actualSetUp(self, add_errors=False, freqwin=1, block=False, dospectral=True, dopol=False, zerow=False):
    """Set up test fixtures; large data are built only on MPI rank 0.

    :param add_errors: Insert unittest calibration errors (block visibilities only)
    :param freqwin: Number of frequency windows (channels)
    :param block: Ingest as BlockVisibility if True
    :param dospectral: Apply a -0.7 spectral index to the component fluxes
    :param dopol: Use full polarisation if True (stokesI otherwise)
    :param zerow: Zero the w coordinates of the simulated visibilities
    """
    self.npixel = 256
    self.low = create_named_configuration('LOWBD2', rmax=750.0)
    self.freqwin = freqwin
    self.vis_list = list()
    self.ntimes = 5
    self.times = numpy.linspace(-3.0, +3.0, self.ntimes) * numpy.pi / 12.0

    if freqwin > 1:
        self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
        self.channelwidth = numpy.array(freqwin * [self.frequency[1] - self.frequency[0]])
    else:
        self.frequency = numpy.array([0.8e8])
        self.channelwidth = numpy.array([1e6])

    if dopol:
        self.vis_pol = PolarisationFrame('linear')
        self.image_pol = PolarisationFrame('stokesIQUV')
        f = numpy.array([100.0, 20.0, -10.0, 1.0])
    else:
        self.vis_pol = PolarisationFrame('stokesI')
        self.image_pol = PolarisationFrame('stokesI')
        f = numpy.array([100.0])

    if dospectral:
        # Per-channel flux with a nu^-0.7 spectral index relative to 100 MHz.
        flux = numpy.array([f * numpy.power(freq / 1e8, -0.7) for freq in self.frequency])
    else:
        flux = numpy.array([f])

    self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg,
                                frame='icrs', equinox='J2000')

    # NOTE: We initialize big data only at rank=0
    # (assumes self.rank has been set by the MPI test harness — TODO confirm)
    if self.rank == 0:
        # Loop variables renamed to 'chan' so they no longer shadow the
        # freqwin parameter.
        self.vis_list = [
            ingest_unittest_visibility(self.low,
                                       [self.frequency[chan]],
                                       [self.channelwidth[chan]],
                                       self.times,
                                       self.vis_pol,
                                       self.phasecentre,
                                       block=block,
                                       zerow=zerow) for chan, _ in enumerate(self.frequency)
        ]

        self.model_list = [
            create_unittest_model(self.vis_list[chan], self.image_pol, npixel=self.npixel)
            for chan, _ in enumerate(self.frequency)
        ]

        self.components_list = [
            create_unittest_components(self.model_list[chan], flux[chan, :][numpy.newaxis, :])
            for chan, _ in enumerate(self.frequency)
        ]

        self.model_list = [
            insert_skycomponent(self.model_list[chan], self.components_list[chan])
            for chan, _ in enumerate(self.frequency)
        ]

        self.vis_list = [
            predict_skycomponent_visibility(self.vis_list[chan], self.components_list[chan])
            for chan, _ in enumerate(self.frequency)
        ]

        # Calculate the model convolved with a Gaussian.
        self.model = self.model_list[0]
        self.cmodel = smooth_image(self.model)
        export_image_to_fits(self.model, '%s/test_imaging_model.fits' % self.dir)
        export_image_to_fits(self.cmodel, '%s/test_imaging_cmodel.fits' % self.dir)

        if add_errors and block:
            self.vis_list = [
                insert_unittest_errors(self.vis_list[i]) for i, _ in enumerate(self.frequency)
            ]

        self.vis = self.vis_list[0]
        self.components = self.components_list[0]
    else:
        # Non-root ranks hold empty placeholders.
        self.vis_list = list()
        self.model_list = list()
        self.components_list = list()
        # Calculate the model convolved with a Gaussian.
        self.model = list()
        self.cmodel = list()
        self.vis = list()
        self.components = list()