    def test_skymodel_solve_fixed(self):
        self.actualSetup(ntimes=1, doiso=True, fixed=True)
        calskymodel, residual_vis = calskymodel_solve(self.vis,
                                                      self.skymodels,
                                                      niter=30,
                                                      gain=0.25)

        # Check that the components are unchanged
        calskymodel_skycomponents = list()
        for sm in [csm[0] for csm in calskymodel]:
            for comp in sm.components:
                calskymodel_skycomponents.append(comp)

        recovered_components = find_skycomponent_matches(
            calskymodel_skycomponents, self.components, 1e-5)
        for p in recovered_components:
            assert numpy.abs(calskymodel_skycomponents[p[0]].flux[0, 0] -
                             self.components[p[1]].flux[0, 0]) < 1e-15
            assert calskymodel_skycomponents[p[0]].direction.separation(
                self.components[p[1]].direction).rad < 1e-15

        residual_vis = convert_blockvisibility_to_visibility(residual_vis)
        residual_vis, _, _ = weight_visibility(residual_vis, self.beam)
        dirty, sumwt = invert_function(residual_vis, self.beam, context='2d')
        export_image_to_fits(
            dirty, "%s/test_skymodel-final-iso-residual.fits" % self.dir)

        qa = qa_image(dirty)
        assert qa.data['rms'] < 3.4e-3, qa
    def test_continuum_imaging_pipeline(self):
        self.actualSetUp(add_errors=False, block=True)
        continuum_imaging_list = \
            continuum_imaging_component(self.vis_list, model_imagelist=self.model_imagelist, context='2d',
                                        algorithm='mmclean', facets=1,
                                        scales=[0, 3, 10],
                                        niter=1000, fractional_threshold=0.1,
                                        nmoments=2, nchan=self.freqwin,
                                        threshold=2.0, nmajor=5, gain=0.1,
                                        deconvolve_facets=8, deconvolve_overlap=16,
                                        deconvolve_taper='tukey')
        clean, residual, restored = arlexecute.compute(continuum_imaging_list,
                                                       sync=True)
        export_image_to_fits(
            clean[0],
            '%s/test_pipelines_continuum_imaging_pipeline_clean.fits' %
            self.dir)
        export_image_to_fits(
            residual[0][0],
            '%s/test_pipelines_continuum_imaging_pipeline_residual.fits' %
            self.dir)
        export_image_to_fits(
            restored[0],
            '%s/test_pipelines_continuum_imaging_pipeline_restored.fits' %
            self.dir)

        qa = qa_image(restored[0])
        assert numpy.abs(qa.data['max'] - 116.9) < 1.0, str(qa)
        assert numpy.abs(qa.data['min'] + 0.118) < 1.0, str(qa)
    def test_ical_pipeline(self):
        amp_errors = {'T': 0.0, 'G': 0.00, 'B': 0.0}
        phase_errors = {'T': 0.1, 'G': 0.0, 'B': 0.0}
        self.actualSetUp(add_errors=True, block=True, amp_errors=amp_errors, phase_errors=phase_errors)

        controls = create_calibration_controls()

        controls['T']['first_selfcal'] = 1
        controls['G']['first_selfcal'] = 3
        controls['B']['first_selfcal'] = 4

        controls['T']['timescale'] = 'auto'
        controls['G']['timescale'] = 'auto'
        controls['B']['timescale'] = 1e5

        ical_list = \
            ical_workflow(self.vis_list, model_imagelist=self.model_imagelist, context='2d',
                          calibration_context='T', controls=controls, do_selfcal=True,
                          global_solution=False,
                          algorithm='mmclean',
                          facets=1,
                          scales=[0, 3, 10],
                          niter=1000, fractional_threshold=0.1,
                          nmoments=2, nchan=self.freqwin,
                          threshold=2.0, nmajor=5, gain=0.1,
                          deconvolve_facets=8, deconvolve_overlap=16, deconvolve_taper='tukey')
        clean, residual, restored = arlexecute.compute(ical_list, sync=True)
        export_image_to_fits(clean[0], '%s/test_pipelines_ical_pipeline_clean.fits' % self.dir)
        export_image_to_fits(residual[0][0], '%s/test_pipelines_ical_pipeline_residual.fits' % self.dir)
        export_image_to_fits(restored[0], '%s/test_pipelines_ical_pipeline_restored.fits' % self.dir)

        qa = qa_image(restored[0])
        assert numpy.abs(qa.data['max'] - 116.9) < 1.0, str(qa)
        assert numpy.abs(qa.data['min'] + 0.118) < 1.0, str(qa)
    def test_create_image_from_array(self):
        m31model_by_array = create_image_from_array(
            self.m31image.data, self.m31image.wcs,
            self.m31image.polarisation_frame)
        add_image(self.m31image, m31model_by_array)
        add_image(self.m31image, m31model_by_array, docheckwcs=True)
        assert m31model_by_array.shape == self.m31image.shape
        log.debug(
            export_image_to_fits(self.m31image,
                                 fitsfile='%s/test_model.fits' % (self.dir)))
        log.debug(qa_image(m31model_by_array,
                           context='test_create_from_image'))
    def test_skymodel_solve_noiso(self):
        self.actualSetup(ntimes=1, doiso=False)
        calskymodel, residual_vis = calskymodel_solve(self.vis,
                                                      self.skymodels,
                                                      niter=30,
                                                      gain=0.25)

        residual_vis = convert_blockvisibility_to_visibility(residual_vis)
        residual_vis, _, _ = weight_visibility(residual_vis, self.beam)
        dirty, sumwt = invert_function(residual_vis, self.beam, context='2d')
        export_image_to_fits(
            dirty, "%s/test_skymodel-final-noiso-residual.fits" % self.dir)

        qa = qa_image(dirty)
        assert qa.data['rms'] < 3.8e-3, qa
    def test_modelpartition_solve_arlexecute(self):

        self.actualSetup(doiso=True)

        self.skymodel_list = [arlexecute.execute(SkyModel, nout=1)(components=[cm])
                              for cm in self.components]

        modelpartition_list = solve_modelpartition_arlexecute(self.vis, skymodel_list=self.skymodel_list, niter=30,
                                                              gain=0.25)
        skymodel, residual_vis = arlexecute.compute(modelpartition_list, sync=True)

        residual_vis = convert_blockvisibility_to_visibility(residual_vis)
        residual_vis, _, _ = weight_visibility(residual_vis, self.beam)
        dirty, sumwt = invert_arlexecute(residual_vis, self.beam, context='2d')
        export_image_to_fits(dirty, "%s/test_modelpartition-%s-final-iso-residual.fits" % (self.dir, arlexecute.type()))

        qa = qa_image(dirty)
        assert qa.data['rms'] < 3.2e-3, qa

    log.info('About to run ical')
    result = arlexecute.compute(ical_list, sync=True)
    deconvolved = result[0][0]
    residual = result[1][0]
    restored = result[2][0]
    arlexecute.close()

    show_image(deconvolved,
               title='Clean image',
               cm='Greys',
               vmax=0.1,
               vmin=-0.01)
    print(qa_image(deconvolved, context='Clean image'))
    plt.show()
    export_image_to_fits(deconvolved,
                         '%s/gleam_ical_deconvolved.fits' % (results_dir))

    show_image(restored,
               title='Restored clean image',
               cm='Greys',
               vmax=0.1,
               vmin=-0.01)
    print(qa_image(restored, context='Restored clean image'))
    plt.show()
    export_image_to_fits(restored,
                         '%s/gleam_ical_restored.fits' % (results_dir))

    # The remaining arguments mirror the deconvolved/restored blocks above.
    show_image(residual[0],
               title='Residual clean image',
               cm='Greys',
               vmax=0.1,
               vmin=-0.01)
    print(qa_image(residual[0], context='Residual clean image'))
    plt.show()
    export_image_to_fits(residual[0],
                         '%s/gleam_ical_residual.fits' % (results_dir))
phasecentre = SkyCoord(ra=+15.0 * u.deg,
                       dec=-35.0 * u.deg,
                       frame='icrs',
                       equinox='J2000')
model = create_test_image(frequency=frequency,
                          phasecentre=phasecentre,
                          cellsize=0.001,
                          polarisation_frame=PolarisationFrame('stokesI'))
#print(model)
nchan, npol, ny, nx = model.data.shape
sumwt = numpy.ones([nchan, npol])
print('%d: before Reduce: sumwt =' % rank)
print(sumwt)

#f=show_image(model, title='Model image', cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(model, context='Model image'))
#plt.show()


# Accum images into one with weights
result_image = create_empty_image_like(model)
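# comm.Reduce sums model.data elementwise across all ranks (op=MPI.SUM) into
# result_image.data on the root rank; non-root result buffers are untouched.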
comm.Reduce(model.data, result_image.data, root=0, op=MPI.SUM)
#f=show_image(result_image, title='Result image', cm='Greys', vmax=1.0, vmin=-0.1)
#plt.show()
if rank == 0:
    print('%d: after Reduce:' % rank)
    print(qa_image(result_image, context='Result image'))
    # test correctness
    assert (result_image.data.shape == model.data.shape)
    # Each rank contributed an identical model, so the reduced image should
    # equal size * model.data (assumes size = comm.Get_size() was set earlier).
    numpy.testing.assert_array_almost_equal_nulp(result_image.data,
                                                 size * model.data, 7)
#print("np.sum(gt.data): ", numpy.sum(gt.data['gain']))
blockvis = apply_gaintable(block_vis, gt)
#print("np.sum(blockvis.data): ", numpy.sum(blockvis.data['vis']))

model = create_image_from_visibility(
    block_vis,
    npixel=npixel,
    frequency=[numpy.average(frequency)],
    nchan=1,
    channel_bandwidth=[numpy.sum(channel_bandwidth)],
    cellsize=cellsize,
    phasecentre=phasecentre)

#print("model sum, min, max, shape: ", numpy.sum(model.data), numpy.amin(model.data), numpy.amax(model.data), model.shape)

print(qa_image(model, context='Blockvis model image'))
export_image_to_fits(model, '%s/imaging-blockvis_model.fits' % (results_dir))

dirty, sumwt = invert_function(predicted_vis,
                               model,
                               vis_slices=vis_slices,
                               dopsf=False,
                               context='wstack')

print(qa_image(dirty, context='Dirty image'))
export_image_to_fits(dirty, '%s/imaging-dirty.fits' % (results_dir))

deconvolved, residual, restored = ical(block_vis=blockvis,
                                       model=model,
                                       vis_slices=vis_slices,
                                       timeslice='auto')

def create_low_test_image_from_gleam(npixel=512, polarisation_frame=PolarisationFrame("stokesI"), cellsize=0.000015,
                                     frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]),
                                     phasecentre=None, kind='cubic', applybeam=False, flux_limit=0.1,
                                     radius=None, insert_method='Nearest') -> Image:
    """Create LOW test image from the GLEAM survey

    Stokes I is estimated from a cubic spline fit to the measured fluxes. The polarised flux is always zero.
    
    See http://www.mwatelescope.org/science/gleam-survey The catalog is available from Vizier.
    
    VIII/100   GaLactic and Extragalactic All-sky MWA survey  (Hurley-Walker+, 2016)

    GaLactic and Extragalactic All-sky Murchison Wide Field Array (GLEAM) survey. I: A low-frequency extragalactic
    catalogue. Hurley-Walker N., et al., Mon. Not. R. Astron. Soc., 464, 1146-1167 (2017), 2017MNRAS.464.1146H

    :param npixel: Number of pixels
    :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
    :param cellsize: cellsize in radians
    :param frequency: Frequencies (Hz)
    :param channel_bandwidth: Channel width (Hz)
    :param phasecentre: phasecentre (SkyCoord)
    :param kind: Kind of interpolation (see scipy.interpolate.interp1d) Default: cubic
    :param applybeam: Apply the LOW primary beam to the image?
    :param flux_limit: Minimum flux (Jy) of sources to include
    :param radius: Radius (radians) of sources to include; defaults to half the image diagonal
    :param insert_method: Method used to insert skycomponents (default 'Nearest')
    :return: Image
    
    """
    
    if phasecentre is None:
        phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
    
    if radius is None:
        radius = npixel * cellsize / numpy.sqrt(2.0)
    
    if polarisation_frame is None:
        polarisation_frame = PolarisationFrame("stokesI")

    sc = create_low_test_skycomponents_from_gleam(flux_limit=flux_limit, polarisation_frame=polarisation_frame,
                                                  frequency=frequency, phasecentre=phasecentre,
                                                  kind=kind, radius=radius)
    
    npol = polarisation_frame.npol
    nchan = len(frequency)
    shape = [nchan, npol, npixel, npixel]
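    # FITS WCS axes run in the reverse order of the numpy data axes:
    # WCS axis 1 = RA, 2 = DEC, 3 = STOKES, 4 = FREQ, while the data array
    # is indexed [chan, pol, y, x]. crpix is 1-based per the FITS convention.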
    w = WCS(naxis=4)
    # The negation in the longitude is needed by definition of RA, DEC
    w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth[0]]
    w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
    w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]]
    w.naxis = 4
    w.wcs.radesys = 'ICRS'
    w.wcs.equinox = 2000.0
    
    model = create_image_from_array(numpy.zeros(shape), w, polarisation_frame=polarisation_frame)
    
    model = insert_skycomponent(model, sc, insert_method=insert_method)
    if applybeam:
        beam = create_low_test_beam(model)
        model.data[...] *= beam.data[...]
    
    log.info(qa_image(model, context='create_low_test_image_from_gleam'))
    
    return model
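
# Example usage (minimal sketch; assumes the ARL GLEAM catalogue is installed
# and the names imported above are available; the values are illustrative):
#
#   from astropy import units as u
#   from astropy.coordinates import SkyCoord
#
#   centre = SkyCoord(ra=+15.0 * u.deg, dec=-45.0 * u.deg,
#                     frame='icrs', equinox='J2000')
#   img = create_low_test_image_from_gleam(npixel=256,
#                                          frequency=numpy.array([1e8]),
#                                          channel_bandwidth=numpy.array([1e6]),
#                                          phasecentre=centre, flux_limit=1.0)
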
    vis_list = arlexecute.persist(vis_list)

    cellsize = 0.001
    npixel = 1024
    pol_frame = PolarisationFrame("stokesI")

    model_list = [
        arlexecute.execute(create_image_from_visibility)(
            v, npixel=npixel, cellsize=cellsize, polarisation_frame=pol_frame)
        for v in vis_list
    ]

    model_list = arlexecute.persist(model_list)

    dirty_list = invert_list_arlexecute_workflow(
        vis_list,
        template_model_imagelist=model_list,
        context='wstack',
        vis_slices=51)

    log.info('About to run invert_list_arlexecute_workflow')
    result = arlexecute.compute(dirty_list, sync=True)
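    centre = len(vis_list) // 2  # assumption: select the central frequency window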
    dirty, sumwt = result[centre]

    arlexecute.close()

    show_image(dirty, title='Dirty image', cm='Greys', vmax=0.1, vmin=-0.01)
    print(qa_image(dirty, context='Dirty image'))
    export_image_to_fits(
        dirty, '%s/ska-imaging_arlexecute_dirty.fits' % (results_dir))
    frequency = numpy.array([1e8])
    phasecentre = SkyCoord(ra=+15.0 * u.deg,
                           dec=-35.0 * u.deg,
                           frame='icrs',
                           equinox='J2000')
    model = create_test_image(frequency=frequency,
                              phasecentre=phasecentre,
                              cellsize=0.001,
                              polarisation_frame=PolarisationFrame('stokesI'))

    # Rank 0 scatters the test image
    if rank == 0:
        subimages = image_scatter_facets(model, facets=facets)
        subimages = numpy.array_split(subimages, size)
    else:
        subimages = list()

    sublist = comm.scatter(subimages, root=0)

    root_images = imagerooter(sublist)

    roots = comm.gather(root_images, root=0)

    if rank == 0:
        results = sum(roots, [])
        root_model = create_empty_image_like(model)
        result = image_gather_facets(results, root_model, facets=facets)
        numpy.testing.assert_array_almost_equal_nulp(result.data**2,
                                                     numpy.abs(model.data), 7)
        print(qa_image(result))
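
    # A scatter/gather demo like this is typically launched under MPI, e.g.
    # (assumed invocation; the script name is illustrative):
    #     mpiexec -n 4 python mpi_image_facets_demo.py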
def trial_case(results,
               seed=180555,
               context='wstack',
               nworkers=8,
               threads_per_worker=1,
               memory=8,
               processes=True,
               order='frequency',
               nfreqwin=7,
               ntimes=3,
               rmax=750.0,
               facets=1,
               wprojection_planes=1,
               use_dask=True,
               use_serial=False):
    """ Single trial for performance-timings
    
    Simulates visibilities from GLEAM including phase errors
    Makes dirty image and PSF
    Runs ICAL pipeline
    
    The results are in a dictionary:
    
    'context': input - a string describing concisely the purpose of the test
    'time overall',  overall execution time (s)
    'time create gleam', time to create GLEAM prediction graph
    'time predict', time to execute GLEAM prediction graph
    'time corrupt', time to corrupt data_models
    'time invert', time to make dirty image
    'time psf invert', time to make PSF
    'time ICAL graph', time to create ICAL graph
    'time ICAL', time to execute ICAL graph
    'context', type of imaging e.g. 'wstack'
    'nworkers', number of workers to create
    'threads_per_worker',
    'nnodes', Number of nodes,
    'processes', 'order', Ordering of data_models
    'nfreqwin', Number of frequency windows in simulation
    'ntimes', Number of hour angles in simulation
    'rmax', Maximum radius of stations used in simulation (m)
    'facets', Number of facets in deconvolution and imaging
    'wprojection_planes', Number of wprojection planes
    'vis_slices', Number of visibility slices (per Visibility)
    'npixel', Number of pixels in image
    'cellsize', Cellsize in radians
    'seed', Random number seed
    'dirty_max', Maximum in dirty image
    'dirty_min', Minimum in dirty image
    'psf_max',
    'psf_min',
    'restored_max',
    'restored_min',
    'deconvolved_max',
    'deconvolved_min',
    'residual_max',
    'residual_min',
    'git_info', GIT hash (not definitive since local mods are possible)
    
    :param results: Initial state
    :param seed: Random number seed (used in gain simulations)
    :param context: Type of imaging context: '2d'|'timeslice'|'wstack'
    :param nworkers: Number of dask workers to use
    :param threads_per_worker: Number of threads per worker
    :param memory: Memory per worker (GB)
    :param processes: Use processes instead of threads 'processes'|'threads'
    :param order: See simulate_list_arlexecute_workflow
    :param nfreqwin: See simulate_list_arlexecute_workflow
    :param ntimes: See simulate_list_arlexecute_workflow
    :param rmax: See simulate_list_arlexecute_workflow
    :param facets: Number of facets to use
    :param wprojection_planes: Number of wprojection planes to use
    :param use_dask: Use dask or immediate evaluation
    :return: results dictionary
    """

    numpy.random.seed(seed)
    results['seed'] = seed

    start_all = time.time()

    results['context'] = context
    results['hostname'] = socket.gethostname()
    results['git_hash'] = git_hash()
    results['epoch'] = time.strftime("%Y-%m-%d %H:%M:%S")

    zerow = False
    print("Context is %s" % context)

    results['nworkers'] = nworkers
    results['threads_per_worker'] = threads_per_worker
    results['processes'] = processes
    results['memory'] = memory
    results['order'] = order
    results['nfreqwin'] = nfreqwin
    results['ntimes'] = ntimes
    results['rmax'] = rmax
    results['facets'] = facets
    results['wprojection_planes'] = wprojection_planes

    results['use_dask'] = use_dask

    print("At start, configuration is {0!r}".format(results))

    # Parameters determining scale
    frequency = numpy.linspace(0.8e8, 1.2e8, nfreqwin)
    centre = nfreqwin // 2
    if nfreqwin > 1:
        channel_bandwidth = numpy.array(nfreqwin *
                                        [frequency[1] - frequency[0]])
    else:
        channel_bandwidth = numpy.array([1e6])
    times = numpy.linspace(-numpy.pi / 3.0, numpy.pi / 3.0, ntimes)

    phasecentre = SkyCoord(ra=+30.0 * u.deg,
                           dec=-60.0 * u.deg,
                           frame='icrs',
                           equinox='J2000')

    if use_dask:
        client = get_dask_Client(threads_per_worker=threads_per_worker,
                                 memory_limit=memory * 1024 * 1024 * 1024,
                                 n_workers=nworkers)
        arlexecute.set_client(client)
        nodes = findNodes(arlexecute.client)
        unodes = list(numpy.unique(nodes))
        results['nnodes'] = len(unodes)
        print("Defined %d workers on %d nodes" % (nworkers, results['nnodes']))
        print("Workers are: %s" % str(nodes))
    else:
        arlexecute.set_client(use_dask=use_dask)
        results['nnodes'] = 1
        unodes = None

    vis_list = simulate_list_arlexecute_workflow(
        'LOWBD2',
        frequency=frequency,
        channel_bandwidth=channel_bandwidth,
        times=times,
        phasecentre=phasecentre,
        order=order,
        format='blockvis',
        rmax=rmax)

    print("****** Visibility creation ******")
    vis_list = arlexecute.persist(vis_list)

    # Find the best imaging parameters but don't bring the vis_list back here
    def get_wf(bv):
        v = convert_blockvisibility_to_visibility(bv)
        return advise_wide_field(v,
                                 guard_band_image=6.0,
                                 delA=0.02,
                                 facets=facets,
                                 wprojection_planes=wprojection_planes,
                                 oversampling_synthesised_beam=4.0)

    wprojection_planes = 1
    advice = arlexecute.compute(arlexecute.execute(get_wf)(vis_list[0]),
                                sync=True)

    npixel = advice['npixels2']
    cellsize = advice['cellsize']
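
    # advise_wide_field returns a dict of advised parameters: 'npixels2' appears
    # to be the advised image size rounded up to an efficient FFT size, and
    # 'cellsize' the advised pixel size in radians.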

    if context == 'timeslice':
        vis_slices = ntimes
        print("Using timeslice with %d slices" % vis_slices)
    elif context == '2d':
        vis_slices = 1
    else:
        context = 'wstack'
        vis_slices = 5 * advice['vis_slices']
        print("Using wstack with %d slices" % vis_slices)

    results['vis_slices'] = vis_slices
    results['cellsize'] = cellsize
    results['npixel'] = npixel

    gleam_model_list = [
        arlexecute.execute(create_low_test_image_from_gleam)(
            npixel=npixel,
            frequency=[frequency[f]],
            channel_bandwidth=[channel_bandwidth[f]],
            cellsize=cellsize,
            phasecentre=phasecentre,
            polarisation_frame=PolarisationFrame("stokesI"),
            flux_limit=0.3,
            applybeam=True) for f, freq in enumerate(frequency)
    ]

    start = time.time()
    print("****** Starting GLEAM model creation ******")
    gleam_model_list = arlexecute.compute(gleam_model_list, sync=True)
    cmodel = smooth_image(gleam_model_list[centre])
    export_image_to_fits(cmodel,
                         "pipelines-timings-arlexecute-gleam_cmodel.fits")
    end = time.time()
    results['time create gleam'] = end - start
    print("Creating GLEAM model took %.2f seconds" % (end - start))

    gleam_model_list = arlexecute.scatter(gleam_model_list)
    vis_list = predict_list_arlexecute_workflow(vis_list,
                                                gleam_model_list,
                                                vis_slices=vis_slices,
                                                context=context)
    start = time.time()
    print("****** Starting GLEAM model visibility prediction ******")
    vis_list = arlexecute.compute(vis_list, sync=True)

    end = time.time()
    results['time predict'] = end - start
    print("GLEAM model Visibility prediction took %.2f seconds" %
          (end - start))

    # Corrupt the visibility for the GLEAM model
    print("****** Visibility corruption ******")
    vis_list = corrupt_list_arlexecute_workflow(vis_list, phase_error=1.0)
    start = time.time()
    vis_list = arlexecute.compute(vis_list, sync=True)

    vis_list = arlexecute.scatter(vis_list)

    end = time.time()
    results['time corrupt'] = end - start
    print("Visibility corruption took %.2f seconds" % (end - start))

    # Create an empty model image
    model_list = [
        arlexecute.execute(create_image_from_visibility)(
            vis_list[f],
            npixel=npixel,
            cellsize=cellsize,
            frequency=[frequency[f]],
            channel_bandwidth=[channel_bandwidth[f]],
            polarisation_frame=PolarisationFrame("stokesI"))
        for f, freq in enumerate(frequency)
    ]
    model_list = arlexecute.compute(model_list, sync=True)
    model_list = arlexecute.scatter(model_list)

    psf_list = invert_list_arlexecute_workflow(vis_list,
                                               model_list,
                                               vis_slices=vis_slices,
                                               context=context,
                                               facets=facets,
                                               dopsf=True)
    start = time.time()
    print("****** Starting PSF calculation ******")
    psf, sumwt = arlexecute.compute(psf_list, sync=True)[centre]
    end = time.time()
    results['time psf invert'] = end - start
    print("PSF invert took %.2f seconds" % (end - start))

    results['psf_max'] = qa_image(psf).data['max']
    results['psf_min'] = qa_image(psf).data['min']

    dirty_list = invert_list_arlexecute_workflow(vis_list,
                                                 model_list,
                                                 vis_slices=vis_slices,
                                                 context=context,
                                                 facets=facets)
    start = time.time()
    print("****** Starting dirty image calculation ******")
    dirty, sumwt = arlexecute.compute(dirty_list, sync=True)[centre]
    end = time.time()
    results['time invert'] = end - start
    print("Dirty image invert took %.2f seconds" % (end - start))
    print("Maximum in dirty image is ", numpy.max(numpy.abs(dirty.data)),
          ", sumwt is ", sumwt)
    qa = qa_image(dirty)
    results['dirty_max'] = qa.data['max']
    results['dirty_min'] = qa.data['min']

    # Create the ICAL pipeline to run 5 major cycles, starting selfcal at cycle 1.
    # Gains are solved per frequency window (global_solution=False below).
    start = time.time()
    print("****** Starting ICAL ******")

    controls = create_calibration_controls()
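    # In ARL's convention 'T' is the short-timescale (atmospheric) phase term,
    # 'G' the slower electronic gain and 'B' the bandpass. 'first_selfcal' is
    # the first major cycle at which a term is solved for; 'timescale' is its
    # solution interval ('auto' or a value in seconds).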

    controls['T']['first_selfcal'] = 1
    controls['G']['first_selfcal'] = 3
    controls['B']['first_selfcal'] = 4

    controls['T']['timescale'] = 'auto'
    controls['G']['timescale'] = 'auto'
    controls['B']['timescale'] = 1e5

    if nfreqwin > 6:
        nmoment = 3
        algorithm = 'mmclean'
    elif nfreqwin > 2:
        nmoment = 2
        algorithm = 'mmclean'
    else:
        nmoment = 1
        algorithm = 'msclean'

    start = time.time()
    ical_list = ical_list_arlexecute_workflow(vis_list,
                                              model_imagelist=model_list,
                                              context='wstack',
                                              calibration_context='TG',
                                              controls=controls,
                                              scales=[0, 3, 10],
                                              algorithm=algorithm,
                                              nmoment=nmoment,
                                              niter=1000,
                                              fractional_threshold=0.1,
                                              threshold=0.1,
                                              nmajor=5,
                                              gain=0.25,
                                              vis_slices=vis_slices,
                                              timeslice='auto',
                                              global_solution=False,
                                              psf_support=64,
                                              do_selfcal=True)

    end = time.time()
    results['time ICAL graph'] = end - start
    print("Construction of ICAL graph took %.2f seconds" % (end - start))

    # Execute the graph
    start = time.time()
    result = arlexecute.compute(ical_list, sync=True)
    deconvolved, residual, restored = result
    end = time.time()

    results['time ICAL'] = end - start
    print("ICAL graph execution took %.2f seconds" % (end - start))
    qa = qa_image(deconvolved[centre])
    results['deconvolved_max'] = qa.data['max']
    results['deconvolved_min'] = qa.data['min']
    export_image_to_fits(deconvolved[centre],
                         "pipelines-timings-arlexecute-ical_deconvolved.fits")

    qa = qa_image(residual[centre][0])
    results['residual_max'] = qa.data['max']
    results['residual_min'] = qa.data['min']
    export_image_to_fits(residual[centre][0],
                         "pipelines-timings-arlexecute-ical_residual.fits")

    qa = qa_image(restored[centre])
    results['restored_max'] = qa.data['max']
    results['restored_min'] = qa.data['min']
    export_image_to_fits(restored[centre],
                         "pipelines-timings-arlexecute-ical_restored.fits")
    #
    arlexecute.close()

    end_all = time.time()
    results['time overall'] = end_all - start_all

    print("At end, results are {0!r}".format(results))

    return results
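
# Example driver (minimal sketch; assumes trial_case and its ARL imports are
# importable; the argument values are illustrative):
#
#   results = trial_case({}, context='2d', nfreqwin=3, ntimes=3,
#                        rmax=300.0, use_dask=False)
#   print(results['time overall'], results['dirty_max'])
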
def main(args):
    """
    Initialising launch sequence.
    """
    # ------------------------------------------------------
    # Print some stuff to show that the code is running:
    print("")
    os.system(
        "printf 'A demonstration of a \033[5mDPrepB/DPrepC\033[m SDP pipeline\n'"
    )
    print("")
    # Set the directory for the moment images:
    MOMENTS_DIR = args.outputs + '/MOMENTS'
    # Check that the output directories exist, if not then create:
    os.makedirs(args.outputs, exist_ok=True)
    os.makedirs(MOMENTS_DIR, exist_ok=True)
    # Set the polarisation definition of the instrument:
    POLDEF = init_inst(args.inst)

    # Setup Variables for SIP services
    # ------------------------------------------------------
    # Define the Queue Producer settings:
    if args.queues:
        queue_settings = {
            'bootstrap.servers': 'scheduler:9092',
            'message.max.bytes': 100000000
        }  #10.60.253.31:9092

    # Setup the Confluent Kafka Queue
    # ------------------------------------------------------
    if args.queues:
        from confluent_kafka import Producer
        import pickle
        # Create an SDP queue:
        sip_queue = Producer(queue_settings)

    # Define a Data Array Format
    # ------------------------------------------------------
    def gen_data(channel):
        return np.array([
            vis1[channel], vis2[channel], channel, None, None, False, False,
            args.plots,
            float(args.uvcut),
            float(args.pixels), POLDEF, args.outputs,
            float(args.angres), None, None, None, None, None, None, args.twod,
            npixel_advice, cell_advice
        ])

    # Setup the Dask Cluster
    # ------------------------------------------------------
    starttime = t.time()

    dask.config.set(get=dask.distributed.Client.get)
    client = Client(
        args.daskaddress)  # scheduler for Docker container, localhost for P3.

    print("Dask Client details:")
    print(client)
    print("")

    # Define channel range for 1 subband, each containing 40 channels:
    channel_range = np.array(range(int(args.channels)))

    # Load the data into memory:
    """
    The input data should be interfaced with Buffer Management.
    """
    print("Loading data:")
    print("")
    vis1 = [
        load('%s/%s' % (args.inputs, args.ms1), range(channel, channel + 1),
             POLDEF) for channel in range(0, int(args.channels))
    ]
    vis2 = [
        load('%s/%s' % (args.inputs, args.ms2), range(channel, channel + 1),
             POLDEF) for channel in range(0, int(args.channels))
    ]

    # Prepare Measurement Set
    # ------------------------------------------------------
    # Combine MSSS snapshots:
    vis_advice = append_visibility(vis1[0], vis2[0])

    # Apply a uv-distance cut to the data:
    vis_advice = uv_cut(vis_advice, float(args.uvcut))
    npixel_advice, cell_advice = uv_advice(vis_advice, float(args.uvcut),
                                           float(args.pixels))

    # Begin imaging via the Dask cluster
    # ------------------------------------------------------
    # Submit data for each channel to the client, and return an image:

    # Scatter all the data in advance to all the workers:
    """
    The data here could be passed via Data Queues.
    Queues may not be ideal. Data throughput challenges.
    Need to think more about the optimum approach.
    """
    print("Scatter data to workers:")
    print("")
    big_job = [client.scatter(gen_data(channel)) for channel in channel_range]

    # Submit jobs to the cluster and create a list of futures:
    futures = [
        client.submit(dprepb_imaging, big_job[channel], pure=False, retries=3)
        for channel in channel_range
    ]
    """
    The dprepb_imaging function could generate QA, logging, and pass this information via Data Queues.
    Queues work well for this.
    Python logging calls are preferable. Send them to a text file on the node.
    Run another service that watches that file. Or just read from standard out.
    The Dockerisation will assist with logs.
    """

    print("Imaging on workers:")
    # Watch progress:
    progress(futures)

    # Wait until all futures are complete:
    wait(futures)

    # Check that no futures have errors, if so resubmit:
    for future in futures:
        if future.status == 'error':
            print("ERROR: Future", future, "has 'error' status, as:")
            print(client.recreate_error_locally(future))
            print("Rerunning...")
            print("")
            index = futures.index(future)
            futures[index].cancel()
            futures[index] = client.submit(dprepb_imaging,
                                           big_job[index],
                                           pure=False,
                                           retries=3)

    # Wait until all futures are complete:
    wait(futures)

    # Gather results from the futures:
    results = client.gather(futures, errors='raise')

    # Run QA on ARL objects and produce to queue:
    if args.queues:
        print("Adding QA to queue:")
        for result in results:
            sip_queue.produce('qa', pickle.dumps(qa_image(result), protocol=2))

        sip_queue.flush()

    # Return the data element of each ARL object, as a Dask future:
    futures = [
        client.submit(arl_data_future, result, pure=False, retries=3)
        for result in results
    ]

    progress(futures)

    wait(futures)

    # Calculate the Moment images
    # ------------------------------------------------------
    # Now use 'distributed Dask arrays' in order to parallelise the Moment image calculation:
    # Construct a small Dask array for every future:
    print("")
    print("Calculating Moment images:")
    print("")
    arrays = [
        da.from_delayed(future,
                        dtype=np.dtype('float64'),
                        shape=(1, 4, 512, 512)) for future in futures
    ]

    # Stack all small Dask arrays into one:
    stack = da.stack(arrays, axis=0)

    # Combine chunks to reduce overhead - is initially (40, 1, 4, 512, 512):
    stack = stack.rechunk((1, 1, 4, 64, 64))

    # Spread the data around on the cluster:
    stack = client.persist(stack)
    # Data is now coordinated by the single logical Dask array, 'stack'.

    # Save the Moment images:
    """
    The output moment images should be interfaced with Buffer Management.
    
    Need to know more about the Buffer specification.
    Related to initial data distribution also/staging.
    """
    print("Saving Moment images to disk:")
    print("")
    # First generate a template:
    image_template = import_image_from_fits('%s/imaging_dirty_WStack-%s.fits' %
                                            (args.outputs, 0))

    # Output mean images:
    # I:
    image_template.data = stack[:, :, 0, :, :].mean(axis=0).compute()
    # Run QA on ARL objects and produce to queue:
    if args.queues:
        sip_queue.produce('qa',
                          pickle.dumps(qa_image(image_template), protocol=2))
    # Export the data to disk:
    export_image_to_fits(image_template,
                         '%s/Mean-%s.fits' % (MOMENTS_DIR, 'I'))

    # Q:
    image_template.data = stack[:, :, 1, :, :].mean(axis=0).compute()
    # Run QA on ARL objects and produce to queue:
    if args.queues:
        sip_queue.produce('qa',
                          pickle.dumps(qa_image(image_template), protocol=2))
    # Export the data to disk:
    export_image_to_fits(image_template,
                         '%s/Mean-%s.fits' % (MOMENTS_DIR, 'Q'))

    # U:
    image_template.data = stack[:, :, 2, :, :].mean(axis=0).compute()
    # Run QA on ARL objects and produce to queue:
    if args.queues:
        sip_queue.produce('qa',
                          pickle.dumps(qa_image(image_template), protocol=2))
    # Export the data to disk:
    export_image_to_fits(image_template,
                         '%s/Mean-%s.fits' % (MOMENTS_DIR, 'U'))

    # P:
    image_template.data = da.sqrt(
        (da.square(stack[:, :, 1, :, :]) +
         da.square(stack[:, :, 2, :, :]))).mean(axis=0).compute()
    # Run QA on ARL objects and produce to queue:
    if args.queues:
        sip_queue.produce('qa',
                          pickle.dumps(qa_image(image_template), protocol=2))
    # Export the data to disk:
    export_image_to_fits(image_template,
                         '%s/Mean-%s.fits' % (MOMENTS_DIR, 'P'))

    # Output standard deviation images:
    # I:
    image_template.data = stack[:, :, 0, :, :].std(axis=0).compute()
    # Run QA on ARL objects and produce to queue:
    if args.queues:
        sip_queue.produce('qa',
                          pickle.dumps(qa_image(image_template), protocol=2))
    # Export the data to disk:
    export_image_to_fits(image_template, '%s/Std-%s.fits' % (MOMENTS_DIR, 'I'))

    # Q:
    image_template.data = stack[:, :, 1, :, :].std(axis=0).compute()
    # Run QA on ARL objects and produce to queue:
    if args.queues:
        sip_queue.produce('qa',
                          pickle.dumps(qa_image(image_template), protocol=2))
    # Export the data to disk:
    export_image_to_fits(image_template, '%s/Std-%s.fits' % (MOMENTS_DIR, 'Q'))

    # U:
    image_template.data = stack[:, :, 2, :, :].std(axis=0).compute()
    # Run QA on ARL objects and produce to queue:
    if args.queues:
        sip_queue.produce('qa',
                          pickle.dumps(qa_image(image_template), protocol=2))
    # Export the data to disk:
    export_image_to_fits(image_template, '%s/Std-%s.fits' % (MOMENTS_DIR, 'U'))

    # P:
    image_template.data = da.sqrt(
        (da.square(stack[:, :, 1, :, :]) +
         da.square(stack[:, :, 2, :, :]))).std(axis=0).compute()
    # Run QA on ARL objects and produce to queue:
    if args.queues:
        sip_queue.produce('qa',
                          pickle.dumps(qa_image(image_template), protocol=2))
    # Export the data to disk:
    export_image_to_fits(image_template, '%s/Std-%s.fits' % (MOMENTS_DIR, 'P'))

    # Flush queue:
    if args.queues:
        sip_queue.flush()

    # Make a tarball of moment images:
    subprocess.call([
        'tar', '-cvf',
        '%s/moment.tar' % (MOMENTS_DIR),
        '%s/' % (MOMENTS_DIR)
    ])
    subprocess.call(['gzip', '-9f', '%s/moment.tar' % (MOMENTS_DIR)])

    endtime = t.time()
    print("Total runtime (s):", endtime - starttime)
def main():
    """Run the workflow."""
    init_logging()

    LOG.info("Starting imaging-pipeline")

    # Read parameters
    PARFILE = 'parameters.json'
    if len(sys.argv) > 1:
        PARFILE = sys.argv[1]
    LOG.info("JSON parameter file = %s", PARFILE)
    try:
        with open(PARFILE, "r") as par_file:
            jspar = json.load(par_file)
    except (OSError, json.JSONDecodeError) as error:
        LOG.critical('ERROR %s', error)
        return

    # We will use dask
    arlexecute.set_client(get_dask_Client())
    arlexecute.run(init_logging)

    # Import visibility list from HDF5 file
    vis_list = import_blockvisibility_from_hdf5(
        '%s/%s' % (RESULTS_DIR, jspar["files"]["vis_list"]))

    # Now read the BlockVisibilities constructed using a model drawn from GLEAM
    predicted_vislist = import_blockvisibility_from_hdf5(
        '%s/%s' % (RESULTS_DIR, jspar["files"]["predicted_vis_list"]))
    corrupted_vislist = import_blockvisibility_from_hdf5(
        '%s/%s' % (RESULTS_DIR, jspar["files"]["corrupted_vis_list"]))

    # Reproduce parameters from the visibility data
    ntimes = vis_list[0].nvis

    phasecentre = vis_list[0].phasecentre
    print(phasecentre)
    polframe = vis_list[0].polarisation_frame.type
    LOG.info("Polarisation Frame of vis_list: %s", polframe)

    wprojection_planes = jspar["advice"]["wprojection_planes"]
    guard_band_image = jspar["advice"]["guard_band_image"]
    delA = jspar["advice"]["delA"]
    advice_low = advise_wide_field(vis_list[0],
                                   guard_band_image=guard_band_image,
                                   delA=delA,
                                   wprojection_planes=wprojection_planes)
    advice_high = advise_wide_field(vis_list[-1],
                                    guard_band_image=guard_band_image,
                                    delA=delA,
                                    wprojection_planes=wprojection_planes)

    vis_slices = advice_low['vis_slices']
    npixel = advice_high['npixels2']
    cellsize = min(advice_low['cellsize'], advice_high['cellsize'])

    # Recovering frequencies
    fstart = vis_list[0].frequency
    fend = vis_list[-1].frequency
    num_freq_win = len(vis_list)
    frequency = numpy.linspace(fstart, fend, num_freq_win)

    # Recovering bandwidths
    channel_bandwidth = numpy.array(
        num_freq_win * [vis_list[1].frequency - vis_list[0].frequency])

    # Get the LSM. This is currently blank.
    model_list = [
        arlexecute.execute(create_image_from_visibility)(
            vis_list[f],
            npixel=npixel,
            frequency=[frequency[f]],
            channel_bandwidth=[channel_bandwidth[f]],
            cellsize=cellsize,
            phasecentre=phasecentre,
            polarisation_frame=PolarisationFrame(polframe))
        for f, freq in enumerate(frequency)
    ]
    # future_predicted_vislist = arlexecute.scatter(predicted_vislist)

    # Create and execute graphs to make the dirty image and PSF
    # LOG.info('About to run invert to get dirty image')
    # dirty_list = invert_component(future_predicted_vislist, model_list,
    #                               context='wstack',
    #                               vis_slices=vis_slices, dopsf=False)
    # dirty_list = arlexecute.compute(dirty_list, sync=True)

    # LOG.info('About to run invert to get PSF')
    # psf_list = invert_component(future_predicted_vislist, model_list,
    #                             context='wstack',
    #                             vis_slices=vis_slices, dopsf=True)
    # psf_list = arlexecute.compute(psf_list, sync=True)

    # Now deconvolve using msclean
    # LOG.info('About to run deconvolve')
    # deconvolve_list, _ = deconvolve_component(
    #     dirty_list, psf_list,
    #     model_imagelist=model_list,
    #     deconvolve_facets=8,
    #     deconvolve_overlap=16,
    #     deconvolve_taper='tukey',
    #     scales=[0, 3, 10],
    #     algorithm='msclean',
    #     niter=1000,
    #     fractional_threshold=0.1,
    #     threshold=0.1,
    #     gain=0.1,
    #     psf_support=64)
    # deconvolved = arlexecute.compute(deconvolve_list, sync=True)

    LOG.info('About to run continuum imaging')
    continuum_imaging_list = continuum_imaging_arlexecute(
        predicted_vislist,
        model_imagelist=model_list,
        context=jspar["processing"]["continuum_imaging"]
        ["context"],  #'wstack',
        vis_slices=vis_slices,
        scales=jspar["processing"]["continuum_imaging"]
        ["scales"],  #[0, 3, 10],
        algorithm=jspar["processing"]["continuum_imaging"]
        ["algorithm"],  #'mmclean',
        nmoment=jspar["processing"]["continuum_imaging"]["nmoment"],  #3,
        niter=jspar["processing"]["continuum_imaging"]["niter"],  #1000,
        fractional_threshold=jspar["processing"]["continuum_imaging"]
        ["fractional_threshold"],  #0.1,
        threshold=jspar["processing"]["continuum_imaging"]["threshold"],  #0.1,
        nmajor=jspar["processing"]["continuum_imaging"]["nmajor"],  #5,
        gain=jspar["processing"]["continuum_imaging"]["gain"],  #0.25,
        deconvolve_facets=jspar["processing"]["continuum_imaging"]
        ["deconvolve_facets"],  #8,
        deconvolve_overlap=jspar["processing"]["continuum_imaging"]
        ["deconvolve_overlap"],  #16,
        deconvolve_taper=jspar["processing"]["continuum_imaging"]
        ["deconvolve_taper"],  #'tukey',
        psf_support=jspar["processing"]["continuum_imaging"]
        ["psf_support"])  #64)
    result = arlexecute.compute(continuum_imaging_list, sync=True)
    deconvolved = result[0][0]
    residual = result[1][0]
    restored = result[2][0]

    print(qa_image(deconvolved, context='Clean image - no selfcal'))
    print(qa_image(restored, context='Restored clean image - no selfcal'))
    export_image_to_fits(
        restored,
        '%s/%s' % (RESULTS_DIR, jspar["files"]["continuum_imaging_restored"]))

    print(qa_image(residual[0], context='Residual clean image - no selfcal'))
    export_image_to_fits(
        residual[0],
        '%s/%s' % (RESULTS_DIR, jspar["files"]["continuum_imaging_residual"]))

    controls = create_calibration_controls()
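    # 'T', 'G' and 'B' are the phase, gain and bandpass calibration terms;
    # see create_calibration_controls for the full set of control keys.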

    controls['T']['first_selfcal'] = jspar["processing"]["controls"]["T"][
        "first_selfcal"]
    controls['G']['first_selfcal'] = jspar["processing"]["controls"]["G"][
        "first_selfcal"]
    controls['B']['first_selfcal'] = jspar["processing"]["controls"]["B"][
        "first_selfcal"]

    controls['T']['timescale'] = jspar["processing"]["controls"]["T"][
        "timescale"]
    controls['G']['timescale'] = jspar["processing"]["controls"]["G"][
        "timescale"]
    controls['B']['timescale'] = jspar["processing"]["controls"]["B"][
        "timescale"]

    PP.pprint(controls)

    future_corrupted_vislist = arlexecute.scatter(corrupted_vislist)
    ical_list = ical_arlexecute(
        future_corrupted_vislist,
        model_imagelist=model_list,
        context=jspar["processing"]["ical"]["context"],  #'wstack',
        calibration_context=jspar["processing"]["ical"]
        ["calibration_context"],  #'TG',
        controls=controls,
        vis_slices=ntimes,
        scales=jspar["processing"]["ical"]["scales"],  #[0, 3, 10],
        timeslice=jspar["processing"]["ical"]["timeslice"],  #'auto',
        algorithm=jspar["processing"]["ical"]["algorithm"],  #'mmclean',
        nmoment=jspar["processing"]["ical"]["nmoment"],  #3,
        niter=jspar["processing"]["ical"]["niter"],  #1000,
        fractional_threshold=jspar["processing"]["ical"]
        ["fractional_threshold"],  #0.1,
        threshold=jspar["processing"]["ical"]["threshold"],  #0.1,
        nmajor=jspar["processing"]["ical"]["nmajor"],  #5,
        gain=jspar["processing"]["ical"]["gain"],  #0.25,
        deconvolve_facets=jspar["processing"]["ical"]
        ["deconvolve_facets"],  #8,
        deconvolve_overlap=jspar["processing"]["ical"]
        ["deconvolve_overlap"],  #16,
        deconvolve_taper=jspar["processing"]["ical"]
        ["deconvolve_taper"],  #'tukey',
        global_solution=jspar["processing"]["ical"]
        ["global_solution"],  #False,
        do_selfcal=jspar["processing"]["ical"]["do_selfcal"],  #True,
        psf_support=jspar["processing"]["ical"]["psf_support"])  #64

    LOG.info('About to run ical')
    result = arlexecute.compute(ical_list, sync=True)
    deconvolved = result[0][0]
    residual = result[1][0]
    restored = result[2][0]

    print(qa_image(deconvolved, context='Clean image'))
    print(qa_image(restored, context='Restored clean image'))
    export_image_to_fits(
        restored, '%s/%s' % (RESULTS_DIR, jspar["files"]["ical_restored"]))

    print(qa_image(residual[0], context='Residual clean image'))
    export_image_to_fits(
        residual[0], '%s/%s' % (RESULTS_DIR, jspar["files"]["ical_residual"]))

    arlexecute.close()
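

if __name__ == '__main__':
    # Assumed entry point when this pipeline script is run directly.
    main()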