Code Example #1
import os
import sys

import numpy as np

# import_image_from_fits is assumed to be in scope from the ARL image
# operations module.


def load_im_data(results_dir):
    """Load the full-Stokes images into memory.
    
    Args:
    results_dir (str): directory containing the saved results.
    
    Returns:
    image_temp: ARL image data.
    frequency: array of observed frequencies in Hz.
    weights: array of weights per channel (1/sigma**2).
    """
    if not os.path.isdir(results_dir):
        raise FileNotFoundError("Input directory does not exist: %s" % results_dir)
    try:
        # Load the channel 0 data as an image template:
        image_temp = import_image_from_fits('%s/imaging_clean_WStack-%s.fits'
                                            % (results_dir, 0))
        # Fill image_temp with the multi-frequency data (one subband of
        # 40 channels is assumed):
        image_temp.data = np.concatenate([import_image_from_fits(
                                          '%s/imaging_clean_WStack-%s.fits'
                                          % (results_dir, channel)).data
                                          for channel in range(0, 40)])
        # Read the array of the channel frequencies:
        frequency = image_temp.frequency
        # Calculate the weights, in the form [channel, stokes, npix, npix].
        # The per-channel standard deviation is used initially; more robust
        # estimators could be considered.
        weights = np.array([1.0/(np.std(image_temp.data[channel, 0, :, :])**2)
                            for channel in range(0, 40)])
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        raise
    return image_temp, frequency, weights
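
A minimal usage sketch (the directory name is hypothetical; the function assumes the 40 per-channel imaging_clean_WStack-*.fits files exist there):

image_temp, frequency, weights = load_im_data('./results_dir')

# The data are in the form [channel, stokes, npix, npix], so a weighted
# channel average of the Stokes I plane can be formed as:
mean_i = np.average(image_temp.data[:, 0, :, :], axis=0, weights=weights)
print(frequency.shape, weights.shape, mean_i.shape)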
Code Example #2
    def test_create_gaintable_from_screen(self):
        screen = import_image_from_fits(
            arl_path('data/models/test_mpc_screen.fits'))
        beam = create_test_image(cellsize=0.0015,
                                 phasecentre=self.vis.phasecentre,
                                 frequency=self.frequency)

        beam = create_low_test_beam(beam)

        gleam_components = create_low_test_skycomponents_from_gleam(
            flux_limit=1.0,
            phasecentre=self.phasecentre,
            frequency=self.frequency,
            polarisation_frame=PolarisationFrame('stokesI'),
            radius=0.2)

        pb_gleam_components = apply_beam_to_skycomponent(
            gleam_components, beam)

        actual_components = filter_skycomponents_by_flux(pb_gleam_components,
                                                         flux_min=1.0)

        gaintables = create_gaintable_from_screen(self.vis, actual_components,
                                                  screen)
        assert len(gaintables) == len(actual_components), len(gaintables)
        assert gaintables[0].gain.shape == (3, 94, 1, 1,
                                            1), gaintables[0].gain.shape
Code Example #3
def buffer_data_model_to_memory(jbuff, dm):
    """Copy a buffer data model into a memory data model.
    
    The file type is derived from the file extension. All are HDF5 only, with the exception of Image, which
    can also be FITS.

    :param jbuff: JSON describing buffer
    :param dm: JSON describing data model
    :return: data model
    """
    import os
    name = os.path.join(jbuff["directory"], dm["name"])

    _, file_extension = os.path.splitext(dm["name"])
    
    if dm["data_model"] == "BlockVisibility":
        return import_blockvisibility_from_hdf5(name)
    elif dm["data_model"] == "Image":
        if file_extension == ".fits":
            return import_image_from_fits(name)
        else:
            return import_image_from_hdf5(name)
    elif dm["data_model"] == "SkyModel":
        return import_skymodel_from_hdf5(name)
    elif dm["data_model"] == "GainTable":
        return import_gaintable_from_hdf5(name)
    else:
        raise ValueError("Data model %s not supported" % dm["data_model"])
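
A hedged sketch of the JSON structures the function dispatches on (the directory and file names here are hypothetical):

jbuff = {"directory": "/buffer/staging"}
dm = {"data_model": "Image", "name": "dirty.fits"}

# Resolves to /buffer/staging/dirty.fits; the "Image" data model with a
# .fits extension selects the FITS reader rather than the HDF5 one.
image = buffer_data_model_to_memory(jbuff, dm)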
Code Example #4
def moments_save_to_disk(moments_im, stokes_type='q', results_dir='./results_dir', outname='mean'):
    """Save the Faraday moments images to disk.
    
    Args:
    moments_im (numpy array): the Faraday moments image data to save.
    stokes_type (str): the Stokes parameter of the data ('p', 'q' or 'u').
    results_dir (str): directory to save results.
    outname (str): outname for saved file.
    
    Returns:
    None
    """
    # Read in the first channel image and repurpose it as the new moments image:
    im_moments = import_image_from_fits('%s/imaging_clean_WStack-%s.fits' % (results_dir, 0))
    # Place the data into the open image:
    im_moments.data = moments_im
    if stokes_type == 'p':
        stokes_val = 0.0
    elif stokes_type == 'q':
        stokes_val = 2.0
    elif stokes_type == 'u':
        stokes_val = 3.0
    else:
        raise ValueError("Unknown value for stokes_type: %s" % stokes_type)

    # This line also adjusts the listed Stokes parameter (0.0 is used for P,
    # although the correct value is uncertain):
    im_moments.wcs.wcs.crval = [im_moments.wcs.wcs.crval[0], im_moments.wcs.wcs.crval[1],
                                stokes_val, im_moments.wcs.wcs.crval[3]]
    # Output the file to disk:
    export_image_to_fits(im_moments, '%s/%s_%s.fits' % (results_dir, outname, stokes_type))
    return
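
A usage sketch (moments_q is a hypothetical array shaped like the single-channel template image, i.e. [1, nstokes, npix, npix]):

# Save a mean Stokes Q moments image to ./results_dir/mean_q.fits:
moments_save_to_disk(moments_q, stokes_type='q',
                     results_dir='./results_dir', outname='mean')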
Code Example #5
def create_test_image(
    canonical=True,
    cellsize=None,
    frequency=None,
    channel_bandwidth=None,
    phasecentre=None,
    polarisation_frame=PolarisationFrame("stokesI")) -> Image:
    """Create a useful test image

    This is the test image M31 widely used in ALMA and other simulations. It is actually part of an Halpha region in
    M31.

    :param canonical: Make the image into a 4 dimensional image
    :param cellsize:
    :param frequency: Frequency (array) in Hz
    :param channel_bandwidth: Channel bandwidth (array) in Hz
    :param phasecentre: Phase centre of image (SkyCoord)
    :param polarisation_frame: Polarisation frame
    :return: Image
    """
    if frequency is None:
        frequency = [1e8]
    im = import_image_from_fits(arl_path("data/models/M31.MOD"))
    if canonical:

        if polarisation_frame is None:
            im.polarisation_frame = PolarisationFrame("stokesI")
        elif isinstance(polarisation_frame, PolarisationFrame):
            im.polarisation_frame = polarisation_frame
        else:
            raise ValueError("polarisation_frame is not valid")

        im = replicate_image(im,
                             frequency=frequency,
                             polarisation_frame=im.polarisation_frame)
        if cellsize is not None:
            im.wcs.wcs.cdelt[0] = -180.0 * cellsize / numpy.pi
            im.wcs.wcs.cdelt[1] = +180.0 * cellsize / numpy.pi
        if frequency is not None:
            im.wcs.wcs.crval[3] = frequency[0]
        if channel_bandwidth is not None:
            im.wcs.wcs.cdelt[3] = channel_bandwidth[0]
        else:
            if len(frequency) > 1:
                im.wcs.wcs.cdelt[3] = frequency[1] - frequency[0]
            else:
                im.wcs.wcs.cdelt[3] = 0.001 * frequency[0]
        im.wcs.wcs.radesys = 'ICRS'
        im.wcs.wcs.equinox = 2000.00

    if phasecentre is not None:
        im.wcs.wcs.crval[0] = phasecentre.ra.deg
        im.wcs.wcs.crval[1] = phasecentre.dec.deg
        # WCS is 1 relative
        im.wcs.wcs.crpix[0] = im.data.shape[3] // 2 + 1
        im.wcs.wcs.crpix[1] = im.data.shape[2] // 2 + 1

    return im
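
A usage sketch, assuming astropy is available for the phase centre (the coordinate and frequency values are illustrative):

import numpy
from astropy.coordinates import SkyCoord
import astropy.units as u

phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-45.0 * u.deg, frame='icrs')
frequency = numpy.linspace(0.8e8, 1.2e8, 5)

im = create_test_image(cellsize=0.0015,
                       frequency=frequency,
                       phasecentre=phasecentre,
                       polarisation_frame=PolarisationFrame('stokesI'))
print(im.data.shape)  # (nchan, npol, ny, nx) for the canonical 4D image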
Code Example #6
def create_low_test_beam(model: Image) -> Image:
    """Create a test power beam for LOW using an image from OSKAR

    :param model: Template image
    :return: Image
    """

    beam = import_image_from_fits(arl_path('data/models/SKA1_LOW_beam.fits'))

    # Scale the image cellsize to account for the difference in frequencies. Eventually we will want to
    # use a frequency cube
    log.info("create_low_test_beam: primary beam is defined at %.3f MHz" %
             (beam.wcs.wcs.crval[2] * 1e-6))

    nchan, npol, ny, nx = model.shape

    # We need to interpolate each frequency channel separately. The beam is assumed to just scale with
    # frequency.

    reprojected_beam = create_empty_image_like(model)

    for chan in range(nchan):

        model2dwcs = model.wcs.sub(2).deepcopy()
        model2dshape = [model.shape[2], model.shape[3]]
        beam2dwcs = beam.wcs.sub(2).deepcopy()

        # The frequency axis is the second to last in the beam
        frequency = model.wcs.sub(['spectral']).wcs_pix2world([chan], 0)[0]
        fscale = beam.wcs.wcs.crval[2] / frequency

        beam2dwcs.wcs.cdelt = fscale * beam.wcs.sub(2).wcs.cdelt
        beam2dwcs.wcs.crpix = beam.wcs.sub(2).wcs.crpix
        beam2dwcs.wcs.crval = model.wcs.sub(2).wcs.crval
        beam2dwcs.wcs.ctype = model.wcs.sub(2).wcs.ctype
        model2dwcs.wcs.crpix = [
            model.shape[2] // 2 + 1, model.shape[3] // 2 + 1
        ]

        beam2d = create_image_from_array(beam.data[0, 0, :, :], beam2dwcs,
                                         model.polarisation_frame)
        reprojected_beam2d, footprint = reproject_image(beam2d,
                                                        model2dwcs,
                                                        shape=model2dshape)
        assert numpy.max(
            footprint.data) > 0.0, "No overlap between beam and model"

        reprojected_beam2d.data *= reprojected_beam2d.data
        reprojected_beam2d.data[footprint.data <= 0.0] = 0.0
        for pol in range(npol):
            reprojected_beam.data[chan,
                                  pol, :, :] = reprojected_beam2d.data[:, :]

    return reprojected_beam
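
The beam is assumed to scale linearly with frequency, so the cellsize stretch factor applied above can be sanity-checked in isolation (values illustrative):

# A beam defined at 100 MHz and applied to a 50 MHz channel appears twice
# as large on the sky, so its cellsize is stretched by fscale = 2:
beam_ref_freq = 1.0e8  # Hz, as read from beam.wcs.wcs.crval[2]
channel_freq = 5.0e7   # Hz, from the model's spectral axis
fscale = beam_ref_freq / channel_freq
assert fscale == 2.0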
Code Example #7
    def test_create_gradient(self):
        real_vp = import_image_from_fits(
            arl_path('data/models/MID_GRASP_VP_real.fits'))
        gradx, grady = image_gradients(real_vp)

        gradxx, gradxy = image_gradients(gradx)
        gradyx, gradyy = image_gradients(grady)

        gradx.data *= real_vp.data
        grady.data *= real_vp.data
        gradxx.data *= real_vp.data
        gradxy.data *= real_vp.data
        gradyx.data *= real_vp.data
        gradyy.data *= real_vp.data

        import matplotlib.pyplot as plt
        plt.clf()
        show_image(gradx, title='gradx')
        plt.show()
        plt.clf()
        show_image(grady, title='grady')
        plt.show()
        export_image_to_fits(gradx,
                             "%s/test_image_gradients_gradx.fits" % (self.dir))
        export_image_to_fits(grady,
                             "%s/test_image_gradients_grady.fits" % (self.dir))

        plt.clf()
        show_image(gradxx, title='gradxx')
        plt.show()
        plt.clf()
        show_image(gradxy, title='gradxy')
        plt.show()
        plt.clf()
        show_image(gradyx, title='gradyx')
        plt.show()
        plt.clf()
        show_image(gradyy, title='gradyy')
        plt.show()
        export_image_to_fits(
            gradxx, "%s/test_image_gradients_gradxx.fits" % (self.dir))
        export_image_to_fits(
            gradxy, "%s/test_image_gradients_gradxy.fits" % (self.dir))
        export_image_to_fits(
            gradyx, "%s/test_image_gradients_gradyx.fits" % (self.dir))
        export_image_to_fits(
            gradyy, "%s/test_image_gradients_gradyy.fits" % (self.dir))
Code Example #8
def rmcube_save_to_disk(rmsynth, cellsize, maxrm_est, rmtype='abs', results_dir='./results_dir', outname='dirty'):
    """Save the RM cubes to disk.
        
    Args:
    rmsynth (numpy array): array of complex numbers from RM Synthesis.
    cellsize (float): advised cellsize in Faraday space.
    maxrm_est (float): maximum observable RM (50 percent sensitivity).
    rmtype (str): the component of the complex numbers to process and save.
    results_dir (str): directory to save results.
    outname (str): outname for saved file.
    """
    # Read in the first channel image and repurpose it as the new RM cube:
    im_rmsynth = import_image_from_fits('%s/imaging_clean_WStack-%s.fits' % (results_dir, 0))
    # Output the polarised data:
    if rmtype == 'abs':
        im_rmsynth.data = np.abs(rmsynth)
        stokes_val = 0.0
    elif rmtype == 'real':
        im_rmsynth.data = np.real(rmsynth)
        stokes_val = 2.0
    elif rmtype == 'imag':
        im_rmsynth.data = np.imag(rmsynth)
        stokes_val = 3.0
    else:
        raise ValueError("Unknown value for rmtype: %s" % rmtype)
    # Adjust the various axes of the cube:
    im_rmsynth.wcs.wcs.ctype = [im_rmsynth.wcs.wcs.ctype[0], im_rmsynth.wcs.wcs.ctype[1],
                                'FARDEPTH', im_rmsynth.wcs.wcs.ctype[2]]
    im_rmsynth.wcs.wcs.cdelt = [im_rmsynth.wcs.wcs.cdelt[0], im_rmsynth.wcs.wcs.cdelt[1],
                                cellsize, im_rmsynth.wcs.wcs.cdelt[2]]
    im_rmsynth.wcs.wcs.crpix = [im_rmsynth.wcs.wcs.crpix[0], im_rmsynth.wcs.wcs.crpix[1],
                                1.0, im_rmsynth.wcs.wcs.crpix[2]]
    im_rmsynth.wcs.wcs.cunit = [im_rmsynth.wcs.wcs.cunit[0], im_rmsynth.wcs.wcs.cunit[1],
                                'rad / m^2', im_rmsynth.wcs.wcs.cunit[2]]
    # This line also adjusts the listed Stokes parameter (0.0 is used for P,
    # although the correct value is uncertain):
    im_rmsynth.wcs.wcs.crval = [im_rmsynth.wcs.wcs.crval[0], im_rmsynth.wcs.wcs.crval[1],
                                -maxrm_est, stokes_val]
    # Tweak the axes into a more sensible order:
    im_rmsynth.data = np.rollaxis(im_rmsynth.data, 2, 0)
    im_rmsynth.data = np.rollaxis(im_rmsynth.data, 2, 1)
    # Output the file to disk:
    export_image_to_fits(im_rmsynth, '%s/rmsynth-%s-%s.fits' % (results_dir, rmtype, outname))
    return
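
A usage sketch (the array shape and Faraday-space values are hypothetical; rmsynth would normally come from the pipeline's RM Synthesis step):

rmsynth = np.zeros((512, 512, 100), dtype=complex)  # hypothetical complex cube
cellsize = 5.0     # rad/m^2 per Faraday-depth pixel (illustrative)
maxrm_est = 250.0  # rad/m^2 (illustrative)
rmcube_save_to_disk(rmsynth, cellsize, maxrm_est,
                    rmtype='abs', results_dir='./results_dir', outname='dirty')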
Code Example #9
    def test_grid_gaintable_to_screen(self):
        screen = import_image_from_fits(
            arl_path('data/models/test_mpc_screen.fits'))
        beam = create_test_image(cellsize=0.0015,
                                 phasecentre=self.vis.phasecentre,
                                 frequency=self.frequency)

        beam = create_low_test_beam(beam, use_local=False)

        gleam_components = create_low_test_skycomponents_from_gleam(
            flux_limit=1.0,
            phasecentre=self.phasecentre,
            frequency=self.frequency,
            polarisation_frame=PolarisationFrame('stokesI'),
            radius=0.2)

        pb_gleam_components = apply_beam_to_skycomponent(
            gleam_components, beam)

        actual_components = filter_skycomponents_by_flux(pb_gleam_components,
                                                         flux_min=1.0)

        gaintables = create_gaintable_from_screen(self.vis, actual_components,
                                                  screen)
        assert len(gaintables) == len(actual_components), len(gaintables)
        assert gaintables[0].gain.shape == (3, 94, 3, 1,
                                            1), gaintables[0].gain.shape

        newscreen = create_empty_image_like(screen)

        newscreen, weights = grid_gaintable_to_screen(self.vis, gaintables,
                                                      newscreen)
        # Check that the gridded screen contains signal:
        assert numpy.max(numpy.abs(newscreen.data)) > 0.0
        if self.persist:
            export_image_to_fits(
                newscreen,
                arl_path('test_results/test_mpc_screen_gridded.fits'))
        if self.persist:
            export_image_to_fits(
                weights,
                arl_path('test_results/test_mpc_screen_gridded_weights.fits'))
Code Example #10
    def test_read_screen(self):
        screen = import_image_from_fits(
            arl_path('data/models/test_mpc_screen.fits'))
        assert screen.data.shape == (1, 3, 2000, 2000), screen.data.shape
Code Example #11
import os
import subprocess
import time as t

import numpy as np
import dask
import dask.array as da
from dask.distributed import Client, progress, wait

# The ARL functions (import_image_from_fits, export_image_to_fits, qa_image)
# and the pipeline helpers (init_inst, load, append_visibility, uv_cut,
# uv_advice, dprepb_imaging, arl_data_future) are assumed to be imported
# from elsewhere in the pipeline package.


def main(args):
    """
    Initialising launch sequence.
    """
    # ------------------------------------------------------
    # Print some stuff to show that the code is running:
    print("")
    os.system(
        "printf 'A demonstration of a \033[5mDPrepB/DPrepC\033[m SDP pipeline\n'"
    )
    print("")
    # Set the directory for the moment images:
    MOMENTS_DIR = args.outputs + '/MOMENTS'
    # Check that the output directories exist, if not then create:
    os.makedirs(args.outputs, exist_ok=True)
    os.makedirs(MOMENTS_DIR, exist_ok=True)
    # Set the polarisation definition of the instrument:
    POLDEF = init_inst(args.inst)

    # Setup Variables for SIP services
    # ------------------------------------------------------
    # Define the Queue Producer settings:
    if args.queues:
        queue_settings = {
            'bootstrap.servers': 'scheduler:9092',
            'message.max.bytes': 100000000
        }  #10.60.253.31:9092

    # Setup the Confluent Kafka Queue
    # ------------------------------------------------------
    if args.queues:
        from confluent_kafka import Producer
        import pickle
        # Create an SDP queue:
        sip_queue = Producer(queue_settings)

    # Define a Data Array Format
    # ------------------------------------------------------
    # Note: gen_data closes over vis1, vis2, npixel_advice and cell_advice,
    # which are defined further below; it is only called once they exist.
    def gen_data(channel):
        return np.array([
            vis1[channel], vis2[channel], channel, None, None, False, False,
            args.plots,
            float(args.uvcut),
            float(args.pixels), POLDEF, args.outputs,
            float(args.angres), None, None, None, None, None, None, args.twod,
            npixel_advice, cell_advice
        ])

    # Setup the Dask Cluster
    # ------------------------------------------------------
    starttime = t.time()

    # Note: dask.config.set(get=...) is the legacy (pre-0.19) Dask API;
    # newer Dask releases use scheduler= instead.
    dask.config.set(get=dask.distributed.Client.get)
    client = Client(
        args.daskaddress)  # scheduler for Docker container, localhost for P3.

    print("Dask Client details:")
    print(client)
    print("")

    # Define channel range for 1 subband, each containing 40 channels:
    channel_range = np.array(range(int(args.channels)))

    # Load the data into memory:
    """
    The input data should be interfaced with Buffer Management.
    """
    print("Loading data:")
    print("")
    vis1 = [
        load('%s/%s' % (args.inputs, args.ms1), range(channel, channel + 1),
             POLDEF) for channel in range(0, int(args.channels))
    ]
    vis2 = [
        load('%s/%s' % (args.inputs, args.ms2), range(channel, channel + 1),
             POLDEF) for channel in range(0, int(args.channels))
    ]

    # Prepare Measurement Set
    # ------------------------------------------------------
    # Combine MSSS snapshots:
    vis_advice = append_visibility(vis1[0], vis2[0])

    # Apply a uv-distance cut to the data:
    vis_advice = uv_cut(vis_advice, float(args.uvcut))
    npixel_advice, cell_advice = uv_advice(vis_advice, float(args.uvcut),
                                           float(args.pixels))

    # Begin imaging via the Dask cluster
    # ------------------------------------------------------
    # Submit data for each channel to the client, and return an image:

    # Scatter all the data in advance to all the workers:
    """
    The data here could be passed via Data Queues.
    Queues may not be ideal. Data throughput challenges.
    Need to think more about the optimum approach.
    """
    print("Scatter data to workers:")
    print("")
    big_job = [client.scatter(gen_data(channel)) for channel in channel_range]

    # Submit jobs to the cluster and create a list of futures:
    futures = [
        client.submit(dprepb_imaging, big_job[channel], pure=False, retries=3)
        for channel in channel_range
    ]
    """
    The dprepb_imaging function could generate QA, logging, and pass this information via Data Queues.
    Queues work well for this.
    Python logging calls are preferable. Send them to a text file on the node.
    Run another service that watches that file. Or just read from standard out.
    The Dockerisation will assist with logs.
    """

    print("Imaging on workers:")
    # Watch progress:
    progress(futures)

    # Wait until all futures are complete:
    wait(futures)

    # Check that no futures have errors, if so resubmit:
    for future in futures:
        if future.status == 'error':
            print("ERROR: Future", future, "has 'error' status, as:")
            print(client.recreate_error_locally(future))
            print("Rerunning...")
            print("")
            index = futures.index(future)
            futures[index].cancel()
            futures[index] = client.submit(dprepb_imaging,
                                           big_job[index],
                                           pure=False,
                                           retries=3)

    # Wait until all futures are complete:
    wait(futures)

    # Gather results from the futures:
    results = client.gather(futures, errors='raise')

    # Run QA on ARL objects and produce to queue:
    if args.queues:
        print("Adding QA to queue:")
        for result in results:
            sip_queue.produce('qa', pickle.dumps(qa_image(result), protocol=2))

        sip_queue.flush()

    # Return the data element of each ARL object, as a Dask future:
    futures = [
        client.submit(arl_data_future, result, pure=False, retries=3)
        for result in results
    ]

    progress(futures)

    wait(futures)

    # Calculate the Moment images
    # ------------------------------------------------------
    # Now use 'distributed Dask arrays' in order to parallelise the Moment image calculation:
    # Construct a small Dask array for every future:
    print("")
    print("Calculating Moment images:")
    print("")
    arrays = [
        da.from_delayed(future,
                        dtype=np.dtype('float64'),
                        shape=(1, 4, 512, 512)) for future in futures
    ]

    # Stack all small Dask arrays into one:
    stack = da.stack(arrays, axis=0)

    # Combine chunks to reduce overhead - is initially (40, 1, 4, 512, 512):
    stack = stack.rechunk((1, 1, 4, 64, 64))

    # Spread the data around on the cluster:
    stack = client.persist(stack)
    # Data is now coordinated by the single logical Dask array, 'stack'.

    # Save the Moment images:
    """
    The output moment images should be interfaced with Buffer Management.
    
    Need to know more about the Buffer specification.
    Related to initial data distribution also/staging.
    """
    print("Saving Moment images to disk:")
    print("")
    # First generate a template:
    image_template = import_image_from_fits('%s/imaging_dirty_WStack-%s.fits' %
                                            (args.outputs, 0))

    # Helper to QA, optionally queue, and export a single moment image:
    def save_moment(plane, moment, stokes):
        image_template.data = plane.compute()
        # Run QA on ARL objects and produce to queue:
        if args.queues:
            sip_queue.produce(
                'qa', pickle.dumps(qa_image(image_template), protocol=2))
        # Export the data to disk:
        export_image_to_fits(image_template,
                             '%s/%s-%s.fits' % (MOMENTS_DIR, moment, stokes))

    # The polarised intensity, P = sqrt(Q**2 + U**2):
    stack_p = da.sqrt(da.square(stack[:, :, 1, :, :]) +
                      da.square(stack[:, :, 2, :, :]))

    # Output the mean and standard deviation images for I, Q, U and P:
    planes = [('I', stack[:, :, 0, :, :]),
              ('Q', stack[:, :, 1, :, :]),
              ('U', stack[:, :, 2, :, :]),
              ('P', stack_p)]
    for moment, reducer in [('Mean', 'mean'), ('Std', 'std')]:
        for stokes, plane in planes:
            save_moment(getattr(plane, reducer)(axis=0), moment, stokes)

    # Flush queue:
    if args.queues:
        sip_queue.flush()

    # Make a tarball of moment images:
    subprocess.call([
        'tar', '-cvf',
        '%s/moment.tar' % (MOMENTS_DIR),
        '%s/' % (MOMENTS_DIR)
    ])
    subprocess.call(['gzip', '-9f', '%s/moment.tar' % (MOMENTS_DIR)])

    endtime = t.time()
    print("Total wall-clock time: %.1f s" % (endtime - starttime))
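
main(args) is driven by command-line arguments; a hedged launcher sketch covering the attributes referenced above (the flag names match the attributes used in main, but the defaults are illustrative, not the pipeline's actual ones):

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description='DPrepB/DPrepC imaging pipeline')
    parser.add_argument('--inputs', default='./inputs')
    parser.add_argument('--outputs', default='./outputs')
    parser.add_argument('--ms1', default='snapshot1.ms')
    parser.add_argument('--ms2', default='snapshot2.ms')
    parser.add_argument('--channels', default=40, type=int)
    parser.add_argument('--inst', default='LOFAR')
    parser.add_argument('--uvcut', default=450.0, type=float)
    parser.add_argument('--pixels', default=512, type=int)
    parser.add_argument('--angres', default=8.0, type=float)
    parser.add_argument('--daskaddress', default='scheduler:8786')
    parser.add_argument('--plots', action='store_true')
    parser.add_argument('--twod', action='store_true')
    parser.add_argument('--queues', action='store_true')
    main(parser.parse_args())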