import logging

import oskar

LOG = logging.getLogger(__name__)


def make_image(name, algorithm, root_prefix, fov_deg, num_pixels):
    """Generates an image from visibility data."""
    settings_dict = {
        'image': {
            'size': str(num_pixels),
            'fov_deg': str(fov_deg),
            'algorithm': algorithm,
            'fft/use_gpu': 'true',
            'fft/grid_on_gpu': 'true',
            'input_vis_data': 'sim_' + name + '.vis',
            'root_path': root_prefix + '_' + name
        }
    }
    settings = oskar.SettingsTree('oskar_imager')
    settings.from_dict(settings_dict)
    if name == 'large':
        settings['image/fft/use_gpu'] = 'false'
    LOG.info('Starting imager for "%s"', settings['image/root_path'])
    imager = oskar.Imager(settings=settings)
    imager.run()
    LOG.info('Imaging complete')
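# Hypothetical usage, for illustration only: the argument values below are
# assumptions, and 'sim_small.vis' must already exist from an earlier
# simulation step. 'FFT' is one of the imager's standard algorithms.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    make_image('small', algorithm='FFT', root_prefix='image',
               fov_deg=2.0, num_pixels=4096)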
Code example #2
import json
import logging
import sys
from glob import glob
from os.path import join, splitext

import numpy
import oskar
from mpi4py import MPI

# Note: chunks(), process_input_data() and save_image() are helper functions
# defined elsewhere in the original script (a sketch of chunks() follows this
# example).


def main():
    """Runs a test imaging pipeline using MPI."""
    # Check command line arguments.
    if len(sys.argv) < 3:
        raise RuntimeError('Usage: mpiexec -n <np> '
                           'python mpi_imager_test.py <settings_file> <dir>')

    # Get the MPI communicator and initialise broadcast variables.
    comm = MPI.COMM_WORLD
    settings = None
    inputs = None
    grid_weights = None

    # Create log.
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)
    if len(log.handlers) == 0:
        log.addHandler(logging.StreamHandler(sys.stdout))

    if comm.Get_rank() == 0:
        # Load pipeline settings.
        with open(sys.argv[1]) as f:
            settings = json.load(f)

        # Get a list of input Measurement Sets to process.
        data_dir = str(sys.argv[2])
        inputs = glob(join(data_dir, '*.ms')) + glob(join(data_dir, '*.MS'))
        # Materialise as a list: under Python 3, filter() returns an iterator
        # that the join() below would otherwise exhaust.
        inputs = list(filter(None, inputs))
        log.info('Found input Measurement Sets: %s', ', '.join(inputs))

        # Distribute the list of Measurement Sets among processors.
        inputs = chunks(inputs, comm.Get_size())

    # Broadcast settings and scatter list of input files.
    comm.barrier()
    settings = comm.bcast(settings)
    inputs = comm.scatter(inputs)

    # Record which file(s) this node is working on.
    log.debug('Rank %d, processing [%s]', comm.Get_rank(), ', '.join(inputs))

    # Create an imager and configure it.
    precision = settings['precision']
    imager = oskar.Imager(precision)
    for key, value in settings['imager'].items():
        setattr(imager, key, value)

    # Allocate a local visibility grid.
    grid_norm = 0.
    grid_dim = [imager.plane_size, imager.plane_size]
    grid_data = numpy.zeros(grid_dim,
                            dtype='c8' if precision == 'single' else 'c16')

    # Process data according to mode.
    if settings['combine']:
        if imager.weighting == 'Uniform' or imager.algorithm == 'W-projection':
            # If necessary, generate a local weights grid.
            local_weights = None
            if imager.weighting == 'Uniform':
                grid_weights = numpy.zeros(grid_dim, dtype=precision)
                local_weights = numpy.zeros(grid_dim, dtype=precision)

            # Do a first pass for uniform weighting or W-projection.
            imager.coords_only = True
            for f in inputs:
                log.info('Reading coordinates from %s', f)
                process_input_data(f, imager, None, 0.0, local_weights)
            imager.coords_only = False

            # Get maximum number of W-projection planes required.
            num_w_planes = imager.num_w_planes
            num_w_planes = comm.allreduce(num_w_planes, op=MPI.MAX)
            imager.num_w_planes = num_w_planes

            # Combine (reduce) weights grids, and broadcast the result.
            if local_weights is not None:
                comm.Allreduce(local_weights, grid_weights, op=MPI.SUM)

        # Populate the local visibility grid.
        for f in inputs:
            log.info('Reading visibilities from %s', f)
            grid_norm = process_input_data(f, imager, grid_data, grid_norm,
                                           grid_weights)

        # Combine (reduce) visibility grids.
        grid = numpy.zeros_like(grid_data)
        comm.Reduce(grid_data, grid, op=MPI.SUM)
        grid_norm = comm.reduce(grid_norm, op=MPI.SUM)

        # Finalise grid and save image.
        if comm.Get_rank() == 0:
            save_image(imager, grid, grid_norm, settings['output_file'])
            log.info('Finished. Output file is %s', settings['output_file'])
    else:
        for f in inputs:
            # Clear the grid.
            grid_norm = 0.
            grid_data.fill(0)
            if imager.weighting == 'Uniform':
                grid_weights = numpy.zeros(grid_dim, dtype=precision)

            # Do a first pass for uniform weighting or W-projection.
            if imager.weighting == 'Uniform' or \
                    imager.algorithm == 'W-projection':
                imager.coords_only = True
                log.info('Reading coordinates from %s', f)
                process_input_data(f, imager, None, 0.0, grid_weights)
                imager.coords_only = False

            # Populate the local visibility grid.
            log.info('Reading visibilities from %s', f)
            grid_norm = process_input_data(f, imager, grid_data, grid_norm,
                                           grid_weights)

            # Save image by finalising grid.
            output_file = splitext(f)[0] + '.fits'
            save_image(imager, grid_data, grid_norm, output_file)
            log.info('Finished. Output file is %s', output_file)
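# The chunks() helper used by main() above is not shown. A minimal sketch of
# what it needs to do, splitting a list into n roughly equal sublists so that
# comm.scatter() receives exactly one sublist per MPI rank (an assumption
# about the missing helper, not the project's exact code):
def chunks(seq, n):
    """Splits seq into n roughly equal consecutive sublists."""
    seq = list(seq)
    size, rem = divmod(len(seq), n)
    return [seq[i * size + min(i, rem):(i + 1) * size + min(i + 1, rem)]
            for i in range(n)]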
Code example #3
    ra, dec = obs.radec_of(math.radians(az), math.radians(el))
    ra, dec = math.degrees(ra), math.degrees(dec)
    mjd_mid = ephem.julian_date(obs.date) - 2400000.5
    mjd_start = mjd_mid - obs_length_h / (2 * 24.0)
    tel.set_phase_centre(ra, dec)

    # Set up imagers for natural and uniform weighting for each sub-interval.
    # Use 12 logarithmically spaced interval lengths between 1 and num_times,
    # each rounded up to an odd number of time samples so the interval can be
    # centred on the middle of the observation.
    intervals = numpy.logspace(0, numpy.log10(num_times), 12)
    intervals = numpy.ceil(intervals).astype(int) // 2 * 2 + 1
    imagers = []
    for i in range(2 * len(intervals)):
        interval = intervals[i // 2]
        start_idx = ((num_times - interval) // 2)
        end_idx = ((num_times + interval) // 2) - 1
        root = output_root + ('_t%04d' % interval)
        imagers.append(oskar.Imager(precision))
        imagers[i].set(fov_deg=fov_deg, image_size=imsize, algorithm=algorithm)
        imagers[i].set(time_start=start_idx, time_end=end_idx)
        if i % 2 == 0:
            imagers[i].set(weighting='Natural', output_root=root + '_Natural')
        else:
            imagers[i].set(weighting='Uniform', output_root=root + '_Uniform')

    # Set up the imaging simulator.
    simulator = oskar.ImagingSimulator(imagers, precision)
    simulator.set_telescope_model(tel)
    simulator.set_observation_frequency(freq_hz)
    simulator.set_observation_time(start_time_mjd_utc=mjd_start,
                                   length_sec=obs_length_h * 3600.0,
                                   num_time_steps=num_times)
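# The fragment above starts partway through a script: obs, tel and the
# observation parameters are defined earlier. A minimal sketch of the kind of
# preamble it relies on, with placeholder values (the site coordinates, date
# and pointing are assumptions, not the project's settings):
import math

import ephem

obs = ephem.Observer()
obs.lon, obs.lat = '116.631', '-26.697'   # east longitude, latitude (degrees)
obs.elevation = 377.0                     # metres above sea level
obs.date = ephem.Date('2000/01/01 12:00:00')
az, el = 0.0, 90.0                        # pointing direction (degrees)
num_times = 1441                          # time samples in the observation
obs_length_h = 12.0                       # observation length (hours)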
Code example #4
File: hello-world.py Project: liguicheng/OSKAR
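# The fragment below is missing its preamble. A sketch of the assumed setup,
# following the standard OSKAR Python quick-start pattern; the telescope
# directory and observation values are placeholders:
import matplotlib.pyplot as plt
import numpy
import oskar

params = {
    'simulator': {'double_precision': 'true'},
    'observation': {
        'num_channels': '1',
        'start_frequency_hz': '100e6',
        'phase_centre_ra_deg': '20',
        'phase_centre_dec_deg': '-30',
        'num_time_steps': '24',
        'start_time_utc': '01-01-2000 12:00:00.000',
        'length': '12:00:00.000'
    },
    'telescope': {'input_directory': 'telescope.tm'},
    'interferometer': {'oskar_vis_filename': 'example.vis'}
}
settings = oskar.SettingsTree('oskar_sim_interferometer')
settings.from_dict(params)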
precision = 'single'
if precision == 'single':
    settings['simulator/double_precision'] = 'false'

# Create a sky model containing three sources from a numpy array.
sky_data = numpy.array(
    [[20.0, -30.0, 1, 0, 0, 0, 100.0e6, -0.7, 0.0, 0, 0, 0],
     [20.0, -30.5, 3, 2, 2, 0, 100.0e6, -0.7, 0.0, 600, 50, 45],
     [20.5, -30.5, 3, 0, 0, 2, 100.0e6, -0.7, 0.0, 700, 10, -10]])
sky = oskar.Sky.from_array(sky_data, precision)  # Pass precision here.

# Set the sky model and run the simulation.
sim = oskar.Interferometer(settings=settings)
sim.set_sky_model(sky)
sim.run()

# Make an image 4 degrees across and return it to Python.
# (It will also be saved with the filename 'example_I.fits'.)
imager = oskar.Imager(precision)
imager.set(fov_deg=4, image_size=512)
imager.set(input_file='example.vis', output_root='example')
output = imager.run(return_images=1)
image = output['images'][0]

# Render the image using matplotlib and save it as a PNG file.
im = plt.imshow(image, cmap='jet')
plt.gca().invert_yaxis()
plt.colorbar(im)
plt.savefig('%s.png' % imager.output_root)
plt.close('all')
Code example #5
import logging
import sys
from os.path import splitext

import numpy
import oskar

# Note: process_input_data() and save_image() are helper functions defined
# elsewhere in the original script (a sketch of save_image() follows this
# example).


def node_run(input_file, coords_only, bc_settings, bc_grid_weights):
    """Main function to process visibility data on Spark cluster nodes.

    Args:
        input_file (str):
            RDD element containing filename to process.
        coords_only (boolean):
            If true, read only baseline coordinates to define the weights grid.
        bc_settings (pyspark.broadcast.Broadcast):
            Spark broadcast variable containing pipeline settings dictionary.
        bc_grid_weights (pyspark.broadcast.Broadcast):
            Spark broadcast variable containing weights grid. May be None.

    Returns:
        tuple: Output RDD element.
    """
    # Create a logger.
    log = logging.getLogger('pyspark')
    log.setLevel(logging.INFO)
    if len(log.handlers) == 0:
        log.addHandler(logging.StreamHandler(sys.stdout))

    # Create an imager and configure it.
    precision = bc_settings.value['precision']
    imager = oskar.Imager(precision)
    for key, value in bc_settings.value['imager'].items():
        setattr(imager, key, value)
    grid_size = imager.plane_size
    grid_weights = None

    # Get a handle to the input Measurement Set.
    ms_han = oskar.MeasurementSet.open(input_file)

    # Check if doing a first pass.
    if coords_only:
        # If necessary, generate a local weights grid.
        if imager.weighting == 'Uniform':
            grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision)

        # Do a first pass for uniform weighting or W-projection.
        log.info('Reading coordinates from %s', input_file)
        imager.coords_only = True
        process_input_data(ms_han, imager, None, grid_weights)
        imager.coords_only = False

        # Return weights grid and required number of W-planes as RDD element.
        return grid_weights, imager.num_w_planes

    # Allocate a local visibility grid on the node.
    grid_data = numpy.zeros([grid_size, grid_size],
                            dtype='c8' if precision == 'single' else 'c16')

    # Process data according to mode.
    log.info('Reading visibilities from %s', input_file)
    if bc_settings.value['combine']:
        # Get weights grid from Spark Broadcast variable.
        if imager.weighting == 'Uniform':
            grid_weights = bc_grid_weights.value

        # Populate the local visibility grid.
        grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights)

        # Return grid as RDD element.
        log.info('Returning gridded visibilities to RDD')
        return grid_data, grid_norm
    else:
        # If necessary, generate a local weights grid.
        if imager.weighting == 'Uniform':
            grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision)

        # If necessary, do a first pass for uniform weighting or W-projection.
        if imager.weighting == 'Uniform' or imager.algorithm == 'W-projection':
            imager.coords_only = True
            process_input_data(ms_han, imager, None, grid_weights)
            imager.coords_only = False

        # Populate the local visibility grid.
        grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights)

        # Save image by finalising grid.
        output_file = splitext(input_file)[0] + '.fits'
        save_image(imager, grid_data, grid_norm, output_file)
        log.info('Finished. Output file is %s', output_file)
        return 0
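# save_image() is used throughout these examples but not shown. A sketch of
# what it plausibly does, assuming oskar.Imager.finalise_plane() for the FFT
# and normalisation step and astropy for the FITS output (an assumption, not
# the project's exact code):
from astropy.io import fits


def save_image(imager, grid_data, grid_norm, output_file):
    """FFTs and normalises the visibility grid, then writes a FITS image."""
    imager.finalise_plane(grid_data, grid_norm)
    grid_data = numpy.real(grid_data)
    # Trim the (possibly padded) grid down to the requested image size.
    border = (imager.plane_size - imager.image_size) // 2
    if border > 0:
        end = border + imager.image_size
        grid_data = grid_data[border:end, border:end]
    fits.writeto(output_file, grid_data, overwrite=True)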
Code example #6
import json
import logging
import sys
from functools import partial
from glob import glob
from os.path import join

import oskar
import pyspark

# Note: node_run() and reduce_sequences() are defined elsewhere in the
# original script (a sketch of reduce_sequences() follows this example).


def main():
    """Runs a test imaging pipeline using Spark."""
    # Check command line arguments.
    if len(sys.argv) < 3:
        raise RuntimeError(
            'Usage: spark-submit spark_imager_test.py <settings_file> <dir> '
            '[partitions]')

    # Create log object.
    log = logging.getLogger('pyspark')
    log.setLevel(logging.INFO)
    log.addHandler(logging.StreamHandler(sys.stdout))

    # Load pipeline settings.
    with open(sys.argv[1]) as f:
        settings = json.load(f)

    # Get a list of input Measurement Sets to process.
    data_dir = str(sys.argv[2])
    inputs = glob(join(data_dir, '*.ms')) + glob(join(data_dir, '*.MS'))
    # Materialise as a list: under Python 3, filter() returns an iterator
    # that the join() below would otherwise exhaust.
    inputs = list(filter(None, inputs))
    log.info('Found input Measurement Sets: %s', ', '.join(inputs))

    # Get a Spark context.
    context = pyspark.SparkContext(appName="spark_imager_test")

    # Create the Spark RDD containing the input filenames,
    # suitably parallelized.
    partitions = int(sys.argv[3]) if len(sys.argv) > 3 else 2
    rdd = context.parallelize(inputs, partitions)

    # Define Spark broadcast variables.
    bc_settings = context.broadcast(settings)
    bc_grid_weights = None

    # Process coordinates first if required.
    if (settings['combine'] and (
            settings['imager']['weighting'] == 'Uniform' or
            settings['imager']['algorithm'] == 'W-projection')):
        # Create RDD to generate weights grids.
        rdd_coords = rdd.map(
            partial(node_run, coords_only=True, bc_settings=bc_settings,
                    bc_grid_weights=None))

        # Mark this RDD as persistent so it isn't computed more than once.
        rdd_coords.persist()

        # Get the maximum number of W-planes required, and update settings.
        num_w_planes = rdd_coords.map(lambda x: x[1]).max()
        settings['imager']['num_w_planes'] = num_w_planes

        # Get the combined grid of weights and broadcast it to nodes.
        output = rdd_coords.reduce(reduce_sequences)
        bc_grid_weights = context.broadcast(output[0])

        # Delete this RDD.
        rdd_coords.unpersist()

    # Re-broadcast updated settings.
    bc_settings = context.broadcast(settings)

    # Run parallel pipeline on worker nodes and combine visibility grids.
    output = rdd.map(
        partial(node_run, coords_only=False, bc_settings=bc_settings,
                bc_grid_weights=bc_grid_weights)).reduce(reduce_sequences)

    # Finalise combined visibility grids if required.
    if settings['combine']:
        # Create an imager to finalise (FFT) the gridded data.
        imager = oskar.Imager(settings['precision'])
        for key, value in settings['imager'].items():
            setattr(imager, key, value)

        # Finalise grid and save image.
        save_image(imager, output[0], output[1], settings['output_file'])
        log.info('Finished. Output file is %s', settings['output_file'])

    context.stop()
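# reduce_sequences() combines pairs of node_run() outputs: either plain
# numbers (non-combine mode) or sequences such as (grid, norm) tuples whose
# entries must be summed so that grids and normalisation terms accumulate
# across partitions. A minimal sketch of a compatible reducer (an assumption
# about the missing helper, not the project's exact code):
import numbers


def reduce_sequences(seq1, seq2):
    """Sums two node_run outputs element-wise."""
    if isinstance(seq1, numbers.Number):
        return seq1 + seq2
    return tuple(a + b for a, b in zip(seq1, seq2))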