Example #1
def convert(input, input_schema, output_schema):
    mapping, in_shape, out_shape, dtype = convert_setup(
        input, input_schema, output_schema)

    n_free_dims = len(input.shape) - len(in_shape)
    free_dims = tuple("dim-%d" % i for i in range(n_free_dims))
    in_corr_dims = tuple("icorr-%d" % i for i in range(len(in_shape)))
    out_corr_dims = tuple("ocorr-%d" % i for i in range(len(out_shape)))

    # Output dimensions are new dimensions
    new_axes = {d: s for d, s in zip(out_corr_dims, out_shape)}

    # Note the dummy in_corr_dims introduced at the end of our output.
    # We do this to prevent a contraction over the input dimensions
    # (which can be arbitrary) within the wrapper class
    res = da.core.blockwise(_wrapper,
                            free_dims + out_corr_dims + in_corr_dims,
                            input,
                            free_dims + in_corr_dims,
                            mapping=mapping,
                            in_shape=in_shape,
                            out_shape=out_shape,
                            new_axes=new_axes,
                            dtype_=dtype,
                            dtype=dtype)

    # Now contract over the dummy dimensions
    start = len(free_dims) + len(out_corr_dims)
    end = start + len(in_corr_dims)
    return res.sum(axis=list(range(start, end)))
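The dummy-dimension trick above can be shown in isolation. Below is a minimal, hypothetical sketch (the (ocorr, icorr) weight matrix is invented, not the library's mapping): da.blockwise introduces the output correlation axis via new_axes while keeping the input correlation axis in the output indices, and the dummy axis is contracted explicitly afterwards.

import numpy as np
import dask.array as da


def _expand(block, mapping=None):
    # block: (row, icorr) -> (row, ocorr, icorr); the input correlation
    # axis is kept so that blockwise does not contract over it
    return block[:, None, :] * mapping[None, :, :]


x = da.ones((10, 4), chunks=(5, 4))
# Hypothetical (ocorr, icorr) weights
mapping = np.array([[0.5, 0.0, 0.0, 0.5],
                    [0.0, 0.5, 0.5, 0.0]])

res = da.blockwise(_expand, ("row", "ocorr", "icorr"),
                   x, ("row", "icorr"),
                   mapping=mapping,
                   new_axes={"ocorr": 2},
                   dtype=x.dtype)

# Contract over the dummy input correlation dimension
out = res.sum(axis=2)
assert out.shape == (10, 2)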
Example #2
    def _create_dict(self):
        (source, row, _, chan), corrs = self.blocks[:4], self.blocks[4:]

        # Iterator of block ids for row, channel and correlation blocks
        # We don't reduce over these dimensions
        block_ids = enumerate(
            product(range(row), range(chan), *[range(cb) for cb in corrs]))

        source_block_chunks = _source_stream_blocks(source, self.streams)

        layers = {}

        # This looping structure should match that of the streamed
        # reduction's _create_dict
        for flat_bid, bid in block_ids:
            rb, fb = bid[0:2]
            cb = bid[2:]

            last_stream_keys = []

            for sb_start in range(0, source, source_block_chunks):
                sb_end = min(sb_start + source_block_chunks, source)
                # Key of the last block in this stream's chain
                last_stream_keys.append((self.in_name, sb_end - 1, flat_bid))

            key = (self.out_name, rb, fb) + cb
            task = (sum, last_stream_keys)
            layers[key] = task

        return layers
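For reference, a toy hand-written graph (not library code) illustrating the task convention used in layers above: a tuple is a dask task, and a list of keys inside a task is replaced by the corresponding computed values, which is how (sum, last_stream_keys) sums the stream endpoints.

import dask

dsk = {
    ("stream", 0): 1.0,
    ("stream", 1): 2.0,
    # Lists of keys are substituted with their computed values
    "out": (sum, [("stream", 0), ("stream", 1)]),
}
assert dask.get(dsk, "out") == 3.0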
Example #3
    def _create_dict(self):
        # Graph dictionary
        layers = {}

        # For loop performance
        out_name = self.out_name
        ti = self.time_index_name
        a1 = self.ant1_name
        a2 = self.ant2_name
        dde1 = self.dde1_name
        coh = self.coh_name
        dde2 = self.dde2_name

        # Extract dimension blocks
        (source_blocks, row_blocks, ant_blocks,
         chan_blocks), corr_blocks = self.blocks[:4], self.blocks[4:]

        # Only a single antenna block is supported
        assert ant_blocks == 1
        ab = 0

        # Subdivide number of source blocks by number of streams
        source_block_chunks = _source_stream_blocks(source_blocks,
                                                    self.streams)

        # Iterator of block ids for row, channel and correlation blocks
        # We don't reduce over these dimensions
        block_ids = enumerate(
            product(range(row_blocks), range(chan_blocks),
                    *[range(cb) for cb in corr_blocks]))

        for flat_bid, bid in block_ids:
            rb, fb = bid[0:2]
            cb = bid[2:]

            # Create the streamed reduction proper.
            # Within a stream, the base visibilities are set to the
            # result of the previous task in the stream (last_key)
            for sb_start in range(0, source_blocks, source_block_chunks):
                sb_end = min(sb_start + source_block_chunks, source_blocks)
                last_key = None

                for sb in range(sb_start, sb_end):
                    # Dask task object calling predict vis
                    task = (np_predict_vis, (ti, rb), (a1, rb), (a2, rb),
                            (dde1, sb, rb, ab, fb) + cb if dde1 else None,
                            (coh, sb, rb, fb) + cb if coh else None,
                            (dde2, sb, rb, ab, fb) + cb if dde2 else None,
                            None, last_key, None)

                    key = (out_name, sb, flat_bid)
                    layers[key] = task
                    last_key = key

        return layers
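The last_key chaining can likewise be illustrated with a toy graph (invented keys, not the predict graph): each task consumes the key of its predecessor, so blocks within a stream accumulate sequentially while separate streams remain independent.

from operator import add

import dask

dsk = {
    ("acc", 0): 1,
    ("acc", 1): (add, ("acc", 0), 10),    # consumes the previous key
    ("acc", 2): (add, ("acc", 1), 100),
}
assert dask.get(dsk, ("acc", 2)) == 111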
Example #4
    def __init__(self, header=None):
        super(BeamAxes, self).__init__(header)

        # Check for custom irregular grid format.
        # Currently only implemented for FREQ dimension.
        irregular_grid = np.asarray([[
            header.get('G%s%d' % (self._ctype[i], j), None)
            for j in range(1, self._naxis[i] + 1)
        ] for i in range(self._ndims)])

        # Irregular grids are only valid if values exist for all grid points
        self._irreg = [
            all(x is not None for x in irregular_grid[i])
            for i in range(self._ndims)
        ]

        def _regular_grid(i):
            """ Construct a regular grid from a FitsAxes object and index """
            R = np.arange(0.0, float(self._naxis[i]))
            return (R - self._crpix[i]) * self._cdelt[i] + self._crval[i]

        # Set up the grid
        self._grid = [
            _regular_grid(i)
            if not self._irreg[i] else np.asarray(irregular_grid[i])
            for i in range(self._ndims)
        ]

        self._sign = [1.0] * self._ndims

        for i in range(self._ndims):
            # Convert any degree axes to radians
            if self._cunit[i] == 'DEG':
                self._cunit[i] = 'RAD'
                self._crval[i] = np.deg2rad(self._crval[i])
                self._cdelt[i] = np.deg2rad(self._cdelt[i])
                self._grid[i] = np.deg2rad(self._grid[i])

            # Flip the sign and correct the ctype if necessary
            if self._ctype[i].startswith('-'):
                self._ctype[i] = self._ctype[i][1:]
                self._sign[i] = -1.0
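A worked example of the _regular_grid formula, with made-up axis values: world coordinates follow (pixel - CRPIX) * CDELT + CRVAL, where CRPIX has already been shifted to 0-based indexing.

import numpy as np

naxis, crpix, cdelt, crval = 5, 2.0, 0.5, 100.0
grid = (np.arange(0.0, float(naxis)) - crpix) * cdelt + crval
# array([ 99. ,  99.5, 100. , 100.5, 101. ])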
Example #5
    def __init__(self, header=None):
        # Create a zero-dimensional object if no header is supplied
        self._ndims = ndims = 0 if header is None else header['NAXIS']

        # Extract header information for each dimension
        axr = list(range(1, ndims + 1))
        self._naxis = [header.get('NAXIS%d' % n) for n in axr]
        # Default the CTYPE to the axis number if the key is missing
        self._ctype = [header.get('CTYPE%d' % n, str(n)).strip() for n in axr]
        self._crval = [header.get('CRVAL%d' % n, 0) for n in axr]
        # Convert reference pixel from FORTRAN (1-based) to C (0-based) indexing
        self._crpix = [header['CRPIX%d' % n] - 1 for n in axr]
        self._cdelt = [header.get('CDELT%d' % n, 1) for n in axr]
        self._cunit = [
            header.get('CUNIT%d' % n, '').strip().upper() for n in axr
        ]
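A minimal sketch of the same access pattern using a plain dict in place of an astropy Header (beam_grids below documents that a dict is also accepted); the keyword values are invented.

header = {
    'NAXIS': 1,
    'NAXIS1': 4, 'CTYPE1': 'FREQ', 'CRVAL1': 1.4e9,
    'CRPIX1': 1, 'CDELT1': 1e6, 'CUNIT1': 'HZ',
}
# FITS CRPIX is FORTRAN (1-based); subtracting 1 gives the C index
crpix0 = header['CRPIX1'] - 1  # 0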
Example #6
def beam_grids(header):
    """
    Extracts the FITS indices and grids for the beam dimensions
    in the supplied FITS ``header``.
    Specifically the axes specified by

    1. ``L`` or ``X`` CTYPE
    2. ``M`` or ``Y`` CTYPE
    3. ``FREQ`` CTYPE

    If the first two axes have a negative sign, such as ``-L``, the grid
    will be inverted.

    Any grids corresponding to axes with a CUNIT type of ``DEG``
    will be converted to radians.

    Parameters
    ----------
    header : :class:`~astropy.io.fits.Header` or dict
        FITS header object.

    Returns
    -------
    tuple
        Returns
        ((l_axis, l_grid), (m_axis, m_grid), (freq_axis, freq_grid))
        where the axis is the FORTRAN indexed FITS axis (1-indexed)
        and grid contains the values at each pixel along the axis.
    """
    beam_axes = BeamAxes(header)

    l = m = freq = None  # noqa

    # Find the relevant axes
    for i in range(beam_axes.ndims):
        if beam_axes.ctype[i] in ('L', 'X'):
            l = i  # noqa
        elif beam_axes.ctype[i] in ('M', 'Y'):
            m = i
        elif beam_axes.ctype[i] == "FREQ":
            freq = i

    # Complain if not found
    if l is None:
        raise ValueError("No L/X axis present in FITS header")

    if m is None:
        raise ValueError("No M/Y axis present in FITS header")

    if freq is None:
        raise ValueError("No FREQ axis present in FITS header")

    # Sign of L/M axes?
    l_sign = beam_axes.sign[l]
    m_sign = beam_axes.sign[m]

    # Obtain axes grids
    l_grid = beam_axes.grid[l]
    m_grid = beam_axes.grid[m]
    freq_grid = beam_axes.grid[freq]

    # Flip the grids if the axis sign is negative
    l_grid = np.flipud(l_grid) if l_sign == -1.0 else l_grid
    m_grid = np.flipud(m_grid) if m_sign == -1.0 else m_grid

    return ((l + 1, l_grid), (m + 1, m_grid), (freq + 1, freq_grid))
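A hypothetical usage sketch, assuming the classes above are importable; the header values are invented. The DEG axes are converted to radians by BeamAxes and the returned axis numbers are FORTRAN-indexed.

header = {
    'NAXIS': 3,
    'NAXIS1': 3, 'CTYPE1': 'L', 'CRPIX1': 2,
    'CRVAL1': 0.0, 'CDELT1': 0.1, 'CUNIT1': 'DEG',
    'NAXIS2': 3, 'CTYPE2': 'M', 'CRPIX2': 2,
    'CRVAL2': 0.0, 'CDELT2': 0.1, 'CUNIT2': 'DEG',
    'NAXIS3': 2, 'CTYPE3': 'FREQ', 'CRPIX3': 1,
    'CRVAL3': 1.4e9, 'CDELT3': 1e6, 'CUNIT3': 'HZ',
}

(l_ax, l_grid), (m_ax, m_grid), (f_ax, f_grid) = beam_grids(header)
# l_ax == 1, m_ax == 2, f_ax == 3; l_grid and m_grid are in radians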
Example #7
def apply_dies(time_index, antenna1, antenna2, die1_jones, base_vis,
               die2_jones, predict_check_tup, out_dtype):
    """ Apply any Direction-Independent Effects and Base Visibilities """

    # Now apply any Direction Independent Effect Terms
    (have_ddes1, have_coh, have_ddes2, have_dies1, have_bvis,
     have_dies2) = predict_check_tup

    have_dies = have_dies1 and have_dies2

    # Generate strings for the correlation dimensions
    # This also has the effect of checking that we have all valid inputs
    if have_dies:
        cdims = tuple("corr-%d" % i for i in range(len(die1_jones.shape[3:])))
    elif have_bvis:
        cdims = tuple("corr-%d" % i for i in range(len(base_vis.shape[2:])))
    else:
        raise ValueError("Missing both antenna and baseline jones terms")

    # In the case of predict_vis, the "row" and "time" dimensions
    # are intimately related -- a contiguous series of rows
    # are related to a contiguous series of timesteps.
    # This means that the number of chunks of these
    # two dimensions must match even though the chunk sizes may not.
    # blockwise insists on matching chunk sizes.
    # For this reason, we use the lower level blockwise and
    # substitute "row" for "time" in arrays such as dde1_jones
    # and die1_jones.
    gjones_dims = ("row", "ant", "chan") + cdims

    # Setup
    # 1. Optional blockwise arguments
    # 2. Optional numblocks kwarg
    # 3. HighLevelGraph dependencies
    bw_args = [
        time_index.name, ("row", ), antenna1.name, ("row", ), antenna2.name,
        ("row", )
    ]
    numblocks = {
        time_index.name: time_index.numblocks,
        antenna1.name: antenna1.numblocks,
        antenna2.name: antenna2.numblocks
    }

    deps = [time_index, antenna1, antenna2]

    # dde1_jones, source_coh and dde2_jones not present
    # these are already applied into sum_coherencies
    bw_args.extend([None, None, None, None, None, None])

    if have_dies:
        bw_args.extend([die1_jones.name, gjones_dims])
        numblocks[die1_jones.name] = die1_jones.numblocks
        deps.append(die1_jones)
        other_chunks = die1_jones.chunks[2:]
    else:
        bw_args.extend([None, None])

    if have_bvis:
        bw_args.extend([base_vis.name, ("row", "chan") + cdims])
        numblocks[base_vis.name] = base_vis.numblocks
        deps.append(base_vis)
        other_chunks = base_vis.chunks[1:]
    else:
        bw_args.extend([None, None])

    if have_dies:
        bw_args.extend([die2_jones.name, gjones_dims])
        numblocks[die2_jones.name] = die2_jones.numblocks
        deps.append(die2_jones)
        other_chunks = die2_jones.chunks[2:]
    else:
        bw_args.extend([None, None])

    assert len(bw_args) // 2 == 9

    token = da.core.tokenize(time_index, antenna1, antenna2, die1_jones,
                             base_vis, die2_jones)
    name = '-'.join(("predict-vis-apply-dies", token))
    layer = blockwise(_predict_dies_wrapper,
                      name, ("row", "chan") + cdims,
                      *bw_args,
                      numblocks=numblocks)

    graph = HighLevelGraph.from_collections(name, layer, deps)
    chunks = (time_index.chunks[0], ) + other_chunks

    return da.Array(graph, name, chunks, dtype=out_dtype)
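The low-level blockwise pattern used above can be sketched with a toy function and names: inputs are interleaved (name, index-tuple) pairs, a None index passes an argument through as a literal, numblocks maps each input name to its block counts, and the layer is wrapped in a HighLevelGraph before the output array is constructed.

import dask.array as da
from dask.blockwise import blockwise
from dask.highlevelgraph import HighLevelGraph


def _scale(block, factor):
    return factor * block


x = da.arange(10, chunks=5)
name = "scale-" + da.core.tokenize(x)
layer = blockwise(_scale, name, ("row",),
                  x.name, ("row",),
                  2, None,  # literal argument, no index
                  numblocks={x.name: x.numblocks})
graph = HighLevelGraph.from_collections(name, layer, dependencies=[x])
y = da.Array(graph, name, x.chunks, dtype=x.dtype)
assert (y.compute() == (2 * x).compute()).all()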
Example #8
def fan_reduction(time_index, antenna1, antenna2, dde1_jones, source_coh,
                  dde2_jones, predict_check_tup, out_dtype):
    """ Does a standard dask tree reduction over source coherencies """
    (have_ddes1, have_coh, have_ddes2, have_dies1, have_bvis,
     have_dies2) = predict_check_tup

    have_ddes = have_ddes1 and have_ddes2

    if have_ddes:
        cdims = tuple("corr-%d" % i for i in range(len(dde1_jones.shape[4:])))
    elif have_coh:
        cdims = tuple("corr-%d" % i for i in range(len(source_coh.shape[3:])))
    else:
        raise ValueError("need ddes or source coherencies")

    ajones_dims = ("src", "row", "ant", "chan") + cdims

    # Setup
    # 1. Optional blockwise arguments
    # 2. Optional numblocks kwarg
    # 3. HighLevelGraph dependencies
    bw_args = [
        time_index.name, ("row", ), antenna1.name, ("row", ), antenna2.name,
        ("row", )
    ]
    numblocks = {
        time_index.name: time_index.numblocks,
        antenna1.name: antenna1.numblocks,
        antenna2.name: antenna2.numblocks
    }

    # Dependencies
    deps = [time_index, antenna1, antenna2]

    # Handle presence/absence of dde1_jones
    if have_ddes:
        bw_args.extend([dde1_jones.name, ajones_dims])
        numblocks[dde1_jones.name] = dde1_jones.numblocks
        deps.append(dde1_jones)
        other_chunks = dde1_jones.chunks[3:]
        src_chunks = dde1_jones.chunks[0]
    else:
        bw_args.extend([None, None])

    # Handle presence/absence of source_coh
    if have_coh:
        bw_args.extend([source_coh.name, ("src", "row", "chan") + cdims])
        numblocks[source_coh.name] = source_coh.numblocks
        deps.append(source_coh)
        other_chunks = source_coh.chunks[2:]
        src_chunks = source_coh.chunks[0]
    else:
        bw_args.extend([None, None])

    # Handle presence/absence of dde2_jones
    if have_ddes:
        bw_args.extend([dde2_jones.name, ajones_dims])
        numblocks[dde2_jones.name] = dde2_jones.numblocks
        deps.append(dde2_jones)
        other_chunks = dde2_jones.chunks[3:]
        src_chunks = dde2_jones.chunks[0]
    else:
        bw_args.extend([None, None])

    # die1_jones, base_vis and die2_jones absent for this part of the graph
    bw_args.extend([None, None, None, None, None, None])

    assert len(bw_args) // 2 == 9, len(bw_args) // 2

    token = da.core.tokenize(time_index, antenna1, antenna2, dde1_jones,
                             source_coh, dde2_jones)
    name = "-".join(("predict-vis-sum-coh", token))
    layer = blockwise(_predict_coh_wrapper,
                      name, ("src", "row", "chan") + cdims,
                      *bw_args,
                      numblocks=numblocks)

    graph = HighLevelGraph.from_collections(name, layer, deps)

    # Infer output chunk sizes from the source, row and other chunks
    # gathered above
    chunks = (
        (1, ) * len(src_chunks),
        time_index.chunks[0],
    ) + other_chunks

    # Create array
    sum_coherencies = da.Array(graph, name, chunks, dtype=out_dtype)

    # Reduce source axis
    return sum_coherencies.sum(axis=0)
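A toy illustration of the final contraction: the per-source partial sums carry chunk size 1 along the source axis, and sum(axis=0) reduces them with dask's built-in tree reduction.

import dask.array as da

partials = da.ones((4, 10, 8), chunks=((1, 1, 1, 1), (5, 5), (8,)))
vis = partials.sum(axis=0)
assert vis.shape == (10, 8)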