Example #1
    def __init__(self, data, unit_x, unit_y, has_analytic_ft=False):
        """Create a new Convolvable object.

        Parameters
        ----------
        data : `numpy.ndarray`
            2D ndarray of data
        unit_x : `numpy.ndarray`
            1D ndarray defining x data grid
        unit_y : `numpy.ndarray`
            1D ndarray defining y data grid
        has_analytic_ft : `bool`, optional
            Whether this convolvable overrides self.analytic_ft, and has a known
            analytical Fourier transform

        """
        self.data = data
        self.unit_x = unit_x
        self.unit_y = unit_y
        self.has_analytic_ft = has_analytic_ft
        if data is not None:
            self.samples_y, self.samples_x = data.shape
            self.center_y = int(m.ceil(self.samples_y / 2))
            self.center_x = int(m.ceil(self.samples_x / 2))
            self.sample_spacing = unit_x[1] - unit_x[0]
        else:
            self.sample_spacing = 1e99
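A minimal usage sketch of the constructor above (assumptions for illustration: `Convolvable` is in scope, and `m` resolves to a math backend providing `ceil`, as it does in prysm):

import math as m
import numpy as np

# build a Convolvable from a uniform 128x128 grid spanning [-1, 1]
x = np.linspace(-1, 1, 128)
y = np.linspace(-1, 1, 128)
data = np.ones((128, 128))

c = Convolvable(data=data, unit_x=x, unit_y=y)
print(c.samples_x, c.samples_y)  # 128 128
print(c.sample_spacing)          # unit_x[1] - unit_x[0], ~0.01575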
Example #2
def _padshape(array, Q):
    """Compute the pad specification (as consumed by np.pad) and output dimensions for padding array by a factor Q."""
    y, x = array.shape
    out_x = int(m.ceil(x * Q))
    out_y = int(m.ceil(y * Q))
    factor_x = (out_x - x) / 2
    factor_y = (out_y - y) / 2
    pad_shape = ((int(m.floor(factor_y)), int(m.ceil(factor_y))),
                 (int(m.floor(factor_x)), int(m.ceil(factor_x))))
    return pad_shape, out_x, out_y
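A sketch of how the returned pad specification plugs into `numpy.pad` (assuming `m` aliases the standard `math` module); note the floor/ceil split when the total padding along an axis is odd:

import math as m
import numpy as np

arr = np.ones((4, 5))
pad_spec, out_x, out_y = _padshape(arr, Q=2)
print(pad_spec)      # ((2, 2), (2, 3)); the 5 extra columns split 2/3
padded = np.pad(arr, pad_spec, mode='constant')
print(padded.shape)  # (8, 10) == (out_y, out_x)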
Example #3
    def capture(self, convolvable):
        """Sample a convolvable, mimics capturing a photo of an oversampled representation of an image.

        Parameters
        ----------
        convolvable : `prysm.Convolvable`
            a convolvable object

        Returns
        -------
        `prysm.Convolvable`
            a new Convolvable object, as it would be sampled by the detector

        Raises
        ------
        ValueError
            if the convolvable would have to become supersampled by the detector;
            this would lead to an inaccurate result and is not supported

        """
        # we assume the pixels are bigger than the samples in the convolvable
        samples_per_pixel = self.pixel_size / convolvable.sample_spacing
        if samples_per_pixel < 1:
            raise ValueError('Pixels smaller than samples, bindown not possible.')
        else:
            samples_per_pixel = int(m.ceil(samples_per_pixel))

        data = bindown(convolvable.data, samples_per_pixel)
        s = data.shape
        exty, extx = s[0] * self.pixel_size / 2, s[1] * self.pixel_size / 2
        ux, uy = m.arange(-extx, extx, self.pixel_size), m.arange(-exty, exty, self.pixel_size)
        self.captures.append(Convolvable(data=data, unit_x=ux, unit_y=uy, has_analytic_ft=False))
        return self.captures[-1]
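The grid construction at the end of `capture` is a symmetric `arange` with one entry per binned pixel; a standalone sketch of that step:

import numpy as np

n_pixels, pixel_size = 4, 5.0  # 4 binned samples at a 5-unit pitch
ext = n_pixels * pixel_size / 2
ux = np.arange(-ext, ext, pixel_size)
print(ux)  # [-10.  -5.   0.   5.]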
Example #4
    def sample_psf(self, psf):
        '''Samples a PSF, mimicking the capture of a photo of an oversampled representation of an image.

        Args:
            psf (`prysm.PSF`): a point spread function.

        Returns:
            `Image`: a new Image object, as it would be sampled by the detector.

        Notes:
            inspired by https://stackoverflow.com/questions/14916545/numpy-rebinning-a-2d-array

        '''

        # we assume the pixels are bigger than the samples in the PSF
        samples_per_pixel = self.pixel_size / psf.sample_spacing
        if samples_per_pixel < 1:
            raise ValueError(
                'Pixels smaller than samples, bindown not possible.')
        else:
            samples_per_pixel = int(ceil(samples_per_pixel))

        data = bindown(psf.data, samples_per_pixel)
        self.captures.append(Image(data=data, sample_spacing=self.pixel_size))
        return self.captures[-1]
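A sketch of the guard shared by `sample_psf` and `capture` above: binning down is only valid when detector pixels are at least as large as the source samples, and the ratio is rounded up to an integer bin size:

from math import ceil

pixel_size, sample_spacing = 5.0, 1.3
samples_per_pixel = pixel_size / sample_spacing  # ~3.85
if samples_per_pixel < 1:
    raise ValueError('Pixels smaller than samples, bindown not possible.')
samples_per_pixel = int(ceil(samples_per_pixel))
print(samples_per_pixel)  # 4 -- bin in 4x4 blocks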
Example #5
def prop_pupil_plane_to_psf_plane_units(wavefunction, input_sample_spacing,
                                        prop_dist, wavelength, Q):
    """Compute the ordinate axes for a pupil plane to PSF plane propagation.

    Parameters
    ----------
    wavefunction : `numpy.ndarray`
        the pupil wavefunction
    input_sample_spacing : `float`
        spacing between samples in the pupil plane
    prop_dist : `float`
        propagation distance along the z distance
    wavelength : `float`
        wavelength of light
    Q : `float`
        oversampling / padding factor

    Returns
    -------
    unit_x : `numpy.ndarray`
        x axis unit, 1D ndarray
    unit_y : `numpy.ndarray`
        y axis unit, 1D ndarray

    """
    s = wavefunction.shape
    samples_x, samples_y = s[1] * Q, s[0] * Q
    sample_spacing_x = pupil_sample_to_psf_sample(
        pupil_sample=input_sample_spacing,
        samples=samples_x,
        wavelength=wavelength,
        efl=prop_dist) / 1e3  # factor of 1e3 corrects for unit translation
    sample_spacing_y = pupil_sample_to_psf_sample(
        pupil_sample=input_sample_spacing,
        samples=samples_y,
        wavelength=wavelength,
        efl=prop_dist) / 1e3  # factor of 1e3 corrects for unit translation
    unit_x = m.arange(-1 * int(m.ceil(samples_x / 2)),
                      int(m.floor(samples_x / 2))) * sample_spacing_x
    unit_y = m.arange(-1 * int(m.ceil(samples_y / 2)),
                      int(m.floor(samples_y / 2))) * sample_spacing_y
    return unit_x, unit_y
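For reference, `pupil_sample_to_psf_sample` encodes (up to prysm's unit-scaling factors) the standard FFT propagation sampling relation; a hedged sketch of the underlying formula, assuming consistent units throughout:

def psf_sample_spacing(pupil_sample, samples, wavelength, efl):
    # Fourier-optics sampling relation for an N-sample FFT propagation:
    # dx_psf = wavelength * efl / (N * dx_pupil); the 1e3 unit correction
    # applied above is omitted here.
    return wavelength * efl / (samples * pupil_sample)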
Example #6
def pad2d(array, Q=2, value=0):
    """Symmetrically pads a 2D array with a value.

    Parameters
    ----------
    array : `numpy.ndarray`
        source array
    Q : `float` or `int`
        oversampling factor; ratio of output to input array widths
    value : `float` or `int`
        value with which to pad the array

    Returns
    -------
    `numpy.ndarray`
        padded array

    Notes
    -----
    padding is symmetric to within one sample; if the total padding along an
    axis is odd, the extra sample is placed on the trailing side.

    """
    if Q == 1:
        return array
    else:
        y, x = array.shape
        out_x = int(x * Q)
        out_y = int(y * Q)
        factor_x = (out_x - x) / 2
        factor_y = (out_y - y) / 2
        pad_shape = ((int(m.floor(factor_y)), int(m.ceil(factor_y))),
                     (int(m.floor(factor_x)), int(m.ceil(factor_x))))
        if value == 0:
            out = m.zeros((out_y, out_x), dtype=array.dtype)
        else:
            out = m.zeros((out_y, out_x), dtype=array.dtype) + value
        yy, xx = pad_shape
        out[yy[0]:yy[0] + y, xx[0]:xx[0] + x] = array
        return out
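Usage sketch for `pad2d` (an assumption for illustration: `m` aliases numpy, which provides the `zeros`, `floor`, and `ceil` the function needs):

import numpy as m

arr = m.ones((3, 3))
padded = pad2d(arr, Q=2)  # 6x6, original centered, zeros around it
print(padded.shape)       # (6, 6)
nanpad = pad2d(arr, Q=2, value=m.nan)  # pad with NaN instead of zero
print(nanpad[0, 0])       # nan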
Example #7
    def mask(self, shape_or_mask, diameter=None):
        """Mask the signal.

        The mask will be inscribed in the axis with fewer pixels.  I.e., for
        an interferogram with 1280x1000 pixels, the mask will be 1000x1000 at
        largest.

        Parameters
        ----------
        shape_or_mask : `str` or `numpy.ndarray`
            valid shape name from prysm.geometry, or a user-provided mask array
        diameter : `float`
            diameter of the mask, in self.spatial_units

        Returns
        -------
        self
            modified Interferogram instance.

        """
        if isinstance(shape_or_mask, str):
            if diameter is None:
                diameter = self.diameter
            mask = mcache(shape_or_mask, min(self.shape), radius=diameter / min(self.diameter_x, self.diameter_y))
            base = m.zeros(self.shape, dtype=config.precision)
            difference = abs(self.shape[0] - self.shape[1])
            l, u = int(m.floor(difference / 2)), int(m.ceil(difference / 2))
            if u == 0:  # guard against nocrop scenario
                _slice = slice(None)
            else:
                _slice = slice(l, -u)
            if self.shape[0] < self.shape[1]:
                base[:, _slice] = mask
            else:
                base[_slice, :] = mask

            mask = base
        else:
            mask = shape_or_mask

        hitpts = mask == 0
        self.phase[hitpts] = m.nan
        return self
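A standalone sketch of the inscribing logic in the `str` branch above: the square mask of side `min(shape)` is centered along the longer axis by splitting the size difference floor/ceil:

import math

shape = (1000, 1280)              # rows x columns, as in the docstring
difference = abs(shape[0] - shape[1])
l, u = difference // 2, int(math.ceil(difference / 2))
_slice = slice(l, -u) if u else slice(None)
print(_slice)                     # slice(140, -140, None)
# base[:, _slice] addresses the centered 1000x1000 region of the frame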
Example #8
def bindown(array, nsamples_x, nsamples_y=None, mode='avg'):
    """Bin (resample) an array.

    Parameters
    ----------
    array : `numpy.ndarray`
        array of values
    nsamples_x : `int`
        number of samples in x axis to bin by
    nsamples_y : `int`
        number of samples in y axis to bin by.  If None, duplicates value from nsamples_x
    mode : `str`, {'avg', 'sum'}
        sum or avg, how to adjust the output signal

    Returns
    -------
    `numpy.ndarray`
        ndarray binned by given number of samples

    Notes
    -----
    Array should be 2D.  TODO: patch to allow 3D data.

    If the size of `array` is not evenly divisible by the number of samples,
    the algorithm will trim around the border of the array.  If the trim
    length is odd, one extra sample will be lost on the left side as opposed
    to the right side.

    Raises
    ------
    ValueError
        invalid mode

    """
    if nsamples_y is None:
        nsamples_y = nsamples_x

    if nsamples_x == 1 and nsamples_y == 1:
        return array

    # determine amount we need to trim the array
    samples_x, samples_y = array.shape
    total_samples_x = samples_x // nsamples_x
    total_samples_y = samples_y // nsamples_y
    final_idx_x = total_samples_x * nsamples_x
    final_idx_y = total_samples_y * nsamples_y

    residual_x = int(samples_x - final_idx_x)
    residual_y = int(samples_y - final_idx_y)

    # if the amount to trim is symmetric, trim symmetrically.
    if not is_odd(residual_x) and not is_odd(residual_y):
        samples_to_trim_x = residual_x // 2
        samples_to_trim_y = residual_y // 2
        trimmed_data = array[samples_to_trim_x:final_idx_x + samples_to_trim_x,
                             samples_to_trim_y:final_idx_y + samples_to_trim_y]
    # if not, trim more on the left.
    else:
        samples_tmp_x = (samples_x - final_idx_x) // 2
        samples_tmp_y = (samples_y - final_idx_y) // 2
        samples_top = int(m.floor(samples_tmp_y))
        samples_bottom = int(m.ceil(samples_tmp_y))
        samples_left = int(m.ceil(samples_tmp_x))
        samples_right = int(m.floor(samples_tmp_x))
        trimmed_data = array[samples_left:final_idx_x + samples_right,
                             samples_bottom:final_idx_y + samples_top]

    intermediate_view = trimmed_data.reshape(total_samples_x, nsamples_x,
                                             total_samples_y, nsamples_y)

    if mode.lower() in ('avg', 'average', 'mean'):
        output_data = intermediate_view.mean(axis=(1, 3))
    elif mode.lower() == 'sum':
        output_data = intermediate_view.sum(axis=(1, 3))
    else:
        raise ValueError('mode must be avg or sum.')

    # trim as needed to make even number of samples.
    # TODO: allow work with images that are of odd dimensions
    px_x, px_y = output_data.shape
    trim_x, trim_y = 0, 0
    if is_odd(px_x):
        trim_x = 1
    if is_odd(px_y):
        trim_y = 1

    return output_data[:px_x - trim_x, :px_y - trim_y]
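Usage sketch for `bindown`, with minimal stand-ins for prysm's helpers (`is_odd` and the `m` math backend are assumptions of this sketch):

import math as m
import numpy as np

def is_odd(n):
    return int(n) % 2 != 0

oversampled = np.arange(64, dtype=float).reshape(8, 8)
binned = bindown(oversampled, 4)  # average each 4x4 block
print(binned.shape)               # (2, 2)
print(binned[0, 0])               # 13.5, the mean of the top-left 4x4 block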
Example #9
    def trace_focus(self, algorithm='avg'):
        ''' finds the focus position in each field.  This is, in effect, the
            "field curvature" for this azimuth.

        Args:
            algorithm (`str`): algorithm used to trace focus; either '0.5' or
                'avg', see notes for a description of the '0.5' technique.

        Returns:
            `numpy.ndarray`: focal surface sag, in microns, vs field.

        Notes:
            Algorithm '0.5' uses the frequency that has its peak closest to 0.5
            on-axis to estimate the focus corresponding to the minimum RMS WFE
            condition.  This is based on the following assumptions:
                * Any combination of third, fifth, and seventh order spherical
                    aberration will produce a focus shift that depends on
                    frequency, and this dependence can be well fit by an
                    equation of the form y(x) = ax^2 + bx + c.  If this is true,
                    then the frequency which peaks at 0.5 will be near the
                    vertex of the quadratic, which converges to the min RMS WFE
                    condition.

                * Coma, while it enhances depth of field, does not shift the
                    focus peak.

                * Astigmatism and field curvature are the dominant causes of any
                    shift in best focus with field.

                * Chromatic aberrations do not influence the thru-focus MTF peak
                    in a way that varies with field.

        '''
        if algorithm == '0.5':
            # find the frequency whose thru-focus peak is closest to 0.5 on-axis
            idx_axis = np.searchsorted(self.field, 0)
            idx_freq = abs(self.data[:, idx_axis, :].max(axis=0) - 0.5).argmin()
            focus_idx = self.data[:,
                                  np.arange(self.data.shape[1]),
                                  idx_freq].argmax(axis=0)
            return self.focus[focus_idx], self.field
        elif algorithm.lower() in ('avg', 'average'):
            if self.freq[0] == 0:
                # if the zero frequency is included, exclude it from our calculations
                avg_idxs = self.data.argmax(axis=0)[:, 1:].mean(axis=1)
            else:
                avg_idxs = self.data.argmax(axis=0).mean(axis=1)

            # account for fractional indexes
            focus_out = avg_idxs.copy()
            for i, idx in enumerate(avg_idxs):
                li, ri = floor(idx), ceil(idx)
                lf, rf = self.focus[li], self.focus[ri]
                diff = rf - lf
                part = idx % 1
                focus_out[i] = lf + diff * part

            return focus_out, self.field
        else:
            raise ValueError("algorithm must be '0.5' or 'avg'.")
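A sketch of the fractional-index handling in the 'avg' branch: a non-integer mean argmax index is mapped onto the focus axis by linear interpolation between the two bracketing samples:

from math import ceil, floor

def focus_at_index(focus, idx):
    # linear interpolation between the bracketing focus samples
    li, ri = int(floor(idx)), int(ceil(idx))
    return focus[li] + (focus[ri] - focus[li]) * (idx % 1)

focus = [-100.0, -50.0, 0.0, 50.0, 100.0]
print(focus_at_index(focus, 2.5))  # 25.0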