Example #1
def test_pad_input():
    """Test `match_template` when `pad_input=True`.

    This test places two full templates (one with values lower than the image
    mean, the other higher) and two half templates, which are on the edges of
    the image. The two full templates should score the top (positive and
    negative) matches and the centers of the half templates should score 2nd.
    """
    # Float prefactors ensure that image range is between 0 and 1
    template = 0.5 * diamond(2)
    image = 0.5 * cp.ones((9, 19))
    mid = slice(2, 7)
    image[mid, :3] -= template[:, -3:]  # half min template centered at 0
    image[mid, 4:9] += template  # full max template centered at 6
    image[mid, -9:-4] -= template  # full min template centered at 12
    image[mid, -3:] += template[:, :3]  # half max template centered at 18

    result = match_template(image,
                            template,
                            pad_input=True,
                            constant_values=float(image.mean()))

    # get the max and min results.
    sorted_result = cp.argsort(result.ravel())
    i, j = cp.unravel_index(sorted_result[:2], result.shape)
    assert_array_equal(j, (12, 0))
    i, j = cp.unravel_index(sorted_result[-2:], result.shape)
    assert_array_equal(j, (18, 6))
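A minimal sketch of the behavior this test relies on, assuming the cuCIM port (cucim.skimage.feature.match_template): with pad_input=True the result has the same shape as the image, and peaks sit at template centers rather than corners.

import cupy as cp
from cucim.skimage.feature import match_template

template = cp.zeros((5, 5), dtype=cp.float32)
template[2, :] = 1.0
template[:, 2] = 1.0  # plus-shaped template (non-constant, so NCC is defined)
image = cp.zeros((9, 19), dtype=cp.float32)
image[2:7, 4:9] = template  # paste it with its center at (4, 6)

result = match_template(image, template, pad_input=True)
print(cp.unravel_index(int(cp.argmax(result)), result.shape))  # -> (4, 6)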
Example #2
def register_translation_batch(src_image,
                               target_image,
                               upsample_factor=1,
                               space="real"):
    # assume complex data is already in Fourier space
    if space.lower() == 'fourier':
        src_freq = src_image
        target_freq = target_image
    # real data needs to be fft'd.
    elif space.lower() == 'real':
        src_freq = cp.fft.fft2(src_image)
        target_freq = cp.fft.fft2(target_image)
    else:
        raise ValueError('space argument must be "real" or "fourier"')

    # Whole-pixel shift - Compute cross-correlation by an IFFT
    shape = src_freq.shape
    image_product = src_freq * target_freq.conj()
    cross_correlation = cp.fft.ifft2(image_product)
    A = cp.abs(cross_correlation)
    maxima = A.reshape(A.shape[0], -1).argmax(1)
    maxima = cp.column_stack(cp.unravel_index(maxima, A[0, :, :].shape))

    midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape[1:]])

    shifts = cp.array(maxima, dtype=cp.float64)
    ids = cp.where(shifts[:, 0] > midpoints[0])
    shifts[ids[0], 0] -= shape[1]
    ids = cp.where(shifts[:, 1] > midpoints[1])
    shifts[ids[0], 1] -= shape[2]
    if upsample_factor > 1:
        # Initial shift estimate in upsampled grid
        shifts = cp.around(shifts * upsample_factor) / upsample_factor
        upsampled_region_size = np.ceil(upsample_factor * 1.5)
        # Center of output array at dftshift + 1
        dftshift = np.fix(upsampled_region_size / 2.0)

        normalization = (src_freq[0].size * upsample_factor**2)
        # Matrix multiply DFT around the current shift estimate

        sample_region_offset = dftshift - shifts * upsample_factor
        cross_correlation = _upsampled_dft_batch(image_product.conj(),
                                                 upsampled_region_size,
                                                 upsample_factor,
                                                 sample_region_offset).conj()
        cross_correlation /= normalization
        # Locate maximum and map back to original pixel grid
        A = cp.abs(cross_correlation)
        maxima = A.reshape(A.shape[0], -1).argmax(1)
        maxima = cp.column_stack(cp.unravel_index(maxima, A[0, :, :].shape))

        maxima = cp.array(maxima, dtype=cp.float64) - dftshift

        shifts = shifts + maxima / upsample_factor

    # If a spatial dimension has only one row or column, the shift along that
    # dimension has no effect; set it to zero. Skip the leading batch axis,
    # since ``shifts`` holds one (row, col) pair per image.
    for dim in range(1, src_freq.ndim):
        if shape[dim] == 1:
            shifts[:, dim - 1] = 0

    return shifts
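A hedged usage sketch for the batch routine above (it assumes _upsampled_dft_batch is defined in the surrounding module). With the src * conj(target) correlation used here, a roll applied to produce the target comes back negated:

import cupy as cp

src = cp.random.rand(4, 64, 64)  # batch of 4 reference images
target = cp.roll(src, shift=(2, 3), axis=(1, 2))
shifts = register_translation_batch(src, target, upsample_factor=1)
print(shifts)  # each row is approximately [-2., -3.]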
Example #3
def test_cross_correlate_masked_autocorrelation_trivial_masks():
    """Masked normalized cross-correlation between identical arrays
    should reduce to an autocorrelation even with random masks."""
    # Seed the random number generator for reproducible results
    np.random.seed(23)

    arr1 = cp.asarray(camera())

    # Random masks with 75% of pixels being valid
    m1 = np.random.choice([True, False], arr1.shape, p=[3 / 4, 1 / 4])
    m2 = np.random.choice([True, False], arr1.shape, p=[3 / 4, 1 / 4])
    m1 = cp.asarray(m1)
    m2 = cp.asarray(m2)

    xcorr = cross_correlate_masked(arr1,
                                   arr1,
                                   m1,
                                   m2,
                                   axes=(0, 1),
                                   mode="same",
                                   overlap_ratio=0).real
    max_index = cp.unravel_index(cp.argmax(xcorr), xcorr.shape)
    max_index = tuple(map(int, max_index))

    # Autocorrelation should have maximum in center of array
    # CuPy Backend: uint8 inputs will be processed in float32, so reduce
    #               decimal to 5
    assert_almost_equal(float(xcorr.max()), 1, decimal=5)
    np.testing.assert_array_equal(max_index, np.array(arr1.shape) / 2)
Example #4
    def calculate_drift_offset(image_mat: cp.ndarray):
        frame_n, h, w = image_mat.shape[:3]
        x = np.arange(w)
        y = np.arange(h)
        bounds = [(0, h), (0, w)]

        def find_max_loc_in_map(cr_map: np.ndarray, rough_yx: np.ndarray):
            # Use SciPy interpolation and optimization to find the sub-pixel
            # maximum.
            precise_max_loc = np.empty((2, frame_n), dtype=np.float32)
            for i in range(frame_n):
                np_cr_map = cr_map[i]
                F2 = interpolate.interp2d(x, y, -np_cr_map, kind="cubic")
                X0 = rough_yx[i]
                precise_max_loc[:, i] = optimize.minimize(lambda arg: F2(*arg),
                                                          X0,
                                                          bounds=bounds).x
            precise_max_loc[0, :] -= w // 2  # X
            precise_max_loc[1, :] -= h // 2  # Y
            return precise_max_loc

        result = cp.abs(
            cp.fft.fftshift(
                cp.fft.ifft2(cp.fft.fft2(image_mat) * base_frame_fft)))
        # Per-frame (row, col) of the correlation peak; each per-frame map has
        # shape (h, w). Move to the host for the SciPy refinement step.
        rough_max_loc = cp.asnumpy(cp.stack(
            cp.unravel_index(cp.argmax(result.reshape(frame_n, -1), axis=1),
                             (h, w)))).T
        result = find_max_loc_in_map(cp.asnumpy(result), rough_max_loc)
        avg_offset = cp.average(cp.array(result), axis=0)
        print("average offset: x:", avg_offset[0], ", y:", avg_offset[1])
        return result
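Note that scipy.interpolate.interp2d was deprecated in SciPy 1.10 and removed in 1.14. A sketch of the same sub-pixel refinement using RectBivariateSpline instead (a substitution, not the original author's code; x, y, and bounds as defined above):

import numpy as np
from scipy import interpolate, optimize

def refine_peak(np_cr_map, rough_yx, x, y, bounds):
    # Negate the map so the correlation peak becomes a minimum.
    spline = interpolate.RectBivariateSpline(y, x, -np_cr_map)
    res = optimize.minimize(lambda arg: spline(arg[0], arg[1])[0, 0],
                            rough_yx, bounds=bounds)
    return res.x  # sub-pixel (row, col) of the maximum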
Example #5
def constrain_center_peak(probe):
    """Force the peak illumination intensity to the center of the probe grid.

    After smoothing the intensity of the combined illumination with a gaussian
    filter with standard deviation sigma, the probe is shifted such that the
    maximum intensity is centered.
    """
    half = probe.shape[-2] // 2, probe.shape[-1] // 2
    logger.info("Constrained probe intensity to center with sigma=%f", half[0])
    # First reshape the probe to 3D so it is a single stack of 2D images.
    stack = probe.reshape((-1, *probe.shape[-2:]))
    intensity = cupyx.scipy.ndimage.gaussian_filter(
        input=np.sum(np.square(np.abs(stack)), axis=0),
        sigma=half,
        mode='wrap',
    )
    # Find the maximum intensity in 2D.
    center = np.argmax(intensity)
    # Find the 2D coordinates of the maximum.
    coords = cp.unravel_index(center, dims=probe.shape[-2:])
    # Shift each of the probes so the max is in the center.
    p = np.roll(stack, half[0] - coords[0], axis=-2)
    stack = np.roll(p, half[1] - coords[1], axis=-1)
    # Reform to the original shape; make contiguous.
    probe = stack.reshape(probe.shape)
    return probe
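A minimal usage sketch, assuming the function above together with its module-level imports (cp, np, cupyx) and logger: a probe stack with an off-center intensity peak is rolled back to the grid center.

import logging

import cupy as cp

logger = logging.getLogger(__name__)  # constrain_center_peak logs through this

probe = cp.zeros((1, 1, 1, 32, 32), dtype=cp.complex64)
probe[..., 5, 7] = 1.0  # off-center bright pixel
centered = constrain_center_peak(probe)
print(cp.unravel_index(int(cp.argmax(cp.abs(centered[0, 0, 0]))), (32, 32)))
# -> (16, 16): the peak now sits at the grid center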
Example #6
def get_perlin_init(shape=(1000, 1000),
                    n=100000,
                    cutoff=None,
                    repetition=(1000, 1000),
                    scale=100,
                    octaves=20.0,
                    persistence=0.1,
                    lacunarity=2.0):
    """Returns a tuple of x,y-coordinates sampled from Perlin noise.
    This can be used to initialize the starting positions of a physarum-
    population, as well as to generate a cloudy feeding-pattern that will
    have a natural feel to it. This function wraps the one from the noise-
    library from Casey Duncan, and is in parts borrowed from here (see also this for a good explanation of the noise-parameters):
    https://medium.com/@yvanscher/playing-with-perlin-noise-generating-realistic-archipelagos-b59f004d8401
    The most relevant paramaters for our purposes are:

    :param shape: The shape of the area in which the noise is to be generated. Defaults to (1000,1000)
    :type shape: Tuple of integers with the form (width, height).
    :param n: Number of particles to sample. When used as a feeeding trace,
    this translates to the relative strength of the pattern. defaults to 100000.
    :param cutoff: value below which noise should be set to zero. Default is None. Will lead to probabilities 'contains NaN-error, if to high'
    :param scale: (python-noise parameter) The scale of the noise -- larger or smaller patterns, defaults to 100.
    :param repetition: (python-noise parameter) Tuple that denotes the size of the area in which the noise should repeat itself. Defaults to (1000,1000)

    """
    import numpy as np
    import cupy as cp  # np.vectorize is not available in CuPy, so convert at the end

    shape = [i - 1 for i in shape]

    # make coordinate grid on [0,1]^2
    x_idx = np.linspace(0, shape[0], shape[0])
    y_idx = np.linspace(0, shape[1], shape[1])
    world_x, world_y = np.meshgrid(x_idx, y_idx)

    # apply perlin noise, instead of np.vectorize, consider using itertools.starmap()
    world = np.vectorize(noise.pnoise2)(
        world_x / scale,
        world_y / scale,
        octaves=int(octaves),
        persistence=persistence,
        lacunarity=lacunarity,
        repeatx=repetition[0],
        repeaty=repetition[1],
        base=np.random.randint(0, 100),
    )
    # world = world * 3
    # 	 Sample particle init from map:
    world[world <= 0.0] = 0.0  # filter negative values
    if cutoff is not None:
        world[world <= cutoff] = 0.0
    linear_idx = np.random.choice(world.size,
                                  size=n,
                                  p=world.ravel() / float(world.sum()))
    # world has shape (len(y_idx), len(x_idx)), so unravel against it directly
    x, y = np.unravel_index(linear_idx, world.shape)
    x = x.reshape(-1, 1)
    y = y.reshape(-1, 1)

    return cp.asarray(np.hstack([x, y]))
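A short usage sketch, assuming the function above and the `noise` package (pip install noise) that it wraps:

starts = get_perlin_init(shape=(200, 200), n=1000, scale=50)
print(starts.shape)  # (1000, 2): one (x, y) pair per sampled particle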
Example #7
def get_image_init_positions(image, shape: Tuple[int], n: int, flip=False):
    init_image = IMG.open(image).convert("L")
    init_image = init_image.resize(tuple(np.flip(shape)))
    init_image = np.array(init_image) / 255
    if flip:
        init_image = 1 - init_image
    linear_idx = np.random.choice(init_image.size,
                                  size=n,
                                  p=init_image.ravel() /
                                  float(init_image.sum()))
    x, y = np.unravel_index(linear_idx, shape)
    x = x.reshape(-1, 1)
    y = y.reshape(-1, 1)
    return np.hstack([x, y])
Example #8
def get_image_init_positions(image_path,
                             shape: Tuple[int],
                             n: int,
                             invert=False):
    import numpy as np
    import cupy as cp
    init_image = IMG.open(image_path).convert("L")
    init_image = init_image.resize(shape)
    init_image = np.array(init_image) / 255
    if invert:
        init_image = 1 - init_image
    linear_idx = np.random.choice(init_image.size,
                                  size=n,
                                  p=init_image.ravel() / init_image.sum())
    x, y = np.unravel_index(linear_idx, shape)
    return cp.asarray(np.hstack([x.reshape(-1, 1), y.reshape(-1, 1)]))
Example #9
def constrain_probe_sparsity(probe, f):
    """Constrain the probe intensity so no more than f/1 elements are nonzero."""
    if f == 1:
        return probe
    logger.info("Constrained probe intensity spasity to %f", f)
    # First reshape the probe to 3D so it is a single stack of 2D images.
    stack = probe.reshape((-1, *probe.shape[-2:]))
    intensity = np.sum(np.square(np.abs(stack)), axis=0)
    sigma = probe.shape[-2] / 8, probe.shape[-1] / 8
    intensity = cupyx.scipy.ndimage.gaussian_filter(
        input=intensity,
        sigma=sigma,
        mode='wrap',
    )
    # Get the coordinates of the smallest k values
    k = int((1 - f) * probe.shape[-1] * probe.shape[-2])
    smallest = np.argpartition(intensity, k, axis=None)[:k]
    coords = cp.unravel_index(smallest, dims=probe.shape[-2:])
    # Set these k smallest values to zero in all probes
    probe[..., coords[0], coords[1]] = 0
    return probe
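The core trick above in isolation: argpartition selects the k smallest values without a full sort, and unravel_index turns the flat indices into 2D coordinates. A standalone CuPy sketch:

import cupy as cp

intensity = cp.random.rand(8, 8)
f = 0.25  # keep (at most) the top 25% of pixels
k = int((1 - f) * intensity.size)
smallest = cp.argpartition(intensity, k, axis=None)[:k]
coords = cp.unravel_index(smallest, intensity.shape)
sparse = intensity.copy()
sparse[coords] = 0
assert int((sparse > 0).sum()) <= intensity.size - k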
Example #10
def unravel_index(indices,
                  shape,
                  order='C',
                  requires_grad=False,
                  device='cpu'):
    """Converts a flat index or array of flat indices into a tuple of coordinate arrays.   

    Args:
        indices ([type]): An integer array whose elements are indices into the flattened version of an array of dimensions shape. 
        shape ([type]): The shape of the tensor to use for unraveling indices.
        order (str, optional): Determines whether the indices should be viewed as indexing 
            in row-major (C-style, ``'C'``) or column-major (Fortran-style, ``'F``) order. Defaults to ``'C'``.
        requires_grad (bool): if ``True`` will track gradients.
        device (str): name of the device where the tensor is located. Default to ``'cpu'``.

    Returns:
        Tensor
    """
    if device == 'cpu':
        data = np.unravel_index(indices, shape, order=order)
    else:
        data = cp.unravel_index(indices, shape, order=order)
    return nets.Tensor(data, requires_grad=requires_grad, device=device)
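A hedged usage sketch, assuming the surrounding `nets` tensor library:

flat = 7
coords = unravel_index(flat, (3, 4), device='cpu')
print(coords)  # Tensor wrapping (1, 3): row 1, col 3 of a 3x4 grid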
Example #11
def phasecorr_gpu(X, cfRefImg, lcorr):
    '''Phase correlation on the GPU (currently unused: no speedup observed; may be faster with cuda.jit).'''
    nimg, Ly, Lx = X.shape
    ly, lx = cfRefImg.shape[-2:]
    lyhalf = int(np.floor(ly / 2))
    lxhalf = int(np.floor(lx / 2))

    # put on GPU
    ref_gpu = cp.asarray(cfRefImg)
    x_gpu = cp.asarray(X)

    # phasecorrelation
    x_gpu = fftn(x_gpu, axes=(1, 2),
                 overwrite_x=True) * np.sqrt(Ly - 1) * np.sqrt(Lx - 1)
    for t in range(x_gpu.shape[0]):
        tmp = x_gpu[t, :, :]
        tmp = cp.multiply(tmp, ref_gpu)
        tmp = cp.divide(tmp, cp.absolute(tmp) + 1e-5)
        x_gpu[t, :, :] = tmp
    x_gpu = ifftn(x_gpu, axes=(1, 2),
                  overwrite_x=True) * np.sqrt(Ly - 1) * np.sqrt(Lx - 1)
    x_gpu = cp.fft.fftshift(cp.real(x_gpu), axes=(1, 2))

    # get max index
    x_gpu = x_gpu[cp.ix_(np.arange(0, nimg, 1, int),
                         np.arange(lyhalf - lcorr, lyhalf + lcorr + 1, 1, int),
                         np.arange(lxhalf - lcorr, lxhalf + lcorr + 1, 1,
                                   int))]
    ix = cp.argmax(cp.reshape(x_gpu, (nimg, -1)), axis=1)
    cmax = x_gpu[np.arange(0, nimg, 1, int), ix]
    ymax, xmax = cp.unravel_index(ix, (2 * lcorr + 1, 2 * lcorr + 1))
    cmax = cp.asnumpy(cmax).flatten()
    ymax = cp.asnumpy(ymax)
    xmax = cp.asnumpy(xmax)
    ymax, xmax = ymax - lcorr, xmax - lcorr
    return ymax, xmax, cmax
Example #12
def ulas_multiframe3(corr_sum, proportion=0.4):
    import cupy

    if not isinstance(corr_sum, cupy.ndarray):
        corr_sum = cupy.array(corr_sum)

    num_frames = len(corr_sum) + 1

    correlations = corr_sum


    shifts = cupy.zeros((correlations.shape[0], 2))

    for i in range(correlations.shape[0]):
        # find the argmax points, which give the estimated shift
        shifts[i] = cupy.array(
            cupy.unravel_index(cupy.argmax(correlations[i]),
                               correlations[i].shape))

    # bring the shift values from [0,N] to [-N/2, N/2] format
    shifts[shifts > cupy.fix(corr_sum[0].shape[0] / 2)] -= corr_sum[0].shape[0]

    # determine what proportion of the shift estimates to use in the final shift estimation
    # if `proportion` < 1, then we are not using the correlations of frame pairs that
    # are very far from each other, the reason being that further apart frames have
    # less overlap, where the nonoverlapping parts contribute to the correlation as
    # 'noise', and reduce the accuracy.
    # proportion = 0.4

    # normalize the shifts to per frame shift
    shifts = shifts / cupy.tile(cupy.arange(1, shifts.shape[0] + 1), (2, 1)).T

    # estimate the shift using the first `proportion` of the shift array
    shift_est = cupy.asnumpy(
        cupy.mean(shifts[:int(proportion * shifts.shape[0])], axis=0))

    # initialize the array that will take the fourier transform of the correlations of correlations
    correlations_f2 = cupy.zeros((num_frames - 2, corr_sum.shape[1],
                                  corr_sum.shape[2])).astype(cupy.complex128)

    for i in range(num_frames - 2):
        for j in cupy.arange(i + 1, num_frames - 1):
            # compute the correlations between correlations to get a more refined estimate of drift
            correlations_f2[j - i - 1] += cupy.fft.fftn(
                correlations[i]) * cupy.fft.fftn(correlations[j]).conj()
    correlations2 = cupy.fft.ifft2(correlations_f2).real

    # FIXME
    # for i in range(len(correlations2)):
    #     # convolve the correlations with a gaussian to eliminate outlier peaks
    #     correlations2[i] = gaussian_filter(correlations2[i], sigma=1, mode='wrap')

    shifts2 = cupy.zeros((correlations2.shape[0], 2))

    for i in range(correlations2.shape[0]):
        shifts2[i] = cupy.array(
            cupy.unravel_index(cupy.argmax(correlations2[i]),
                               correlations2[i].shape))

    shifts2[shifts2 > cupy.fix(corr_sum[0].shape[0] /
                               2)] -= corr_sum[0].shape[0]
    shifts2 = shifts2 / cupy.tile(cupy.arange(1, shifts2.shape[0] + 1),
                                  (2, 1)).T
    shift_est2 = -cupy.asnumpy(
        cupy.mean(shifts2[:int(proportion * shifts2.shape[0])], axis=0))
    return ((shift_est[1], shift_est[0])), ((shift_est2[1], shift_est2[0]))
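A hedged usage sketch. From the normalization by cupy.arange(1, N + 1) above, corr_sum[i] appears to hold the spatial cross-correlation between frame 0 and frame i + 1; building it that way from a drifting stack is an assumption, not something documented in the source:

import cupy as cp

frames = cp.random.rand(6, 64, 64)
f0 = cp.fft.fft2(frames[0])
corr_sum = cp.fft.ifft2(f0 * cp.fft.fft2(frames[1:]).conj()).real
est1, est2 = ulas_multiframe3(corr_sum, proportion=0.4)
print(est1, est2)  # two (x, y) per-frame drift estimates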
Example #13
def unravel_index(indices, shape):
    return cp.unravel_index(indices, shape)
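A round-trip sketch: cp.ravel_multi_index is the inverse of cp.unravel_index.

import cupy as cp

flat = cp.arange(6)
rows, cols = cp.unravel_index(flat, (2, 3))
assert bool((cp.ravel_multi_index((rows, cols), (2, 3)) == flat).all())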
Example #14
    def move(self,
             move_preference_matrix,
             move_probability_matrix,
             ratio_random_move=0.1):
        """
        1.  Select all living agents and their neighbours.
        2.  Create a movement matrix. All occupied by agent cells should be unavailable for move.
            add {move_preference_matrix} for values of neighbours.
        3.  If agent does not have any available cells for moving - it should die.
            Drop all died agents from current moving agents.
        4.  10% of the time the agent moves randomly.
            Agent can't go to unavailable cells, so we recalculate probability for available neighbours.
            (sum of prob should be 1).
        5.  Vectorized way to get random indices from array of probs. Like random.choice, but for 2d array.
        6.  Find new flat indexes for random moving agents.
        7.  Find new flat indexes for normal moving agents. Before argmax selection we shuffle neighbours,
            otherwise we will use always first max index.
        8.  Create an array with new agents positions.
        9.  If two agents want to occupy same cell - then we accept only first.
            All agents, which was declined to move because of collision will die.
        10. If agent reach top - it dies too.


        :param move_preference_matrix:  The agent decides which space to move to by adding this move
                                        preference array to the value array of the surrounding environment.

        :param move_probability_matrix:  10% of the time the agent moves randomly to an adjacent space.
                                         It is the move probability matrix.
        :return:
        """
        # (1)
        live_agents_neighbour_flat_positions = self.agents_neighbour_flat_positions[
            self.agents_state]
        # (2)
        move_candidates = self.env.ravel(
        )[live_agents_neighbour_flat_positions].copy()

        is_available = self.is_available_env.ravel(
        )[live_agents_neighbour_flat_positions]
        move_candidates[~is_available] = cp.nan
        move_candidates = move_candidates + cp.asarray(move_preference_matrix)

        # (3)
        should_die = cp.all(cp.isnan(move_candidates.reshape(-1, 27)), axis=1)
        should_die_agents = cp.flatnonzero(self.agents_state)[should_die]

        self.agents_state[should_die_agents] = False

        move_candidates = move_candidates[~should_die]
        live_agents_neighbour_flat_positions = live_agents_neighbour_flat_positions[
            ~should_die]

        # (4)
        is_random_move = cp.random.binomial(
            1, ratio_random_move,
            live_agents_neighbour_flat_positions.shape[0])
        is_random_move = is_random_move.astype(bool)  # cp.bool was removed from CuPy
        random_move_candidates = move_candidates[is_random_move]

        random_move_probs = (~cp.isnan(random_move_candidates) *
                             cp.asarray(move_probability_matrix)).reshape(
                                 -1, 27)
        random_move_probs /= random_move_probs.sum(axis=1)[:, None]

        # (5)
        random_vals = cp.expand_dims(cp.random.rand(
            random_move_probs.shape[0]),
                                     axis=1)
        random_indexes = (random_move_probs.cumsum(axis=1) >
                          random_vals).argmax(axis=1)

        # (6)
        random_live_agents_neighbour_flat_positions = live_agents_neighbour_flat_positions[
            is_random_move]
        random_new_positions = cp.take_along_axis(
            random_live_agents_neighbour_flat_positions.reshape(-1, 27),
            random_indexes[:, None],
            axis=1).T[0]

        # (7)
        normal_move_candidates = move_candidates[~is_random_move]

        # normal_move_indexes = cp.nanargmax(normal_move_candidates.reshape(-1, 27), axis=1)[:, None]
        # smart analog of cp.nanargmax(normal_move_candidates.reshape(-1, 27), axis=1)[:, None]

        normal_flattened_move_candidates = normal_move_candidates.reshape(
            -1, 27)
        normal_shuffled_candidates_idx = cp.random.rand(
            *normal_flattened_move_candidates.shape).argsort(axis=1)
        normal_shuffled_flattened_move_candidates = cp.take_along_axis(
            normal_flattened_move_candidates,
            normal_shuffled_candidates_idx,
            axis=1)
        normal_shuffled_candidates_max_idx = cp.nanargmax(
            normal_shuffled_flattened_move_candidates, axis=1)[:, None]

        normal_move_indexes = cp.take_along_axis(
            normal_shuffled_candidates_idx,
            normal_shuffled_candidates_max_idx,
            axis=1)
        ####

        normal_live_agents_neighbour_flat_positions = live_agents_neighbour_flat_positions[
            ~is_random_move]
        normal_move_new_positions = cp.take_along_axis(
            normal_live_agents_neighbour_flat_positions.reshape(-1, 27),
            normal_move_indexes,
            axis=1).T[0]
        # (8)
        moving_agents_flat_positions = self.agents_flat_positions[
            self.agents_state]
        new_agents_flat_positions = moving_agents_flat_positions.copy()

        new_agents_flat_positions[is_random_move] = random_new_positions

        new_agents_flat_positions[~is_random_move] = normal_move_new_positions

        live_agents_indexes = cp.flatnonzero(self.agents_state)

        # (9)
        _, flat_positions_first_entry = cp.unique(new_agents_flat_positions,
                                                  return_index=True)

        is_live = cp.zeros_like(new_agents_flat_positions).astype(bool)
        is_live[flat_positions_first_entry] = True

        new_agents_flat_positions[~is_live] = moving_agents_flat_positions[
            ~is_live]
        new_agents_positions = cp.array(
            cp.unravel_index(new_agents_flat_positions, self.env.shape)).T

        # (10)
        is_live[new_agents_positions[:, 2] == 1] = False

        self._agents_positions[live_agents_indexes] = new_agents_positions
        self.agents_state[live_agents_indexes] = is_live

        self.is_available_env.ravel()[moving_agents_flat_positions] = True
        self.is_available_env.ravel()[new_agents_flat_positions] = False

        self._agents_positions_all_time.append(
            cp.asnumpy(self._agents_positions))
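Step (5) above in isolation: a vectorized per-row random choice. For each row of an (n, k) probability matrix, one column index is drawn -- equivalent to calling random.choice row by row, but as a single batched operation:

import cupy as cp

probs = cp.random.rand(5, 27)
probs /= probs.sum(axis=1, keepdims=True)
r = cp.random.rand(probs.shape[0])[:, None]
chosen = (probs.cumsum(axis=1) > r).argmax(axis=1)
print(chosen)  # one sampled column index per row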
Example #15
    def born(self, number_of_agents, ratio_random_birth):
        """
        Method, which born new agents.
        1.  All new agents should be borned in free cells.
        2.  {BORN_IN_BOTTOM} agents are born at the bottom of ENV.
            There can be hypothetical situations, when we already have died agents in bottom cells.
            To avoid this problem, we filter this cells.
        3.  Generate an indexes of borned agents in envs.
            !!!!!WARNING!!!!!: Perhaps situation, when we have less available cells than number of new agents
            (ex.: if we have 10x10x100 env and want to create 200 agents at bottom).
            We can simple handle it, setting number of new agents like min(free_cells, number_of_agents).
        4.  The remaining agents should not appear into already occupied positions.
            {agent_available_env_bottom} is just a view, really we change {agent_available_env} array.
        5. Receive X,Y,Z positions of borned agents. Specify Z manually, because we now, that it is a bottom.
        6.  Other agents should be borned randomly in the whole envs.
            It is too slow to sample from whole {is_available_env}. So, use simple hack - because envs are
            much bigger, than number of agents - let's generate some random indexes there and just select free.
            Todo: Strictly, it can give us problems in some cases, when there will be too many agents,
            but don't worry about it now.

        7. All agents, which were born on the top will die immediately.
        8. Combine all new agents with others.

        :param number_of_agents: number of agents born each turn
        :param ratio_random_birth: ratio of agent birth locations (i.e. base of environment vs. random)
        """
        # (1)
        # (2)
        born_in_bottom = int(number_of_agents * (1 - ratio_random_birth))
        agent_available_env_bottom = self.is_available_env[:, :, -2]
        available_flat_bottom_positions = cp.flatnonzero(
            agent_available_env_bottom)
        # (3)
        selected_flat_bottom_positions = cp.random.choice(
            available_flat_bottom_positions, born_in_bottom, replace=False)
        # (4)
        self.is_available_env[:, :, -2].ravel(
        )[selected_flat_bottom_positions] = False
        # (5)
        bottom_agents_positions = cp.unravel_index(
            selected_flat_bottom_positions,
            (*agent_available_env_bottom.shape, 1))
        bottom_agents_positions = cp.vstack(bottom_agents_positions)
        bottom_agents_positions[2] = (self.is_available_env.shape[2] - 2)

        # (6)
        born_in_random = number_of_agents - born_in_bottom
        random_positions = cp.array([
            # Use numpy function, because it is faster.
            np.random.randint(1, ax_shape - 1, born_in_random * 4)
            for ax_shape in self.is_available_env.shape
        ])
        random_flat_positions = cp.ravel_multi_index(
            random_positions, self.is_available_env.shape)

        is_available = self.is_available_env.ravel()[random_flat_positions]

        selected_flat_uniform_positions = random_flat_positions[
            is_available][:born_in_random]
        uniform_agents_positions = cp.unravel_index(
            selected_flat_uniform_positions, self.is_available_env.shape)
        uniform_agents_positions = cp.vstack(uniform_agents_positions)
        # Todo: This code is correct, but too slow. Replace it with code above.

        # available_flat_uniform_positions = cp.flatnonzero(self.is_available_env)
        # selected_flat_uniform_positions = cp.random.choice(
        #     available_flat_uniform_positions,
        #     number_of_agents - born_in_bottom,
        #     replace=False
        # )
        # uniform_agents_positions = cp.unravel_index(selected_flat_uniform_positions, self.is_available_env.shape)
        # uniform_agents_positions = cp.vstack(uniform_agents_positions)

        # (7)
        new_agent_positions = cp.hstack(
            [uniform_agents_positions, bottom_agents_positions]).T
        new_agent_state = (new_agent_positions[:, 2] != 1).astype(bool)

        # (8)
        if self._agents_positions is None:
            self._agents_positions = new_agent_positions
            self.agents_state = new_agent_state
        else:
            self._agents_positions = cp.vstack(
                [self._agents_positions, new_agent_positions])
            self.agents_state = cp.hstack([self.agents_state, new_agent_state])

        self.is_available_env.ravel()[self.agents_flat_positions] = False
Example #16
    if (i + 1 == int(iteration) + int(additional_iteration)):
        R_dens = cp_dens_pre
        R_dens.real = R_dens.real * cp_sup

        #		if(complex_constraint_flag != 1):
        #			R_dens.imag=R_dens.imag*cp_sup
        R_dens.imag[:, :, :] = 0

    if ((i + 1 == int(iteration) + int(additional_iteration)) & (OSS_flag != 1)
            & (iteration != "0")):

        for n in range(sta_dens):
            R_dens_real = R_dens.real[n, :, :]

            max_index = cp.unravel_index(cp.argmax(R_dens_real),
                                         R_dens_real.shape)
            R_dens[n, :, :] = cp.roll(R_dens[n, :, :],
                                      int(row / 2) - max_index[0],
                                      axis=0)
            R_dens[n, :, :] = cp.roll(R_dens[n, :, :],
                                      int(col / 2) - max_index[1],
                                      axis=1)

            weight_sum = cp.sum(R_dens_real)
            x_axis_sum = cp.sum(R_dens_real, axis=1)
            y_axis_sum = cp.sum(R_dens_real, axis=0)

            x_sum = cp.sum(x_axis_sum * x)
            y_sum = cp.sum(y_axis_sum * y)

            if (weight_sum != 0.0):
Example #17
            if theta_img[i,j] <= theta_max:
                
                count = 0
                # float dtype so rows can be invalidated with NaN below
                # (cp.int has also been removed in recent CuPy releases)
                ray = cp.zeros((stop + 1 - start, 3), dtype=cp.float64)

                for k in range(start, stop + 1):

                    if count > 0:
                        ray[k-start,:] = cp.array([cp.nan, cp.nan, cp.nan])
                    else:
                        theta_slice = thetav[k,:,:]
                        theta_slice[theta_slice > theta_max] = cp.nan
                        phi_slice = phiv[k,:,:]
                        dist = cp.sqrt((theta_slice - theta_img[i,j])**2 + (phi_slice - phi_img[i,j])**2)
                        mini, minj = cp.unravel_index(cp.nanargmin(dist), theta_slice.shape)
                        mini = int(mini); minj = int(minj)
                        # prevent smearing
                        if (mini in boundary) or (minj in boundary):
                            count += 1

                        ray[k-start,0] = k
                        ray[k-start,1] = mini
                        ray[k-start,2] = minj

                not_nans = ~cp.isnan(ray[:,0])
                how_many = cp.sum(not_nans)
                kx = ray[:,0]
                kx = kx[not_nans]
                ix = ray[:,1]
                ix = ix[not_nans]
Example #18
def phase_cross_correlation(reference_image,
                            moving_image,
                            *,
                            upsample_factor=1,
                            space="real",
                            return_error=True,
                            reference_mask=None,
                            moving_mask=None,
                            overlap_ratio=0.3):
    """Efficient subpixel image translation registration by cross-correlation.

    This code gives the same precision as the FFT upsampled cross-correlation
    in a fraction of the computation time and with reduced memory requirements.
    It obtains an initial estimate of the cross-correlation peak by an FFT and
    then refines the shift estimation by upsampling the DFT only in a small
    neighborhood of that estimate by means of a matrix-multiply DFT.

    Parameters
    ----------
    reference_image : array
        Reference image.
    moving_image : array
        Image to register. Must be same dimensionality as
        ``reference_image``.
    upsample_factor : int, optional
        Upsampling factor. Images will be registered to within
        ``1 / upsample_factor`` of a pixel. For example
        ``upsample_factor == 20`` means the images will be registered
        within 1/20th of a pixel. Default is 1 (no upsampling).
        Not used if any of ``reference_mask`` or ``moving_mask`` is not None.
    space : string, one of "real" or "fourier", optional
        Defines how the algorithm interprets input data. "real" means
        data will be FFT'd to compute the correlation, while "fourier"
        data will bypass FFT of input data. Case insensitive. Not
        used if any of ``reference_mask`` or ``moving_mask`` is not
        None.
    return_error : bool, optional
        Returns error and phase difference if on; otherwise only the
        shifts are returned. Has no effect if either ``reference_mask`` or
        ``moving_mask`` is not None, in which case only the shifts are returned.
    reference_mask : ndarray
        Boolean mask for ``reference_image``. The mask should evaluate
        to ``True`` (or 1) on valid pixels. ``reference_mask`` should
        have the same shape as ``reference_image``.
    moving_mask : ndarray or None, optional
        Boolean mask for ``moving_image``. The mask should evaluate to ``True``
        (or 1) on valid pixels. ``moving_mask`` should have the same shape
        as ``moving_image``. If ``None``, ``reference_mask`` will be used.
    overlap_ratio : float, optional
        Minimum allowed overlap ratio between images. The correlation for
        translations corresponding with an overlap ratio lower than this
        threshold will be ignored. A lower `overlap_ratio` leads to smaller
        maximum translation, while a higher `overlap_ratio` leads to greater
        robustness against spurious matches due to small overlap between
        masked images. Used only if one of ``reference_mask`` or
        ``moving_mask`` is None.

    Returns
    -------
    shifts : ndarray
        Shift vector (in pixels) required to register ``moving_image``
        with ``reference_image``. Axis ordering is consistent with
        numpy (e.g. Z, Y, X)
    error : float
        Translation invariant normalized RMS error between
        ``reference_image`` and ``moving_image``.
    phasediff : float
        Global phase difference between the two images (should be
        zero if images are non-negative).

    References
    ----------
    .. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
           "Efficient subpixel image registration algorithms,"
           Optics Letters 33, 156-158 (2008). :DOI:`10.1364/OL.33.000156`
    .. [2] James R. Fienup, "Invariant error metrics for image reconstruction,"
           Applied Optics 36, 8352-8357 (1997). :DOI:`10.1364/AO.36.008352`
    .. [3] Dirk Padfield. Masked Object Registration in the Fourier Domain.
           IEEE Transactions on Image Processing, vol. 21(5),
           pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`
    .. [4] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and
           Pattern Recognition, pp. 2918-2925 (2010).
           :DOI:`10.1109/CVPR.2010.5540032`
    """
    if (reference_mask is not None) or (moving_mask is not None):
        return _masked_phase_cross_correlation(reference_image, moving_image,
                                               reference_mask, moving_mask,
                                               overlap_ratio)

    # images must be the same shape
    if reference_image.shape != moving_image.shape:
        raise ValueError("images must be same shape")

    # assume complex data is already in Fourier space
    if space.lower() == 'fourier':
        src_freq = reference_image
        target_freq = moving_image
    # real data needs to be fft'd.
    elif space.lower() == 'real':
        src_freq = fft.fftn(reference_image)
        target_freq = fft.fftn(moving_image)
    else:
        raise ValueError('space argument must be "real" or "fourier"')

    # Whole-pixel shift - Compute cross-correlation by an IFFT
    shape = src_freq.shape
    image_product = src_freq * target_freq.conj()
    cross_correlation = fft.ifftn(image_product)

    # Locate maximum
    maxima = cp.unravel_index(cp.argmax(cp.abs(cross_correlation)),
                              cross_correlation.shape)
    midpoints = cp.asarray([np.fix(axis_size / 2) for axis_size in shape])

    float_dtype = image_product.real.dtype
    shifts = cp.stack([m.astype(float_dtype, copy=False) for m in maxima])
    shifts[shifts > midpoints] -= cp.asarray(shape)[shifts > midpoints]

    if upsample_factor == 1:
        if return_error:
            sabs = cp.abs(src_freq)
            sabs *= sabs
            tabs = cp.abs(target_freq)
            tabs *= tabs
            src_amp = np.sum(sabs) / src_freq.size
            target_amp = np.sum(tabs) / target_freq.size
            CCmax = cross_correlation[maxima]
    # If upsampling > 1, then refine estimate with matrix multiply DFT
    else:
        # Initial shift estimate in upsampled grid
        shifts = cp.around(shifts * upsample_factor) / upsample_factor
        upsampled_region_size = math.ceil(upsample_factor * 1.5)
        # Center of output array at dftshift + 1
        dftshift = np.fix(upsampled_region_size / 2.0)
        upsample_factor = float(upsample_factor)
        # Matrix multiply DFT around the current shift estimate
        sample_region_offset = dftshift - shifts * upsample_factor
        cross_correlation = _upsampled_dft(image_product.conj(),
                                           upsampled_region_size,
                                           upsample_factor,
                                           sample_region_offset).conj()

        # Locate maximum and map back to original pixel grid
        maxima = cp.unravel_index(cp.argmax(cp.abs(cross_correlation)),
                                  cross_correlation.shape)
        CCmax = cross_correlation[maxima]

        maxima = (cp.stack([m.astype(float_dtype, copy=False)
                            for m in maxima]) - dftshift)

        shifts = shifts + maxima / upsample_factor

        if return_error:
            src_amp = cp.abs(src_freq)
            src_amp *= src_amp
            src_amp = cp.sum(src_amp)
            target_amp = cp.abs(target_freq)
            target_amp *= target_amp
            target_amp = cp.sum(target_amp)

    # If it's only one row or column, the shift along that dimension has no
    # effect. We set to zero.
    for dim in range(src_freq.ndim):
        if shape[dim] == 1:
            shifts[dim] = 0

    if return_error:
        # Redirect user to masked_phase_cross_correlation if NaNs are observed
        if cp.isnan(CCmax) or cp.isnan(src_amp) or cp.isnan(target_amp):
            raise ValueError(
                "NaN values found, please remove NaNs from your "
                "input data or use the `reference_mask`/`moving_mask` "
                "keywords, eg: "
                "phase_cross_correlation(reference_image, moving_image, "
                "reference_mask=~np.isnan(reference_image), "
                "moving_mask=~np.isnan(moving_image))")

        return shifts, _compute_error(CCmax, src_amp, target_amp),\
            _compute_phasediff(CCmax)
    else:
        return shifts
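A usage sketch for the port above, assuming its module-level helpers (fft, _upsampled_dft, _compute_error, _compute_phasediff): recover a known shift. The returned vector is the shift needed to register moving_image onto reference_image, so a roll applied to create the moving image comes back negated.

import cupy as cp

reference = cp.random.rand(128, 128)
moving = cp.roll(reference, shift=(-3, 5), axis=(0, 1))
shifts, error, phasediff = phase_cross_correlation(
    reference, moving, upsample_factor=10)
print(shifts)  # approximately [ 3., -5.]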