Example #1
    def transform(self, X) -> SparseCumlArray:
        """Impute all missing values in X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data to complete.
        """
        check_is_fitted(self)

        X = self._validate_input(X, in_fit=False)
        X_indicator = super()._transform_indicator(X)

        statistics = self.statistics_

        if X.shape[1] != statistics.shape[0]:
            raise ValueError("X has %d features per sample, expected %d" %
                             (X.shape[1], self.statistics_.shape[0]))

        # Delete the invalid columns if strategy is not constant
        if self.strategy == "constant":
            valid_statistics = statistics
        else:
            # same as np.isnan but also works for object dtypes
            invalid_mask = _get_mask(statistics, np.nan)
            valid_mask = np.logical_not(invalid_mask)
            valid_statistics = statistics[valid_mask]
            valid_statistics_indexes = np.flatnonzero(valid_mask)

            if invalid_mask.any():
                missing = np.arange(X.shape[1])[invalid_mask]
                if self.verbose:
                    warnings.warn("Deleting features without "
                                  "observed values: %s" % missing)
                X = X[:, valid_statistics_indexes]

        # Do actual imputation
        if sparse.issparse(X):
            if self.missing_values == 0:
                raise ValueError("Imputation not possible when missing_values "
                                 "== 0 and input is sparse. Provide a dense "
                                 "array instead.")
            else:
                mask = _get_mask(X.data, self.missing_values)
                indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.intp),
                                    np.diff(X.indptr).tolist())[mask]

                X.data[mask] = valid_statistics[indexes].astype(X.dtype,
                                                                copy=False)
        else:
            mask = _get_mask(X, self.missing_values)
            if self.strategy == "constant":
                X[mask] = valid_statistics[0]
            else:
                for i, vi in enumerate(valid_statistics_indexes):
                    feature_idxs = np.flatnonzero(mask[:, vi])
                    X[feature_idxs, vi] = valid_statistics[i]

        X = super()._concatenate_indicator(X, X_indicator)
        return X
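The dense branch above reduces to a per-column fill. A minimal standalone sketch of that path, assuming plain numpy and mean statistics (impute_dense and its arguments are illustrative, not cuML's API):

import numpy as np

def impute_dense(X, statistics):
    # Mask of missing entries, the dense analogue of _get_mask above.
    mask = np.isnan(X)
    # Drop features whose statistic is itself NaN, as the snippet does.
    valid = np.flatnonzero(~np.isnan(statistics))
    X, mask, stats = X[:, valid], mask[:, valid], statistics[valid]
    # Fill each column's missing rows with that column's statistic.
    for i in range(X.shape[1]):
        X[np.flatnonzero(mask[:, i]), i] = stats[i]
    return X

X = np.array([[1.0, np.nan], [np.nan, 3.0], [5.0, 6.0]])
print(impute_dense(X, np.array([3.0, 4.5])))  # NaNs become the column statistic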
Example #2
    def __setitem__(self, indices, values):
        if self._array is None:
            self._array = self._ascupy()
        self._array[indices] = values
        # Rebuild the compressed (in_values -> out_values) mapping tables.
        self.in_values = cp.flatnonzero(self._array)
        self._max_label = int(cp.max(self.in_values))
        self.out_values = self._array[self.in_values]
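In context, self._array is a dense lookup table and in_values/out_values are its compressed form. A tiny numpy stand-in for that rebuild step (the array contents are made up for illustration):

import numpy as np

dense = np.zeros(10, dtype=np.int64)
dense[[2, 5, 7]] = [20, 50, 70]     # plays the role of self._array[indices] = values

in_values = np.flatnonzero(dense)   # labels that are actually mapped
out_values = dense[in_values]       # the values they map to
print(in_values, out_values, int(in_values.max()))  # [2 5 7] [20 50 70] 7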
Example #3
def fast_iterative_method(self,data):
	"""
	Applies (a variant of) the fast iterative method.
	"""
	updateNext_o = fd.block_expand(data.trigger,self.shape_i,mode='constant',
		constant_values=False).reshape(self.shape_o+(-1,)).any(axis=-1)
	updateNext_o = cp.ascontiguousarray(updateNext_o.astype(np.uint8))
	scorePrev_o = cp.zeros(self.shape_o, dtype='uint8')
	scoreNext_o = scorePrev_o.copy()
	policy = data.policy
	nitermax_o = policy.nitermax_o
	stop = self.InitStop(data)

	# strict_iter_o needed
	for niter_o in range(nitermax_o):
		if stop(updateNext_o): return niter_o
		updateList_o = cp.ascontiguousarray(cp.flatnonzero(updateNext_o), dtype=self.int_t)
		scorePrev_o,scoreNext_o = scoreNext_o,scorePrev_o
		updateNext_o.fill(0); scoreNext_o.fill(0)
		data.kernel((updateList_o.size,),(self.size_i,), 
			KernelArgs(data) + (updateList_o,scorePrev_o,scoreNext_o,updateNext_o))
#		print("------------- scorePrev_o,scoreNext_o,updateNext_o -------------------")
#		print(scorePrev_o)
#		print(scoreNext_o)
#		print(updateNext_o)

	return nitermax_o
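The loop above is an instance of a general pattern: keep a boolean "needs update" grid, compact it into a work list with flatnonzero, run the kernel, and collect the next wave. A toy CPU-only rehearsal of that pattern, with a 1-D relaxation standing in for the CUDA kernel:

import numpy as np

values = np.array([0.0, np.inf, np.inf, np.inf])
update = np.array([1, 0, 0, 0], dtype=np.uint8)  # the seed schedules itself

for niter in range(10):
    work = np.flatnonzero(update)        # compact active list, as in the snippet
    if work.size == 0:
        break
    update[:] = 0
    for i in work:                       # toy relaxation: distance to the seed
        for j in (i - 1, i + 1):
            if 0 <= j < values.size and values[i] + 1 < values[j]:
                values[j] = values[i] + 1
                update[j] = 1            # neighbour changed, schedule it
print(values)  # [0. 1. 2. 3.]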
Example #4
def flatnonzero(*args):
    if isinstance(args[0], numpy.ndarray):
        return numpy.flatnonzero(*args)
    elif isinstance(args[0], torch.Tensor):
        raise ValueError('flatnonzero does not work with PyTorch tensors')
    else:
        return cupy.flatnonzero(*args)
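A quick usage sketch for the dispatcher above; it routes on the type of the first argument, so numpy input stays on the CPU (exercising the other branches naturally requires cupy or torch to be installed):

import numpy

a = numpy.array([0, 3, 0, 5])
print(flatnonzero(a))  # numpy path -> [1 3]
# A cupy.ndarray falls through to cupy.flatnonzero; a torch.Tensor raises ValueError.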
Example #5
def intersect1d(arr1, arr2, assume_unique=False, return_indices=False):
    """Find the intersection of two arrays.
    Returns the sorted, unique values that are in both of the input arrays.

    Parameters
    ----------
    arr1, arr2 : cupy.ndarray
        Input arrays. Arrays will be flattened if they are not in 1D.
    assume_unique : bool
        By default, False. If set True, the input arrays will be
        assumend to be unique, which speeds up the calculation. If set True,
        but the arrays are not unique, incorrect results and out-of-bounds
        indices could result.
    return_indices : bool
       By default, False. If True, the indices which correspond to the
       intersection of the two arrays are returned.

    Returns
    -------
    intersect1d : cupy.ndarray
        Sorted 1D array of common and unique elements.
    comm1 : cupy.ndarray
        The indices of the first occurrences of the common values
        in `arr1`. Only provided if `return_indices` is True.
    comm2 : cupy.ndarray
        The indices of the first occurrences of the common values
        in `arr2`. Only provided if `return_indices` is True.

    See Also
    --------
    numpy.intersect1d

    """
    if not assume_unique:
        if return_indices:
            arr1, ind1 = cupy.unique(arr1, return_index=True)
            arr2, ind2 = cupy.unique(arr2, return_index=True)
        else:
            arr1 = cupy.unique(arr1)
            arr2 = cupy.unique(arr2)
    else:
        arr1 = arr1.ravel()
        arr2 = arr2.ravel()

    if not return_indices:
        mask = _search._exists_kernel(arr1, arr2, arr2.size, False)
        return arr1[mask]

    mask, v1 = _search._exists_and_searchsorted_kernel(arr1, arr2, arr2.size,
                                                       False)
    int1d = arr1[mask]
    arr1_indices = cupy.flatnonzero(mask)
    arr2_indices = v1[mask]

    if not assume_unique:
        arr1_indices = ind1[arr1_indices]
        arr2_indices = ind2[arr2_indices]

    return int1d, arr1_indices, arr2_indices
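A short usage sketch of this function, assuming a CUDA device with cupy installed (the array values are made up):

import cupy

a = cupy.array([1, 3, 4, 3])
b = cupy.array([3, 1, 2, 1])
print(cupy.intersect1d(a, b))  # [1 3]

vals, ia, ib = cupy.intersect1d(a, b, return_indices=True)
print(vals, ia, ib)  # indices of first occurrences: a[ia] == b[ib] == vals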
Example #6
    def _get_missing_features_info(self, X):
        """Compute the imputer mask and the indices of the features
        containing missing values.

        Parameters
        ----------
        X : {ndarray or sparse matrix}, shape (n_samples, n_features)
            The input data with missing values. Note that ``X`` has been
            checked in ``fit`` and ``transform`` before this function is
            called.

        Returns
        -------
        imputer_mask : {ndarray or sparse matrix}, shape \
        (n_samples, n_features)
            The imputer mask of the original data.

        features_with_missing : ndarray, shape (n_features_with_missing,)
            The features containing missing values.

        """
        if sparse.issparse(X):
            mask = _get_mask(X.data, self.missing_values)

            # The imputer mask will be constructed with the same sparse format
            # as X.
            sparse_constructor = (sparse.csr_matrix
                                  if X.format == 'csr' else sparse.csc_matrix)
            imputer_mask = sparse_constructor(
                (mask, X.indices.copy(), X.indptr.copy()),
                shape=X.shape,
                dtype=np.float32)
            # Temporarily use float32 here, as CuPy cannot operate on
            # boolean sparse matrices at the moment.

            if self.features == 'missing-only':
                n_missing = imputer_mask.sum(axis=0)

            if self.sparse is False:
                imputer_mask = imputer_mask.toarray()
            elif imputer_mask.format == 'csr':
                imputer_mask = imputer_mask.tocsc()
        else:
            imputer_mask = _get_mask(X, self.missing_values)

            if self.features == 'missing-only':
                n_missing = imputer_mask.sum(axis=0)

            if self.sparse is True:
                imputer_mask = sparse.csc_matrix(imputer_mask)

        if self.features == 'all':
            features_indices = np.arange(X.shape[1])
        else:
            features_indices = np.flatnonzero(n_missing)

        return imputer_mask, features_indices
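The dense 'missing-only' branch in miniature, with plain numpy standing in for the GPU arrays:

import numpy as np

X = np.array([[1.0, np.nan, 3.0],
              [4.0, 5.0, np.nan],
              [7.0, 8.0, 9.0]])
imputer_mask = np.isnan(X)            # dense analogue of _get_mask
n_missing = imputer_mask.sum(axis=0)  # per-feature count of missing values
print(np.flatnonzero(n_missing))      # [1 2]: the features containing missing values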
Example #7
def _cross_entropy(image, threshold, bins=_DEFAULT_ENTROPY_BINS):
    """Compute cross-entropy between distributions above and below a threshold.

    Parameters
    ----------
    image : array
        The input array of values.
    threshold : float
        The value dividing the foreground and background in ``image``.
    bins : int or array of float, optional
        The number of bins or the bin edges. (Any valid value to the ``bins``
        argument of ``cp.histogram`` will work here.) For an exact calculation,
        each unique value should have its own bin. The default value for bins
        ensures exact handling of uint8 images: it supplies unit-width bin
        edges centered on each integer value, whereas passing the plain
        integer ``bins=256`` results in aliasing problems because the bin
        width is then no longer equal to 1.

    Returns
    -------
    nu : float
        The cross-entropy target value as defined in [1]_.

    Notes
    -----
    See Li and Lee, 1993 [1]_; this is the objective function ``threshold_li``
    minimizes. This function can be improved but this implementation most
    closely matches equation 8 in [1]_ and equations 1-3 in [2]_.

    References
    ----------
    .. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding"
           Pattern Recognition, 26(4): 617-625
           :DOI:`10.1016/0031-3203(93)90115-D`
    .. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum
           Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776
           :DOI:`10.1016/S0167-8655(98)00057-9`
    """
    bins = cp.asarray(bins)  # required for _DEFAULT_ENTROPY_BINS tuple
    try:
        # use CuPy's implementation when available
        histogram, bin_edges = cp.histogram(image, bins=bins, density=True)
    except TypeError:
        histogram, bin_edges = cnp.histogram(image, bins=bins, density=True)
    try:
        # use CuPy's implementation when available
        bin_centers = cp.convolve(bin_edges, [0.5, 0.5], mode="valid")
    except AttributeError:
        bin_centers = cnp.convolve(bin_edges, [0.5, 0.5], mode="valid")
    t = cp.flatnonzero(bin_centers > threshold)[0]
    m0a = cp.sum(histogram[:t])  # 0th moment, background
    m0b = cp.sum(histogram[t:])
    m1a = cp.sum(histogram[:t] * bin_centers[:t])  # 1st moment, background
    m1b = cp.sum(histogram[t:] * bin_centers[t:])
    mua = m1a / m0a  # mean value, background
    mub = m1b / m0b
    nu = -m1a * cp.log(mua) - m1b * cp.log(mub)
    return nu
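A numpy-only rehearsal of the same computation on a tiny made-up image, using unit-width integer-centered bins as the default does; it reproduces the moments m0/m1 and the objective nu step by step:

import numpy as np

image = np.array([10, 10, 12, 200, 210, 220], dtype=float)
threshold = 100.0
hist, edges = np.histogram(image, bins=np.arange(-0.5, 256.5), density=True)
centers = np.convolve(edges, [0.5, 0.5], mode="valid")
t = np.flatnonzero(centers > threshold)[0]
m0a, m0b = hist[:t].sum(), hist[t:].sum()    # 0th moments (background, foreground)
m1a = (hist[:t] * centers[:t]).sum()         # 1st moments
m1b = (hist[t:] * centers[t:]).sum()
nu = -m1a * np.log(m1a / m0a) - m1b * np.log(m1b / m0b)
print(nu)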
Example #8
def _minor_reduce(X, min_or_max):
    fminmax = ufunc_dic[min_or_max]

    major_index = np.flatnonzero(np.diff(X.indptr))
    values = cpu_np.zeros(major_index.shape[0], dtype=X.dtype)
    ptrs = X.indptr[major_index]

    start = ptrs[0]
    end = start  # guards the case of a single non-empty segment below
    for i, end in enumerate(ptrs[1:]):
        values[i] = fminmax(X.data[start:end])
        start = end
    values[-1] = fminmax(X.data[end:])

    return major_index, np.array(values)
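The indptr bookkeeping above, spelled out on a concrete CSR matrix (scipy stands in for the GPU sparse container):

import numpy as np
from scipy import sparse

X = sparse.csr_matrix(np.array([[1, 0, 2],
                                [0, 0, 0],
                                [3, 4, 0]]))
major_index = np.flatnonzero(np.diff(X.indptr))  # rows that have stored entries
ptrs = X.indptr[major_index]
print(major_index)  # [0 2]: the empty row 1 is skipped entirely
print([X.data[s:e].min() for s, e in zip(ptrs, list(ptrs[1:]) + [X.data.size])])  # [1, 3]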
Example #9
def global_iteration(self,data):
	"""
	Solves the eikonal equation by applying repeatedly the updates on the whole domain.
	"""	
	updateNow_o  = cp.ones(self.shape_o, dtype='uint8')
	updateNext_o = cp.zeros(self.shape_o, dtype='uint8')
	updateList_o = cp.ascontiguousarray(cp.flatnonzero(updateNow_o), dtype=self.int_t)
	nitermax_o = data.policy.nitermax_o
	stop = self.InitStop(data)

	for niter_o in range(nitermax_o):
		data.kernel((updateList_o.size,),(self.size_i,), 
			KernelArgs(data) + (updateList_o,updateNext_o))
		if stop(updateNext_o): return niter_o
		updateNext_o.fill(0)
	return nitermax_o
Example #10
def global_iteration(self, data):
    """
	Solves the eikonal equation by applying repeatedly the updates on the whole domain.
	"""
    updateNow_o = cp.ones(self.shape_o, dtype='uint8')
    updateNext_o = cp.zeros(self.shape_o, dtype='uint8')
    updateList_o = cp.ascontiguousarray(cp.flatnonzero(updateNow_o),
                                        dtype=self.int_t)
    nitermax_o = data.policy.nitermax_o

    for niter_o in range(nitermax_o):
        val_old = data.args['values'].copy()  # snapshot of the values (unused in this excerpt)
        data.kernel((updateList_o.size, ), (self.size_i, ),
                    KernelArgs(data) + (updateList_o, updateNext_o))
        if cp.any(updateNext_o): updateNext_o.fill(0)
        else: return niter_o
    return nitermax_o
Example #11
    def __getitem__(self, index):
        scalar = cp.isscalar(index)
        if scalar:
            index = cp.asarray([index])
        elif isinstance(index, slice):
            start = index.start or 0  # treat None or 0 the same way
            stop = index.stop if index.stop is not None else len(self)
            step = index.step
            index = cp.arange(start, stop, step)
        if index.dtype == bool:
            index = cp.flatnonzero(index)

        out = map_array(
            index,
            self.in_values.astype(index.dtype, copy=False),
            self.out_values,
        )

        if scalar:
            out = out[0]  # TODO: call .item() to transfer 0-dim array to host?
        return out
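The index normalization at the top of this method, isolated with numpy: scalars, slices and boolean masks are all funneled into one integer index array before the table lookup:

import numpy as np

def normalize_index(index, length):
    if np.isscalar(index):
        index = np.asarray([index])
    elif isinstance(index, slice):
        start = index.start or 0                 # treat None or 0 the same way
        stop = index.stop if index.stop is not None else length
        index = np.arange(start, stop, index.step or 1)
    if index.dtype == bool:
        index = np.flatnonzero(index)            # boolean mask -> integer positions
    return index

print(normalize_index(3, 10))                             # [3]
print(normalize_index(slice(2, 6), 10))                   # [2 3 4 5]
print(normalize_index(np.array([True, False, True]), 3))  # [0 2]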
Example #12
def _minor_reduce(X, min_or_max):
    if min_or_max == 'min':
        min_or_max = np.min
    else:
        min_or_max = np.max

    major_index = np.flatnonzero(np.diff(X.indptr))

    # reduceat tries to cast X.indptr to intp, which errors
    # if it is int64 on a 32-bit system.
    # Reinitializing prevents this where possible; see #13737.
    X = type(X)((X.data, X.indices, X.indptr), shape=X.shape)

    value = cpu_np.zeros(len(X.indptr) - 1, dtype=X.dtype)

    start = X.indptr[0]
    for i, end in enumerate(X.indptr[1:]):
        # Note: min/max of an empty slice raises, so this variant assumes
        # every row (or column) has at least one stored entry.
        value[i] = min_or_max(X.data[start:end])
        start = end

    value = np.array(value)
    return major_index, value
Example #13
    def born(self, number_of_agents, ratio_random_birth):
        """
        Method, which born new agents.
        1.  All new agents should be borned in free cells.
        2.  {BORN_IN_BOTTOM} agents are born at the bottom of ENV.
            There can be hypothetical situations, when we already have died agents in bottom cells.
            To avoid this problem, we filter this cells.
        3.  Generate an indexes of borned agents in envs.
            !!!!!WARNING!!!!!: Perhaps situation, when we have less available cells than number of new agents
            (ex.: if we have 10x10x100 env and want to create 200 agents at bottom).
            We can simple handle it, setting number of new agents like min(free_cells, number_of_agents).
        4.  The remaining agents should not appear into already occupied positions.
            {agent_available_env_bottom} is just a view, really we change {agent_available_env} array.
        5. Receive X,Y,Z positions of borned agents. Specify Z manually, because we now, that it is a bottom.
        6.  Other agents should be borned randomly in the whole envs.
            It is too slow to sample from whole {is_available_env}. So, use simple hack - because envs are
            much bigger, than number of agents - let's generate some random indexes there and just select free.
            Todo: Strictly, it can give us problems in some cases, when there will be too many agents,
            but don't worry about it now.

        7. All agents, which were born on the top will die immediately.
        8. Combine all new agents with others.

        :param number_of_agents: number of agents born each turn
        :param ratio_random_birth: ratio of agent birth locations (i.e. base of environment vs. random)
        """
        # (1)
        # (2)
        born_in_bottom = int(number_of_agents * (1 - ratio_random_birth))
        agent_available_env_bottom = self.is_available_env[:, :, -2]
        available_flat_bottom_positions = cp.flatnonzero(
            agent_available_env_bottom)
        # (3)
        selected_flat_bottom_positions = cp.random.choice(
            available_flat_bottom_positions, born_in_bottom, replace=False)
        # (4)
        self.is_available_env[:, :, -2].ravel(
        )[selected_flat_bottom_positions] = False
        # (5)
        bottom_agents_positions = cp.unravel_index(
            selected_flat_bottom_positions,
            (*agent_available_env_bottom.shape, 1))
        bottom_agents_positions = cp.vstack(bottom_agents_positions)
        bottom_agents_positions[2] = (self.is_available_env.shape[2] - 2)

        # (6)
        born_in_random = number_of_agents - born_in_bottom
        random_positions = cp.array([
            # Use numpy function, because it is faster.
            np.random.randint(1, ax_shape - 1, born_in_random * 4)
            for ax_shape in self.is_available_env.shape
        ])
        random_flat_positions = cp.ravel_multi_index(
            random_positions, self.is_available_env.shape)

        is_available = self.is_available_env.ravel()[random_flat_positions]

        selected_flat_uniform_positions = random_flat_positions[
            is_available][:born_in_random]
        uniform_agents_positions = cp.unravel_index(
            selected_flat_uniform_positions, self.is_available_env.shape)
        uniform_agents_positions = cp.vstack(uniform_agents_positions)
        # Todo: This code is correct, but too slow. Replace it with code above.

        # available_flat_uniform_positions = cp.flatnonzero(self.is_available_env)
        # selected_flat_uniform_positions = cp.random.choice(
        #     available_flat_uniform_positions,
        #     number_of_agents - born_in_bottom,
        #     replace=False
        # )
        # uniform_agents_positions = cp.unravel_index(selected_flat_uniform_positions, self.is_available_env.shape)
        # uniform_agents_positions = cp.vstack(uniform_agents_positions)

        # (7)
        new_agent_positions = cp.hstack(
            [uniform_agents_positions, bottom_agents_positions]).T
        new_agent_state = (new_agent_positions[:, 2] != 1).astype(cp.bool_)

        # (8)
        if self._agents_positions is None:
            self._agents_positions = new_agent_positions
            self.agents_state = new_agent_state
        else:
            self._agents_positions = cp.vstack(
                [self._agents_positions, new_agent_positions])
            self.agents_state = cp.hstack([self.agents_state, new_agent_state])

        self.is_available_env.ravel()[self.agents_flat_positions] = False
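Steps (2)-(4) boil down to "pick k random free cells and occupy them". A numpy miniature of that selection (the grid is made up):

import numpy as np

free = np.array([[True, False, True],
                 [True, True, False]])
flat_free = np.flatnonzero(free)                 # flat indices of the free cells
chosen = np.random.choice(flat_free, 2, replace=False)
free.ravel()[chosen] = False                     # occupy them, as in step (4)
print(np.vstack(np.unravel_index(chosen, free.shape)).T)  # (row, col) of the newborns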
Example #14
    def move(self,
             move_preference_matrix,
             move_probability_matrix,
             ratio_random_move=0.1):
        """
        1.  Select all living agents and their neighbours.
        2.  Create a movement matrix. All occupied by agent cells should be unavailable for move.
            add {move_preference_matrix} for values of neighbours.
        3.  If agent does not have any available cells for moving - it should die.
            Drop all died agents from current moving agents.
        4.  10% of the time the agent moves randomly.
            Agent can't go to unavailable cells, so we recalculate probability for available neighbours.
            (sum of prob should be 1).
        5.  Vectorized way to get random indices from array of probs. Like random.choice, but for 2d array.
        6.  Find new flat indexes for random moving agents.
        7.  Find new flat indexes for normal moving agents. Before argmax selection we shuffle neighbours,
            otherwise we will use always first max index.
        8.  Create an array with new agents positions.
        9.  If two agents want to occupy same cell - then we accept only first.
            All agents, which was declined to move because of collision will die.
        10. If agent reach top - it dies too.


        :param move_preference_matrix:  The agent decides which space to move to by adding this move
                                        preference array to the value array of the surrounding environment.

        :param move_probability_matrix:  10% of the time the agent moves randomly to an adjacent space.
                                         It is the move probability matrix.
        :return:
        """
        # (1)
        live_agents_neighbour_flat_positions = self.agents_neighbour_flat_positions[
            self.agents_state]
        # (2)
        move_candidates = self.env.ravel(
        )[live_agents_neighbour_flat_positions].copy()

        is_available = self.is_available_env.ravel(
        )[live_agents_neighbour_flat_positions]
        move_candidates[~is_available] = cp.nan
        move_candidates = move_candidates + cp.asarray(move_preference_matrix)

        # (3)
        should_die = cp.all(cp.isnan(move_candidates.reshape(-1, 27)), axis=1)
        should_die_agents = cp.flatnonzero(self.agents_state)[should_die]

        self.agents_state[should_die_agents] = False

        move_candidates = move_candidates[~should_die]
        live_agents_neighbour_flat_positions = live_agents_neighbour_flat_positions[
            ~should_die]

        # (4)
        is_random_move = cp.random.binomial(
            1, ratio_random_move,
            live_agents_neighbour_flat_positions.shape[0])
        is_random_move = is_random_move.astype(cp.bool_)
        random_move_candidates = move_candidates[is_random_move]

        random_move_probs = (~cp.isnan(random_move_candidates) *
                             cp.asarray(move_probability_matrix)).reshape(
                                 -1, 27)
        random_move_probs /= random_move_probs.sum(axis=1)[:, None]

        # (5)
        random_vals = cp.expand_dims(cp.random.rand(
            random_move_probs.shape[0]),
                                     axis=1)
        random_indexes = (random_move_probs.cumsum(axis=1) >
                          random_vals).argmax(axis=1)

        # (6)
        random_live_agents_neighbour_flat_positions = live_agents_neighbour_flat_positions[
            is_random_move]
        random_new_positions = cp.take_along_axis(
            random_live_agents_neighbour_flat_positions.reshape(-1, 27),
            random_indexes[:, None],
            axis=1).T[0]

        # (7)
        normal_move_candidates = move_candidates[~is_random_move]

        # normal_move_indexes = cp.nanargmax(normal_move_candidates.reshape(-1, 27), axis=1)[:, None]
        # smart analog of cp.nanargmax(normal_move_candidates.reshape(-1, 27), axis=1)[:, None]

        normal_flattened_move_candidates = normal_move_candidates.reshape(
            -1, 27)
        normal_shuffled_candidates_idx = cp.random.rand(
            *normal_flattened_move_candidates.shape).argsort(axis=1)
        normal_shuffled_flattened_move_candidates = cp.take_along_axis(
            normal_flattened_move_candidates,
            normal_shuffled_candidates_idx,
            axis=1)
        normal_shuffled_candidates_max_idx = cp.nanargmax(
            normal_shuffled_flattened_move_candidates, axis=1)[:, None]

        normal_move_indexes = cp.take_along_axis(
            normal_shuffled_candidates_idx,
            normal_shuffled_candidates_max_idx,
            axis=1)
        ####

        normal_live_agents_neighbour_flat_positions = live_agents_neighbour_flat_positions[
            ~is_random_move]
        normal_move_new_positions = cp.take_along_axis(
            normal_live_agents_neighbour_flat_positions.reshape(-1, 27),
            normal_move_indexes,
            axis=1).T[0]
        # (8)
        moving_agents_flat_positions = self.agents_flat_positions[
            self.agents_state]
        new_agents_flat_positions = moving_agents_flat_positions.copy()

        new_agents_flat_positions[is_random_move] = random_new_positions

        new_agents_flat_positions[~is_random_move] = normal_move_new_positions

        live_agents_indexes = cp.flatnonzero(self.agents_state)

        # (9)
        _, flat_positions_first_entry = cp.unique(new_agents_flat_positions,
                                                  return_index=True)

        is_live = cp.zeros_like(new_agents_flat_positions).astype(cp.bool_)
        is_live[flat_positions_first_entry] = True

        new_agents_flat_positions[~is_live] = moving_agents_flat_positions[
            ~is_live]
        new_agents_positions = cp.array(
            cp.unravel_index(new_agents_flat_positions, self.env.shape)).T

        # (10)
        is_live[new_agents_positions[:, 2] == 1] = False

        self._agents_positions[live_agents_indexes] = new_agents_positions
        self.agents_state[live_agents_indexes] = is_live

        self.is_available_env.ravel()[moving_agents_flat_positions] = True
        self.is_available_env.ravel()[new_agents_flat_positions] = False

        self._agents_positions_all_time.append(
            cp.asnumpy(self._agents_positions))
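Step (5) above is a vectorized per-row categorical draw: compare each row's cumulative probabilities against one uniform sample and take the first bin that exceeds it. Isolated in numpy:

import numpy as np

probs = np.array([[0.1, 0.6, 0.3],
                  [0.5, 0.25, 0.25]])  # one distribution per moving agent
r = np.random.rand(probs.shape[0])[:, None]
picks = (probs.cumsum(axis=1) > r).argmax(axis=1)
print(picks)  # sampled column index per row, like random.choice row by row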
Example #15
def adaptive_gauss_siedel_iteration(self,data):
	"""
	Solves the eikonal equation by propagating updates, ignoring causality. 
	"""
	
	update_o = fd.block_expand(data.trigger,self.shape_i,mode='constant',
		constant_values=False).reshape(self.shape_o+(-1,)).any(axis=-1)
	update_o = cp.ascontiguousarray(update_o.astype(np.uint8))
	policy = data.policy
	nitermax_o = policy.nitermax_o
	stop = self.InitStop(data)

	"""Pruning drops the complexity of one scheme iteration from N_act+eps*N to N_act, 
	where N is the number of points, N_act is the number of active points, and eps is a 
	small but positive constant related with the block size. 
	However it usually has no effect on performance, or a slight negative effect, due
	to the smallness of eps. 

	Nevertheless, pruning allows the bound_active_blocks method,
	which does, sometimes, have a significant positive effect on performance.
	It is somewhat related to the Group marching method, and tries to have similar 
	arrival times among the front active points, to take advantage of causality.
	"""
	if data.traits['pruning_macro']:
		updatePrev_o = update_o * (2*self.ndim+1) # Seeds cause their own initial update
		updateNext_o = np.full_like(update_o,0)
		updateList_o = cp.ascontiguousarray(cp.flatnonzero(updatePrev_o), dtype=self.int_t)

		if policy.bound_active_blocks:
			policy.minChgPrev_thres = np.inf
			policy.minChgNext_thres = np.inf
			SetModuleConstant(data.module,'minChgPrev_thres',policy.minChgPrev_thres,self.float_t)
			SetModuleConstant(data.module,'minChgNext_thres',policy.minChgNext_thres,self.float_t)
			minChgPrev_o = cp.full(self.shape_o,np.inf,dtype=self.float_t)
			minChgNext_o = minChgPrev_o.copy()
			def minChg(): return (minChgPrev_o,minChgNext_o)
		else:
			def minChg(): return tuple()

		for niter_o in range(nitermax_o):
			updateList_o = np.repeat(updateList_o,2*self.ndim+1)
			if updateList_o.size==0: return niter_o
			data.kernel((updateList_o.size,),(self.size_i,), 
				KernelArgs(data) + minChg() + (updateList_o,updatePrev_o,updateNext_o))

			"""
			print("--------------- Called kernel ---------------")
			show = np.zeros_like(updateNext_o)
			l=updateList_o
			flat(show)[ l[l<self.size_o] ]=1 # Active
			flat(show)[ l[l>=self.size_o]-self.size_o ]=2 # Frozen
			print(show); #print(np.max(self.block['valuesq']))
			"""

#			print("after kernel: \n",updateNext_o,"\n")
			updatePrev_o,updateNext_o = updateNext_o,updatePrev_o
			updateList_o = updateList_o[updateList_o!=-1]
			if policy.bound_active_blocks: 
				set_minChg_thres(self,data,updateList_o,minChgNext_o)
				minChgPrev_o,minChgNext_o = minChgNext_o,minChgPrev_o

	else: # No pruning
		for niter_o in range(nitermax_o):
			if stop(update_o): return niter_o
			updateList_o = cp.ascontiguousarray(cp.flatnonzero(update_o), dtype=self.int_t)
			update_o.fill(0)
#			if updateList_o.size==0: return niter_o
			data.kernel((updateList_o.size,),(self.size_i,), 
				KernelArgs(data) + (updateList_o,update_o))


	return nitermax_o
Example #16
def adaptive_gauss_siedel_iteration(self, data):
    """
	Solves the eikonal equation by propagating updates, ignoring causality. 
	"""

    trigger = data.trigger
    if trigger.shape == self.shape:
        trigger = misc.block_expand(data.trigger,
                                    self.shape_i,
                                    mode='constant',
                                    constant_values=False)
    trigger = np.any(trigger.reshape(self.shape_o + (-1, )), axis=-1)
    update_o = cp.ascontiguousarray(trigger.astype(np.uint8))
    policy = data.policy
    nitermax_o = policy.nitermax_o
    if policy.count_updates:
        nupdate_o = cp.zeros(self.shape_o, dtype=self.int_t)
        data.stats["nupdate_o"] = nupdate_o
    """Pruning drops the complexity from N+eps*N^(1+1/d) to N, where N is the number 
	of points and eps is a small but positive constant related with the block size. 
	However it usually has no effect on performance, or a slight negative effect, due
	to the smallness of eps. Nevertheless, pruning allows the bound_active_blocks method,
	which does, sometimes, have a significant positive effect on performance."""
    if data.traits['pruning_macro']:
        updatePrev_o = update_o * (2 * self.ndim + 1
                                   )  # Seeds cause their own initial update
        updateNext_o = np.full_like(update_o, 0)
        updateList_o = cp.ascontiguousarray(cp.flatnonzero(updatePrev_o),
                                            dtype=self.int_t)

        if policy.bound_active_blocks:
            policy.minChgPrev_thres = np.inf
            policy.minChgNext_thres = np.inf
            SetModuleConstant(data.module, 'minChgPrev_thres',
                              policy.minChgPrev_thres, self.float_t)
            SetModuleConstant(data.module, 'minChgNext_thres',
                              policy.minChgNext_thres, self.float_t)
            minChgPrev_o = cp.full(self.shape_o, np.inf, dtype=self.float_t)
            minChgNext_o = minChgPrev_o.copy()

            def minChg():
                return (minChgPrev_o, minChgNext_o)
        else:

            def minChg():
                return tuple()

        for niter_o in range(nitermax_o):

            #print(updatePrev_o)

            updateList_o = np.repeat(updateList_o, 2 * self.ndim + 1)
            if updateList_o.size == 0: return niter_o
            data.kernel((updateList_o.size, ), (self.size_i, ),
                        KernelArgs(data) + minChg() +
                        (updateList_o, updatePrev_o, updateNext_o))
            """
			print("--------------- Called kernel ---------------")
			show = np.zeros_like(updateNext_o)
			l=updateList_o
			flat(show)[ l[l<self.size_o] ]=1 # Active
			flat(show)[ l[l>=self.size_o]-self.size_o ]=2 # Frozen
			print(show); #print(np.max(self.block['valuesq']))
			"""

            #			print("after kernel: \n",updateNext_o,"\n")
            updatePrev_o, updateNext_o = updateNext_o, updatePrev_o
            updateList_o = updateList_o[updateList_o != -1]
            if policy.bound_active_blocks:
                self.set_minChg_thres(data, updateList_o, minChgNext_o)
                minChgPrev_o, minChgNext_o = minChgNext_o, minChgPrev_o

    else:  # No pruning
        for niter_o in range(nitermax_o):
            updateList_o = cp.ascontiguousarray(cp.flatnonzero(update_o),
                                                dtype=self.int_t)
            #			print(update_o.astype(int)); print()
            if policy.count_updates: nupdate_o += update_o
            update_o.fill(0)
            if updateList_o.size == 0: return niter_o
            #			for key,value in self.block.items(): print(key,type(value))
            data.kernel((updateList_o.size, ), (self.size_i, ),
                        KernelArgs(data) + (updateList_o, update_o))


#			print(self.block['values'])
#			print(self.block['values'],self.block['valuesNext'],self.block['values'] is self.block['valuesNext'])

    return nitermax_o