Example No. 1
def FSITM(HDR, LDR, alpha=None):

    NumPixels = LDR.size

    if alpha is None:
        r = cp.floor(NumPixels / (2.**18))
        if r > 1.:
            alpha = 1. - (1. / r)
        else:
            alpha = 0.

    minNonzero = cp.min(HDR[HDR > 0])
    LogH = cp.log(cp.maximum(HDR, minNonzero))

    # float is needed for further calculation
    LogH = cp.around((LogH - LogH.min()) * 255. /
                     (LogH.max() - LogH.min())).astype(cp.float64)

    if alpha > 0.:
        PhaseHDR_CH = phasecong100(HDR, 2, 2, 8, 8)
        PhaseLDR_CH8 = phasecong100(LDR, 2, 2, 8, 8)
    else:  # so, if image size is smaller than 512x512?
        PhaseHDR_CH = 0
        PhaseLDR_CH8 = 0

    PhaseLogH = phasecong100(LogH, 2, 2, 2, 2)
    PhaseH = alpha * PhaseHDR_CH + (1 - alpha) * PhaseLogH

    PhaseLDR_CH2 = phasecong100(LDR, 2, 2, 2, 2)
    PhaseL = alpha * PhaseLDR_CH8 + (1 - alpha) * PhaseLDR_CH2
    Q = cp.sum(
        cp.logical_or(cp.logical_and(PhaseL <= 0, PhaseH <= 0),
                      cp.logical_and(PhaseL > 0, PhaseH > 0))) / NumPixels
    return Q
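A minimal usage sketch for the function above, assuming CuPy is installed and the companion `phasecong100` helper (not shown here) is importable; the HDR/LDR images are synthetic placeholders rather than real tone-mapping data:

import cupy as cp

# Hypothetical inputs: a synthetic HDR image and a crude log tone-mapped LDR version.
hdr = cp.random.rand(512, 512).astype(cp.float32) * 1e4
ldr = cp.around(cp.log1p(hdr) * 255. / float(cp.log1p(hdr).max()))

q = FSITM(hdr, ldr)   # alpha is derived from the pixel count when omitted
print(float(q))       # feature similarity index in [0, 1]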
Example No. 2
def binary_hit_or_miss(input, structure1=None, structure2=None, output=None,
                       origin1=0, origin2=None):
    """
    Multidimensional binary hit-or-miss transform.

    The hit-or-miss transform finds the locations of a given pattern
    inside the input image.

    Args:
        input (cupy.ndarray): Binary image where a pattern is to be detected.
        structure1 (cupy.ndarray, optional): Part of the structuring element to
            be fitted to the foreground (non-zero elements) of ``input``. If no
            value is provided, a structure of square connectivity 1 is chosen.
        structure2 (cupy.ndarray, optional): Second part of the structuring
            element that has to miss completely the foreground. If no value is
            provided, the complementary of ``structure1`` is taken.
        output (cupy.ndarray, dtype or None, optional): Array of the same shape
            as input, into which the output is placed. By default, a new array
            is created.
        origin1 (int or tuple of ints, optional): Placement of the first part
            of the structuring element ``structure1``, by default 0 for a
            centered structure.
        origin2 (int or tuple of ints or None, optional): Placement of the
            second part of the structuring element ``structure2``, by default 0
            for a centered structure. If a value is provided for ``origin1``
            and not for ``origin2``, then ``origin2`` is set to ``origin1``.

    Returns:
        cupy.ndarray: Hit-or-miss transform of ``input`` with the given
        structuring element (``structure1``, ``structure2``).

    .. warning::

        This function may synchronize the device.

    .. seealso:: :func:`scipy.ndimage.binary_hit_or_miss`
    """
    if structure1 is None:
        structure1 = generate_binary_structure(input.ndim, 1)
    if structure2 is None:
        structure2 = cupy.logical_not(structure1)
    origin1 = _util._fix_sequence_arg(origin1, input.ndim, 'origin1', int)
    if origin2 is None:
        origin2 = origin1
    else:
        origin2 = _util._fix_sequence_arg(origin2, input.ndim, 'origin2', int)

    tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1, 0,
                           False)
    inplace = isinstance(output, cupy.ndarray)
    result = _binary_erosion(input, structure2, 1, None, output, 0, origin2, 1,
                             False)
    if inplace:
        cupy.logical_not(output, output)
        cupy.logical_and(tmp1, output, output)
    else:
        cupy.logical_not(result, result)
        return cupy.logical_and(tmp1, result)
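A short usage sketch, assuming this is the `cupyx.scipy.ndimage.binary_hit_or_miss` entry point referenced in the ``seealso`` above. With the default structuring elements it reports pixels whose 4-neighbourhood is entirely foreground while the diagonal corners are background:

import cupy
from cupyx.scipy import ndimage

image = cupy.zeros((7, 7), dtype=bool)
image[3, 2:5] = True          # a plus-shaped pattern centred at (3, 3)
image[2:5, 3] = True

# structure1 defaults to connectivity-1 (a plus), structure2 to its complement
# (the corners), so only the centre of the isolated plus is reported.
hits = ndimage.binary_hit_or_miss(image)
print(cupy.argwhere(hits))    # expected: [[3 3]]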
Example No. 3
def _preprocess(labels):

    label_values, inv_idx = cp.unique(labels, return_inverse=True)
    if not (label_values == 0).any():
        warn('Random walker only segments unlabeled areas, where '
             'labels == 0. No zero valued areas in labels were '
             'found. Returning provided labels.',
             stacklevel=2)

        return labels, None, None, None, None

    # If some labeled pixels are isolated inside pruned zones, prune them
    # as well and keep the labels for the final output

    null_mask = labels == 0
    pos_mask = labels > 0
    mask = labels >= 0

    fill = ndi.binary_propagation(null_mask, mask=mask)
    isolated = cp.logical_and(pos_mask, cp.logical_not(fill))

    pos_mask[isolated] = False

    # If the array has pruned zones, be sure that no isolated pixels
    # exist between pruned zones (they could not be determined)
    if label_values[0] < 0 or cp.any(isolated):  # synchronize!
        isolated = cp.logical_and(
            cp.logical_not(ndi.binary_propagation(pos_mask, mask=mask)),
            null_mask)

        labels[isolated] = -1
        if cp.all(isolated[null_mask]):
            warn('All unlabeled pixels are isolated, they could not be '
                 'determined by the random walker algorithm.',
                 stacklevel=2)
            return labels, None, None, None, None

        mask[isolated] = False
        mask = cp.atleast_3d(mask)

    else:
        mask = None

    # Reorder label values to have consecutive integers (no gaps)
    zero_idx = cp.searchsorted(label_values, cp.array(0))
    labels = cp.atleast_3d(inv_idx.reshape(labels.shape) - zero_idx)

    nlabels = label_values[zero_idx + 1:].shape[0]

    inds_isolated_seeds = cp.nonzero(isolated)
    isolated_values = labels[inds_isolated_seeds]

    return labels, nlabels, mask, inds_isolated_seeds, isolated_values
Example No. 4
    def getProj(self, obs, center_pixel, rz, z, ry, rx):
        patch = self.getPatch(obs, center_pixel, torch.zeros_like(rz))
        patch = np.round(patch.cpu().numpy(), 5)
        patch = cp.array(patch)
        projections = []
        size = self.patch_size
        zs = cp.array(z.numpy()) + cp.array(
            [(-size / 2 + j) * self.heightmap_resolution for j in range(size)])
        zs = zs.reshape((zs.shape[0], 1, 1, zs.shape[1]))
        zs = zs.repeat(size, 1).repeat(size, 2)
        c = patch.reshape(patch.shape[0], self.patch_size, self.patch_size,
                          1).repeat(size, 3)
        ori_occupancy = c > zs
        # transform into points
        point_w_d = cp.argwhere(ori_occupancy)

        rz_id = (rz.expand(-1, self.num_rz) - self.rzs).abs().argmin(1)
        ry_id = (ry.expand(-1, self.num_ry) - self.rys).abs().argmin(1)
        rx_id = (rx.expand(-1, self.num_rx) - self.rxs).abs().argmin(1)

        dimension = point_w_d[:, 0]
        point = point_w_d[:, 1:4]

        rz_id = cp.array(rz_id)
        ry_id = cp.array(ry_id)
        rx_id = cp.array(rx_id)
        mapped_point = self.map[rz_id[dimension], ry_id[dimension],
                                rx_id[dimension], point[:, 0], point[:, 1],
                                point[:, 2]].T
        rotated_point = mapped_point.T[(cp.logical_and(
            0 < mapped_point.T, mapped_point.T < size)).all(1)]
        d = dimension[(cp.logical_and(
            0 < mapped_point.T, mapped_point.T < size)).all(1)].T.astype(int)

        for i in range(patch.shape[0]):
            point = rotated_point[d == i].T
            occupancy = cp.zeros((size, size, size))
            if point.shape[0] > 0:
                occupancy[point[0], point[1], point[2]] = 1

            occupancy = median_filter(occupancy, size=2)
            occupancy = cp.ceil(occupancy)

            projection = cp.stack(
                (occupancy.sum(0), occupancy.sum(1), occupancy.sum(2)))
            projections.append(projection)

        return torch.tensor(cp.stack(projections)).float().to(self.device)
Example No. 5
def _build_laplacian(data, spacing, mask, beta, multichannel):
    l_x, l_y, l_z = data.shape[:3]
    edges = _make_graph_edges_3d(l_x, l_y, l_z)
    weights = _compute_weights_3d(data, spacing, beta=beta, eps=1.e-10,
                                  multichannel=multichannel)
    assert weights.dtype == data.dtype
    if mask is not None:
        # Remove edges of the graph connected to masked nodes, as well
        # as corresponding weights of the edges.
        mask0 = cp.concatenate([mask[..., :-1].ravel(), mask[:, :-1].ravel(),
                                mask[:-1].ravel()])
        mask1 = cp.concatenate([mask[..., 1:].ravel(), mask[:, 1:].ravel(),
                                mask[1:].ravel()])
        ind_mask = cp.logical_and(mask0, mask1)
        edges, weights = edges[:, ind_mask], weights[ind_mask]

        # Reassign edges labels to 0, 1, ... edges_number - 1
        _, inv_idx = cp.unique(edges, return_inverse=True)
        edges = inv_idx.reshape(edges.shape)

    # Build the sparse linear system
    pixel_nb = l_x * l_y * l_z
    i_indices = edges.ravel()
    j_indices = edges[::-1].ravel()
    data = cp.concatenate((weights, weights))
    lap = sparse.coo_matrix((data, (i_indices, j_indices)),
                            shape=(pixel_nb, pixel_nb))
    # need CSR instead of COO for indexing used later in _build_linear_system
    lap = lap.tocsr()
    lap.setdiag(-cp.ravel(lap.sum(axis=0)))
    return lap
Example No. 6
def local_cm(y_y_pred, unique_labels, sample_weight):

    y_true, y_pred = y_y_pred
    labels = unique_labels

    n_labels = labels.size

    # Assume labels are monotonically increasing for now.

    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = cp.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]

    if sample_weight is None:
        sample_weight = cp.ones(y_true.shape[0], dtype=np.int64)
    else:
        sample_weight = cp.asarray(sample_weight)

    sample_weight = sample_weight[ind]

    cm = cp.sparse.coo_matrix(
        (sample_weight, (y_true, y_pred)),
        shape=(n_labels, n_labels),
        dtype=cp.float32,
    ).toarray()

    return cp.nan_to_num(cm)
Example No. 7
 def test_01_01_circle(self):
     """Test that the Canny filter finds the outlines of a circle"""
     i, j = cp.mgrid[-200:200, -200:200].astype(float) / 200
     c = cp.abs(cp.sqrt(i * i + j * j) - 0.5) < 0.02
     result = feature.canny(c.astype(float), 4, 0, 0,
                            cp.ones(c.shape, bool))
     #
     # erode and dilate the circle to get rings that should contain the
     # outlines
     #
     # TODO: grlee77: only implemented brute_force=True, so added that to
     #                these tests
     cd = binary_dilation(c, iterations=3, brute_force=True)
     ce = binary_erosion(c, iterations=3, brute_force=True)
     cde = cp.logical_and(cd, cp.logical_not(ce))
     self.assertTrue(cp.all(cde[result]))
     #
     # The circle has a radius of 100. There are two rings here, one
     # for the inside edge and one for the outside. So that's
     # 100 * 2 * 2 * 3 for those places where pi is still 3.
     # The edge contains both pixels if there's a tie, so we
     # bump the count a little.
     point_count = cp.sum(result)
     self.assertTrue(point_count > 1200)
     self.assertTrue(point_count < 1600)
Example No. 8
 def updateSublattice(self, sublattice):
     boltzmanFactor = np.exp(2 * self.interactionEnergies /
                             (self.k * self.t))
     evenDist = np.random.uniform(0, 1, size=self.spec)
     temp1 = np.greater(self.interactionEnergies, self.ground)
     temp2 = np.greater(boltzmanFactor, evenDist)
     criteria = np.logical_and(sublattice, np.logical_or(temp1, temp2))
     self.system = np.where(criteria, -self.system, self.system)
     self.updateEnergies()
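For context, a self-contained sketch of the same checkerboard Metropolis acceptance rule on a toy Ising lattice; the names (`spins`, `J`, `kT`) are illustrative and not taken from the original class:

import numpy as np

rng = np.random.default_rng(0)
spins = rng.choice([-1, 1], size=(8, 8))       # toy spin lattice
J, kT = 1.0, 2.0

# Local interaction energy of each spin with its four neighbours (periodic).
neighbours = (np.roll(spins, 1, 0) + np.roll(spins, -1, 0)
              + np.roll(spins, 1, 1) + np.roll(spins, -1, 1))
energy = -J * spins * neighbours

# Metropolis rule: always flip when the flip lowers the energy (energy > 0 here),
# otherwise accept with probability exp(2 * energy / kT).
boltzmann = np.exp(2 * energy / kT)
accept = np.logical_or(energy > 0, boltzmann > rng.uniform(size=spins.shape))

# Restrict the update to one checkerboard sublattice so neighbours stay fixed.
sublattice = (np.indices(spins.shape).sum(axis=0) % 2) == 0
spins = np.where(np.logical_and(sublattice, accept), -spins, spins)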
Example No. 9
def lobe_calc(data4DF, Four_Y, Four_X, FourXY, rsize, cutoff, chunks):
    stops = np.zeros(chunks + 1, dtype=np.int64)
    stops[0:chunks] = np.arange(0, data4DF.shape[-1],
                                (data4DF.shape[-1] / chunks))
    stops[chunks] = data4DF.shape[-1]

    left_image = cp.zeros_like(FourXY, dtype=np.complex64)
    rightimage = cp.zeros_like(FourXY, dtype=np.complex64)
    d_zero = FourXY < cutoff

    for cc in range(chunks):
        startval = stops[cc]
        stop_val = stops[cc + 1]
        gpu_4Dchunk = cp.asarray(data4DF[:, :, startval:stop_val])
        rcalc = rsize[startval:stop_val, :]
        for pp in range(rcalc.shape[0]):
            ii, jj = rcalc[pp, :]
            xq = Four_X[ii, jj]
            yq = Four_Y[ii, jj]

            cbd = gpu_4Dchunk[:, :, pp]
            cbd_phase = cp.angle(cbd)
            cbd_ampli = cp.absolute(cbd)

            d_plus = (((Four_X + xq)**2) + ((Four_Y + yq)**2))**0.5
            d_minu = (((Four_X - xq)**2) + ((Four_Y - yq)**2))**0.5

            ll = cp.logical_and((d_plus < cutoff), (d_minu > cutoff))
            ll = cp.logical_and(ll, d_zero)

            rr = cp.logical_and((d_plus > cutoff), (d_minu < cutoff))
            rr = cp.logical_and(rr, d_zero)

            left_trotter = cp.multiply(cbd_ampli[ll],
                                       cp.exp((1j) * cbd_phase[ll]))
            righttrotter = cp.multiply(cbd_ampli[rr],
                                       cp.exp((1j) * cbd_phase[rr]))

            left_image[ii, jj] = cp.sum(left_trotter)
            rightimage[ii, jj] = cp.sum(righttrotter)

    del gpu_4Dchunk, d_plus, d_minu, ll, rr, left_trotter, righttrotter, cbd, cbd_phase, cbd_ampli, d_zero, rcalc
    return left_image, rightimage
Example No. 10
def logical_and(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.logical_and <numpy.logical_and>`.

    See its docstring for more information.
    """
    if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
        raise TypeError("Only boolean dtypes are allowed in logical_and")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    x1, x2 = Array._normalize_two_args(x1, x2)
    return Array._new(np.logical_and(x1._array, x2._array))
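A brief usage sketch, assuming this wrapper lives in an Array API namespace along the lines of NumPy's experimental `numpy.array_api` module; non-boolean inputs are rejected with the TypeError raised above:

import numpy.array_api as xp   # experimental Array API namespace

a = xp.asarray([True, True, False])
b = xp.asarray([True, False, False])
print(xp.logical_and(a, b))    # elementwise AND -> [True, False, False]

try:
    xp.logical_and(xp.asarray([1, 0, 1]), b)   # integer dtype is not allowed
except TypeError as exc:
    print(exc)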
Example No. 11
def digitize(x, bins):
    # With right = False and bins in increasing order
    out = np.full(shape=x.shape, fill_value=0, dtype=np.int32)
    for i in range(1, len(bins)):
        bool_arr = np.logical_and(bins[i - 1] <= x, x < bins[i])
        matched = np.where(bool_arr)
        out[matched] = i

    bool_arr = x >= bins[-1]
    matched = np.where(bool_arr)
    out[matched] = len(bins)
    return out
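A quick check of the helper against NumPy's built-in, assuming `bins` is strictly increasing (the `right=False` convention noted in the comment above):

import numpy as np

x = np.array([[0.2, 1.5], [3.0, 7.0]])
bins = np.array([1.0, 2.5, 4.0])

print(digitize(x, bins))        # [[0 1]
                                #  [2 3]]
print(np.digitize(x, bins))     # same indices with the default right=False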
Example No. 12
def test_salt_and_pepper():
    seed = 42
    cam = img_as_float(camerad)
    cam_noisy = random_noise(cam,
                             seed=seed,
                             mode='s&p',
                             amount=0.15,
                             salt_vs_pepper=0.25)
    saltmask = cp.logical_and(cam != cam_noisy, cam_noisy == 1.)
    peppermask = cp.logical_and(cam != cam_noisy, cam_noisy == 0.)

    # Ensure all changes are to 0. or 1.
    assert_allclose(cam_noisy[saltmask], cp.ones(int(saltmask.sum())))
    assert_allclose(cam_noisy[peppermask], cp.zeros(int(peppermask.sum())))

    # Ensure approximately correct amount of noise was added
    proportion = float(saltmask.sum() + peppermask.sum()) / (cam.shape[0] *
                                                             cam.shape[1])
    assert 0.11 < proportion <= 0.18

    # Verify the relative amount of salt vs. pepper is close to expected
    assert 0.18 < saltmask.sum() / peppermask.sum() < 0.35
Example No. 13
    def _process_pair(self, x: int, y: int):
        n_bins = self.n_bins

        # compute mask of amplitude quantile for each sample in the data
        for i in range(n_bins):
            self.mask_buffer[i] = (self.data_amplitude_labels[x]
                                   == i) & (self.data_thresholded[x])
            self.mask_buffer[i + n_bins] = (self.data_amplitude_labels[y]
                                            == i) & (self.data_thresholded[y])

        # The inner product of these masks equals the sum of the logical AND for
        # each pair, but without explicit Python loops => faster.
        # However, the bool -> int type casting is not ideal.
        self.frequency_samples[:, :, x, y] = cp.inner(
            self.mask_buffer[:n_bins].astype(int),
            self.mask_buffer[n_bins:~0].astype(int))
        self.frequency_samples[:, :, y, x] = self.frequency_samples[:, :, x, y]

        min_count = int(self.frequency_samples[:, :, x, y].min())

        data_x = self.data_preprocessed[x]
        data_y = self.data_conj[y]

        for i, j in itertools.product(range(n_bins), range(n_bins)):
            cp.logical_and(self.mask_buffer[i],
                           self.mask_buffer[j + n_bins],
                           out=self.mask_buffer[~0])

            # select data according to amplitude mask of both channels and truncate it to avoid PLV bias
            vals_x = data_x[self.mask_buffer[~0]][:min_count]
            vals_y = data_y[self.mask_buffer[~0]][:min_count]

            # just in case there are some label combinations without any samples; unlikely, though
            # if vals_x.shape[0] == 0 or vals_y.shape[0] == 0:
            #     continue

            self.frequency_plv[i, j, x,
                               y] = self.frequency_plv[i, j, y, x] = cp.inner(
                                   vals_x, vals_y) / min_count
Example No. 14
def generate_EEImatrix(A, args):
    ngene = A.shape[0]
    ncell = A.shape[1]
    is_nonzeroMat = A > 0
    p_nonzero = np.sum(is_nonzeroMat, axis=1) / ncell
    p_zero = np.sum(A == 0, axis=1) / ncell
    np.savetxt(args.output + "_prob_nonzero.txt", p_nonzero, delimiter="\t")
    np.savetxt(args.output + "_prob_zero.txt", p_zero, delimiter="\t")

    if (args.gpu):  # cupy
        print("using GPU for EEI calculation.")
        import cupy as cp
        p_nonzero = cp.asarray(p_nonzero)
        p_zero = cp.asarray(p_zero)
        is_nonzeroMat = cp.asarray(is_nonzeroMat)
        notA = cp.asarray(np.logical_not(A))

        Prob_joint = p_nonzero * p_zero[:, np.newaxis]
        Count_excl = cp.zeros((ngene, ngene), dtype=np.int64)
        for i in range(ngene):
            Count_excl[i] = cp.sum(cp.logical_and(is_nonzeroMat[i], notA),
                                   axis=1)

        Prob_joint = cp.asnumpy(Prob_joint)
        Count_excl = cp.asnumpy(Count_excl)
    else:  #numpy
        print("using CPU for EEI calculation.")
        # Initialize and compute the probability of exclusive expression for each gene pair
        Prob_joint = p_nonzero * p_zero[:, np.newaxis]
        # Count, in a single matrix operation, the number of samples with exclusive expression (g1,g2)=(1,0) and (g2,g1)=(1,0), i.e. (g1,g2)=(0,1)
        notA = np.logical_not(A)
        Count_excl = []
        for row in is_nonzeroMat:
            # Count the number of samples for each element of the matrix obtained by multiplying A by the transpose of (NOT A)
            Count_excl.extend(np.sum(np.logical_and(row, notA), axis=1))
        Count_excl = np.array(Count_excl).reshape(ngene, ngene)

    np.savetxt(args.output + "_data_exclusive.txt", Count_excl, delimiter="\t")
    print(
        "Counted the number of samples in which two genes are expressed exclusively ----"
    )

    EEI = genMatrix_MultiProcess(Prob_joint,
                                 Count_excl,
                                 "EEI",
                                 ngene,
                                 ncell,
                                 ncore=args.threads)
    return EEI
Example No. 15
    def __call__(self, xi, method=None):
        """
        Interpolation at coordinates

        Parameters
        ----------
        xi : ndarray of shape (..., ndim)
            The coordinates to sample the gridded data at

        method : str
            The method of interpolation to perform. Supported are "linear" and
            "nearest".

        """
        method = self.method if method is None else method
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)

        ndim = len(self.grid)
        xi = _ndim_coords_from_arrays(xi, ndim=ndim)
        if xi.shape[-1] != len(self.grid):
            raise ValueError("The requested sample points xi have dimension "
                             "%d, but this RegularGridInterpolator has "
                             "dimension %d" % (xi.shape[1], ndim))

        xi_shape = xi.shape
        xi = xi.reshape(-1, xi_shape[-1])

        if self.bounds_error:
            for i, p in enumerate(xi.T):
                if not cp.logical_and(cp.all(self.grid[i][0] <= p),
                                      cp.all(p <= self.grid[i][-1])):
                    raise ValueError(
                        "One of the requested xi is out of bounds "
                        "in dimension %d" % i)

        indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
        if method == "linear":
            result = self._evaluate_linear(indices, norm_distances,
                                           out_of_bounds)
        elif method == "nearest":
            result = self._evaluate_nearest(indices, norm_distances,
                                            out_of_bounds)
        if not self.bounds_error and self.fill_value is not None:
            result[out_of_bounds] = self.fill_value

        return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
Example No. 16
def evaluate_accuracy(h, t, suggestion=10):
    index_prod = cupy.argsort(h.data,
                              axis=1)[:, ::-1][:, :suggestion]  # h: Variable
    y_prod = cupy.zeros(h.shape, dtype=np.int32)
    for i, index_one_batch in enumerate(index_prod):
        y_prod[i, index_one_batch] = 1
    #print(y_prod)
    y_true = t  # t: ndarray

    valid = cupy.any(y_true, axis=1)
    y_and = cupy.logical_and(y_prod, y_true)
    num = cupy.sum(y_and, axis=1)
    den = cupy.sum(y_true, axis=1)
    #print(num)
    #print(den)
    return cupy.mean(num[valid]/den[valid]), \
        cupy.mean(num/suggestion)
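A small usage sketch, assuming `h` behaves like a Chainer ``Variable`` (exposes ``.data`` and ``.shape``) and `t` is a multi-hot CuPy label matrix; the SimpleNamespace stand-in below is only for illustration:

import numpy as np
import cupy
from types import SimpleNamespace

scores = cupy.asarray(np.random.RandomState(0).rand(3, 5).astype(np.float32))
h = SimpleNamespace(data=scores, shape=scores.shape)   # stand-in for a Variable

t = cupy.asarray([[1, 0, 0, 1, 0],
                  [0, 1, 0, 0, 0],
                  [0, 0, 0, 0, 0]], dtype=np.int32)    # last row has no labels

# First value: mean recall@k over rows that have at least one label;
# second value: mean precision@k over all rows.
recall_at_k, precision_at_k = evaluate_accuracy(h, t, suggestion=2)
print(float(recall_at_k), float(precision_at_k))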
Example No. 17
def isposinf(x, out=None):
    """Test element-wise for positive infinity, return result as bool array.

    Parameters
    ----------
    x : cupy.ndarray
        Input array.
    out : cupy.ndarray
        A location into which the result is stored. If provided,
        it should have a shape that input broadcasts to.
        By default, None, a freshly- allocated boolean array,
        is returned.

    Returns
    -------
    y : cupy.ndarray
        Boolean array of same shape as ``x``.

    Examples
    --------
    >>> cupy.isposinf(0)
    array(False)
    >>> cupy.isposinf(cupy.inf)
    array(True)
    >>> cupy.isposinf(cupy.array([-cupy.inf, -4, cupy.nan, 0, 4, cupy.inf]))
    array([False, False, False, False, False,  True])

    See Also
    --------
    numpy.isposinf

    """

    is_inf = isinf(x)
    try:
        signbit = ~cupy.signbit(x)
    except TypeError as e:
        dtype = x.dtype
        raise TypeError(f'This operation is not supported for {dtype} values '
                        'because it would be ambiguous.') from e

    # TODO(khushi-411): Use `out` instead of `out=out` (see #6393)
    return cupy.logical_and(is_inf, signbit, out=out)
Example No. 18
 def test_01_02_circle_with_noise(self):
     """Test that the Canny filter finds the circle outlines
      in a noisy image"""
     cp.random.seed(0)
     i, j = cp.mgrid[-200:200, -200:200].astype(float) / 200
     c = cp.abs(cp.sqrt(i * i + j * j) - 0.5) < 0.02
     cf = c.astype(float) * 0.5 + cp.random.uniform(size=c.shape) * 0.5
     result = F.canny(cf, 4, 0.1, 0.2, cp.ones(c.shape, bool))
     #
     # erode and dilate the circle to get rings that should contain the
     # outlines
     #
     cd = binary_dilation(c, iterations=4, brute_force=True)
     ce = binary_erosion(c, iterations=4, brute_force=True)
     cde = cp.logical_and(cd, cp.logical_not(ce))
     self.assertTrue(cp.all(cde[result]))
     point_count = cp.sum(result)
     self.assertTrue(point_count > 1200)
     self.assertTrue(point_count < 1600)
Example No. 19
 def edges_finder(self, values):
     values = cp.array(values)
     vec1 = (cp.abs(values[1:] + values[:-1]) > 0)
     vec2 = (cp.abs(values[1:] * values[:-1]) == 0)
     edges = cp.logical_and(vec1, vec2)
     edges = cp.asnumpy(edges)
     # The previous logical operation detects where voiced/unvoiced transitions
     # occur. Thus, a 'True' in the edges[n] sample indicates that the sample
     # value[n+1] has a different state than value[n] (i.e. if values[n] is
     # voiced, then values[n+1] is unvoiced - and vice-versa). Consequently,
     # the last sample of the edges array will always be 'False' and is not
     # calculated (because there is no "n+1 sample" for it; that's why
     # len(edges) = len(values) - 1). However, just for the sake of comprehension
     # (and also to avoid Python warnings about array length mismatches), a
     # 'False' is appended to the edges array. In practice, this 'False' is
     # unused.
     edges = np.append(edges, [False])
     index = np.arange(len(values))
     index = index[edges > 0]
     return index.tolist()
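A quick usage sketch on a toy pitch contour (zeros mark unvoiced frames), assuming the function above is in scope as a plain function so it can be bound to a throwaway class for the call:

import numpy as np
import cupy as cp

class _Demo:
    edges_finder = edges_finder        # bind the method above for illustration

values = [0.0, 0.0, 110.0, 112.0, 0.0, 0.0, 98.0]
print(_Demo().edges_finder(values))    # expected: [1, 3, 5]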
Example No. 20
def get_label_range_or_mask(index, start, stop, step):
    if (not (start is None and stop is None)
            and type(index) is cudf.core.index.DatetimeIndex
            and index.is_monotonic is False):
        start = pd.to_datetime(start)
        stop = pd.to_datetime(stop)
        if start is not None and stop is not None:
            if start > stop:
                return slice(0, 0, None)
            # TODO: Once Index binary ops are updated to support logical_and,
            # can use that instead of using cupy.
            boolean_mask = cp.logical_and((index >= start), (index <= stop))
        elif start is not None:
            boolean_mask = index >= start
        else:
            boolean_mask = index <= stop
        return boolean_mask
    else:
        start, stop = index.find_label_range(start, stop)
        return slice(start, stop, step)
Example No. 21
def _local_cm(inputs, labels, use_sample_weight):
    if use_sample_weight:
        y_true, y_pred, sample_weight = inputs
    else:
        y_true, y_pred = inputs
        sample_weight = cp.ones(y_true.shape[0], dtype=y_true.dtype)

    y_true, _ = make_monotonic(y_true, labels, copy=True)
    y_pred, _ = make_monotonic(y_pred, labels, copy=True)

    n_labels = labels.size

    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = cp.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]
    sample_weight = sample_weight[ind]
    cm = cupyx.scipy.sparse.coo_matrix((sample_weight, (y_true, y_pred)),
                                       shape=(n_labels, n_labels),
                                       dtype=cp.float64).toarray()
    return cp.nan_to_num(cm)
Example No. 22
 def createWedge(self, subunit_num=0):
     ###Initialize the location of the protofilament to remove
     theta0=np.arctan2(self.com[0], self.com[1])+\
     np.deg2rad(subunit_num*self.twist_per_subunit)
         
     z0=(self.com[2]+self.rise_per_subunit*subunit_num)/self.pixel_size
     
     ###Define the length along the protofilament in terms of subunits 
     zsubunits=(self.zline.copy()-z0)*self.pixel_size/self.dimer_repeat_dist
     
     ###Define the angle of the center of the protofilament along the length of the segment
     theta=np.deg2rad((-self.helical_twist)*zsubunits)+theta0
     
     ###Initialize the wedge mask
     wedge=np.zeros(self.vol_dim.tolist())
     
     ###Define the size of the wedgemask
     fudge=np.deg2rad(360.0/(self.num_pfs*2))
     
     ###Generate the wedge mask
     for i in range(len(theta)):
         temp1=np.remainder(theta[i]-fudge+2*np.pi,2*np.pi)-2*np.pi
         temp2=np.remainder(theta[i]+fudge+2*np.pi,2*np.pi)-2*np.pi
         angles=[temp1, temp2]
         if max(angles)-min(angles)>2*fudge+.2:
             above=max(angles)
             below=min(angles)
             inds=np.logical_or(self.radmatrix>above,self.radmatrix<below)
         else:
             above=min(angles)
             below=max(angles)
             inds=np.logical_and(self.radmatrix>above,self.radmatrix<below)
             
         wedge[i,:,:][inds]=1
         
     return wedge
Example No. 23
def evaluate_chunks(
        results: [cp.ndarray, cp.ndarray,
                  cp.ndarray],  # closest triangle, distance, projection
        all_pts: cp.ndarray = None,
        vertices: cp.ndarray = None,
        edges: cp.ndarray = None,
        edge_norms: cp.ndarray = None,
        edge_normssq: cp.ndarray = None,
        normals: cp.ndarray = None,
        norms: cp.ndarray = None,
        normssq: cp.ndarray = None,
        zero_tensor: cp.ndarray = None,
        one_tensor: cp.ndarray = None,
        tris: cp.ndarray = None,
        vertex_normals: cp.ndarray = None,
        bounding_box: dict = None,
        chunk_size: int = None,
        num_verts: int = None) -> None:

    #
    # Expand vertex normals if non empty
    if vertex_normals is not None:
        vertex_normals = vertex_normals[tris]
        vertex_normals = cp.tile(cp.expand_dims(vertex_normals, axis=2),
                                 (1, 1, chunk_size, 1))

    # begin = time.time()
    #
    # Load and extend the batch
    num_chunks = all_pts.shape[0] // chunk_size
    for i in range(num_chunks):
        #
        # Get subset of the query points
        start_index = i * chunk_size
        end_index = (i + 1) * chunk_size
        pts = all_pts[start_index:end_index, :]

        #
        # Match the dimensions to those assumed above.
        #    REPEATED       REPEATED
        # [triangle_index, vert_index, querypoint_index, coordinates]
        pts = cp.tile(cp.expand_dims(pts, axis=(0, 1)), (num_verts, 3, 1, 1))

        #
        # Compute the differences between
        # vertices on each triangle and the
        # points of interest
        #
        # [triangle_index, vert_index, querypoint_index, coordinates]
        # ===================
        # [:,0,:,:] = p - p1
        # [:,1,:,:] = p - p2
        # [:,2,:,:] = p - p3
        diff_vectors = pts - vertices

        #
        # Compute alpha, beta, gamma
        barycentric = cp.empty(diff_vectors.shape)

        #
        # gamma = u x (p - p1)
        barycentric[:, 2, :, :] = cp.cross(edges[:, 0, :, :],
                                           diff_vectors[:, 0, :, :])
        # beta = (p - p1) x v
        barycentric[:, 1, :, :] = cp.cross(diff_vectors[:, 0, :, :],
                                           edges[:, 1, :, :])
        # alpha = w x (p - p2)
        barycentric[:, 0, :, :] = cp.cross(edges[:, 2, :, :],
                                           diff_vectors[:, 1, :, :])
        barycentric = cp.divide(
            cp.sum(cp.multiply(barycentric, normals), axis=3), normssq)

        #
        # Test conditions
        less_than_one = cp.less_equal(barycentric, one_tensor)
        more_than_zero = cp.greater_equal(barycentric, zero_tensor)

        #
        #     if 0 <= gamma and gamma <= 1
        #    and 0 <= beta and beta <= 1
        #    and 0 <= alpha and alpha <= 1:
        cond1 = cp.logical_and(less_than_one, more_than_zero)

        #
        #     if gamma <= 0:
        cond2 = cp.logical_not(more_than_zero[:, 2, :])
        cond2 = cp.tile(cp.expand_dims(cond2, axis=1), (1, 3, 1))

        #
        #     if beta <= 0:
        cond3 = cp.logical_not(more_than_zero[:, 1, :])
        cond3 = cp.tile(cp.expand_dims(cond3, axis=1), (1, 3, 1))

        #
        #     if alpha <= 0:
        cond4 = cp.logical_not(more_than_zero[:, 0, :])
        cond4 = cp.tile(cp.expand_dims(cond4, axis=1), (1, 3, 1))

        #
        # Get the projections for each case
        xi = cp.empty(barycentric.shape)
        barycentric_ext = cp.tile(cp.expand_dims(barycentric, axis=3),
                                  (1, 1, 1, 3))
        proj = cp.sum(cp.multiply(barycentric_ext, vertices), axis=1)
        #
        #     if 0 <= gamma and gamma <= 1
        #    and 0 <= beta and beta <= 1
        #    and 0 <= alpha and alpha <= 1:
        xi[cond1] = barycentric[cond1]

        #
        # if gamma <= 0:
        #  x = p - p1
        #  u = p2 - p1
        #  a = p1
        #  b = p2
        t2 = cp.divide(
            #
            # u.dot(x)
            cp.sum(cp.multiply(edges[:, 0, :, :], diff_vectors[:, 0, :, :]),
                   axis=2),
            edge_normssq[:, 0])
        xi2 = cp.zeros((t2.shape[0], 3, t2.shape[1]))
        xi2[:, 0, :] = -t2 + 1
        xi2[:, 1, :] = t2
        #
        t2 = cp.tile(cp.expand_dims(t2, axis=2), (1, 1, 3))
        lz = cp.less(t2, cp.zeros(t2.shape))
        go = cp.greater(t2, cp.ones(t2.shape))
        proj2 = vertices[:, 0, :, :] + cp.multiply(t2, edges[:, 0, :, :])
        proj2[lz] = vertices[:, 0, :, :][lz]
        proj2[go] = vertices[:, 1, :, :][go]
        #
        xi[cond2] = xi2[cond2]
        proj[cp.swapaxes(cond2, 1, 2)] = proj2[cp.swapaxes(cond2, 1, 2)]

        #
        # if beta <= 0:
        #  x = p - p1
        #  v = p3 - p1
        #  a = p1
        #  b = p3
        t3 = cp.divide(
            #
            # v.dot(x)
            cp.sum(cp.multiply(edges[:, 1, :, :], diff_vectors[:, 0, :, :]),
                   axis=2),
            edge_normssq[:, 1])
        xi3 = cp.zeros((t3.shape[0], 3, t3.shape[1]))
        xi3[:, 0, :] = -t3 + 1
        xi3[:, 2, :] = t3
        #
        t3 = cp.tile(cp.expand_dims(t3, axis=2), (1, 1, 3))
        lz = cp.less(t3, cp.zeros(t3.shape))
        go = cp.greater(t3, cp.ones(t3.shape))
        proj3 = vertices[:, 0, :, :] + cp.multiply(t3, edges[:, 1, :, :])
        proj3[lz] = vertices[:, 0, :, :][lz]
        proj3[go] = vertices[:, 2, :, :][go]
        #
        xi[cond3] = xi3[cond3]
        proj[cp.swapaxes(cond3, 1, 2)] = proj3[cp.swapaxes(cond3, 1, 2)]

        #
        #     if alpha <= 0:
        #  y = p - p2
        #  w = p3 - p2
        #  a = p2
        #  b = p3
        t4 = cp.divide(
            #
            # w.dot(y)
            cp.sum(cp.multiply(edges[:, 2, :, :], diff_vectors[:, 1, :, :]),
                   axis=2),
            edge_normssq[:, 2])
        xi4 = cp.zeros((t4.shape[0], 3, t4.shape[1]))
        xi4[:, 1, :] = -t4 + 1
        xi4[:, 2, :] = t4
        #
        t4 = cp.tile(cp.expand_dims(t4, axis=2), (1, 1, 3))
        lz = cp.less(t4, cp.zeros(t4.shape))
        go = cp.greater(t4, cp.ones(t4.shape))
        proj4 = vertices[:, 1, :, :] + cp.multiply(t4, edges[:, 2, :, :])
        proj4[lz] = vertices[:, 1, :, :][lz]
        proj4[go] = vertices[:, 2, :, :][go]
        #
        xi[cond4] = xi4[cond4]
        proj[cp.swapaxes(cond4, 1, 2)] = proj4[cp.swapaxes(cond4, 1, 2)]

        vec_to_point = pts[:, 0, :, :] - proj
        distances = cp.linalg.norm(vec_to_point, axis=2)

        # n = "\n"
        # print(f"{pts[:,0,:,:]=}")
        # print(f"{proj=}")
        # print(f"{pts[:,0,:,:] - proj=}")
        # print(f"{distances=}")

        min_distances = cp.min(distances, axis=0)

        closest_triangles = cp.argmin(distances, axis=0)

        projections = proj[closest_triangles, np.arange(chunk_size), :]

        #
        # Distinguish close triangles
        is_close = cp.isclose(distances, min_distances)

        #
        # Determine sign
        signed_normal = normals[:, 0, :, :]
        if vertex_normals is not None:
            signed_normal = cp.sum(vertex_normals.transpose() * xi.transpose(),
                                   axis=2).transpose()

        is_negative = cp.less_equal(
            cp.sum(cp.multiply(vec_to_point, signed_normal), axis=2), 0.)

        #
        # Combine
        is_close_and_negative = cp.logical_and(is_close, is_negative)

        #
        # Determine if inside
        is_inside = cp.all(cp.logical_or(is_close_and_negative,
                                         cp.logical_not(is_close)),
                           axis=0)

        #
        # Overwrite the signs of points
        # that are outside of the box
        if bounding_box is not None:
            #
            # Extract
            rotation_matrix = cp.asarray(bounding_box['rotation_matrix'])
            translation_vector = cp.asarray(bounding_box['translation_vector'])
            size = cp.asarray(bounding_box['size'])
            #
            # Transform
            transformed_pts = cp.dot(
                all_pts[start_index:end_index, :] - translation_vector,
                rotation_matrix)

            #
            # Determine if outside bbox
            inside_bbox = cp.all(cp.logical_and(
                cp.less_equal(0., transformed_pts),
                cp.less_equal(transformed_pts, size)),
                                 axis=1)

            #
            # Treat points outside bbox as
            # being outside of lumen
            print(f"{inside_bbox=}")
            is_inside = cp.logical_and(is_inside, inside_bbox)

        #
        # Apply sign to indicate whether the distance is
        # inside or outside the mesh.
        min_distances[is_inside] = -1 * min_distances[is_inside]

        #
        # Emplace results
        # [triangle_index, vert_index, querypoint_index, coordinates]
        results[0][start_index:end_index] = closest_triangles
        results[1][start_index:end_index] = min_distances
        results[2][start_index:end_index, :] = projections
Example No. 24
def confusion_matrix(y_true,
                     y_pred,
                     labels=None,
                     sample_weight=None,
                     normalize=None) -> CumlArray:
    """Compute confusion matrix to evaluate the accuracy of a classification.

    Parameters
    ----------
    y_true : array-like (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Estimated target values.
    labels : array-like (device or host) shape = (n_classes,), optional
        List of labels to index the matrix. This may be used to reorder or
        select a subset of labels. If None is given, those that appear at least
        once in y_true or y_pred are used in sorted order.
    sample_weight : array-like (device or host) shape = (n_samples,), optional
        Sample weights.
    normalize : string in ['true', 'pred', 'all']
        Normalizes confusion matrix over the true (rows), predicted (columns)
        conditions or all the population. If None, confusion matrix will not be
        normalized.

    Returns
    -------
    C : array-like (device or host) shape = (n_classes, n_classes)
        Confusion matrix.
    """
    y_true, n_rows, n_cols, dtype = \
        input_to_cuml_array(y_true, check_dtype=[cp.int32, cp.int64])

    y_pred, _, _, _ = \
        input_to_cuml_array(y_pred, check_dtype=dtype,
                            check_rows=n_rows, check_cols=n_cols)

    if labels is None:
        labels = sorted_unique_labels(y_true, y_pred)
        n_labels = len(labels)
    else:
        labels, n_labels, _, _ = \
            input_to_cupy_array(labels, check_dtype=dtype, check_cols=1)
    if sample_weight is None:
        sample_weight = cp.ones(n_rows, dtype=dtype)
    else:
        sample_weight, _, _, _ = \
            input_to_cupy_array(sample_weight,
                                check_dtype=[cp.float32, cp.float64,
                                             cp.int32, cp.int64],
                                check_rows=n_rows, check_cols=n_cols)

    if normalize not in ['true', 'pred', 'all', None]:
        msg = "normalize must be one of " \
              f"{{'true', 'pred', 'all', None}}, got {normalize}."
        raise ValueError(msg)

    with using_output_type("cupy"):
        y_true, _ = make_monotonic(y_true, labels, copy=True)
        y_pred, _ = make_monotonic(y_pred, labels, copy=True)

    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = cp.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]
    sample_weight = sample_weight[ind]

    cm = cupyx.scipy.sparse.coo_matrix((sample_weight, (y_true, y_pred)),
                                       shape=(n_labels, n_labels),
                                       dtype=np.float64).toarray()

    # Choose the accumulator dtype to always have high precision
    if sample_weight.dtype.kind in {'i', 'u', 'b'}:
        cm = cm.astype(np.int64)

    with np.errstate(all='ignore'):
        if normalize == 'true':
            cm = cp.divide(cm, cm.sum(axis=1, keepdims=True))
        elif normalize == 'pred':
            cm = cp.divide(cm, cm.sum(axis=0, keepdims=True))
        elif normalize == 'all':
            cm = cp.divide(cm, cm.sum())
        cm = cp.nan_to_num(cm)

    return cm
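A minimal usage sketch, assuming this is the `cuml.metrics.confusion_matrix` entry point; both host and device arrays are accepted:

import cupy as cp
from cuml.metrics import confusion_matrix

y_true = cp.array([0, 1, 2, 2, 1, 0])
y_pred = cp.array([0, 2, 2, 2, 1, 1])

print(confusion_matrix(y_true, y_pred))
# [[1 1 0]
#  [0 1 1]
#  [0 0 2]]

# Row-normalised variant (each true class sums to 1).
print(confusion_matrix(y_true, y_pred, normalize='true'))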
Example No. 25
    tic = time.time()
    for i in range(ni):
        for j in range(nj):

            # Only do work for pixels inside the "circle"
            if cp.sqrt((i - ni0)**2 + (j - nj0)**2) <= nj0:

                # Set initial tolerance for the "closeness" of
                # theta and phi to theta_img[i,j] and phi[i,j] resp.
                # theta_tol = theta_tol_array[i,j]
                # phi_tol = phi_tol_array[i,j]
                tol = 8e-3

                #isolate ray along theta, phi
                theta_ray = cp.isclose(thetav, theta_img[i, j], atol=tol)
                select_thetas = cp.logical_and(limit_theta, theta_ray)
                phi_ray = cp.isclose(phiv, phi_img[i, j], atol=tol)
                select_ql = cp.logical_and(select_thetas, phi_ray)
                qlk, qli, qlj = cp.where(select_ql)

                #sort ray by radius
                radii = rv[qlk, qli, qlj]
                sort = cp.argsort(radii)
                qlk = qlk[sort]
                qli = qli[sort]
                qlj = qlj[sort]

                # Now shift points of view
                for num in cp.arange(0, 11, 1):

                    #move camera position 6 entries forward in y-direction
Example No. 26
def interpn(points,
            values,
            xi,
            method="linear",
            bounds_error=True,
            fill_value=cp.nan):
    """
    Multidimensional interpolation on regular grids.

    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.

    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.

    xi : ndarray of shape (..., ndim)
        The coordinates to sample the gridded data at

    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest", and "splinef2d". "splinef2d" is only supported for
        2-dimensional data.

    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.

    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated.  Extrapolation is not supported by method
        "splinef2d".

    Returns
    -------
    values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
        Interpolated values at input coordinates.

    Notes
    -----

    .. versionadded:: 0.14

    See also
    --------
    NearestNDInterpolator : Nearest neighbor interpolation on unstructured
                            data in N dimensions

    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions

    RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
                              regular grid in arbitrary dimensions

    RectBivariateSpline : Bivariate spline approximation over a rectangular mesh

    """
    # sanity check 'method' kwarg
    if method not in ["linear", "nearest", "splinef2d"]:
        raise ValueError("interpn only understands the methods 'linear', "
                         "'nearest', and 'splinef2d'. You provided %s." %
                         method)

    values = cp.asarray(values)

    ndim = values.ndim
    if method == "splinef2d":
        if ndim > 2:
            raise ValueError(
                "The method spline2fd can only be used for 2-dimensional "
                "input data")
        if not bounds_error and fill_value is None:
            raise ValueError(
                "The method spline2fd does not support extrapolation.")
        # sanity check input grid
        for i, p in enumerate(points):
            if not cp.all(cp.diff(p) > 0.0):
                raise ValueError("The points in dimension %d must be strictly "
                                 "ascending" % i)
            if not cp.asarray(p).ndim == 1:
                raise ValueError("The points in dimension %d must be "
                                 "1-dimensional" % i)
            if not values.shape[i] == len(p):
                raise ValueError("There are %d points and %d values in "
                                 "dimension %d" % (len(p), values.shape[i], i))

    # sanity check consistency of input dimensions
    if len(points) > ndim:
        raise ValueError("There are %d point arrays, but values has %d "
                         "dimensions" % (len(points), ndim))
    if len(points) != ndim and method == "splinef2d":
        raise ValueError("The method spline2fd can only be used for "
                         "scalar data with one point per coordinate")

    grid = tuple([cp.asarray(p) for p in points])

    # sanity check requested xi
    xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
    if xi.shape[-1] != len(grid):
        raise ValueError("The requested sample points xi have dimension "
                         "%d, but this RegularGridInterpolator has "
                         "dimension %d" % (xi.shape[1], len(grid)))

    if bounds_error:
        for i, p in enumerate(xi.T):
            if not cp.logical_and(cp.all(grid[i][0] <= p),
                                  cp.all(p <= grid[i][-1])):
                raise ValueError("One of the requested xi is out of bounds "
                                 "in dimension %d" % i)

    # perform interpolation
    if method == "linear":
        interp = RegularGridInterpolator(
            points,
            values,
            method="linear",
            bounds_error=bounds_error,
            fill_value=fill_value,
        )
        return interp(xi)
    elif method == "nearest":
        interp = RegularGridInterpolator(
            points,
            values,
            method="nearest",
            bounds_error=bounds_error,
            fill_value=fill_value,
        )
        return interp(xi)
    elif method == "splinef2d":
        raise NotImplementedError("splinef2d case has not been implemented.")
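A small usage sketch of this CuPy-backed `interpn`, mirroring `scipy.interpolate.interpn`; the grid and query points below are arbitrary examples:

import cupy as cp

# Regular 2-D grid sampling f(x, y) = x + 2*y, which linear interpolation recovers exactly.
x = cp.linspace(0.0, 1.0, 5)
y = cp.linspace(0.0, 1.0, 5)
values = x[:, None] + 2.0 * y[None, :]

xi = cp.array([[0.1, 0.2],
               [0.5, 0.9]])

print(interpn((x, y), values, xi, method="linear"))    # ~[0.5, 2.3]
print(interpn((x, y), values, xi, method="nearest"))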
Example No. 27
def _prominent_peaks(image,
                     min_xdistance=1,
                     min_ydistance=1,
                     threshold=None,
                     num_peaks=cp.inf):
    """Return peaks with non-maximum suppression.

    Identifies most prominent features separated by certain distances.
    Non-maximum suppression with different sizes is applied separately
    in the first and second dimension of the image to identify peaks.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    min_xdistance : int
        Minimum distance separating features in the x dimension.
    min_ydistance : int
        Minimum distance separating features in the y dimension.
    threshold : float
        Minimum intensity of peaks. Default is `0.5 * max(image)`.
    num_peaks : int
        Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
        return `num_peaks` coordinates based on peak intensity.

    Returns
    -------
    intensity, xcoords, ycoords : tuple of array
        Peak intensity values, x and y indices.
    """

    img = image.copy()
    rows, cols = img.shape

    if threshold is None:
        threshold = 0.5 * cp.max(img)

    ycoords_size = 2 * min_ydistance + 1
    xcoords_size = 2 * min_xdistance + 1
    img_max = ndi.maximum_filter1d(img,
                                   size=ycoords_size,
                                   axis=0,
                                   mode="constant",
                                   cval=0)
    img_max = ndi.maximum_filter1d(img_max,
                                   size=xcoords_size,
                                   axis=1,
                                   mode="constant",
                                   cval=0)
    mask = img == img_max
    img *= mask
    img_t = img > threshold

    warnings.warn(
        "host/device transfer required. TODO: implement measure.label")

    if False:
        label_img = cpu_measure.label(cp.asnumpy(img_t))
    else:
        # can use cupyimg.ndimage.label instead.
        # have to specify structure to match skimage's default connectivity
        label_img, _ = ndi.label(img_t, structure=cp.ones((3, 3)))
        # props = measure.regionprops(label_img, img_max)

    regionprops_on_cpu = False
    if regionprops_on_cpu:
        img_max = cp.asnumpy(img_max)
        label_img = cp.asnumpy(label_img)
        props = cpu_measure.regionprops(label_img, img_max)
    else:
        props = measure.regionprops(label_img, img_max)

    # Sort the list of peaks by intensity, not left-right, so larger peaks
    # in Hough space cannot be arbitrarily suppressed by smaller neighbors
    props = sorted(props, key=lambda x: x.max_intensity)[::-1]
    coords = cp.asarray([np.round(p.centroid) for p in props], dtype=int)

    img_peaks = []
    ycoords_peaks = []
    xcoords_peaks = []

    # relative coordinate grid for local neighbourhood suppression
    ycoords_ext, xcoords_ext = cp.mgrid[-min_ydistance:min_ydistance + 1,
                                        -min_xdistance:min_xdistance + 1]

    for ycoords_idx, xcoords_idx in coords:
        accum = img_max[ycoords_idx, xcoords_idx]
        if accum > threshold:
            # absolute coordinate grid for local neighbourhood suppression
            ycoords_nh = ycoords_idx + ycoords_ext
            xcoords_nh = xcoords_idx + xcoords_ext

            # no reflection for distance neighbourhood
            ycoords_in = cp.logical_and(ycoords_nh > 0, ycoords_nh < rows)
            ycoords_nh = ycoords_nh[ycoords_in]
            xcoords_nh = xcoords_nh[ycoords_in]

            # reflect xcoords and assume xcoords are continuous,
            # e.g. for angles:
            # (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)
            xcoords_low = xcoords_nh < 0
            ycoords_nh[xcoords_low] = rows - ycoords_nh[xcoords_low]
            xcoords_nh[xcoords_low] += cols
            xcoords_high = xcoords_nh >= cols
            ycoords_nh[xcoords_high] = rows - ycoords_nh[xcoords_high]
            xcoords_nh[xcoords_high] -= cols

            # suppress neighbourhood
            img_max[ycoords_nh, xcoords_nh] = 0

            # add current feature to peaks
            img_peaks.append(accum)
            ycoords_peaks.append(ycoords_idx)
            xcoords_peaks.append(xcoords_idx)

    img_peaks = cp.array(img_peaks)
    ycoords_peaks = cp.array(ycoords_peaks)
    xcoords_peaks = cp.array(xcoords_peaks)

    if num_peaks < len(img_peaks):
        idx_maxsort = cp.argsort(img_peaks)[::-1][:num_peaks]
        img_peaks = img_peaks[idx_maxsort]
        ycoords_peaks = ycoords_peaks[idx_maxsort]
        xcoords_peaks = xcoords_peaks[idx_maxsort]

    return img_peaks, xcoords_peaks, ycoords_peaks
Example No. 28
def deltaE_cmc(lab1, lab2, kL=1, kC=1):
    """Color difference from the  CMC l:c standard.

    This color difference was developed by the Colour Measurement Committee
    (CMC) of the Society of Dyers and Colourists (United Kingdom). It is
    intended for use in the textile industry.

    The scale factors `kL`, `kC` set the weight given to differences in
    lightness and chroma relative to differences in hue.  The usual values are
    ``kL=2``, ``kC=1`` for "acceptability" and ``kL=1``, ``kC=1`` for
    "imperceptibility".  Colors with ``dE > 1`` are "different" for the given
    scale factors.

    Parameters
    ----------
    lab1 : array_like
        reference color (Lab colorspace)
    lab2 : array_like
        comparison color (Lab colorspace)

    Returns
    -------
    dE : array_like
        distance between colors `lab1` and `lab2`

    Notes
    -----
    deltaE_cmc defines the scales for the lightness, hue, and chroma
    in terms of the first color.  Consequently
    ``deltaE_cmc(lab1, lab2) != deltaE_cmc(lab2, lab1)``

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Color_difference
    .. [2] http://www.brucelindbloom.com/index.html?Eqn_DeltaE_CIE94.html
    .. [3] F. J. J. Clarke, R. McDonald, and B. Rigg, "Modification to the
           JPC79 colour-difference formula," J. Soc. Dyers Colour. 100, 128-132
           (1984).
    """
    L1, C1, h1 = cp.rollaxis(lab2lch(lab1), -1)[:3]
    L2, C2, h2 = cp.rollaxis(lab2lch(lab2), -1)[:3]

    dC = C1 - C2
    dL = L1 - L2
    dH2 = get_dH2(lab1, lab2)

    T = cp.where(cp.logical_and(cp.rad2deg(h1) >= 164, cp.rad2deg(h1) <= 345),
                 0.56 + 0.2 * cp.abs(cp.cos(h1 + cp.deg2rad(168))),
                 0.36 + 0.4 * cp.abs(cp.cos(h1 + cp.deg2rad(35)))
                 )
    c1_4 = C1 ** 4
    F = cp.sqrt(c1_4 / (c1_4 + 1900))

    SL = cp.where(L1 < 16, 0.511, 0.040975 * L1 / (1. + 0.01765 * L1))
    SC = 0.638 + 0.0638 * C1 / (1. + 0.0131 * C1)
    SH = SC * (F * T + 1 - F)

    dE2 = (dL / (kL * SL)) ** 2
    dE2 += (dC / (kC * SC)) ** 2
    dE2 += dH2 / (SH ** 2)
    return cp.sqrt(cp.maximum(dE2, 0, out=dE2), out=dE2)
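A short usage sketch, assuming the module-level helpers it relies on (`lab2lch`, `get_dH2`) are available and that Lab inputs are CuPy arrays with the channels on the last axis:

import cupy as cp

lab_ref = cp.asarray([50.0, 2.6772, -79.7751])   # reference colour (Lab)
lab_cmp = cp.asarray([50.0, 0.0, -82.7485])      # comparison colour (Lab)

# "Imperceptibility" weighting; dE > 1 counts as a noticeable difference.
print(float(deltaE_cmc(lab_ref, lab_cmp, kL=1, kC=1)))

# The metric is asymmetric: the scales are defined from the first colour.
print(float(deltaE_cmc(lab_cmp, lab_ref, kL=1, kC=1)))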
Example No. 29
# if f_type < 4:
f_nearbus=ipt_switch[1,1].astype(int)
bus_idx=bus_int[f_nearbus-1]
bus_idx=bus_idx.astype(int)
Bb[bus_idx-1]=10000000.0
# fY11=reduce_y(flag=1)
cp.fill_diagonal(Y_1,Y_1.diagonal()+yl+Gb+jay*Bb)
Y_1[g_bus[:,None]-1,g_bus-1]=Y_1[g_bus[:,None]-1,g_bus-1]+permmod[:]
frecV1=-cp.linalg.solve(Y_1,Y_c)
temp=cp.matmul(Y_b,frecV1)
fY11=Y_a+temp.T
#print(fY11)
# if f_type < 4:
f_farbus=ipt_switch[1,2].astype(int)
Bb[bus_idx-1]=0.0
i=cp.where(cp.logical_and(from_bus==f_nearbus, to_bus==f_farbus))
rx[i]=10000000.0
j=cp.where(cp.logical_and(from_bus==f_farbus, to_bus==f_nearbus))
rx[j]=10000000.0
# fY11=reduce_y(flag=2)
z=r+jay*rx
yy=1/z
cp.fill_diagonal(yyfull, yy)
Y_dummy=cp.matmul(yyfull,c_line.T)
Y=cp.matmul(c_line,Y_dummy)+Y_2

cp.fill_diagonal(Y,Y.diagonal()+yl+Gb+jay*Bb)
Y[g_bus[:,None]-1,g_bus-1]=Y[g_bus[:,None]-1,g_bus-1]+permmod[:]
posfrecV1=-cp.linalg.solve(Y,Y_c)
temp=cp.matmul(Y_b,posfrecV1)
posfY11=Y_a+temp.T
Example No. 30
def run_simulation(input_filename,
                   pixel_layout,
                   detector_properties,
                   output_filename='',
                   n_tracks=100000):
    """
    Command-line interface to run the simulation of a pixelated LArTPC

    Args:
        input_filename (str): path of the edep-sim input file
        output_filename (str): path of the HDF5 output file. If not specified
            the output is added to the input file.
        pixel_layout (str): path of the YAML file containing the pixel
            layout and connection details.
        detector_properties (str): path of the YAML file containing
            the detector properties
        n_tracks (int): number of tracks to be simulated
    """
    start_simulation = time()

    from cupy.cuda.nvtx import RangePush, RangePop

    RangePush("run_simulation")

    print(logo)
    print("**************************\nLOADING SETTINGS AND INPUT\n**************************")
    print("Pixel layout file:", pixel_layout)
    print("Detector propeties file:", detector_properties)
    print("edep-sim input file:", input_filename)
    RangePush("load_detector_properties")
    consts.load_detector_properties(detector_properties, pixel_layout)
    RangePop()

    RangePush("load_larndsim_modules")
    # Here we load the modules after loading the detector properties
    # maybe can be implemented in a better way?
    from larndsim import quenching, drifting, detsim, pixels_from_track, fee
    RangePop()

    RangePush("load_hd5_file")
    # First of all we load the edep-sim output
    # For this sample we need to invert $z$ and $y$ axes
    with h5py.File(input_filename, 'r') as f:
        tracks = np.array(f['segments'])
    RangePop()

    RangePush("slicing_and_swapping")
    tracks = tracks[:n_tracks]

    x_start = np.copy(tracks['x_start'] )
    x_end = np.copy(tracks['x_end'])
    x = np.copy(tracks['x'])

    tracks['x_start'] = np.copy(tracks['z_start'])
    tracks['x_end'] = np.copy(tracks['z_end'])
    tracks['x'] = np.copy(tracks['z'])

    tracks['z_start'] = x_start
    tracks['z_end'] = x_end
    tracks['z'] = x
    RangePop()

    # CUDA launch configuration: TPB threads per block, BPG blocks per grid
    # (roughly one thread per track segment for the quenching and drifting kernels)
    TPB = 256
    BPG = ceil(tracks.shape[0] / TPB)

    print("*******************\nSTARTING SIMULATION\n*******************")
    # We calculate the number of electrons after recombination (quenching module)
    # and the position and number of electrons after drifting (drifting module)
    print("Quenching electrons...",end='')
    start_quenching = time()
    RangePush("quench")
    quenching.quench[BPG,TPB](tracks, consts.birks)
    RangePop()
    end_quenching = time()
    print(f" {end_quenching-start_quenching:.2f} s")

    print("Drifting electrons...",end='')
    start_drifting = time()
    RangePush("drift")
    drifting.drift[BPG,TPB](tracks)
    RangePop()
    end_drifting = time()
    print(f" {end_drifting-start_drifting:.2f} s")
    step = 1
    adc_tot_list = cp.empty((0,fee.MAX_ADC_VALUES))
    adc_tot_ticks_list = cp.empty((0,fee.MAX_ADC_VALUES))
    MAX_TRACKS_PER_PIXEL = 5
    backtracked_id_tot = cp.empty((0,fee.MAX_ADC_VALUES,MAX_TRACKS_PER_PIXEL))
    unique_pix_tot = cp.empty((0,2))
    tot_events = 0
    
    tot_evids = np.unique(tracks['eventID'])
    # We divide the sample into portions that can be processed by the GPU
    tracks_batch_runtimes = []
    for ievd in tqdm(range(0, tot_evids.shape[0], step), desc='Simulating pixels...'):
        start_tracks_batch = time()
        first_event = tot_evids[ievd]
        last_event = tot_evids[min(ievd+step, tot_evids.shape[0]-1)]

        if first_event == last_event:
            last_event += 1

        evt_tracks = tracks[(tracks['eventID']>=first_event) & (tracks['eventID']<last_event)]
        first_trk_id = np.where(tracks['eventID']==evt_tracks['eventID'][0])[0][0]
        
        for itrk in range(0, evt_tracks.shape[0], 600):
            selected_tracks = evt_tracks[itrk:itrk+600]

            RangePush("event_id_map")
            # Here we build a map between tracks and event IDs
            event_ids = selected_tracks['eventID']
            unique_eventIDs = np.unique(event_ids)
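            # event_id_map[i] is the position of track i's event ID within unique_eventIDs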
            event_id_map = np.searchsorted(unique_eventIDs, event_ids)
            RangePop()

            # We find the pixels intersected by the projection of the tracks on
            # the anode plane using Bresenham's algorithm. We also take into
            # account the neighboring pixels, due to the transverse diffusion of the charges.
            RangePush("pixels_from_track")
            longest_pix = ceil(max(selected_tracks["dx"])/consts.pixel_pitch)
            max_radius = ceil(max(selected_tracks["tran_diff"])*5/consts.pixel_pitch)
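            # Heuristic upper bounds on how many pixels a single track segment can touch,
            # with (MAX_PIXELS) and without (MAX_ACTIVE_PIXELS) the diffusion neighborhood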
            MAX_PIXELS = int((longest_pix*4+6)*max_radius*1.5)
            MAX_ACTIVE_PIXELS = int(longest_pix*1.5)
            active_pixels = cp.full((selected_tracks.shape[0], MAX_ACTIVE_PIXELS, 2), -1, dtype=np.int32)
            neighboring_pixels = cp.full((selected_tracks.shape[0], MAX_PIXELS, 2), -1, dtype=np.int32)
            n_pixels_list = cp.zeros(shape=(selected_tracks.shape[0]))
            threadsperblock = 128
            blockspergrid = ceil(selected_tracks.shape[0] / threadsperblock)

            # Skip this batch if the pixel buffers are empty
            # (all selected track segments have zero length)
            if not active_pixels.shape[1]:
                continue
                
            pixels_from_track.get_pixels[blockspergrid,threadsperblock](selected_tracks,
                                                                        active_pixels,
                                                                        neighboring_pixels,
                                                                        n_pixels_list,
                                                                        max_radius+1)
            RangePop()

            RangePush("unique_pix")
            shapes = neighboring_pixels.shape
            joined = neighboring_pixels.reshape(shapes[0]*shapes[1],2)
            unique_pix = cupy_unique_axis0(joined)
            unique_pix = unique_pix[(unique_pix[:,0] != -1) & (unique_pix[:,1] != -1),:]
            RangePop()
            
            if not unique_pix.shape[0]:
                continue

            RangePush("time_intervals")
            # Here we find the longest signal in time and store the start time of each track
            max_length = cp.array([0])
            track_starts = cp.empty(selected_tracks.shape[0])
            # d_track_starts = cuda.to_device(track_starts)
            threadsperblock = 128
            blockspergrid = ceil(selected_tracks.shape[0] / threadsperblock)
            detsim.time_intervals[blockspergrid,threadsperblock](track_starts, max_length,  event_id_map, selected_tracks)
            RangePop()

            RangePush("tracks_current")
            # Here we calculate the induced current on each pixel
            signals = cp.zeros((selected_tracks.shape[0],
                                neighboring_pixels.shape[1],
                                cp.asnumpy(max_length)[0]), dtype=np.float32)
            threadsperblock = (1,1,64)
            blockspergrid_x = ceil(signals.shape[0] / threadsperblock[0])
            blockspergrid_y = ceil(signals.shape[1] / threadsperblock[1])
            blockspergrid_z = ceil(signals.shape[2] / threadsperblock[2])
            blockspergrid = (blockspergrid_x, blockspergrid_y, blockspergrid_z)
            detsim.tracks_current[blockspergrid,threadsperblock](signals,
                                                                 neighboring_pixels,
                                                                 selected_tracks)
            RangePop()

            RangePush("pixel_index_map")
            # Here we map each track's neighboring pixels to their indices in the unique pixel array
            pixel_index_map = cp.full((selected_tracks.shape[0], neighboring_pixels.shape[1]), -1)
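            # Broadcast-compare every (track, pixel) coordinate pair against the unique pixel list;
            # where both coordinates match, the last index gives the position in unique_pix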
            compare = neighboring_pixels[..., np.newaxis, :] == unique_pix
            indices = cp.where(cp.logical_and(compare[..., 0], compare[..., 1]))
            pixel_index_map[indices[0], indices[1]] = indices[2]
            RangePop()

            RangePush("sum_pixels_signals")
            # Here we sum the currents induced on the same pixel by different tracks
            threadsperblock = (8,8,8)
            blockspergrid_x = ceil(signals.shape[0] / threadsperblock[0])
            blockspergrid_y = ceil(signals.shape[1] / threadsperblock[1])
            blockspergrid_z = ceil(signals.shape[2] / threadsperblock[2])
            blockspergrid = (blockspergrid_x, blockspergrid_y, blockspergrid_z)
            pixels_signals = cp.zeros((len(unique_pix), len(consts.time_ticks)*3))
            detsim.sum_pixel_signals[blockspergrid,threadsperblock](pixels_signals,
                                                                    signals,
                                                                    track_starts,
                                                                    pixel_index_map)
            RangePop()

            RangePush("get_adc_values")
            # Here we simulate the electronics response (the self-triggering cycle) and the signal digitization
            time_ticks = cp.linspace(0, len(unique_eventIDs)*consts.time_interval[1]*3, pixels_signals.shape[1]+1)
            integral_list = cp.zeros((pixels_signals.shape[0], fee.MAX_ADC_VALUES))
            adc_ticks_list = cp.zeros((pixels_signals.shape[0], fee.MAX_ADC_VALUES))
            TPB = 128
            BPG = ceil(pixels_signals.shape[0] / TPB)

            # Per-thread RNG states (numba.cuda.random), seeded with the batch index for reproducibility
            rng_states = create_xoroshiro128p_states(TPB * BPG, seed=ievd)
            
            fee.get_adc_values[BPG,TPB](pixels_signals,
                                        time_ticks,
                                        integral_list,
                                        adc_ticks_list,
                                        consts.time_interval[1]*3*tot_events,
                                        rng_states)
            adc_list = fee.digitize(integral_list)
            RangePop()

            RangePush("track_pixel_map")
            # Map from each unique pixel to the indices of the tracks contributing to it
            track_pixel_map = cp.full((unique_pix.shape[0], MAX_TRACKS_PER_PIXEL), -1)
            TPB = 32
            BPG = ceil(unique_pix.shape[0] / TPB)
            detsim.get_track_pixel_map[BPG, TPB](track_pixel_map, unique_pix, neighboring_pixels)
            RangePop()

            RangePush("backtracking")
            # Here we backtrack the ADC counts to the Geant4 tracks
            TPB = 128
            BPG = ceil(adc_list.shape[0] / TPB)
            backtracked_id = cp.full((adc_list.shape[0], adc_list.shape[1], MAX_TRACKS_PER_PIXEL), -1)
            detsim.backtrack_adcs[BPG,TPB](selected_tracks,
                                           adc_list,
                                           adc_ticks_list,
                                           track_pixel_map,
                                           event_id_map,
                                           unique_eventIDs,
                                           backtracked_id,
                                           first_trk_id+itrk)
            RangePop()
            adc_tot_list = cp.concatenate((adc_tot_list, adc_list), axis=0)
            adc_tot_ticks_list = cp.concatenate((adc_tot_ticks_list, adc_ticks_list), axis=0)
            unique_pix_tot = cp.concatenate((unique_pix_tot, unique_pix), axis=0)
            backtracked_id_tot = cp.concatenate((backtracked_id_tot, backtracked_id), axis=0)
        
        tot_events += step

        end_tracks_batch = time()
        tracks_batch_runtimes.append(end_tracks_batch - start_tracks_batch)

    print(f"- total time: {sum(tracks_batch_runtimes):.2f} s")
    if len(tracks_batch_runtimes) > 1:
        print(f"- excluding first iteration: {sum(tracks_batch_runtimes[1:]):.2f} s")

    RangePush("Exporting to HDF5")
    # Here we export the results to an HDF5 file.
    fee.export_to_hdf5(cp.asnumpy(adc_tot_list),
                       cp.asnumpy(adc_tot_ticks_list),
                       cp.asnumpy(unique_pix_tot),
                       cp.asnumpy(backtracked_id_tot),
                       output_filename)
    RangePop()

    with h5py.File(output_filename, 'a') as f:
        f.create_dataset("tracks", data=tracks)
    
    print("Output saved in:", output_filename)

    RangePop()
    end_simulation = time()
    print(f"run_simulation elapsed time: {end_simulation-start_simulation:.2f} s")