Example #1
    def noncentral_f(self, dfnum, dfden, nonc, size=None, dtype=float):
        """Returns an array of samples drawn from the noncentral F distribution.

        .. warning::

            This function may synchronize the device.

        .. seealso::
            :func:`cupy.random.noncentral_f` for full documentation,
            :meth:`numpy.random.RandomState.noncentral_f
            <numpy.random.mtrand.RandomState.noncentral_f>`
        """
        dfnum, dfden, nonc = \
            cupy.asarray(dfnum), cupy.asarray(dfden), cupy.asarray(nonc)
        if cupy.any(dfnum <= 0):  # synchronize!
            raise ValueError('dfnum <= 0')
        if cupy.any(dfden <= 0):  # synchronize!
            raise ValueError('dfden <= 0')
        if cupy.any(nonc < 0):  # synchronize!
            raise ValueError('nonc < 0')
        if size is None:
            size = cupy.broadcast(dfnum, dfden, nonc).shape
        y = cupy.empty(shape=size, dtype=dtype)
        _kernels.noncentral_f_kernel(dfnum, dfden, nonc, self._rk_seed, y)
        self._update_seed(y.size)
        return y
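A quick usage sketch (my own example, not taken from CuPy itself): the method can be exercised through a seeded RandomState, and the parameter checks above explain the synchronization warning.

import cupy

rs = cupy.random.RandomState(seed=0)
samples = rs.noncentral_f(dfnum=3.0, dfden=20.0, nonc=2.5, size=10000)
assert samples.shape == (10000,)
assert not cupy.any(samples < 0)  # noncentral F samples are non-negative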
Example #2
def test_unsharp_masking_with_different_ranges(shape, offset, multichannel,
                                               preserve):
    radius = 2.0
    amount = 1.0
    dtype = np.int16
    array = (cp.random.random(shape) * 5 + offset).astype(dtype)
    negative = cp.any(array < 0)
    output = unsharp_mask(array, radius, amount, multichannel, preserve)
    if preserve is False:
        assert cp.any(output <= 1)
        assert cp.any(output >= -1)
        if not negative:
            assert cp.any(output >= 0)
    assert output.dtype in [np.float32, np.float64]
    assert output.shape == shape
Example #3
    def test_percentile_memory_access(self, dtype):
        # Create an allocator that guarantees array allocated in
        # cupy.percentile call will be followed by a NaN
        original_allocator = cuda.get_allocator()

        def controlled_allocator(size):
            memptr = original_allocator(size)
            base_size = memptr.mem.size
            assert base_size % 512 == 0
            item_size = dtype().itemsize
            shape = (base_size // item_size, )
            x = cupy.ndarray(memptr=memptr, shape=shape, dtype=dtype)
            x.fill(cupy.nan)
            return memptr

        # Check that percentile still returns non-NaN results
        a = testing.shaped_random((5, ), cupy, dtype)
        q = cupy.array((0, 100), dtype=dtype)

        cuda.set_allocator(controlled_allocator)
        try:
            percentiles = cupy.percentile(a, q, axis=None, method='linear')
        finally:
            cuda.set_allocator(original_allocator)

        assert not cupy.any(cupy.isnan(percentiles))
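The try/finally allocator swap above is a general CuPy pattern; here is a minimal sketch of the same idea with a hypothetical pass-through allocator instead of the NaN-filling one.

import cupy
from cupy import cuda

original_allocator = cuda.get_allocator()

def logging_allocator(size):
    # Hypothetical pass-through allocator: report the request, then delegate.
    print('allocating', size, 'bytes')
    return original_allocator(size)

cuda.set_allocator(logging_allocator)
try:
    x = cupy.arange(10)  # this allocation goes through logging_allocator
finally:
    cuda.set_allocator(original_allocator)  # always restore the original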
Example #4
    def prepare_train_data(self):
        batch_size = self.train_batch_size

        is_neg = cp.logical_not(self._train_labels)

        # Do not store verification matrix if using the negatives generation shortcut
        neg_mat = None if self.use_neg_trick else cp.array(self._neg_mat)

        # If there are no negative samples in the local portion of the training data, do nothing
        any_neg = cp.any(is_neg)
        if any_neg:
            self._train_users[is_neg], self._train_items[is_neg] = generate_negatives(
                self._train_users[is_neg], neg_mat, self.num_items, use_trick=self.use_neg_trick
            )

        shuffled_order = cp.random.permutation(self._train_users.shape[0])
        self._train_users = self._train_users[shuffled_order]
        self._train_items = self._train_items[shuffled_order]
        self._train_labels = self._train_labels[shuffled_order]
        is_neg = cp.logical_not(self._train_labels)
        
        # Manually create batches
        split_indices = np.arange(batch_size, self._train_users.shape[0], batch_size)
        self.train_users_batches = np.split(self._train_users, split_indices)
        self.train_items_batches = np.split(self._train_items, split_indices)
        self.train_labels_batches = np.split(self._train_labels, split_indices)
Example #5
def _masked_column_median(arr, masked_value):
    """Compute the median of each column in the 2D array arr, ignoring any
    instances of masked_value"""
    mask = _get_mask(arr, masked_value)
    if arr.size == 0:
        return cp.full(arr.shape[1], cp.nan)
    arr_sorted = arr.copy()
    if not cp.isnan(masked_value):
        # If nan is not the missing value, any column with nans should
        # have a median of nan
        nan_cols = cp.any(cp.isnan(arr), axis=0)
        arr_sorted[mask] = cp.nan
    else:
        nan_cols = cp.full(arr.shape[1], False)
    # nans are always sorted to end of array
    arr_sorted = cp.sort(arr_sorted, axis=0)

    count_missing_values = mask.sum(axis=0)
    # Ignore missing values in determining "halfway" index of sorted
    # array
    n_elems = arr.shape[0] - count_missing_values

    # If no elements remain after removing the missing values, the median for
    # that column is nan
    nan_cols = cp.logical_or(nan_cols, n_elems <= 0)

    col_index = cp.arange(arr_sorted.shape[1])
    median = (arr_sorted[cp.floor_divide(n_elems - 1, 2), col_index] +
              arr_sorted[cp.floor_divide(n_elems, 2), col_index]) / 2

    median[nan_cols] = cp.nan
    return median
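A small worked example, assuming _masked_column_median above is in scope; _get_mask is sketched here as a hypothetical equality/NaN test because the real helper is not shown.

import cupy as cp
import numpy as np

def _get_mask(arr, masked_value):
    # Hypothetical stand-in for the helper used above.
    if np.isnan(masked_value):
        return cp.isnan(arr)
    return arr == masked_value

arr = cp.asarray([[1., 2.],
                  [3., -1.],
                  [5., 6.]])
print(_masked_column_median(arr, -1.0))  # expected: [3. 4.]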
Example #6
def launch_superpose_deltas(positions, shape):
    array = cp.zeros((shape[0], shape[1]), dtype=cp.float32)

    if len(positions) == 0:
        return array

    if (cp.any(positions[:, 0] < 0) or cp.any(positions[:, 1] < 0) or cp.any(positions[:, 0] > shape[0]) or cp.any(
            positions[:, 1] > shape[1])):
        raise RuntimeError()

    rounded = cp.floor(positions).astype(cp.int32)

    threadsperblock = 32
    blockspergrid = (positions.shape[0] + (threadsperblock - 1)) // threadsperblock
    superpose_deltas[blockspergrid, threadsperblock](array, positions, rounded)
    return array
Example #7
def check_finite(array, force_all_finite=True):
    """Checks that the input is finite if necessary

    Parameters
    ----------
    array : object
        Input object to check / convert.
    force_all_finite : boolean or 'allow-nan', (default=True)
        Whether to raise an error on np.inf, np.nan, pd.NA in array. The
        possibilities are:
        - True: Force all values of array to be finite.
        - False: accepts np.inf, np.nan, pd.NA in array.
        - 'allow-nan': accepts only np.nan and pd.NA values in array. Values
          cannot be infinite.

    Returns
    -------
    None or raise error
    """
    if force_all_finite is True:
        if not cp.all(cp.isfinite(array)):
            raise ValueError("Non-finite value encountered in array")
    elif force_all_finite == 'allow-nan':
        if cp.any(cp.isinf(array)):
            raise ValueError("Non-finite value encountered in array")
Example #8
def hilbert2(x, N=None):
    """
    Compute the '2-D' analytic signal of `x`

    Parameters
    ----------
    x : array_like
        2-D signal data.
    N : int or tuple of two ints, optional
        Number of Fourier components. Default is ``x.shape``

    Returns
    -------
    xa : ndarray
        Analytic signal of `x` taken along axes (0,1).

    References
    ----------
    .. [1] Wikipedia, "Analytic signal",
        https://en.wikipedia.org/wiki/Analytic_signal

    """
    x = atleast_2d(x)
    if x.ndim > 2:
        raise ValueError("x must be 2-D.")
    if iscomplexobj(x):
        raise ValueError("x must be real.")
    if N is None:
        N = x.shape
    elif isinstance(N, int):
        if N <= 0:
            raise ValueError("N must be positive.")
        N = (N, N)
    elif len(N) != 2 or cp.any(cp.asarray(N) <= 0):
        raise ValueError(
            "When given as a tuple, N must hold exactly two positive integers"
        )

    Xf = fftpack.fft2(x, N, axes=(0, 1))
    h1 = zeros(N[0], "d")
    h2 = zeros(N[1], "d")
    for h, N1 in zip((h1, h2), N):
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1 : N1 // 2] = 2
        else:
            h[0] = 1
            h[1 : (N1 + 1) // 2] = 2

    h = h1[:, newaxis] * h2[newaxis, :]
    k = x.ndim
    while k > 2:
        h = h[:, newaxis]
        k -= 1
    x = fftpack.ifft2(Xf * h, axes=(0, 1))
    return x
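A minimal usage sketch, assuming hilbert2 as defined above is importable together with its module-level helpers (atleast_2d, iscomplexobj, fftpack, zeros, newaxis).

import cupy as cp

x = cp.random.rand(64, 64)
xa = hilbert2(x)
assert xa.shape == (64, 64)
assert cp.iscomplexobj(xa)  # the analytic signal is complex-valued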
Example #9
def test_niblack_sauvola_pathological_image():
    # For certain values, floating point error can cause
    # E(X^2) - (E(X))^2 to be negative, and taking the square root of this
    # resulted in NaNs. Here we check that these are safely caught.
    # see https://github.com/scikit-image/scikit-image/issues/3007
    value = 0.03082192 + 2.19178082e-09
    src_img = cp.full((4, 4), value).astype(cp.float64)
    assert not cp.any(cp.isnan(threshold_niblack(src_img)))
Example #10
    def negative_binomial(self, n, p, size=None, dtype=int):
        """Returns an array of samples drawn from the negative binomial distribution.

        .. seealso::
            :func:`cupy.random.negative_binomial` for full documentation,
            :meth:`numpy.random.RandomState.negative_binomial`
        """
        n = cupy.asarray(n)
        p = cupy.asarray(p)
        if cupy.any(n <= 0):
            raise ValueError("n <= 0")
        if cupy.any(p < 0):
            raise ValueError("p < 0")
        if cupy.any(p > 1):
            raise ValueError("p > 1")
        y = self.gamma(n, (1 - p) / p, size)
        return self.poisson(y, dtype=dtype)
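The sampler is a gamma-Poisson mixture, one standard construction of the negative binomial; as a quick sanity sketch (my own example), the empirical mean should approach n * (1 - p) / p.

import cupy

rs = cupy.random.RandomState(seed=123)
n, p = 5.0, 0.4
samples = rs.negative_binomial(n, p, size=100000)
print(float(samples.mean()), n * (1 - p) / p)  # both close to 7.5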
Example #11
def _assert_non_negative(image):

    if cp.any(image < 0):
        raise ValueError(
            "Image Correction methods work correctly only on "
            "images with non-negative values. Use "
            "skimage.exposure.rescale_intensity."
        )
Example #12
def test_out_argument():
    for func in (binary.binary_erosion, binary.binary_dilation):
        strel = cp.ones((3, 3), dtype=cp.uint8)
        img = cp.ones((10, 10))
        out = cp.zeros_like(img)
        out_saved = out.copy()
        func(img, strel, out=out)
        assert cp.any(out != out_saved)
        testing.assert_array_equal(out, func(img, strel))
Example #13
def _cholesky(B):
    """
    Wrapper around `cupy.linalg.cholesky` that raises LinAlgError if there are
    NaNs in the output
    """
    R = cupy.linalg.cholesky(B)
    if cupy.any(cupy.isnan(R)):
        raise numpy.linalg.LinAlgError
    return R
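A short usage sketch on a symmetric positive-definite input (my own example); cupy.linalg.cholesky returns the lower-triangular factor, so R @ R.T reconstructs the input.

import cupy

A = cupy.asarray([[4.0, 2.0],
                  [2.0, 3.0]])
R = _cholesky(A)
assert cupy.allclose(R @ R.T, A)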
Example #14
    def triangular(self, left, mode, right, size=None, dtype=float):
        """Returns an array of samples drawn from the triangular distribution.

        .. seealso::
            :func:`cupy.random.triangular` for full documentation,
            :meth:`numpy.random.RandomState.triangular`
        """
        left, mode, right = \
            cupy.asarray(left), cupy.asarray(mode), cupy.asarray(right)
        if cupy.any(left > mode):
            raise ValueError("left > mode")
        if cupy.any(mode > right):
            raise ValueError("mode > right")
        if cupy.any(left == right):
            raise ValueError("left == right")
        if size is None:
            size = cupy.broadcast(left, mode, right).shape
        x = self.random_sample(size=size, dtype=dtype)
        return RandomState._triangular_kernel(left, mode, right, x)
Example #15
def cupy_unique_axis0(array):
    # axis is still not supported for cupy.unique, this
    # is a workaround
    if len(array.shape) != 2:
        raise ValueError("Input array must be 2D.")
    sortarr     = array[cp.lexsort(array.T[::-1])]
    mask        = cp.empty(array.shape[0], dtype=cp.bool_)
    mask[0]     = True
    mask[1:]    = cp.any(sortarr[1:] != sortarr[:-1], axis=1)
    return sortarr[mask]
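A small worked example of the workaround (my own input).

import cupy as cp

a = cp.asarray([[1, 2],
                [3, 4],
                [1, 2],
                [3, 0]])
print(cupy_unique_axis0(a))  # [[1 2], [3 0], [3 4]]: duplicate row dropped, rows sorted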
Example #16
    def noncentral_chisquare(self, df, nonc, size=None, dtype=float):
        """Returns an array of samples drawn from the noncentral chi-square
        distribution.

        .. seealso::
            :func:`cupy.random.noncentral_chisquare` for full documentation,
            :meth:`numpy.random.RandomState.noncentral_chisquare`
        """
        df, nonc = cupy.asarray(df), cupy.asarray(nonc)
        if cupy.any(df <= 0):
            raise ValueError("df <= 0")
        if cupy.any(nonc < 0):
            raise ValueError("nonc < 0")
        if size is None:
            size = cupy.broadcast(df, nonc).shape
        y = cupy.empty(shape=size, dtype=dtype)
        _kernels.noncentral_chisquare_kernel(df, nonc, self.rk_seed, y)
        self.rk_seed += numpy.prod(size)
        return y
Example #17
    def logseries(self, p, size=None, dtype=int):
        """Returns an array of samples drawn from a log series distribution.

        .. seealso::
            :func:`cupy.random.logseries` for full documentation,
            :meth:`numpy.random.RandomState.logseries`

        """
        p = cupy.asarray(p)
        if cupy.any(p <= 0):
            raise ValueError('p <= 0.0')
        if cupy.any(p >= 1):
            raise ValueError('p >= 1.0')
        if size is None:
            size = p.shape
        y = cupy.empty(shape=size, dtype=dtype)
        _kernels.logseries_kernel(p, self.rk_seed, y)
        self.rk_seed += numpy.prod(size)
        return y
Example #18
def test_csr_norms(norm, ref_norm, dtype, seed, shape):
    X = np.random.RandomState(seed).randn(*shape).astype(dtype)
    X_csr = sp.csr_matrix(X)
    X_csr_gpu = cupyx.scipy.sparse.csr_matrix(X_csr)

    norm(X_csr_gpu)
    ref_norm(X_csr)

    # checks that array have been changed inplace
    assert cp.any(cp.not_equal(X_csr_gpu.todense(), cp.array(X)))

    cp.testing.assert_array_almost_equal(X_csr_gpu.todense(), X_csr.todense())
Example #19
def _preprocess(labels):

    label_values, inv_idx = cp.unique(labels, return_inverse=True)
    if not (label_values == 0).any():
        warn('Random walker only segments unlabeled areas, where '
             'labels == 0. No zero valued areas in labels were '
             'found. Returning provided labels.',
             stacklevel=2)

        return labels, None, None, None, None

    # If some labeled pixels are isolated inside pruned zones, prune them
    # as well and keep the labels for the final output

    null_mask = labels == 0
    pos_mask = labels > 0
    mask = labels >= 0

    fill = ndi.binary_propagation(null_mask, mask=mask)
    isolated = cp.logical_and(pos_mask, cp.logical_not(fill))

    pos_mask[isolated] = False

    # If the array has pruned zones, be sure that no isolated pixels
    # exist between pruned zones (they could not be determined)
    if label_values[0] < 0 or cp.any(isolated):  # synchronize!
        isolated = cp.logical_and(
            cp.logical_not(ndi.binary_propagation(pos_mask, mask=mask)),
            null_mask)

        labels[isolated] = -1
        if cp.all(isolated[null_mask]):
            warn('All unlabeled pixels are isolated, they could not be '
                 'determined by the random walker algorithm.',
                 stacklevel=2)
            return labels, None, None, None, None

        mask[isolated] = False
        mask = cp.atleast_3d(mask)

    else:
        mask = None

    # Reorder label values to have consecutive integers (no gaps)
    zero_idx = cp.searchsorted(label_values, cp.array(0))
    labels = cp.atleast_3d(inv_idx.reshape(labels.shape) - zero_idx)

    nlabels = label_values[zero_idx + 1:].shape[0]

    inds_isolated_seeds = cp.nonzero(isolated)
    isolated_values = labels[inds_isolated_seeds]

    return labels, nlabels, mask, inds_isolated_seeds, isolated_values
Example #20
    def noncentral_f(self, dfnum, dfden, nonc, size=None, dtype=float):
        """Returns an array of samples drawn from the noncentral F distribution.

        .. seealso::
            :func:`cupy.random.noncentral_f` for full documentation,
            :meth:`numpy.random.RandomState.noncentral_f`
        """
        dfnum, dfden, nonc = \
            cupy.asarray(dfnum), cupy.asarray(dfden), cupy.asarray(nonc)
        if cupy.any(dfnum <= 0):
            raise ValueError('dfnum <= 0')
        if cupy.any(dfden <= 0):
            raise ValueError('dfden <= 0')
        if cupy.any(nonc < 0):
            raise ValueError('nonc < 0')
        if size is None:
            size = cupy.broadcast(dfnum, dfden, nonc).shape
        y = cupy.empty(shape=size, dtype=dtype)
        _kernels.noncentral_f_kernel(dfnum, dfden, nonc, self.rk_seed, y)
        self.rk_seed += numpy.prod(size, dtype=self.rk_seed.dtype)
        return y
Example #21
    def negative_binomial(self, n, p, size=None, dtype=int):
        """Returns an array of samples drawn from the negative binomial distribution.

        .. warning::

            This function may synchronize the device.

        .. seealso::
            - :func:`cupy.random.negative_binomial` for full documentation
            - :meth:`numpy.random.RandomState.negative_binomial`
        """
        n = cupy.asarray(n)
        p = cupy.asarray(p)
        if cupy.any(n <= 0):  # synchronize!
            raise ValueError('n <= 0')
        if cupy.any(p < 0):  # synchronize!
            raise ValueError('p < 0')
        if cupy.any(p > 1):  # synchronize!
            raise ValueError('p > 1')
        y = self.gamma(n, (1 - p) / p, size)
        return self.poisson(y, dtype=dtype)
Example #22
def test_copy_crop():
    arr = cp.arange(45).reshape(9, 5)
    out0 = crop(arr, 1, copy=True)
    assert out0.flags.c_contiguous
    out0[0, 0] = 100
    assert not cp.any(arr == 100)
    assert not cp.may_share_memory(arr, out0)

    out1 = crop(arr, 1)
    out1[0, 0] = 100
    assert arr[1, 1] == 100
    assert cp.may_share_memory(arr, out1)
Example #23
def evaluate_individual_fitness(individual: cupy.ndarray,
                                formula: cupy.ndarray) -> cupy.ndarray:
    """
    Evaluates the fitness of an individual as a function of how many clauses in the formula it satisfies. For example
    if an individual satisfies half of the clauses, its fitness will be 0.5

    :param individual: The individual for which we compute the fitness
    :param formula: The CNF formula to satisfy
    :return: The value of the fitness of an individual, or the ratio of clauses of the formula it satisfies
    """
    return cupy.mean(cupy.any(individual == formula, axis=1),
                     dtype=cupy.float16)
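A worked example under an assumed encoding (my guess, not taken from the source project): each formula row stores, per variable, the truth value that satisfies the clause through that variable (1 for a positive literal, 0 for a negated one) and -1 when the variable does not appear, while the individual stores one 0/1 assignment per variable.

import cupy

formula = cupy.asarray([[1, -1, 0],    # x0 OR (NOT x2)
                        [-1, 0, -1],   # NOT x1
                        [0, 1, 0]])    # (NOT x0) OR x1 OR (NOT x2)
individual = cupy.asarray([1, 0, 1])   # x0=1, x1=0, x2=1

# Clauses 1 and 2 are satisfied, clause 3 is not, so the fitness is 2/3.
print(evaluate_individual_fitness(individual, formula))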
Example #24
    def ndtri(self, y0):
        # Inverse of the standard normal CDF. P0/Q0, P1/Q1, P2/Q2 and s2pi are
        # the usual Cephes polynomial coefficients, assumed to be defined at
        # module level; 0.13533528323661269189 is exp(-2).
        out = cp.zeros(y0.shape)
        cond1 = y0 == 0.0
        cond2 = y0 == 1.0
        if cp.any(cond1):
            y0[cond1] = -cp.inf
        if cp.any(cond2):
            y0[cond2] = cp.inf
        code = cp.ones(y0.shape, dtype=bool)
        cond3 = y0 > (1.0 - 0.13533528323661269189)
        if cp.any(cond3):
            y0[cond3] = 1.0 - y0[cond3]
            code[cond3] = 0
        cond4 = y0 > 0.13533528323661269189
        cond5 = y0 <= 0.13533528323661269189
        x = cp.sqrt(-2.0 * cp.log(y0))
        cond6 = (x < 8.0) & cond5
        cond7 = (x >= 8.0) & cond5
        x0 = x - cp.log(x) / x
        z = 1.0 / x
        if cp.any(cond6):
            x1 = x0[cond6] - z[cond6] * cp.polyval(P1, z[cond6]) / cp.polyval(Q1, z[cond6])
            out[cond6] = x1
        if cp.any(cond7):
            x2 = x0[cond7] - z[cond7] * cp.polyval(P2, z[cond7]) / cp.polyval(Q2, z[cond7])
            out[cond7] = x2
        out[code] = -out[code]
        if cp.any(cond4):
            y = y0[cond4]
            y = y - 0.5
            y2 = y * y
            x = y + y * (y2 * cp.polyval(P0, y2) / cp.polyval(Q0, y2))
            x = x * s2pi
            out[cond4] = x
        return out
Example #25
    def move_slimes():
        slime_dir = cp.array(
            [cp.sin(SlimeWorld.slime_angle),
             cp.cos(SlimeWorld.slime_angle)]).T
        SlimeWorld.slime_pos += slime_dir

        # Change direction if out of bounds
        xoob = (SlimeWorld.slime_pos[:, 0] < 0) | (SlimeWorld.slime_pos[:, 0] >
                                                   SlimeWorld.WIDTH - 1)
        yoob = (SlimeWorld.slime_pos[:, 1] < 0) | (SlimeWorld.slime_pos[:, 1] >
                                                   SlimeWorld.HEIGHT - 1)

        # Bounce off walls
        if cp.any(xoob):
            slime_dir[xoob, 0] *= -1
        if cp.any(yoob):
            slime_dir[yoob, 1] *= -1
        if cp.any(xoob | yoob):
            SlimeWorld.slime_angle = cp.arctan2(slime_dir[:, 0], slime_dir[:,
                                                                           1])
            # Clip if out of bounds
            SlimeWorld.clip(SlimeWorld.slime_pos)
Example #26
    def weibull(self, a, size=None, dtype=float):
        """Returns an array of samples drawn from the weibull distribution.

        .. seealso::
            :func:`cupy.random.weibull` for full documentation,
            :meth:`numpy.random.RandomState.weibull`
        """
        a = cupy.asarray(a)
        if cupy.any(a < 0):
            raise ValueError("a < 0")
        x = self.standard_exponential(size, dtype)
        cupy.power(x, 1. / a, out=x)
        return x
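The draw is an inverse transform (a standard exponential raised to the power 1/a); as a quick sanity sketch (my own example), the empirical mean should approach gamma(1 + 1/a).

import math
import cupy

rs = cupy.random.RandomState(seed=7)
a = 1.5
samples = rs.weibull(a, size=200000)
print(float(samples.mean()), math.gamma(1 + 1 / a))  # both close to 0.90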
Example #27
def evaluate_population(population: cupy.ndarray,
                        formula: cupy.ndarray) -> cupy.ndarray:
    """
    Evaluates the fitness of every individual in a population and returns the fitness values in the order of the
    individuals in the population matrix.

    :param population: A matrix where each row represents an individual to be evaluated
    :param formula: The CNF formula to satisfy
    :return: The values of fitness of each individual in the input population as an array
    """
    return cupy.mean(cupy.any(population[:, cupy.newaxis] == formula, axis=2),
                     axis=1,
                     dtype=cupy.float16)
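Continuing the assumed encoding from the single-individual example above (again my own illustrative input), the population version scores several assignments at once.

import cupy

formula = cupy.asarray([[1, -1, 0],
                        [-1, 0, -1],
                        [0, 1, 0]])
population = cupy.asarray([[1, 0, 1],
                           [0, 0, 1]])

# Each individual satisfies 2 of the 3 clauses, so the result is about [0.667, 0.667].
print(evaluate_population(population, formula))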
Example #28
    def test_init(self):
        cd = self.init_cuda_dict()
        cd.bpg = ceil(len(self.test_keys) / cd.tpb)
        assert cp.all(cd.contains(self.test_keys))
        assert cp.all(cd[self.test_keys] == self.test_values)

        cd.bpg = ceil(len(self.test_lookup_keys_avail) / cd.tpb)
        assert cp.all(cd.contains(self.test_lookup_keys_avail))
        assert cp.all(cd[self.test_lookup_keys_avail] == self.test_lookup_values_avail)

        cd.bpg = ceil(len(self.test_lookup_keys_unavail) / cd.tpb)
        assert cp.all(cd[self.test_lookup_keys_unavail] == self.test_default[0])
        assert not cp.any(cd.contains(self.test_lookup_keys_unavail))
Example #29
    def _truncnorm_ppf(self, q, N):
        out = cp.zeros(cp.shape(q))
        delta = self._truncnorm_get_delta(N)
        cond1 = delta > 0
        cond2 = (delta > 0) & (self.a > 0)
        cond21 = (delta > 0) & (self.a <= 0)
        if cp.any(cond1):
            sa = self.norm_sf(self.a[cond2])
            out[:, cond2] = -self.ndtri((1 - q[:, cond2]) * sa)
        if cp.any(cond21):
            na = self.norm_cdf(self.a[cond21])
            out[:, cond21] = self._ndtri(q[:, cond21] + na * (1.0 - q[:, cond21]))
        cond3 = ~cond1 & cp.isinf(self.b)
        cond4 = ~cond1 & cp.isinf(self.a)
        if cp.any(cond3):
            out[:, cond3] = -self._norm_ilogcdf(
                cp.log1p(-q[:, cond3]) + self.norm_logsf(self.a[cond3]))
        if cp.any(cond4):
            out[:, cond4] = self._norm_ilogcdf(
                cp.log(q[:, cond4]) + self.norm_logcdf(self.b[cond4]))
        cond5 = out < self.a
        if cp.any(cond5):
            out[cond5] = (cond5 * self.a)[cond5]
        return out
Example #30
    def triangular(self, left, mode, right, size=None, dtype=float):
        """Returns an array of samples drawn from the triangular distribution.

        .. warning::

            This function may synchronize the device.

        .. seealso::
            - :func:`cupy.random.triangular` for full documentation
            - :meth:`numpy.random.RandomState.triangular`
        """
        left, mode, right = \
            cupy.asarray(left), cupy.asarray(mode), cupy.asarray(right)
        if cupy.any(left > mode):  # synchronize!
            raise ValueError('left > mode')
        if cupy.any(mode > right):  # synchronize!
            raise ValueError('mode > right')
        if cupy.any(left == right):  # synchronize!
            raise ValueError('left == right')
        if size is None:
            size = cupy.broadcast(left, mode, right).shape
        x = self.random_sample(size=size, dtype=dtype)
        return RandomState._triangular_kernel(left, mode, right, x)