Code Example #1
def net(self, par, **args):
    for k, v in args.items():
        if k == 'X_train':
            X = cp.asarray(v)
    y_linear = cp.dot(X, par['weights']) + par['bias']
    # clamp the logits so the sigmoid output stays strictly inside (0, 1)
    y_linear = cp.minimum(y_linear, -cp.log(cp.finfo(float).eps))
    y_linear = cp.maximum(y_linear, -cp.log(1. / cp.finfo(float).tiny - 1.0))
    yhat = self.sigmoid(y_linear)
    return yhat
Code Example #2
def log_likelihood(self, par, **args):
    for k, v in args.items():
        if k == 'X_train':
            X = cp.asarray(v)
        elif k == 'y_train':
            y = cp.asarray(v)
    y_linear = cp.dot(X, par['weights']) + par['bias']
    # clamp the logits for numerical stability before the cross-entropy
    y_linear = cp.minimum(y_linear, -cp.log(cp.finfo(float).eps))
    y_linear = cp.maximum(y_linear, -cp.log(1. / cp.finfo(float).tiny - 1.0))
    return cp.sum(self.cross_entropy(y_linear, y))
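Both methods above clamp the pre-activation to the interval [-log(1/tiny - 1), -log(eps)], which keeps the sigmoid output strictly inside (0, 1) so the downstream logarithms stay finite. A minimal standalone sketch of the same trick (clamp_logits is a hypothetical helper name; numpy works as a drop-in for cupy):

import cupy as cp

def clamp_logits(z):
    # upper bound (~36.0 for float64): sigmoid(z) <= 1 - eps
    upper = -cp.log(cp.finfo(float).eps)
    # lower bound (~-708.4 for float64): sigmoid(z) >= tiny
    lower = -cp.log(1. / cp.finfo(float).tiny - 1.0)
    return cp.maximum(cp.minimum(z, upper), lower)

z = cp.array([-1e6, 0.0, 1e6])
p = 1.0 / (1.0 + cp.exp(-clamp_logits(z)))
print(p)  # never exactly 0.0 or 1.0, so log(p) and log(1 - p) are finite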
Code Example #3
def _check_nan_inf(x, dtype, neg=None):
    if dtype.char in 'FD':
        dtype = cupy.dtype(dtype.char.lower())
    if dtype.char not in 'efd':
        x = 0
    elif x is None and neg is not None:
        x = cupy.finfo(dtype).min if neg else cupy.finfo(dtype).max
    elif cupy.isnan(x):
        x = cupy.nan
    elif cupy.isinf(x):
        x = cupy.inf * (-1)**(x < 0)
    return cupy.asanyarray(x, dtype)
Code Example #4
File: _lobpcg.py Project: viantirreau/cupy
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
    """B-orthonormalize the given block vector using Cholesky."""
    normalization = blockVectorV.max(axis=0) + cupy.finfo(
        blockVectorV.dtype).eps
    blockVectorV = blockVectorV / normalization
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV
    else:
        blockVectorBV = blockVectorBV / normalization
    VBV = cupy.matmul(blockVectorV.T.conj(), blockVectorBV)
    try:
        # VBV is a Cholesky factor
        VBV = _cholesky(VBV)
        VBV = linalg.inv(VBV.T)
        blockVectorV = cupy.matmul(blockVectorV, VBV)
        if B is not None:
            blockVectorBV = cupy.matmul(blockVectorBV, VBV)
        else:
            blockVectorBV = None
    except numpy.linalg.LinAlgError:
        # LinAlg Error: cholesky transformation might fail in rare cases
        # raise ValueError("cholesky has failed")
        blockVectorV = None
        blockVectorBV = None
        VBV = None

    if retInvR:
        return blockVectorV, blockVectorBV, VBV, normalization
    else:
        return blockVectorV, blockVectorBV
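A quick sanity check for the routine above: with B=None the block should come back with an (approximately) identity Gram matrix. This driver is a hypothetical sketch and assumes _b_orthonormalize and its private helpers (_cholesky, linalg) are importable:

import cupy

rng = cupy.random.RandomState(0)
V = rng.rand(50, 4)                     # a block of 4 random vectors
Vn, BVn = _b_orthonormalize(None, V)    # B=None, so BVn comes back as None
gram = cupy.matmul(Vn.T.conj(), Vn)     # should be close to the identity
print(cupy.allclose(gram, cupy.eye(4), atol=1e-8))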
Code Example #5
File: _canny.py Project: grlee77/cucim
def smooth_with_function_and_mask(image, function, mask):
    """Smooth an image with a linear function, ignoring masked pixels.

    Parameters
    ----------
    image : array
        Image you want to smooth.
    function : callable
        A function that does image smoothing.
    mask : array
        Mask with 1's for significant pixels, 0's for masked pixels.

    Notes
    -----
    This function calculates the fractional contribution of masked pixels
    by applying the function to the mask (which gets you the fraction of
    the pixel data that's due to significant points). We then mask the image
    and apply the function. The resulting values will be lower by the
    bleed-over fraction, so you can recalibrate by dividing by the function
    on the mask to recover the effect of smoothing from just the significant
    pixels.
    """
    bleed_over = function(mask.astype(cp.float32))
    masked_image = cp.zeros(image.shape, image.dtype)
    masked_image[mask] = image[mask]
    smoothed_image = function(masked_image)
    output_image = smoothed_image / (bleed_over + cp.finfo(cp.float32).eps)
    return output_image
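A usage sketch for the function above: smooth around a masked-out block with a Gaussian. The smooth callable is illustrative and assumes cupyx.scipy.ndimage is available:

import cupy as cp
from cupyx.scipy import ndimage as ndi

image = cp.random.rand(64, 64).astype(cp.float32)
mask = cp.ones((64, 64), dtype=bool)
mask[20:40, 20:40] = False              # pixels to ignore while smoothing

smooth = lambda x: ndi.gaussian_filter(x, sigma=2)
result = smooth_with_function_and_mask(image, smooth, mask)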
Code Example #6
def gaussian2D(shape, sigma=1):
    m, n = [(ss - 1.) / 2. for ss in shape]
    y, x = cupy.ogrid[-m:m + 1, -n:n + 1]

    h = cupy.exp(-(x * x + y * y) / (2 * sigma * sigma))
    h[h < cupy.finfo(h.dtype).eps * h.max()] = 0
    return h
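For instance, a 5x5 kernel with sigma=1 (a small usage sketch; normalizing the kernel to unit sum is a common follow-up when it is used for smoothing):

kernel = gaussian2D((5, 5), sigma=1)
kernel /= kernel.sum()                  # normalize so the weights sum to 1
print(kernel.shape)                     # (5, 5)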
Code Example #7
File: hpl.py Project: shibacow/hpl_test
def run_hpl(n, nr, tol=16):
    """
    Run the High-Performance LINPACK test on a matrix of size n x n, nr
    number of times, and ensure that the maximum of the three residuals is
    strictly less than the prescribed tolerance (defaults to 16).
    This function returns the performance in GFlops/sec.
    """
    mempool = cn.get_default_memory_pool()
    if args.type=='fp32':
        accuracy=cn.float32
    if args.type=='fp64':
        accuracy=cn.float64
    a = cn.random.rand(n, n).astype(accuracy);
    b = cn.random.rand(n, 1).astype(accuracy);
    x,t = iterate_func(nr,cn.linalg.solve, a, b,n,mempool)
    eps = cn.finfo(accuracy).eps
    r = cn.dot(a, x)-b
    r0 = cn.linalg.norm(r, cn.inf)
    r1 = r0/(eps * cn.linalg.norm(a, 1) * n)
    r2 = r0/(eps * cn.linalg.norm(a, cn.inf) * cn.linalg.norm(x, cn.inf) * n)
    performance  = (1e-9* (2.0/3.0 * n * n * n+ 3.0/2.0 * n * n) *nr/t)
    verified     = np.max((r0.get(), r1.get(), r2.get())) < tol
    umem = 4 * mempool.used_bytes() // (1024*1024)
    msg='performance={} umem={} verified={} r0={} r1={} r2={}'.format(performance,umem,verified,r0,r1,r2)
    logging.info(msg)
    if not verified:
        err="Solution did not meet the prescribed tolerance {}".format(tol)
        raise RuntimeError(err)
    return performance,umem
Code Example #8
def test_cmc():
    data = load_ciede2000_data()
    N = len(data)
    lab1 = np.zeros((N, 3))
    lab1[:, 0] = data['L1']
    lab1[:, 1] = data['a1']
    lab1[:, 2] = data['b1']

    lab2 = np.zeros((N, 3))
    lab2[:, 0] = data['L2']
    lab2[:, 1] = data['a2']
    lab2[:, 2] = data['b2']

    lab1 = cp.asarray(lab1)
    lab2 = cp.asarray(lab2)

    dE2 = deltaE_cmc(lab1, lab2)
    # fmt: off
    oracle = cp.asarray([
        1.73873611, 2.49660844, 3.30494501, 0.85735576, 0.88332927,
        0.97822692, 3.50480874, 2.87930032, 6.5783807, 6.57838075,
        6.5783808, 6.57838086, 6.67492321, 6.67492326, 6.67492331,
        4.66852997, 42.10875485, 39.45889064, 38.36005919, 33.93663807,
        1.14400168, 1.00600419, 1.11302547, 1.05335328, 1.42822951,
        1.2548143, 1.76838061, 2.02583367, 3.08695508, 1.74893533,
        1.90095165, 1.70258148, 1.80317207, 2.44934417
    ])
    # fmt: on

    assert_allclose(dE2, oracle, rtol=1.0e-8)

    # Equal or close colors make the `delta_e.get_dH2` function return
    # negative values, resulting in NaNs when passed to sqrt (see GitHub
    # issue #1908):
    lab1 = lab2
    expected = cp.zeros_like(oracle)
    assert_array_almost_equal(deltaE_cmc(lab1, lab2), expected, decimal=6)

    lab2[0, 0] += cp.finfo(float).eps
    assert_array_almost_equal(deltaE_cmc(lab1, lab2), expected, decimal=6)

    # Single item case:
    lab1 = lab2 = cp.array([0., 1.59607713, 0.87755709])
    assert_array_equal(deltaE_cmc(lab1, lab2), 0)

    lab2[0] += cp.finfo(float).eps
    assert_array_equal(deltaE_cmc(lab1, lab2), 0)
Code Example #9
def inversion_eps(dt):
    '''
    Inversion epsilon for type `dt`.
    Adapted from scipy.sparse.linalg.svds.
    '''
    t = dt.char.lower()
    factor = {'f': 1E3, 'd': 1E6}
    return factor[t] * cupy.finfo(t).eps
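Usage is straightforward; the returned tolerance scales the machine epsilon of the dtype. For example:

import cupy

print(inversion_eps(cupy.dtype('float32')))   # 1e3 * eps(float32), ~1.2e-04
print(inversion_eps(cupy.dtype('float64')))   # 1e6 * eps(float64), ~2.2e-10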
Code Example #10
File: _lobpcg.py Project: viantirreau/cupy
def _report_nonhermitian(M, name):
    """
    Report if `M` is not a hermitian matrix given its type.
    """

    md = M - M.T.conj()

    nmd = linalg.norm(md, 1)
    tol = 10 * cupy.finfo(M.dtype).eps
    tol *= max(1, float(linalg.norm(M, 1)))
    if nmd > tol:
        print('matrix %s of the type %s is not sufficiently Hermitian:' %
              (name, M.dtype))
        print('condition: %.e < %e' % (nmd, tol))
Code Example #11
File: _lobpcg.py Project: toslunar/cupy
def _report_nonhermitian(M, name):
    """
    Report if `M` is not a hermitian matrix given its type.
    """

    md = M - M.T.conj()

    nmd = linalg.norm(md, 1)
    tol = 10 * cupy.finfo(M.dtype).eps
    tol *= max(1, float(linalg.norm(M, 1)))
    if nmd > tol:
        warnings.warn(
            f'Matrix {name} of the type {M.dtype} is not Hermitian: '
            f'condition: {nmd} < {tol} fails.',
            UserWarning,
            stacklevel=4)
Code Example #12
File: test_upfirdn.py Project: MannyKayy/cupyimg
    def test_modes(self, size, h_len, mode, dtype):
        random_state = cp.random.RandomState(5)
        x = random_state.randn(size).astype(dtype)
        if dtype in (cp.complex64, cp.complex128):
            x += 1j * random_state.randn(size)
        h = cp.arange(1, 1 + h_len, dtype=x.real.dtype)

        y = upfirdn(h, x, up=1, down=1, mode=mode)
        # expected result: pad the input, filter with zero padding, then crop
        npad = h_len - 1
        if mode in ["antisymmetric", "antireflect", "smooth", "line"]:
            # use _pad_test test function for modes not supported by cp.pad.
            xpad = _pad_test(x, npre=npad, npost=npad, mode=mode)
        else:
            xpad = cp.pad(x, npad, mode=mode)
        ypad = upfirdn(h, xpad, up=1, down=1, mode="constant")
        y_expected = ypad[npad:-npad]

        atol = rtol = cp.finfo(dtype).eps * 1e2
        assert_allclose(y, y_expected, atol=atol, rtol=rtol)
Code Example #13
File: test_upfirdn.py Project: MannyKayy/cupyimg
    def test_vs_lfilter(self):
        # Check that up=1.0 gives same answer as lfilter + slicing
        random_state = cp.random.RandomState(17)
        try_types = (int, cp.float32, cp.complex64, float, complex)
        size = 10000
        down_factors = [2, 11, 79]

        for dtype in try_types:
            x = random_state.randn(size).astype(dtype)
            if dtype in (cp.complex64, cp.complex128):
                x += 1j * random_state.randn(size)

            tol = cp.finfo(cp.float32).eps * 100

            for down in down_factors:
                h = firwin(31, 1.0 / down, window="hamming")
                yl = cp.asarray(lfilter(h, 1.0, x.get())[::down])
                h = cp.asarray(h)
                y = upfirdn(h, x, up=1, down=down)
                assert_allclose(yl, y[: yl.size], atol=tol, rtol=tol)
Code Example #14
def binary_log_loss(y_true, y_prob):
    """Compute binary logistic loss for classification.
    This is identical to log_loss in binary classification case,
    but is kept for its use in multilabel case.
    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels.
    y_prob : array-like of float, shape = (n_samples, 1)
        Predicted probabilities, as returned by a classifier's
        predict_proba method.
    Returns
    -------
    loss : float
        The degree to which the samples are correctly predicted.
    """
    eps = np.finfo(y_prob.dtype).eps
    y_prob = np.clip(y_prob, eps, 1 - eps)
    return -(xlogy(y_true, y_prob) +
             xlogy(1 - y_true, 1 - y_prob)).sum() / y_prob.shape[0]
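A quick sketch of the loss on a toy batch, assuming the namespace the function relies on (cupy aliased to np, and xlogy from cupyx.scipy.special):

import cupy as np
from cupyx.scipy.special import xlogy

y_true = np.array([[1.0], [0.0], [1.0]])
y_prob = np.array([[0.9], [0.2], [0.99]])
print(binary_log_loss(y_true, y_prob))   # ~0.11; lower is better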
Code Example #15
File: _data_type_functions.py Project: takagi/cupy
def finfo(type: Union[Dtype, Array], /) -> finfo_object:
    """
    Array API compatible wrapper for :py:func:`np.finfo <numpy.finfo>`.

    See its docstring for more information.
    """
    fi = np.finfo(type)  # type: ignore
    # Note: The types of the float data here are float, whereas in NumPy they
    # are scalars of the corresponding float dtype.
    try:
        tiny = fi.smallest_normal
    except AttributeError:  # for backward compatibility
        tiny = fi.tiny
    return finfo_object(
        fi.bits,
        float(fi.eps),
        float(fi.max),
        float(fi.min),
        float(tiny),
    )
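A usage sketch, assuming finfo_object exposes its fields in the order they are passed above. Unlike np.finfo, the values are plain Python scalars, as the array API spec requires:

fi = finfo(np.float64)
print(fi.bits, fi.eps)              # 64 2.220446049250313e-16
assert isinstance(fi.eps, float)    # a plain float, not a NumPy scalar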
Code Example #16
def soft_mask(v, voxel_size, num_subunit_residues,
              helical_repeat_distance=None, repeats_to_include=0,
              filter_resolution=20, expansion_factor=1.2, 
              expansion_radius=0, print_progress=True, return_mask=False):

    full_expansion_radius = expansion_radius + filter_resolution/2

# avg AA mol wt. in g/mol, density in g/cm3
    avg_aa_molwt = 110
    protein_density = 1.4

# 2
    print(helical_repeat_distance)
    v_thresh = np.zeros(v.shape)
    v_thresh[:] = v[:]

    sz = np.array(v.shape).astype(int)

    total_molwt = num_subunit_residues*avg_aa_molwt/6.023e23
    if helical_repeat_distance is not None:
        total_molwt = total_molwt * sz[2]*voxel_size / helical_repeat_distance
    total_vol = np.prod(sz) * voxel_size**3                  # vol in A3
    mol_vol = total_molwt/protein_density / (1.0e-24)        # vol in A3
    mol_vol_frac = mol_vol/total_vol
    target_vol_frac = mol_vol_frac*expansion_factor

    thresh = find_binary_threshold(v_thresh, target_vol_frac)
    true_frac = (0.0 + np.sum(v_thresh >= thresh)) / v_thresh.size

    if repeats_to_include != 0:
        zdim = np.round(repeats_to_include * helical_repeat_distance/voxel_size)
    else:
        zdim = sz[2]

    if zdim > sz[2] - 4*np.ceil(filter_resolution/voxel_size):
        zdim = sz[2] - 4*np.ceil(filter_resolution/voxel_size)

    zdim = zdim.astype(int)

    v_thresh[:,:,0:np.floor(sz[2]/2).astype(int) - np.floor(zdim/2).astype(int)] = 0
    v_thresh[:,:,np.floor(sz[2]/2).astype(int) - np.floor(zdim/2).astype(int) + 1 + zdim - 1:] = 0
    v_thresh[v_thresh < thresh] = 0

    if print_progress:
        print('Target volume fraction: {}'.format(target_vol_frac))
        print('Achieved volume fraction: {}'.format(true_frac))
        print('Designated threshold: {}'.format(thresh))

    progress_bar = tqdm(total=5)

    v_thresh = fftpack.fftn(v_thresh)
    progress_bar.update(1)
# 3
    cosmask_filter = np.fft.fftshift(spherical_cosmask(sz, 0, np.ceil(filter_resolution/voxel_size)))
    cosmask_filter = fftpack.fftn(cosmask_filter) / np.sum(cosmask_filter)
    progress_bar.update(1)

    v_thresh = v_thresh * cosmask_filter
    v_thresh = fftpack.ifftn(v_thresh)
    progress_bar.update(1)
    v_thresh = np.real(v_thresh)
    v_thresh[np.abs(v_thresh) < 10*np.finfo(type(v_thresh.ravel()[0])).eps] = 0

    v_thresh[v_thresh != 0] = 1

# The extent of blurring is equal to the diameter of the cosmask sphere; 
#  if we want this to equal the expected falloff for filter_resolution, 
#  we therefore need to divide filter_res by 4 to get the 
#  desired radius for spherical_cosmask.

    v_thresh = fftpack.fftn(v_thresh)
    progress_bar.update(1)

    v_thresh = v_thresh * cosmask_filter
    v_thresh = fftpack.ifftn(v_thresh)
    progress_bar.update(1)
    v_thresh = np.real(v_thresh)
    v_thresh[np.abs(v_thresh) < 10*np.finfo(type(v_thresh.ravel()[0])).eps] = 0

    if return_mask:
        v[:,:,:] = v_thresh
    else:
        v *= v_thresh

    return v_thresh
Code Example #17
File: convolve.py Project: awthomp/cusignal-dev
def choose_conv_method(in1, in2, mode="full", measure=False):
    """
    Find the fastest convolution/correlation method.

    This primarily exists to be called during the ``method='auto'`` option in
    `convolve` and `correlate`, but can also be used when performing many
    convolutions of the same input shapes and dtypes, determining
    which method to use for all of them, either to avoid the overhead of the
    'auto' option or to use accurate real-world measurements.

    Parameters
    ----------
    in1 : array_like
        The first argument passed into the convolution function.
    in2 : array_like
        The second argument passed into the convolution function.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    measure : bool, optional
        If True, run and time the convolution of `in1` and `in2` with both
        methods and return the fastest. If False (default), predict the fastest
        method using precomputed values.

    Returns
    -------
    method : str
        A string indicating which convolution method is fastest, either
        'direct' or 'fft'
    times : dict, optional
        A dictionary containing the times (in seconds) needed for each method.
        This value is only returned if ``measure=True``.

    See Also
    --------
    convolve
    correlate

    Examples
    --------
    Estimate the fastest method for a given input:

    >>> import cusignal
    >>> import cupy as cp
    >>> a = cp.random.randn(1000)
    >>> b = cp.random.randn(1000000)
    >>> method = cusignal.choose_conv_method(a, b, mode='same')
    >>> method
    'fft'

    This can then be applied to other arrays of the same dtype and shape:

    >>> c = cp.random.randn(1000)
    >>> d = cp.random.randn(1000000)
    >>> # `method` works with correlate and convolve
    >>> corr1 = cusignal.correlate(a, b, mode='same', method=method)
    >>> corr2 = cusignal.correlate(c, d, mode='same', method=method)
    >>> conv1 = cusignal.convolve(a, b, mode='same', method=method)
    >>> conv2 = cusignal.convolve(c, d, mode='same', method=method)

    """
    volume = cp.asarray(in1)
    kernel = cp.asarray(in2)

    if measure:
        times = {}
        for method in ("fft", "direct"):
            times[method] = _timeit_fast(
                lambda: convolve(volume, kernel, mode=mode, method=method))

        chosen_method = "fft" if times["fft"] < times["direct"] else "direct"
        return chosen_method, times

    # fftconvolve doesn't support complex256
    fftconv_unsup = "complex256" if sys.maxsize > 2**32 else "complex192"
    if hasattr(cp, fftconv_unsup):
        if volume.dtype == fftconv_unsup or kernel.dtype == fftconv_unsup:
            return "direct"

    # for integer input,
    # catch when more precision required than float provides (representing an
    # integer as float can lose precision in fftconvolve if larger than 2**52)
    if any([_numeric_arrays([x], kinds="ui") for x in [volume, kernel]]):
        max_value = int(cp.abs(volume).max()) * int(cp.abs(kernel).max())
        max_value *= int(min(volume.size, kernel.size))
        if max_value > 2**cp.finfo("float").nmant - 1:
            return "direct"

    if _numeric_arrays([volume, kernel], kinds="b"):
        return "direct"

    if _numeric_arrays([volume, kernel]):
        if _fftconv_faster(volume, kernel, mode):
            return "fft"

    return "direct"
Code Example #18
File: peak.py Project: mritools/cupyimg
def peak_local_max(
    image,
    min_distance=1,
    threshold_abs=None,
    threshold_rel=None,
    exclude_border=True,
    indices=True,
    num_peaks=cp.inf,
    footprint=None,
    labels=None,
    num_peaks_per_label=cp.inf,
    p_norm=cp.inf,
):
    """Find peaks in an image as coordinate list or boolean mask.

    Peaks are the local maxima in a region of `2 * min_distance + 1`
    (i.e. peaks are separated by at least `min_distance`).

    If both `threshold_abs` and `threshold_rel` are provided, the maximum
    of the two is chosen as the minimum intensity threshold of peaks.

    .. versionchanged:: 0.18
        Prior to version 0.18, peaks of the same height within a radius of
        `min_distance` were all returned, but this could cause unexpected
        behaviour. From 0.18 onwards, an arbitrary peak within the region is
        returned. See issue gh-2592.

    Parameters
    ----------
    image : ndarray
        Input image.
    min_distance : int, optional
        The minimal allowed distance separating peaks. To find the
        maximum number of peaks, use `min_distance=1`.
    threshold_abs : float, optional
        Minimum intensity of peaks. By default, the absolute threshold is
        the minimum intensity of the image.
    threshold_rel : float, optional
        Minimum intensity of peaks, calculated as `max(image) * threshold_rel`.
    exclude_border : int, tuple of ints, or bool, optional
        If positive integer, `exclude_border` excludes peaks from within
        `exclude_border`-pixels of the border of the image.
        If tuple of non-negative ints, the length of the tuple must match the
        input array's dimensionality.  Each element of the tuple will exclude
        peaks from within `exclude_border`-pixels of the border of the image
        along that dimension.
        If True, takes the `min_distance` parameter as value.
        If zero or False, peaks are identified regardless of their distance
        from the border.
    indices : bool, optional
        If True, the output will be an array representing peak
        coordinates. The coordinates are sorted according to peaks
        values (Larger first). If False, the output will be a boolean
        array shaped as `image.shape` with peaks present at True
        elements. ``indices`` is deprecated and will be removed in
        version 0.20. Default behavior will be to always return peak
        coordinates. You can obtain a mask as shown in the example
        below.
    num_peaks : int, optional
        Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
        return `num_peaks` peaks based on highest peak intensity.
    footprint : ndarray of bools, optional
        If provided, `footprint == 1` represents the local region within which
        to search for peaks at every point in `image`.
    labels : ndarray of ints, optional
        If provided, each unique region `labels == value` represents a unique
        region to search for peaks. Zero is reserved for background.
    num_peaks_per_label : int, optional
        Maximum number of peaks for each label.
    p_norm : float
        Which Minkowski p-norm to use. Should be in the range [1, inf].
        A finite large p may cause a ValueError if overflow can occur.
        ``inf`` corresponds to the Chebyshev distance and 2 to the
        Euclidean distance.

    Returns
    -------
    output : ndarray or ndarray of bools

        * If `indices = True`  : (row, column, ...) coordinates of peaks.
        * If `indices = False` : Boolean array shaped like `image`, with peaks
          represented by True values.

    Notes
    -----
    The peak local maximum function returns the coordinates of local peaks
    (maxima) in an image. Internally, a maximum filter is used for finding local
    maxima. This operation dilates the original image. After comparison of the
    dilated and original image, this function returns the coordinates or a mask
    of the peaks where the dilated image equals the original image.

    See also
    --------
    skimage.feature.corner_peaks

    Examples
    --------
    >>> import cupy as cp
    >>> img1 = cp.zeros((7, 7))
    >>> img1[3, 4] = 1
    >>> img1[3, 2] = 1.5
    >>> img1
    array([[0. , 0. , 0. , 0. , 0. , 0. , 0. ],
           [0. , 0. , 0. , 0. , 0. , 0. , 0. ],
           [0. , 0. , 0. , 0. , 0. , 0. , 0. ],
           [0. , 0. , 1.5, 0. , 1. , 0. , 0. ],
           [0. , 0. , 0. , 0. , 0. , 0. , 0. ],
           [0. , 0. , 0. , 0. , 0. , 0. , 0. ],
           [0. , 0. , 0. , 0. , 0. , 0. , 0. ]])

    >>> peak_local_max(img1, min_distance=1)
    array([[3, 2],
           [3, 4]])

    >>> peak_local_max(img1, min_distance=2)
    array([[3, 2]])

    >>> img2 = cp.zeros((20, 20, 20))
    >>> img2[10, 10, 10] = 1
    >>> img2[15, 15, 15] = 1
    >>> peak_idx = peak_local_max(img2, exclude_border=0)
    >>> peak_idx
    array([[10, 10, 10],
           [15, 15, 15]])

    >>> peak_mask = cp.zeros_like(img2, dtype=bool)
    >>> peak_mask[tuple(peak_idx.T)] = True
    >>> cp.argwhere(peak_mask)
    array([[10, 10, 10],
           [15, 15, 15]])

    """
    if (footprint is None or footprint.size == 1) and min_distance < 1:
        warnings.warn(
            "When min_distance < 1, peak_local_max acts as finding "
            "image > max(threshold_abs, threshold_rel * max(image)).",
            RuntimeWarning,
            stacklevel=2,
        )

    border_width = _get_excluded_border_width(image, min_distance,
                                              exclude_border)

    threshold = _get_threshold(image, threshold_abs, threshold_rel)

    if footprint is None:
        size = 2 * min_distance + 1
        footprint = cp.ones((size, ) * image.ndim, dtype=bool)
    else:
        footprint = cp.asarray(footprint)

    if labels is None:
        # Non maximum filter
        mask = _get_peak_mask(image, footprint, threshold)

        mask = _exclude_border(mask, border_width)

        # Select highest intensities (num_peaks)
        coordinates = _get_high_intensity_peaks(image, mask, num_peaks,
                                                min_distance, p_norm)

    else:
        _labels = _exclude_border(labels.astype(int), border_width)

        if np.issubdtype(image.dtype, np.floating):
            bg_val = cp.finfo(image.dtype).min
        else:
            bg_val = cp.iinfo(image.dtype).min

        # For each label, extract a smaller image enclosing the object of
        # interest, identify num_peaks_per_label peaks
        labels_peak_coord = []

        # For each label, extract a smaller image enclosing the object of
        # interest, identify num_peaks_per_label peaks and mark them in
        # variable out.
        # TODO: use GPU version of find_objects
        try:
            objects = cupyx_ndi.find_objects(_labels)
        except AttributeError:
            objects = cpu_find_objects(cp.asnumpy(_labels))

        for label_idx, roi in enumerate(objects):
            if roi is None:
                continue

            # Get roi mask
            label_mask = labels[roi] == label_idx + 1
            # Extract image roi
            img_object = image[roi]
            # Ensure masked values don't affect roi's local peaks
            img_object[np.logical_not(label_mask)] = bg_val

            mask = _get_peak_mask(img_object, footprint, threshold, label_mask)

            coordinates = _get_high_intensity_peaks(img_object, mask,
                                                    num_peaks_per_label,
                                                    min_distance, p_norm)

            # transform coordinates in global image indices space
            for idx, s in enumerate(roi):
                coordinates[:, idx] += s.start

            labels_peak_coord.append(coordinates)

        if labels_peak_coord:
            coordinates = cp.vstack(labels_peak_coord)
        else:
            coordinates = cp.empty((0, 2), dtype=int)

        if len(coordinates) > num_peaks:
            out = cp.zeros_like(image, dtype=np.bool_)
            out[tuple(coordinates.T)] = True
            coordinates = _get_high_intensity_peaks(image, out, num_peaks,
                                                    min_distance, p_norm)

    if indices:
        return coordinates
    else:
        out = cp.zeros_like(image, dtype=np.bool_)
        out[tuple(coordinates.T)] = True
        return out
Code Example #19
def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None,
           M=None, callback=None, check=False):
    """Uses MINimum RESidual iteration to solve  ``Ax = b``.

    Args:
        A (ndarray, spmatrix or LinearOperator): The real or complex matrix of
            the linear system with shape ``(n, n)``.
        b (cupy.ndarray): Right hand side of the linear system with shape
            ``(n,)`` or ``(n, 1)``.
        x0 (cupy.ndarray): Starting guess for the solution.
        shift (int or float): If shift != 0 then the method solves
            ``(A - shift*I)x = b``
        tol (float): Tolerance for convergence.
        maxiter (int): Maximum number of iterations.
        M (ndarray, spmatrix or LinearOperator): Preconditioner for ``A``.
            The preconditioner should approximate the inverse of ``A``.
            ``M`` must be :class:`cupy.ndarray`,
            :class:`cupyx.scipy.sparse.spmatrix` or
            :class:`cupyx.scipy.sparse.linalg.LinearOperator`.
        callback (function): User-specified function to call after each
            iteration. It is called as ``callback(xk)``, where ``xk`` is the
            current solution vector.

    Returns:
        tuple:
            It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
            the converged solution and ``info`` provides convergence
            information.

    .. seealso:: :func:`scipy.sparse.linalg.minres`
    """
    A, M, x, b = _make_system(A, M, x0, b)

    matvec = A.matvec
    psolve = M.matvec

    n = b.shape[0]

    if maxiter is None:
        maxiter = n * 5

    istop = 0
    itn = 0
    Anorm = 0
    Acond = 0
    rnorm = 0
    ynorm = 0

    xtype = x.dtype

    eps = cupy.finfo(xtype).eps

    Ax = matvec(x)
    r1 = b - Ax
    y = psolve(r1)

    beta1 = cupy.inner(r1, y)

    if beta1 < 0:
        raise ValueError('indefinite preconditioner')
    elif beta1 == 0:
        return x, 0

    beta1 = cupy.sqrt(beta1)
    beta1 = beta1.get().item()

    if check:
        # see if A is symmetric
        if not _check_symmetric(A, Ax, x, eps):
            raise ValueError('non-symmetric matrix')

        # see if M is symmetric
        if not _check_symmetric(M, y, r1, eps):
            raise ValueError('non-symmetric preconditioner')

    oldb = 0
    beta = beta1
    dbar = 0
    epsln = 0
    qrnorm = beta1
    phibar = beta1
    rhs1 = beta1
    rhs2 = 0
    tnorm2 = 0
    gmax = 0
    gmin = cupy.finfo(xtype).max
    cs = -1
    sn = 0
    w = cupy.zeros(n, dtype=xtype)
    w2 = cupy.zeros(n, dtype=xtype)
    r2 = r1

    while itn < maxiter:

        itn += 1
        s = 1.0 / beta
        v = s * y

        y = matvec(v)
        y -= shift * v

        if itn >= 2:
            y -= (beta / oldb) * r1

        alpha = cupy.inner(v, y)
        alpha = alpha.get().item()
        y -= (alpha / beta) * r2
        r1 = r2
        r2 = y
        y = psolve(r2)
        oldb = beta
        beta = cupy.inner(r2, y)
        beta = beta.get().item()
        if beta < 0:
            raise ValueError('non-symmetric matrix')
        beta = numpy.sqrt(beta)

        tnorm2 += alpha ** 2 + oldb ** 2 + beta ** 2

        if itn == 1:
            if beta / beta1 <= 10 * eps:
                istop = -1

        # Apply previous rotation Qk-1 to get
        #   [deltak epslnk+1] = [cs  sn][dbark    0   ]
        #   [gbar k dbar k+1]   [sn -cs][alfak betak+1].

        oldeps = epsln
        delta = cs * dbar + sn * alpha  # delta1 = 0         deltak
        gbar = sn * dbar - cs * alpha  # gbar 1 = alfa1     gbar k
        epsln = sn * beta  # epsln2 = 0         epslnk+1
        dbar = - cs * beta  # dbar 2 = beta2     dbar k+1
        root = numpy.linalg.norm([gbar, dbar])

        # Compute the next plane rotation Qk

        gamma = numpy.linalg.norm([gbar, beta])  # gammak
        gamma = max(gamma, eps)
        cs = gbar / gamma  # ck
        sn = beta / gamma  # sk
        phi = cs * phibar  # phik
        phibar = sn * phibar  # phibark+1

        # Update  x.

        denom = 1.0 / gamma
        w1 = w2
        w2 = w
        w = (v - oldeps * w1 - delta * w2) * denom
        x += phi * w

        # Go round again.

        gmax = max(gmax, gamma)
        gmin = min(gmin, gamma)
        z = rhs1 / gamma
        rhs1 = rhs2 - delta * z
        rhs2 = - epsln * z

        # Estimate various norms and test for convergence.

        Anorm = numpy.sqrt(tnorm2)
        ynorm = cupy.linalg.norm(x)
        ynorm = ynorm.get().item()
        epsa = Anorm * eps
        epsx = Anorm * ynorm * eps
        diag = gbar

        if diag == 0:
            diag = epsa

        qrnorm = phibar
        rnorm = qrnorm
        if ynorm == 0 or Anorm == 0:
            test1 = numpy.inf
        else:
            test1 = rnorm / (Anorm * ynorm)  # ||r||  / (||A|| ||x||)
        if Anorm == 0:
            test2 = numpy.inf
        else:
            test2 = root / Anorm  # ||Ar|| / (||A|| ||r||)

        # Estimate  cond(A).
        # In this version we look at the diagonals of  R  in the
        # factorization of the lower Hessenberg matrix,  Q * H = R,
        # where H is the tridiagonal matrix from Lanczos with one
        # extra row, beta(k+1) e_k^T.

        Acond = gmax / gmin

        # See if any of the stopping criteria are satisfied.
        # In rare cases, istop is already -1 from above (Abar = const*I).

        if istop == 0:
            t1 = 1 + test1  # These tests work if tol < eps
            t2 = 1 + test2
            if t2 <= 1:
                istop = 2
            if t1 <= 1:
                istop = 1

            if itn >= maxiter:
                istop = 6
            if Acond >= 0.1 / eps:
                istop = 4
            if epsx >= beta1:
                istop = 3
            # epsr = Anorm * ynorm * tol
            # if rnorm <= epsx   : istop = 2
            # if rnorm <= epsr   : istop = 1
            if test2 <= tol:
                istop = 2
            if test1 <= tol:
                istop = 1

        if callback is not None:
            callback(x)

        if istop != 0:
            break

    if istop == 6:
        info = maxiter
    else:
        info = 0

    return x, info
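An end-to-end sketch for the solver above on a small symmetric positive-definite system (assumes cupy and the _make_system/_check_symmetric helpers the function imports):

import cupy

n = 100
A = cupy.random.rand(n, n)
A = A + A.T + n * cupy.eye(n)       # symmetric and diagonally dominant
b = cupy.random.rand(n)

x, info = minres(A, b, tol=1e-10)
print(info, float(cupy.linalg.norm(A @ x - b)))   # info == 0 on convergence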
Code Example #20
try:
    import cupy as xp
    gpu = True
except ImportError:
    import numpy as xp
    gpu = False
from random import shuffle, seed
from function_utils import distance_function, top_k
from scorer import scorer

seed(777)
eps = xp.finfo(float).eps


class bilingual_space:
    def __init__(self, src_space, tgt_space):
        self.src_space = src_space
        self.tgt_space = tgt_space
        self.src_size = src_space.shape[0]
        self.tgt_size = tgt_space.shape[0]

    def learn_mapping(self, seeds, loss_type, **kwargs):
        """
            Learn the mapping function. The objective is an l2 or a hinge
            loss; an orthogonality constraint is optional.

            -Input:
            seeds: A list of two lists. seeds[0][i] and seeds[1][i] specify
                   a seed pair

            loss_type: 'l2' or 'hinge'
Code Example #21
def matrix_rank(x: Array,
                /,
                *,
                rtol: Optional[Union[float, Array]] = None) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.matrix_rank <numpy.matrix_rank>`.

    See its docstring for more information.
    """
    # Note: this is different from np.linalg.matrix_rank, which supports 1
    # dimensional arrays.
    if x.ndim < 2:
        raise np.linalg.LinAlgError(
            "1-dimensional array given. Array must be at least two-dimensional"
        )
    S = np.linalg.svd(x._array, compute_uv=False)
    if rtol is None:
        tol = S.max(axis=-1, keepdims=True) * max(x.shape[-2:]) * np.finfo(
            S.dtype).eps
    else:
        if isinstance(rtol, Array):
            rtol = rtol._array
        # Note: this is different from np.linalg.matrix_rank, which does not multiply
        # the tolerance by the largest singular value.
        tol = S.max(axis=-1, keepdims=True) * np.asarray(rtol)[..., np.newaxis]
    return Array._new(np.count_nonzero(S > tol, axis=-1))


# Note: this function is new in the array API spec. Unlike transpose, it only
# transposes the last two axes.
def matrix_transpose(x: Array, /) -> Array:
    if x.ndim < 2:
        raise ValueError(
            "x must be at least 2-dimensional for matrix_transpose")
Code Example #22
File: template.py Project: mritools/cupyimg
def match_template(image,
                   template,
                   pad_input=False,
                   mode="constant",
                   constant_values=0):
    """Match a template to a 2-D or 3-D image using normalized correlation.

    The output is an array with values between -1.0 and 1.0. The value at a
    given position corresponds to the correlation coefficient between the image
    and the template.

    For `pad_input=True` matches correspond to the center and otherwise to the
    top-left corner of the template. To find the best match you must search for
    peaks in the response (output) image.

    Parameters
    ----------
    image : (M, N[, D]) array
        2-D or 3-D input image.
    template : (m, n[, d]) array
        Template to locate. It must be `(m <= M, n <= N[, d <= D])`.
    pad_input : bool
        If True, pad `image` so that output is the same size as the image, and
        output values correspond to the template center. Otherwise, the output
        is an array with shape `(M - m + 1, N - n + 1)` for an `(M, N)` image
        and an `(m, n)` template, and matches correspond to origin
        (top-left corner) of the template.
    mode : see `numpy.pad`, optional
        Padding mode.
    constant_values : see `numpy.pad`, optional
        Constant values used in conjunction with ``mode='constant'``.

    Returns
    -------
    output : array
        Response image with correlation coefficients.

    Notes
    -----
    Details on the cross-correlation are presented in [1]_. This implementation
    uses FFT convolutions of the image and the template. Reference [2]_
    presents similar derivations but the approximation presented in this
    reference is not used in our implementation.

    This CuPy implementation does not force the image to float64 internally,
    but will use float32 for single-precision inputs.

    References
    ----------
    .. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light
           and Magic.
    .. [2] Briechle and Hanebeck, "Template Matching using Fast Normalized
           Cross Correlation", Proceedings of the SPIE (2001).
           :DOI:`10.1117/12.421129`

    Examples
    --------
    >>> import cupy as cp
    >>> template = cp.zeros((3, 3))
    >>> template[1, 1] = 1
    >>> template
    array([[ 0.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  0.]])
    >>> image = cp.zeros((6, 6))
    >>> image[1, 1] = 1
    >>> image[4, 4] = -1
    >>> image
    array([[ 0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0., -1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.]])
    >>> result = match_template(image, template)
    >>> cp.round(result, 3)
    array([[ 1.   , -0.125,  0.   ,  0.   ],
           [-0.125, -0.125,  0.   ,  0.   ],
           [ 0.   ,  0.   ,  0.125,  0.125],
           [ 0.   ,  0.   ,  0.125, -1.   ]])
    >>> result = match_template(image, template, pad_input=True)
    >>> cp.round(result, 3)
    array([[-0.125, -0.125, -0.125,  0.   ,  0.   ,  0.   ],
           [-0.125,  1.   , -0.125,  0.   ,  0.   ,  0.   ],
           [-0.125, -0.125, -0.125,  0.   ,  0.   ,  0.   ],
           [ 0.   ,  0.   ,  0.   ,  0.125,  0.125,  0.125],
           [ 0.   ,  0.   ,  0.   ,  0.125, -1.   ,  0.125],
           [ 0.   ,  0.   ,  0.   ,  0.125,  0.125,  0.125]])
    """
    check_nD(image, (2, 3))

    if image.ndim < template.ndim:
        raise ValueError("Dimensionality of template must be less than or "
                         "equal to the dimensionality of image.")
    if any(si < st for si, st in zip(image.shape, template.shape)):
        raise ValueError("Image must be larger than template.")

    image_shape = image.shape

    float_dtype = cp.promote_types(image.dtype, cp.float32)
    image = cp.asarray(image, dtype=float_dtype)
    template = cp.asarray(template, dtype=float_dtype)

    pad_width = tuple((width, width) for width in template.shape)
    if mode == "constant":
        image = cp.pad(
            image,
            pad_width=pad_width,
            mode=mode,
            constant_values=constant_values,
        )
    else:
        image = cp.pad(image, pad_width=pad_width, mode=mode)

    # Use special case for 2-D images for much better performance in
    # computation of integral images
    if image.ndim == 2:
        image_window_sum = _window_sum_2d(image, template.shape)
        image_window_sum2 = _window_sum_2d(image * image, template.shape)
    elif image.ndim == 3:
        image_window_sum = _window_sum_3d(image, template.shape)
        image_window_sum2 = _window_sum_3d(image * image, template.shape)

    template_mean = template.mean()
    template_volume = _prod(template.shape)
    template_ssd = template - template_mean
    template_ssd *= template_ssd
    template_ssd = cp.sum(template_ssd)

    if image.ndim == 2:
        xcorr = fftconvolve(image, template[::-1, ::-1], mode="valid")[1:-1,
                                                                       1:-1]
    elif image.ndim == 3:
        xcorr = fftconvolve(image, template[::-1, ::-1, ::-1],
                            mode="valid")[1:-1, 1:-1, 1:-1]

    numerator = xcorr - image_window_sum * template_mean

    denominator = image_window_sum2
    cp.multiply(image_window_sum, image_window_sum, out=image_window_sum)
    cp.divide(image_window_sum, template_volume, out=image_window_sum)
    denominator -= image_window_sum
    denominator *= template_ssd
    cp.maximum(denominator, 0,
               out=denominator)  # sqrt of negative number not allowed
    cp.sqrt(denominator, out=denominator)

    response = cp.zeros_like(xcorr, dtype=np.float64)

    # avoid zero-division
    mask = denominator > cp.finfo(np.float64).eps

    response[mask] = numerator[mask] / denominator[mask]

    slices = []
    for i in range(template.ndim):
        if pad_input:
            d0 = (template.shape[i] - 1) // 2
            d1 = d0 + image_shape[i]
        else:
            d0 = template.shape[i] - 1
            d1 = d0 + image_shape[i] - template.shape[i] + 1
        slices.append(slice(d0, d1))

    return response[tuple(slices)]
Code Example #23
def mat_mwu_gpu(a_mat, b_mat, melt: bool, effect: str, use_continuity=True):
    """
    Compute rank-biserial correlations and Mann-Whitney statistics
    between every column-column pair of a_mat (continuous) and b_mat (binary).

    In the case that a_mat or b_mat has a single column, the results are
    re-formatted with the multiple hypothesis-adjusted q-value also returned.

    Parameters
    ----------
    a_mat: Pandas DataFrame
        Continuous set of observations, with rows as samples and columns
        as labels.
    b_mat: Pandas DataFrame
        Binary set of observations, with rows as samples and columns as labels.
        Required to be castable to boolean datatype.
    melt: boolean
        Whether or not to melt the outputs into columns.
    use_continuity: bool
        Whether or not to use a continuity correction. True by default.
    effect: "mean", "median", or "rank_biserial"
        The effect statistic.

    Returns
    -------
    effects: rank-biserial correlations
    pvals: -log10 p-values of correlations
    """

    if effect not in ["rank_biserial"]:

        raise ValueError("effect must be 'rank_biserial'")

    a_has_nan = a_mat.isna().sum().sum() > 0
    b_has_nan = b_mat.isna().sum().sum() > 0

    if a_has_nan or b_has_nan:

        raise ValueError("a_mat and b_mat cannot have missing values")

    a_mat, b_mat = precheck_align(a_mat, b_mat, np.float64, bool)

    a_names = a_mat.columns
    b_names = b_mat.columns

    a_ranks = a_mat.apply(rankdata)
    a_ties = a_ranks.apply(tiecorrect)

    a_ranks = cp.array(a_ranks)

    a_mat, b_mat = cp.array(a_mat), cp.array(b_mat)
    b_mat = b_mat.astype(cp.bool_)

    a_num_cols = a_mat.shape[1]  # number of variables in A
    b_num_cols = b_mat.shape[1]  # number of variables in B

    a_mat = cp.array(a_mat).astype(cp.float64)
    b_pos = b_mat.astype(cp.float64)
    b_neg = (~b_mat).astype(cp.float64)

    pos_ns = b_pos.sum(axis=0)
    neg_ns = b_neg.sum(axis=0)

    pos_ns = cp.vstack([pos_ns] * a_num_cols)
    neg_ns = cp.vstack([neg_ns] * a_num_cols)

    pos_ranks = cp.dot(a_ranks.T, b_pos)

    u1 = pos_ns * neg_ns + (pos_ns * (pos_ns + 1)) / 2.0 - pos_ranks
    u2 = pos_ns * neg_ns - u1

    # temporarily mask zeros
    n_prod = pos_ns * neg_ns
    zero_prod = n_prod == 0
    n_prod[zero_prod] = 1

    effects = 2 * u2 / (pos_ns * neg_ns) - 1

    # zero out entries where one of the groups is empty
    effects[zero_prod] = 0

    a_ties = cp.vstack([cp.array(a_ties)] * b_num_cols).T

    #     if T == 0:
    #         raise ValueError('All numbers are identical in mannwhitneyu')

    sd = cp.sqrt(a_ties * pos_ns * neg_ns * (pos_ns + neg_ns + 1) / 12.0)

    meanrank = pos_ns * neg_ns / 2.0 + 0.5 * use_continuity
    bigu = cp.maximum(u1, u2)

    # temporarily mask zeros
    sd_0 = sd == 0
    sd[sd_0] = 1

    z = (bigu - meanrank) / sd

    z[sd_0] = 0

    # compute p values
    pvals = 2 * (1 - ndtr(cp.abs(z)))

    # account for small p-values rounding to 0
    pvals[pvals == 0] = cp.finfo(cp.float64).tiny

    pvals = -cp.log10(pvals)

    pvals = pd.DataFrame(pvals, columns=b_names, index=a_names)
    effects = pd.DataFrame(effects, columns=b_names, index=a_names)
    pos_ns = pd.DataFrame(pos_ns, columns=b_names, index=a_names)
    neg_ns = pd.DataFrame(neg_ns, columns=b_names, index=a_names)

    effects = effects.fillna(0)
    pvals = pvals.fillna(1)

    if melt:

        return melt_mwu(effects, pvals, pos_ns, neg_ns, effect)

    return effects, pvals
Code Example #24
File: _routines_poly.py Project: the-lay/cupy
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """Returns the least squares fit of polynomial of degree deg
    to the data y sampled at x.

    Args:
        x (cupy.ndarray): x-coordinates of the sample points of shape (M,).
        y (cupy.ndarray): y-coordinates of the sample points of shape
            (M,) or (M, K).
        deg (int): degree of the fitting polynomial.
        rcond (float, optional): relative condition number of the fit.
            The default value is ``len(x) * eps``.
        full (bool, optional): indicator of the return value nature.
            When False (default), only the coefficients are returned.
            When True, diagnostic information is also returned.
        w (cupy.ndarray, optional): weights applied to the y-coordinates
            of the sample points of shape (M,).
        cov (bool or str, optional): if given, returns the coefficients
            along with the covariance matrix.

    Returns:
        cupy.ndarray: of shape (deg + 1,) or (deg + 1, K).
            Polynomial coefficients from highest to lowest degree
        tuple (cupy.ndarray, int, cupy.ndarray, float):
            Present only if ``full=True``.
            Sum of squared residuals of the least-squares fit,
            rank of the scaled Vandermonde coefficient matrix,
            its singular values, and the specified value of ``rcond``.
        cupy.ndarray: of shape (M, M) or (M, M, K).
            Present only if ``full=False`` and ``cov=True``.
            The covariance matrix of the polynomial coefficient estimates.

    .. warning::

        numpy.RankWarning: The rank of the coefficient matrix in the
        least-squares fit is deficient. It is raised if ``full=False``.

    .. seealso:: :func:`numpy.polyfit`

    """
    if x.dtype.char == 'e' and y.dtype.kind == 'b':
        raise NotImplementedError('float16 x and bool y are not'
                                  ' currently supported')
    if y.dtype == numpy.float16:
        raise TypeError('float16 y are not supported')

    x = _polyfit_typecast(x)
    y = _polyfit_typecast(y)
    deg = int(deg)

    if deg < 0:
        raise ValueError('expected deg >= 0')
    if x.ndim != 1:
        raise TypeError('expected 1D vector for x')
    if x.size == 0:
        raise TypeError('expected non-empty vector for x')
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError('expected 1D or 2D array for y')
    if x.size != y.shape[0]:
        raise TypeError('expected x and y to have same length')

    lhs = cupy.polynomial.polynomial.polyvander(x, deg)[:, ::-1]
    rhs = y

    if w is not None:
        w = _polyfit_typecast(w)
        if w.ndim != 1:
            raise TypeError('expected a 1-d array for weights')
        if w.size != x.size:
            raise TypeError('expected w and y to have the same length')

        lhs *= w[:, None]
        if rhs.ndim == 2:
            w = w[:, None]
        rhs *= w

    if rcond is None:
        rcond = x.size * cupy.finfo(x.dtype).eps

    scale = cupy.sqrt((cupy.square(lhs)).sum(axis=0))
    lhs /= scale
    c, resids, rank, s = cupy.linalg.lstsq(lhs, rhs, rcond)
    if y.ndim > 1:
        scale = scale.reshape(-1, 1)
    c /= scale

    order = deg + 1
    if rank != order and not full:
        msg = 'Polyfit may be poorly conditioned'
        warnings.warn(msg, numpy.RankWarning, stacklevel=4)

    if full:
        if resids.dtype.kind == 'c':
            resids = cupy.absolute(resids)
        return c, resids, rank, s, rcond
    if cov:
        base = cupy.linalg.inv(cupy.dot(lhs.T, lhs))
        base /= cupy.outer(scale, scale)

        if cov == 'unscaled':
            factor = 1
        elif x.size > order:
            factor = resids / (x.size - order)
        else:
            raise ValueError('the number of data points must exceed order'
                             ' to scale the covariance matrix')

        if y.ndim != 1:
            base = base[..., None]
        return c, base * factor

    return c
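A usage sketch (assumes cupy and the helpers the function above imports): recover quadratic coefficients from noisy samples; coefficients come back from highest to lowest degree.

import cupy

x = cupy.linspace(-1, 1, 50)
y = 2.0 * x ** 2 - 3.0 * x + 1.0 + 0.01 * cupy.random.randn(50)
coeffs = polyfit(x, y, deg=2)
print(coeffs)                       # approximately [ 2., -3., 1.]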
Code Example #25
def net(self, par, X):
    y_linear = cp.dot(X, par['weights']) + par['bias']
    # clamp the logits for numerical stability before the softmax
    y_linear = cp.minimum(y_linear, -cp.log(cp.finfo(float).eps))
    y_linear = cp.maximum(y_linear, -cp.log(1. / cp.finfo(float).tiny - 1.0))
    yhat = self.softmax(y_linear)
    return yhat