def testEstimateAnomalyLikelihoods(self):
    """
    This calls estimateAnomalyLikelihoods to estimate the distribution on fake
    data and validates the results.
    """

    # Generate an estimate using fake distribution of anomaly scores.
    data1 = _generateSampleData(mean=0.2)

    likelihoods, avgRecordList, estimatorParams = (
      an.estimateAnomalyLikelihoods(data1[0:1000])
    )
    self.assertEqual(len(likelihoods), 1000)
    self.assertEqual(len(avgRecordList), 1000)
    self.assertTrue(an.isValidEstimatorParams(estimatorParams))

    # Check that the sum is correct
    avgParams = estimatorParams["movingAverage"]
    total = 0
    for v in avgRecordList:
      total = total + v[2]
    self.assertEqual(avgParams["total"], total)

    # Check that the estimated mean is correct
    dParams = estimatorParams["distribution"]
    self.assertWithinEpsilon(dParams["mean"],
                             total / float(len(avgRecordList)))

    # Number of points with lower than 2% probability should be pretty low
    # but not zero. Can't use exact 2% here due to random variations
    self.assertLessEqual(cupy.sum(likelihoods < 0.02), 50)
    self.assertGreaterEqual(cupy.sum(likelihoods < 0.02), 1)
Example #2
 def test_22(self):
     N = 32
     M = 4
     Nd = 8
     D = cp.random.randn(Nd, Nd, M)
     D /= cp.sqrt(cp.sum(D**2, axis=(0, 1)))
     X0 = cp.zeros((N, N, M))
     xr = cp.random.randn(N, N, M)
     xp = cp.abs(xr) > 3
     X0[xp] = cp.random.randn(X0[xp].size)
     S = cp.sum(sl.fftconv(D, X0), axis=2)
     lmbda = 1e-3
     opt = cbpdn.ConvBPDN.Options(
         {'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 1e-5,
          'rho': 5e-1, 'AutoRho': {'Enabled': False}})
     bp = cbpdn.ConvBPDN(D, S, lmbda, opt)
     Xp = bp.solve()
     epsilon = cp.linalg.norm(bp.reconstruct(Xp).squeeze() - S)
     opt = cbpdn.ConvMinL1InL2Ball.Options(
         {'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 1e-5,
          'rho': 2e2, 'RelaxParam': 1.0, 'AutoRho': {'Enabled': False}})
     bc = cbpdn.ConvMinL1InL2Ball(D, S, epsilon=epsilon, opt=opt)
     Xc = bc.solve()
     assert cp.linalg.norm(Xp - Xc) / cp.linalg.norm(Xp) < 1e-3
     assert(cp.abs(cp.linalg.norm(Xp.ravel(), 1) -
                   cp.linalg.norm(Xc.ravel(), 1)) < 1e-3)
Example #3
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Returns the variance along an axis.

    Args:
        a (cupy.ndarray): Array to compute the variance of.
        axis (int): Axis along which the variance is computed. The flattened
            array is used by default.
        dtype: Data type specifier.
        out (cupy.ndarray): Output array.
        ddof (int): Delta degrees of freedom; the divisor used is
            ``N - ddof``, where ``N`` is the number of elements reduced over.
        keepdims (bool): If ``True``, the axis is retained as an axis of
            size one.

    Returns:
        cupy.ndarray: The variance of the input array along the axis.

    .. seealso:: :func:`numpy.var`

    """
    if axis is None:
        axis = tuple(range(a.ndim))
    if not isinstance(axis, tuple):
        axis = (axis,)

    if dtype is None and issubclass(a.dtype.type,
                                    (numpy.integer, numpy.bool_)):
        dtype = numpy.dtype(numpy.float64)

    arrmean = mean(a, axis=axis, dtype=dtype, keepdims=True)

    x = cupy.subtract(a, arrmean, dtype=dtype)
    cupy.square(x, x)
    ret = cupy.sum(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
    rcount = max(_count_reduce_items(a, axis) - ddof, 0)
    return cupy.multiply(ret, ret.dtype.type(1.0 / rcount), out=ret)
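
A minimal usage sketch for var() (not from the original source; it assumes
the surrounding module provides cupy, numpy, mean(), and
_count_reduce_items(), which this function calls):

import cupy
import numpy

a = cupy.arange(12, dtype=cupy.float32).reshape(3, 4)
v0 = var(a)                  # variance over the flattened array
v1 = var(a, axis=0, ddof=1)  # per-column sample variance
# Cross-check against NumPy on the host:
assert numpy.allclose(cupy.asnumpy(v1),
                      numpy.var(cupy.asnumpy(a), axis=0, ddof=1))
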
Example #4
def entropy(pk, qk=None, base=None, axis=0):
    """Calculate the entropy of a distribution for given probability values.

    If only probabilities ``pk`` are given, the entropy is calculated as
    ``S = -sum(pk * log(pk), axis=axis)``.

    If ``qk`` is not None, then compute the Kullback-Leibler divergence
    ``S = sum(pk * log(pk / qk), axis=axis)``.

    This routine will normalize ``pk`` and ``qk`` if they don't sum to 1.

    Args:
        pk (ndarray): Defines the (discrete) distribution. ``pk[i]`` is the
            (possibly unnormalized) probability of event ``i``.
        qk (ndarray, optional): Sequence against which the relative entropy is
            computed. Should be in the same format as ``pk``.
        base (float, optional): The logarithmic base to use, defaults to ``e``
            (natural logarithm).
        axis (int, optional): The axis along which the entropy is calculated.
            Default is 0.

    Returns:
        S (cupy.ndarray): The calculated entropy.

    """
    if pk.dtype.kind == 'c' or qk is not None and qk.dtype.kind == 'c':
        raise TypeError("complex dtype not supported")

    float_type = cupy.float32 if pk.dtype.char in 'ef' else cupy.float64
    pk = pk.astype(float_type, copy=False)
    pk = _normalize(pk, axis)
    if qk is None:
        vec = special.entr(pk)
    else:
        if qk.shape != pk.shape:
            raise ValueError("qk and pk must have same shape.")
        qk = qk.astype(float_type, copy=False)
        qk = _normalize(qk, axis)
        vec = special.rel_entr(pk, qk)
    s = cupy.sum(vec, axis=axis)
    if base is not None:
        s /= math.log(base)
    return s
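
An illustrative call pattern for entropy() (not from the original source; it
assumes the module-level imports this function relies on: math, cupy, the
special module with entr/rel_entr, and the _normalize() helper):

import cupy

pk = cupy.asarray([0.25, 0.25, 0.25, 0.25])
print(entropy(pk, base=2))   # uniform over 4 events -> 2.0 bits
qk = cupy.asarray([0.5, 0.25, 0.125, 0.125])
print(entropy(pk, qk))       # KL divergence D(pk || qk), in nats
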
Example #5
def get_spots(specmin, nspec, wavelengths, psfdata):
    '''Calculate PSF spots for the specified spectra and wavelengths

    Args:
        specmin: first spectrum to include
        nspec: number of spectra to evaluate spots for
        wavelengths: 1D array of wavelengths
        psfdata: PSF data from io.read_psf() of Gauss Hermite PSF file

    Returns:
        spots: 4D array[ispec, iwave, ny, nx] of PSF spots
        corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot

    '''
    nwave = len(wavelengths)
    p = evalcoeffs(psfdata, wavelengths, specmin, nspec)
    nx = 2 * p['HSIZEX'] + 1
    ny = 2 * p['HSIZEY'] + 1
    spots = cp.zeros((nspec, nwave, ny, nx))
    #use mark's numblocks and blocksize method
    blocksize = 256
    numblocks = (nwave + blocksize - 1) // blocksize
    for ispec in range(nspec):
        pGHx, pGHy = calc_pgh(ispec, wavelengths, p)
        ghc = cp.asarray(p['GH'][:, :, ispec, :])
        mspots = cp.zeros((nwave, ny, nx))  #empty every time!
        _multispot[numblocks, blocksize](pGHx, pGHy, ghc, mspots)
        spots[ispec] = mspots

    #- ensure positivity and normalize
    #- TODO: should this be within multispot itself?
    spots = spots.clip(0.0)
    norm = cp.sum(spots,
                  axis=(2, 3))  #- norm[nspec, nwave] = sum over each spot
    spots = (spots.T /
             norm.T).T  #- transpose magic for numpy array broadcasting

    #- Define corners of spots
    #- extra 0.5 is because X and Y are relative to center of pixel not edge
    xc = np.floor(p['X'] - p['HSIZEX'] + 0.5).astype(int)
    yc = np.floor(p['Y'] - p['HSIZEY'] + 0.5).astype(int)

    return spots, (xc, yc)
Example #6
def softmax_loss_vectorized(W, X, y, reg):
    """
    Softmax loss function, vectorized version.

    Inputs and outputs are the same as softmax_loss_naive.
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)
    num_train = X.shape[0]

    #cupy
    W = cp.array(W)
    dW = cp.array(dW)
    X = cp.array(X)
    y = cp.array(y)

    #############################################################################
    # TODO: Compute the softmax loss and its gradient using no explicit loops.  #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    scores = cp.dot(X, W)
    scores_max = scores.max(axis=1)[:, cp.newaxis]
    scores -= scores_max  #broadcast -
    scores = cp.exp(scores)
    total_scores = scores.sum(axis=1)[:, cp.newaxis]  #broadcast /
    stable_scores = scores / total_scores
    # stable_scores = cp.asnumpy(stable_scores)
    loss = cp.mean(-cp.log(stable_scores[cp.arange(num_train), y]))
    loss += reg * cp.sum(W * W)

    stable_scores[cp.arange(num_train), y] -= 1
    dW = cp.dot(cp.transpose(X), stable_scores)
    dW /= num_train
    dW += reg * 2 * W

    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    return cp.asnumpy(loss), cp.asnumpy(dW)
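
A quick smoke test for softmax_loss_vectorized() (not from the original
source; shapes and constants are illustrative):

import numpy as np
import cupy as cp

num_train, dim, num_classes = 64, 32, 10
X = np.random.randn(num_train, dim).astype(np.float32)
y = np.random.randint(num_classes, size=num_train)
W = 0.001 * np.random.randn(dim, num_classes).astype(np.float32)
loss, dW = softmax_loss_vectorized(W, X, y, reg=1e-3)
# With near-zero weights the loss should be close to log(num_classes).
print(loss, np.log(num_classes))
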
Example #7
    def sense_at_angle(angle):
        sense_angle = SlimeWorld.slime_angle + angle
        sense_dir = (cp.array([cp.sin(sense_angle),
                               cp.cos(sense_angle)]).T * SlimeWorld.sense_dist)
        sense_pos = SlimeWorld.slime_pos + sense_dir
        SlimeWorld.clip(sense_pos)
        int_sense_pos = sense_pos.astype(int)
        # return SlimeWorld.cells[int_sense_pos[:, 0], int_sense_pos[:, 1], 0]

        # return cp.sum(
        #     SlimeWorld.cells[int_sense_pos[:, 0], int_sense_pos[:, 1], :], axis=-1
        # )

        return -cp.sum(
            cp.abs(SlimeWorld.cells[int_sense_pos[:, 0], int_sense_pos[:,
                                                                       1], :] -
                   SlimeWorld.slime_color),
            axis=-1,
        )
Example #8
    def test(self, x_test, y_test, batch_size):
        start = time.time()
        y_hat = np.zeros_like(y_test)
        for idx in range(int(x_test.shape[0]/batch_size)):
            x_test_batch = x_test.take(indices=range(idx * batch_size, min((idx + 1) * batch_size, x_test.shape[0])), axis=0)
            y_test_batch = y_test.take(indices=range(idx * batch_size, min((idx + 1) * batch_size, y_test.shape[0])), axis=0)

            y_hat_batch = self.forward(x_test_batch)
            loss_batch = y_hat_batch - y_test_batch

            y_hat[idx*batch_size : idx*batch_size + y_hat_batch.shape[0], :] = y_hat_batch

        self.test_accuracy = (np.argmax(y_hat, axis=1) == np.argmax(y_test, axis=1)).mean().tolist()
        self.test_loss = (-np.sum(y_test * np.log(np.clip(y_hat, 1e-20, 1.))) / y_hat.shape[0]).tolist()

        h, r = divmod(time.time() - start, 3600)
        m, s = divmod(r, 60)
        time_per_epoch = "{:0>2}:{:0>2}:{:05.2f}".format(int(h), int(m), s)
        print("test loss: {:.5f} | test accuracy: {:.5f} | time: {}".format(self.test_loss, self.test_accuracy, time_per_epoch))
Example #9
    def predict_log_proba(self, X) -> CumlArray:
        """
        Return log-probability estimates for the test vector X.

        """
        if has_scipy():
            from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
        else:
            from cuml.common.import_utils import dummy_function_always_false \
                as scipy_sparse_isspmatrix

        # todo: use a sparse CumlArray style approach when ready
        # https://github.com/rapidsai/cuml/issues/2216
        if scipy_sparse_isspmatrix(X) or cupyx.scipy.sparse.isspmatrix(X):
            X = X.tocoo()
            rows = cp.asarray(X.row, dtype=X.row.dtype)
            cols = cp.asarray(X.col, dtype=X.col.dtype)
            data = cp.asarray(X.data, dtype=X.data.dtype)
            X = cupyx.scipy.sparse.coo_matrix((data, (rows, cols)),
                                              shape=X.shape)
        else:
            X = input_to_cupy_array(X, order='K').array

        jll = self._joint_log_likelihood(X)

        # normalize by P(X) = P(f_1, ..., f_n)

        # Compute log(sum(exp()))

        # Subtract max in exp to prevent inf
        a_max = cp.amax(jll, axis=1, keepdims=True)

        exp = cp.exp(jll - a_max)
        logsumexp = cp.log(cp.sum(exp, axis=1))

        a_max = cp.squeeze(a_max, axis=1)

        log_prob_x = a_max + logsumexp

        if log_prob_x.ndim < 2:
            log_prob_x = log_prob_x.reshape((1, log_prob_x.shape[0]))
        result = jll - log_prob_x.T
        return result
Example #10
def check_matmul_grad():
    seq_length = 2
    batchsize = 3
    feature_dimension = 4
    X = chainer.Variable(
        np.arange(0, batchsize * feature_dimension * seq_length).astype(
            np.float32).reshape((batchsize, feature_dimension, seq_length)))
    W = chainer.Variable(
        np.arange(0, feature_dimension**2 * 3).astype(np.float32).reshape(
            (feature_dimension * 3, feature_dimension)))

    U = functions.connection.convolution_2d.convolution_2d(
        X[:, :, None, :], W[..., None, None])[:, :, 0]
    _U = np.matmul(W.data, X.data)
    loss = functions.sum(U)
    loss.backward()
    print(W.data)
    print(X.grad)
    print(xp.sum(W.data, axis=0))
Example #11
    def backward(self, dout, optimizer=None):
        NF, CF, HF, WF = self.W.shape

        db = cp.sum(dout, axis=(0, 2, 3))
        db = db.reshape(NF, -1)

        dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(NF, -1)
        dW = dout_reshaped @ self.X_col.T
        dW = dW.reshape(self.W.shape)
        
        if optimizer is not None:
            self.b = optimizer(self.b, db)
            self.W = optimizer(self.W, dW)

        W_reshape = self.W.reshape(NF, -1)
        dX_col = W_reshape.T @ dout_reshaped
        dX = self.col2im_indices(dX_col.astype(cp.float32))

        return dX
Example #12
def singleBackward(da_curr, w_curr, b_curr, z_curr, a_prev, activation=relu):
    m = a_prev.shape[1]

    # hax. change later
    if activation.__name__ == "relu":
        backact = reluBackward
    elif activation.__name__ == "sigmoid":
        backact = sigmoidBackward
    else:
        backact = reluBackward

    dzcurr = backact(da_curr, z_curr)
    if usegpu == True:
        a_prev = np.asarray(a_prev)
    dwcurr = np.dot(dzcurr, a_prev.T) / m
    dbcurr = np.sum(dzcurr, axis=1, keepdims=True) / m
    daprev = np.dot(w_curr.T, dzcurr)

    return daprev, dwcurr, dbcurr
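
A shape sketch for one backward step (not from the original source; it
assumes the module-level relu/reluBackward helpers and the usegpu flag this
function references, with np bound to numpy or cupy accordingly):

import numpy as np

da_curr = np.random.randn(4, 8)  # upstream gradient, (units, batch)
z_curr = np.random.randn(4, 8)   # pre-activations of the current layer
a_prev = np.random.randn(6, 8)   # activations of the previous layer
w_curr = np.random.randn(4, 6)
b_curr = np.random.randn(4, 1)
daprev, dwcurr, dbcurr = singleBackward(da_curr, w_curr, b_curr,
                                        z_curr, a_prev)
print(daprev.shape, dwcurr.shape, dbcurr.shape)  # (6, 8) (4, 6) (4, 1)
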
Example #13
 def test_01_02_circle_with_noise(self):
     """Test that the Canny filter finds the circle outlines
     in a noisy image"""
     cp.random.seed(0)
     i, j = cp.mgrid[-200:200, -200:200].astype(float) / 200
     c = cp.abs(cp.sqrt(i * i + j * j) - 0.5) < 0.02
     cf = c.astype(float) * 0.5 + cp.random.uniform(size=c.shape) * 0.5
     result = feature.canny(cf, 4, 0.1, 0.2, cp.ones(c.shape, bool))
     #
     # erode and dilate the circle to get rings that should contain the
     # outlines
     #
     cd = binary_dilation(c, iterations=4, brute_force=True)
     ce = binary_erosion(c, iterations=4, brute_force=True)
     cde = cp.logical_and(cd, cp.logical_not(ce))
     self.assertTrue(cp.all(cde[result]))
     point_count = cp.sum(result)
     self.assertTrue(point_count > 1200)
     self.assertTrue(point_count < 1600)
Example #14
def accuracy(y_pred, y_true):
    """
    Calculate accuracy of one-hot encoded output.
    
    Parameters
    ----------
    y_pred : cp.array
        One-hot encoded predicted labels.
    y_true : cp.array
        One-hot encoded true labels.
    """

    # Calculate labels.
    labels_true = y_true.argmax(axis=1)
    labels_pred = y_pred.argmax(axis=1)

    # Calculate accuracy.
    n_good = cp.sum(labels_true - labels_pred == 0)
    return n_good / len(y_true)
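
A toy check for accuracy() (not from the original source; assumes cupy
imported as cp, as in the function above):

import cupy as cp

y_true = cp.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = cp.asarray([[0.9, 0.05, 0.05],
                     [0.2, 0.7, 0.1],
                     [0.3, 0.4, 0.3]])
print(accuracy(y_pred, y_true))  # 2 of 3 correct -> ~0.667
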
Example #15
def cel(y_pred, y_true):
    """
    Perform cross-entropy loss function.

    Parameters
    ----------
    y_true : cp.array of floats, shape (number of examples, number of classes)
        True labels, one hot encoded.
    y_pred : cp.array of floats, shape (number of examples, number of classes)
        Predicted labels, one hot encoded.

    Returns
    -------
    cp.array of floats, shape (number of examples)
       Loss values per example.
    """
    
    epsilon = 1e-16
    return -1. * cp.sum(cp.multiply(y_true, cp.log(cp.maximum(y_pred, epsilon))), axis=1)
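
A toy batch for cel() (not from the original source; assumes cupy imported
as cp):

import cupy as cp

y_true = cp.asarray([[1., 0., 0.], [0., 1., 0.]])
y_pred = cp.asarray([[0.8, 0.1, 0.1], [0.3, 0.6, 0.1]])
print(cel(y_pred, y_true))  # per-example losses: [-log 0.8, -log 0.6]
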
Example #16
    def noise_log_likelihood(self):
        """ Calculates the real part of noise log-likelihood

        Returns
        -------
        float: The real part of the noise log likelihood

        """
        if np.isnan(self._noise_log_l):
            log_l = 0
            for interferometer in self.interferometers:
                name = interferometer.name
                log_l -= (
                    2.0
                    / self.duration
                    * xp.sum(xp.abs(self.strain[name]) ** 2 / self.psds[name])
                )
            self._noise_log_l = float(log_l)
        return self._noise_log_l
Example #17
def auc(vector_predict, vector_true, gpu=False):
    if gpu:
        vector_predict = cp.array(vector_predict)
        vector_true = cp.array(vector_true)
        pos_indexes = cp.where(vector_true == 1)[0]
        sort_indexes = cp.argsort(vector_predict)
        rank = cp.nonzero(cp.in1d(sort_indexes, pos_indexes))[0]
        return (cp.sum(rank) - len(pos_indexes) *
                (len(pos_indexes) + 1) / 2) / (
                    len(pos_indexes) *
                    (len(vector_predict) - len(pos_indexes)))
    else:
        pos_indexes = np.where(vector_true == 1)[0]
        sort_indexes = np.argsort(vector_predict)
        rank = np.nonzero(np.in1d(sort_indexes, pos_indexes))[0]
        return (np.sum(rank) - len(pos_indexes) *
                (len(pos_indexes) + 1) / 2) / (
                    len(pos_indexes) *
                    (len(vector_predict) - len(pos_indexes)))
Example #18
def scd_fam(x, Np, L, N=None):   #  x = 1024 Np = 256 L = 1
    def sliding_window(x, w, s):
        shape = (x.shape[0], ((x.shape[1] - w) // s + 1), w)
        strides = (x.strides[0], x.strides[1]*s, x.strides[1])
        return as_strided(x, shape, strides)

    # input channelization
    bs = x.shape[0]
    xs = sliding_window(x, Np, L)
    Pe = int(cp.floor(cp.log2(xs.shape[1])))
    P = 2**Pe
    N = L*P
    xs2 = xs[:,0:P,:]
    # windowing
    w = cp.hamming(Np)
    w /= cp.sqrt(cp.sum(w**2))
    xw = xs2 * cp.tile(w, (P,1))
    XF1 = cp.fft.fft(xw, axis=-1)
    XF1 = cp.fft.fftshift(XF1, axes=-1)
    # calculating complex demodulates
    f = cp.arange(Np)/float(Np) - .5
    t = cp.arange(P)*L

    f = cp.tile(f, (P,1))
    t = cp.tile(t.reshape(P,1), (1, Np))

    XD = XF1
    XD *= cp.exp(-1j*2*np.pi*f*t)

    # calculating conjugate products, second FFT and the final matrix
    Sx = cp.zeros((bs, Np, 2*N), dtype=cp.complex64)
    Mp = N//Np//2

    for k in range(Np):
        for l in range(Np):
            XF2 = cp.fft.fft(XD[:,:,k]*cp.conj(XD[:,:,l]), axis=-1)
            XF2 = cp.fft.fftshift(XF2, axes=-1)
            XF2 /= P

            i = (k+l) // 2
            a = int(((k-l)/float(Np) + 1.) * N)
            Sx[:,i,a-Mp:a+Mp] = XF2[:,(P//2-Mp):(P//2+Mp)]
    return Sx   # shape (batch, alpha, f_k)
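
An illustrative call for scd_fam() (not from the original source; it assumes
as_strided is cupy.lib.stride_tricks.as_strided and that numpy is imported
as np, matching the references inside the function):

import cupy as cp

x = cp.random.randn(2, 1024) + 1j * cp.random.randn(2, 1024)
Sx = scd_fam(x.astype(cp.complex64), Np=256, L=1)
print(Sx.shape)  # (batch, alpha, f_k); here (2, 256, 1024)
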
Example #19
def mean_peak_width(image, peak_image=None, target_height=0.5,
                    return_numpy=True):
    """
    Calculate the mean peak width of all given peak positions within a line
    profile.

    Args:

        image: Original line profile used to detect all peaks. This array will be
        further analyzed to better determine the peak positions.

        peak_image: Boolean NumPy array specifying the peak positions in the full
        SLI stack

        target_height: Relative peak height in relation to the prominence of the
        given peak.

        return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
        NumPy array will be returned.

    Returns:

        NumPy array where each entry corresponds to the mean peak width of the
        line profile. The values are in degrees.
    """
    if peak_image is not None:
        gpu_peak_image = cupy.array(peak_image).astype('uint8')
    else:
        gpu_peak_image = peaks(image, return_numpy=False).astype('uint8')
    peak_width_gpu = peak_width(image, gpu_peak_image, target_height,
                                return_numpy=False)
    peak_width_gpu = cupy.sum(peak_width_gpu, axis=-1) / \
                     cupy.maximum(1, cupy.count_nonzero(gpu_peak_image,
                                                        axis=-1))

    del gpu_peak_image
    if return_numpy:
        peak_width_cpu = cupy.asnumpy(peak_width_gpu)
        del peak_width_gpu
        return peak_width_cpu
    else:
        return peak_width_gpu
Example #20
 def int_pdf_multi(self, a_kj, b_kj, systs=None, get=False):
     if systs is None:
         t_ij, h_ij, w_i = self.t_ij, self.h_ij, self.w_i
     else:
         t_ij, h_ij, w_i = systs
     if cp == np:
         return [
             KernelDensityPDF._int_kdpdf1(a_j, b_j, t_ij, h_ij, w_i)
             for a_j, b_j in zip(a_kj, b_kj)
         ]
     else:
         int_k = cp.empty(a_kj.shape[0])
         block_size = 64
         grid_size = a_kj.shape[0] // block_size + 1
         KernelDensityPDF._int_kdpdf1_multi(
             (grid_size, ), (block_size, ),
             (a_kj, b_kj, t_ij, h_ij, w_i, t_ij.shape[0], t_ij.shape[1],
              a_kj.shape[0], int_k))
         int_k = int_k / cp.sum(self.w_i)
         return int_k.get() if get else int_k
Example #21
 def reconstruct_cupy(self, img):
     assert cupy, "No CuPy present"
     self._imgstore = img.copy()
     imf = cp.fft.rfft2(cp.asarray(img)) * cp.asarray(
         self._prefilter[:, 0:self.N // 2 + 1])
     self._carray_cp[:, 0:self.N // 2,
                     0:self.N // 2 + 1] = imf[:, 0:self.N // 2,
                                              0:self.N // 2 + 1]
     self._carray_cp[:, 3 * self.N // 2:2 * self.N,
                     0:self.N // 2 + 1] = imf[:, self.N // 2:self.N,
                                              0:self.N // 2 + 1]
     del imf
     cp._default_memory_pool.free_all_blocks()
     img2 = cp.sum(
         cp.fft.irfft2(self._carray_cp) * cp.asarray(self._reconfactor), 0)
     self._bigimgstore_cp = cp.fft.irfft2(
         cp.fft.rfft2(img2) * self._postfilter_cp[:, 0:self.N + 1])
     del img2
     cp._default_memory_pool.free_all_blocks()
     return self._bigimgstore_cp.get()
Example #22
    def loss(self, x, t):
        """損失関数を求める

        Parameters
        ----------
        x : 入力データ
        t : 教師ラベル

        Returns
        -------
        損失関数の値
        """
        y = self.predict(x)

        weight_decay = 0
        for idx in range(1, self.hidden_layer_num + 2):
            W = self.params['W' + str(idx)]
            weight_decay += 0.5 * self.weight_decay_lambda * np.sum(W**2)

        return self.last_layer.forward(y, t) + weight_decay
Example #23
    def forward(self, xs, ts):
        N, T, V = xs.shape

        if ts.ndim == 3:
            ts = ts.argmax(axis=2)

        mask = (ts != self.ignore_label)

        xs = xs.reshape(N * T, V)
        ts = ts.reshape(N * T)
        mask = mask.reshape(N * T)

        ys = softmax(xs)
        ls = cp.log(ys[cp.arange(N * T), ts])
        ls *= mask
        loss = -cp.sum(ls)
        loss /= mask.sum()

        self.cache = (ts, ys, mask, (N, T, V))
        return loss
Example #24
 def _determine_center_and_radius(data, manual=False, size=25):
     sh = np.concatenate([data.scan_dimensions, data.frame_dimensions])
     c = np.zeros((2, ))
     c[:] = (sh[-1] // 2, sh[-2] // 2)
     c = cp.array(c)
     radius = cp.ones((1, )) * sh[-1] // 2
     inds = cp.array(data.indices[:size, :size].astype(cp.uint32))
     cts = cp.array(data.counts[:size, :size].astype(cp.uint32))
     dc_subset = sparse_to_dense_datacube_crop(inds,
                                               cts, (size, size),
                                               data.frame_dimensions,
                                               c,
                                               radius,
                                               bin=2)
     dcs = cp.sum(dc_subset, (0, 1))
     m1 = dcs.get()
     m = (gaussian(m1.astype(cp.float32), 2) > m1.max() * 3e-1).astype(
         cp.float32)
     r, y0, x0 = get_probe_size(m)
     return 2 * np.array([y0, x0]), r * 2
Example #25
    def backward(self):
        W, b = self.variables
        grads = self.activator._backward(self.grads)
        filter_nums, n_C, filter_h, filter_w = W.shape
        grads = grads.transpose(0, 2, 3, 1).reshape(-1, filter_nums)
        if W.require_grads:
            dW = self.col.T.dot(grads)
            W.grads += dW.transpose(1, 0).reshape(filter_nums, n_C, filter_h,
                                                  filter_w)
        if b.require_grads:
            b.grads += cp.sum(grads, axis=0)
        for layer in self.inbound_layers:
            if layer.require_grads:
                dcol = grads.dot(self.col_W.T)

                layer.grads += col2im(self.input_shape, self.pad_size,
                                      self.filter_size, self.stride, dcol)
            else:
                layer.grads = grads
        del self.output_tensor
Example #26
def ECP(error_weight, orig_weight, set_map):
    orig_shape = error_weight.shape
    error_weight, orig_weight = cp.ravel(error_weight.view(
        cp.uint32)), cp.ravel(orig_weight.view(cp.uint32))
    shape = (len(error_weight) // 16, 16)

    # Reshape so that each row holds 16 consecutive 32-bit words
    error_weight, orig_weight = cp.reshape(error_weight, shape), cp.reshape(
        orig_weight, shape)

    # Calculate stuck bits
    stuck_bits = cp.bitwise_xor(error_weight, orig_weight)
    stuck_bits_sum = cp.sum(stuck_bits, axis=1)
    error = cp.concatenate(cp.in1d(stuck_bits_sum, set_map).nonzero())

    if len(error) == 0:
        return cp.reshape(error_weight, orig_shape).view(cp.float32)
    else:
        error_weight[error, :] = orig_weight[error, :]
        return cp.reshape(error_weight, orig_shape).view(cp.float32)
Example #27
    def forward(self, norms, labels, reconst, inpt):
        self.labels = labels

        int1 = self.relu1(0.9 - norms)
        int2 = self.relu2(norms - 0.1)
        margin_loss = labels * int1**2 + 0.5*(1-labels) * int2**2
        bs, ndim_prev = margin_loss.shape[0], margin_loss.shape[-1]
        margin_loss = cp.sum(margin_loss, axis=-1).mean()
        
        reconst_loss, reconst_grad = self.mse_loss(reconst.reshape(reconst.shape[0],-1), inpt.reshape(inpt.shape[0],-1))
        loss = margin_loss + self.reconst_factor * reconst_loss
        
        margin_grad = cp.ones((bs, ndim_prev)) / float(bs)
        margin_grad_pos = -self.relu1.backward(margin_grad * labels * (2*int1))
        margin_grad_neg = self.relu2.backward(margin_grad * 0.5*(1-labels) * (2*int2))

        margin_grad = margin_grad_pos + margin_grad_neg
        reconst_grad *= self.reconst_factor
        
        return loss, (margin_grad, reconst_grad)
Example #28
def create_test_dataset():
    cp = np  # cpu mode only here
    s1 = np.load(test_path.joinpath('my_conv2_input.npy'))
    s0 = np.copy(s1)
    tmax = np.ceil(4 * sig)
    dt = cp.arange(-tmax, tmax + 1)
    gauss = cp.exp(-dt**2 / (2 * sig**2))
    gauss = (gauss / cp.sum(gauss)).astype(np.float32)

    cNorm = lfilter_cpu(gauss, 1., np.r_[np.ones(s1.shape[0]),
                                         np.zeros(int(tmax))])
    cNorm = cNorm[int(tmax):]

    s1 = lfilter_cpu(gauss,
                     1,
                     np.r_[s1, np.zeros((int(tmax), s1.shape[1]))],
                     axis=0)
    s1 = s1[int(tmax):] / cNorm[:, np.newaxis]

    np.save(test_path.joinpath('my_conv2_output.npy'), s1)
Example #29
def sub_routine(vector_u,
                matrix_V,
                vector_train,
                bias,
                measure,
                topK=500,
                gpu=False):

    train_index = vector_train.nonzero()[1]
    if measure == "Cosine":
        vector_predict = matrix_V.dot(vector_u)
    else:
        if gpu:
            import cupy as cp
            vector_predict = -cp.sum(cp.square(matrix_V - vector_u), axis=1)
        else:
            vector_predict = -np.sum(np.square(matrix_V - vector_u), axis=1)
    if bias is not None:
        if gpu:
            import cupy as cp
            vector_predict = vector_predict + cp.array(bias)
        else:
            vector_predict = vector_predict + bias

    if gpu:
        import cupy as cp
        candidate_index = cp.argpartition(
            -vector_predict, topK + len(train_index))[:topK + len(train_index)]
        vector_predict = candidate_index[
            vector_predict[candidate_index].argsort()[::-1]]
        vector_predict = cp.asnumpy(vector_predict).astype(np.float32)
    else:
        candidate_index = np.argpartition(
            -vector_predict, topK + len(train_index))[:topK + len(train_index)]
        vector_predict = candidate_index[
            vector_predict[candidate_index].argsort()[::-1]]
    vector_predict = np.delete(
        vector_predict,
        np.isin(vector_predict, train_index).nonzero()[0])

    return vector_predict[:topK]
Example #30
def threadInnerFuncTreatment(arr, nameFunc, iName, kwargs):
    """
        Thread inner function that get the array of value points
        from a given test function
    
        Parameters:
            1. arr      : Array of Y values of the baselineFunction
            2. nameFunc : Name of the test function
            3. iName    : loop variable name (i)
            4. kwargs   : List of parameters to pass to the test function as **kwargs

        RETURN -> List: arrVals
    """
    arrVals = []
    itera = [x * C_STEP for x in range(1, int(C_MAX_VALUE/C_STEP))]
    currThreshold = 0
    prev = None

    for i in itera:
        kwargs[iName] = i
        funcCallable, dictKwargs, name = getTestFunctionLoop(nameFunc, iName, **kwargs)
        valCurr = getArrFunc(arr, funcCallable, dictKwargs) + [name] + [i]
        arrVals.append(valCurr)

        if prev is None:
            prev = valCurr
        else:
            valCurr, prev = reshapeAll([valCurr, prev])
            diff = np.array(valCurr[0]) - np.array(prev[0])
            diff = np.sum(diff) / len(diff)
            diff = np.asnumpy(diff).tolist()  # note: np is a cupy alias here; plain numpy has no asnumpy()
            if diff > 0:
                currThreshold = currThreshold + 1
                if currThreshold == TRESHOLD_BREAK_C:
                    break
            else:
                currThreshold = 0

            prev = valCurr

    return arrVals
Example #31
def my_kmedoids(image_data, K, threshold=0):
    """
    This is the cuda implementation of my_kmedoids. Requires cuda to function properly.
    """

    N = image_data.shape[0]
    p = image_data.shape[1]

    medoids = np.zeros((K, p))
    _, medoids = my_kmeans(image_data, K)

    medoids = cp.asarray(medoids)
    medoids_old = cp.zeros(medoids.shape)
    medoids_new = deepcopy(medoids)
    error = cp.linalg.norm(medoids_new - medoids_old)
    image_data = cp.asarray(image_data)
    labels = cp.zeros(N)

    DisMat = cp.zeros((N, K))
    iter_ct = 0
    while error > threshold:
        iter_ct += 1
        #print('K-medoids iteration {}'.format(iter_ct))
        medoids_old = deepcopy(medoids_new)
        for i in range(K):  # assign image_data points to closest centroids
            DisMat[:, i] = cp.linalg.norm(image_data - medoids_new[i], axis=1)
        labels = cp.argmin(DisMat, axis=1)
        for i in range(K):
            cluster = image_data[labels == i]
            DMC = cp.sum(cp.linalg.norm(cluster - medoids_new[i], axis=1))
            DMP = cp.zeros(cluster.shape[0])
            if cluster.shape[0] == 0:
                medoids_new[i] = medoids_old[i]
            else:
                for j in range(cluster.shape[0]):
                    DMP[j] = cp.sum(cp.linalg.norm(cluster - cluster[j], axis=1))
                small_cost_idx = cp.argmin(DMP)
                if DMP[small_cost_idx] < DMC:
                    medoids_new[i] = cluster[small_cost_idx]
        error = cp.linalg.norm(medoids_new - medoids_old)
    return cp.asnumpy(labels.astype(int)), cp.asnumpy(medoids)
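
A minimal driver for my_kmedoids() (not from the original source; it assumes
the my_kmeans() dependency from the same module and a CUDA-capable CuPy
install):

import numpy as np

image_data = np.random.rand(200, 3).astype(np.float32)
labels, medoids = my_kmedoids(image_data, K=4)
print(labels.shape, medoids.shape)  # (200,) (4, 3)
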
Example #32
 def logsumexp(self, attr_set=None):
     if attr_set is None or len(attr_set) == 0:
         xp = cp.get_array_module(self.values)
         if xp == cp:
             values = xp.exp(self.values)
             values = xp.sum(values)
             values = xp.log(values)
             return values
         else:
             return scipy.special.logsumexp(self.values)
     assert (set(attr_set) <= set(self.domain.attr_list))
     sum_attr = list(set(self.domain.attr_list) - set(attr_set))
     sum_attr = tuple(self.domain.index_list(sum_attr))
     if cp.get_array_module(self.values) == cp:
         values = cp.exp(self.values)
         values = cp.sum(values, axis=sum_attr)
         values = cp.log(values)
     else:
         values = scipy.special.logsumexp(self.values, axis=sum_attr)
     return Factor(self.domain.project(attr_set), values,
                   cp.get_array_module(self.values))
Example #33
    def step(self, solution, real_solution, *args, **kwargs):
        """Chi^2 statistics.

        Note that for use in the feasibility method, chi^2 should be divided by the number of detectors.

        Args:
            solution (ndarray): candidate solution.
            real_solution (ndarray): known solution.
            *args: not used, but needed to be here in order to work with Solver properly.
            **kwargs: not used, but needed to be here in order to work with Solver properly.

        Returns:
            float: chi^2.

        """
        chi = solution - self.real_solution
        chi = chi**2
        chi = cp.divide(chi, self.real_solution)
        chi = cp.where(cp.isnan(chi), 0, chi)
        res = cp.sum(chi)
        self.data.append(res)
        return res
Example #34
 def test_10(self):
     N = 64
     M = 4
     Nd = 8
     D = cp.random.randn(Nd, Nd, M)
     X0 = cp.zeros((N, N, M))
     xr = cp.random.randn(N, N, M)
     xp = cp.abs(xr) > 3
     X0[xp] = cp.random.randn(X0[xp].size)
     S = cp.sum(sl.fftconv(D, X0), axis=2)
     lmbda = 1e-4
     rho = 1e-1
     opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 500,
                                   'RelStopTol': 1e-3, 'rho': rho,
                                   'AutoRho': {'Enabled': False}})
     b = cbpdn.ConvBPDN(D, S, lmbda, opt)
     b.solve()
     X1 = b.Y.squeeze()
     assert sl.rrs(X0, X1) < 5e-5
     Sr = b.reconstruct().squeeze()
     assert sl.rrs(S, Sr) < 1e-4
Example #35
 def test_11(self):
     N = 63
     M = 4
     Nd = 8
     D = cp.random.randn(Nd, Nd, M)
     X0 = cp.zeros((N, N, M))
     xr = cp.random.randn(N, N, M)
     xp = cp.abs(xr) > 3
     X0[xp] = cp.random.randn(X0[xp].size)
     S = cp.sum(sl.ifftn(sl.fftn(D, (N, N), (0, 1)) *
                         sl.fftn(X0, None, (0, 1)), None, (0, 1)).real,
                axis=2)
     lmbda = 1e-2
     L = 1e3
     opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 2000,
                                   'RelStopTol': 1e-9, 'L': L,
                                   'BackTrack': {'Enabled': False}})
     b = cbpdn.ConvBPDN(D, S, lmbda, opt)
     b.solve()
     X1 = b.X.squeeze()
     assert sl.rrs(X0, X1) < 5e-4
     Sr = b.reconstruct().squeeze()
     assert sl.rrs(S, Sr) < 2e-4
Example #36
    def get_deconv(self, variable, indices):
        # 1. Zero out everything except the most strongly activated locations
        #maxbounds = self.get_max_patch_bounds(loss, rank, indices)
        isfc = Vutil.has_fc_layer(variable)
        # 全結合層の可視化の場合
        if isfc:
            values = Vutil.get_fc_info(variable, indices)
            variable.data.fill(0)
            for i, (j, v) in enumerate(zip(indices, values)):
                variable.data[i, j] = v
        # Case: visualizing a convolution or pooling layer
        else:
            maxinfo = Vutil.get_max_info(variable, indices)
            variable.data.fill(0)
            for i, (c, info) in enumerate(zip(indices, maxinfo)):
                variable.data[i, c, info[1], info[0]] = info[2]

        # 2. Repeat the inverse operations back down to the input layer
        data_layer = Vutil.get_data_layer(variable)
        xp = cuda.get_array_module(data_layer.data)

        fixed_RMS = 300
        if xp == cupy:
            rms = cupy.sqrt(cupy.sum(data_layer.data ** 2, axis=(1,2,3)) / np.prod(data_layer.data.shape[1:]))
            #rms = cupy.sqrt(cupy.sum(convW ** 2, axis=(2, 3)) / np.product(convW.shape[2:]))
        else:
            rms = np.linalg.norm(data_layer.data, axis=(1,2,3)) ** 2 / np.prod(data_layer.data.shape[1:])
            #rms = np.linalg.norm(convW, axis=(2, 3)) ** 2 / np.product(convW.shape[2:])
        scale = fixed_RMS / rms
        scale = scale.reshape(-1,1,1,1)
        #print(rms, scale)
        #data_layer.data *= scale

        self.deconv(variable)

        return data_layer.data
Example #37
  def testUpdateAnomalyLikelihoods(self):
    """
    A slightly more complex test. This calls estimateAnomalyLikelihoods
    to estimate the distribution on fake data, followed by several calls
    to updateAnomalyLikelihoods.
    """

    #------------------------------------------
    # Step 1. Generate an initial estimate using fake distribution of anomaly
    # scores.
    data1 = _generateSampleData(mean=0.2)[0:1000]
    _, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data1, averagingWindow=5)
    )

    #------------------------------------------
    # Step 2. Generate some new data with a higher average anomaly
    # score. Use the estimator from step 1 to compute likelihoods. Now we
    # should see a lot more anomalies.
    data2 = _generateSampleData(mean=0.6)[0:300]
    likelihoods2, avgRecordList2, estimatorParams2 = (
      an.updateAnomalyLikelihoods(data2, estimatorParams)
    )
    self.assertEqual(len(likelihoods2), len(data2))
    self.assertEqual(len(avgRecordList2), len(data2))
    self.assertTrue(an.isValidEstimatorParams(estimatorParams))

    # The new running total should be different
    self.assertNotEqual(estimatorParams2["movingAverage"]["total"],
                        estimatorParams["movingAverage"]["total"])

    # We should have many more samples where likelihood is < 0.01, but not all
    self.assertGreaterEqual(cupy.sum(likelihoods2 < 0.01), 25)
    self.assertLessEqual(cupy.sum(likelihoods2 < 0.01), 250)

    #------------------------------------------
    # Step 3. Generate some new data with the expected average anomaly score. We
    # should see fewer anomalies than in Step 2.
    data3 = _generateSampleData(mean=0.2)[0:1000]
    likelihoods3, avgRecordList3, estimatorParams3 = (
      an.updateAnomalyLikelihoods(data3, estimatorParams2)
    )

    self.assertEqual(len(likelihoods3), len(data3))
    self.assertEqual(len(avgRecordList3), len(data3))
    self.assertTrue(an.isValidEstimatorParams(estimatorParams3))

    # The new running total should be different
    self.assertNotEqual(estimatorParams3["movingAverage"]["total"],
                        estimatorParams["movingAverage"]["total"])
    self.assertNotEqual(estimatorParams3["movingAverage"]["total"],
                        estimatorParams2["movingAverage"]["total"])

    # We should have a small number of samples where likelihood is < 0.01, but
    # at least one
    self.assertGreaterEqual(cupy.sum(likelihoods3 < 0.01), 1)
    self.assertLessEqual(cupy.sum(likelihoods3 < 0.01), 100)

    #------------------------------------------
    # Step 4. Validate that sending data incrementally is the same as sending
    # in one batch
    allData = data1
    allData.extend(data2)
    allData.extend(data3)

    # Compute moving average of all the data and check it's the same
    _, historicalValuesAll, totalAll = (
      an._anomalyScoreMovingAverage(allData, windowSize=5)
    )
    self.assertEqual(sum(historicalValuesAll),
                     sum(estimatorParams3["movingAverage"]["historicalValues"]))
    self.assertEqual(totalAll,
                     estimatorParams3["movingAverage"]["total"])
Example #38
def _inner(x, y, axis=-1):
    """Patched version of :func:`sporco.linalg.inner`."""

    return cp.sum(x * y, axis=axis, keepdims=True)
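
A shape check for the patched inner product (not from the original source):

import cupy as cp

x = cp.random.randn(4, 8)
y = cp.random.randn(4, 8)
print(_inner(x, y).shape)  # (4, 1): summed over the last axis, which is kept
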
Example #39
    def backward(self, inputs, grad_outputs):
        e1 = array.as_mat(inputs[0])
        e2 = array.as_mat(inputs[1])
        W = inputs[2]
        gy = grad_outputs[0]
        print('cupy.max(gy) =', cupy.max(gy))
        print('cupy.min(gy) =', cupy.min(gy))
        #print 'backward'
        #print 'gy.shape',
        #print gy.shape
        '''
        xp = cuda.get_array_module(*inputs)
        if xp is numpy:
            gW = numpy.einsum('ij,ik,il->jkl', e1, e2, gy)
            ge1 = numpy.einsum('ik,jkl,il->ij', e2, W, gy)
            ge2 = numpy.einsum('ij,jkl,il->ik', e1, W, gy)
        else:
            kern = cuda.reduce('T in0, T in1, T in2', 'T out',
                               'in0 * in1 * in2', 'a + b', 'out = a', 0,
                               'bilinear_product')

            e1_b = e1[:, :, None, None]  # ij
            e2_b = e2[:, None, :, None]  # ik
            gy_b = gy[:, None, None, :]  # il
            W_b = W[None, :, :, :]  # jkl

            gW = kern(e1_b, e2_b, gy_b, axis=0)  # 'ij,ik,il->jkl'
            ge1 = kern(e2_b, W_b, gy_b, axis=(2, 3))  # 'ik,jkl,il->ij'
            ge2 = kern(e1_b, W_b, gy_b, axis=(1, 3))  # 'ij,jkl,il->ik'
        '''
        #ge1_ext = e1*gy.astype(dtype=gy.dtype, copy=False) #Hadamard product
        #print 'ge1_ext.shape',
        #print ge1_ext.shape
        #ge1 = cupy.sum(ge1_ext, axis=1).astype(dtype=gy.dtype, copy=False)
        #print 'ge1.shape',
        #print ge1.shape

        ge1 = cupy.sum(gy, axis=1).reshape(len(gy), 1).astype(dtype=gy.dtype, copy=False)
        print('cupy.max(ge1) =', cupy.max(ge1))
        print('cupy.min(ge1) =', cupy.min(ge1))

        gy_sum = cupy.sum(gy, axis=1).reshape(len(gy), 1).astype(dtype=gy.dtype, copy=False)
        #print 'gy_sum.shape',
        #print gy_sum.shape
        gy_tile = cupy.tile(gy_sum, len(gy[0])).astype(dtype=gy.dtype, copy=False)
        #print 'gy_tile.shape',
        #print gy_tile.shape
        #print 'gy.shape',
        #print gy.shape
        #print 'gy_tile.shape',
        #print gy_tile.shape
        #print 'gy_tile / len(gy[0]).dtype',
        #print (gy_tile / len(gy[0])).dtype
        #ge_tmp1 = gy_tile / len(gy[0])
        #ge_tmp2 = gy - gy_tile

        ge2 = (gy - gy_tile / len(gy[0])).astype(dtype=gy.dtype, copy=False)
        #print 'ge2.shape',
        #print ge2.shape
        print('cupy.max(ge2) =', cupy.max(ge2))
        print('cupy.min(ge2) =', cupy.min(ge2))
  
        gW = cupy.zeros(len(e1[0])*len(e2[0])*len(e2[0])).reshape(len(e1[0]), len(e2[0]), len(e2[0])).astype(dtype=gy.dtype, copy=False)
        #print 'gW.shape',
        #print gW.shape

        ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW
        if len(inputs) == 6:
            V1, V2, b = inputs[3:]
            gV1 = e1.T.dot(gy)
            gV2 = e2.T.dot(gy)
            gb = gy.sum(0)
            ge1 += gy.dot(V1.T)
            ge2 += gy.dot(V2.T)
            ret += gV1, gV2, gb
        #print 'len(ret)',
        #print len(ret)
        #print 'ret[0].shape',
        #print ret[0].shape
        #print 'ret[1].shape',
        #print ret[1].shape
        #print 'ret[2].shape',
        #print ret[2].shape

        return ret
Example #40
def norm(x, ord=None, axis=None, keepdims=False):
    """Returns one of matrix norms specified by ``ord`` parameter.

    Complex valued matrices and vectors are not supported.
    See numpy.linalg.norm for more detail.

    Args:
        x (cupy.ndarray): Array to take norm. If ``axis`` is None,
            ``x`` must be 1-D or 2-D.
        ord (non-zero int, inf, -inf, 'fro'): Norm type.
        axis (int, 2-tuple of ints, None): 1-D or 2-D norm is computed over
            ``axis``.
        keepdims (bool): If this is set to ``True``, the axes which are
            normed over are left in the result as dimensions with size one.

    Returns:
        cupy.ndarray

    """
    if not issubclass(x.dtype.type, (numpy.inexact, numpy.object_)):
        x = x.astype(float)

    # Immediately handle some default, simple, fast, and common cases.
    if axis is None:
        ndim = x.ndim
        if ((ord is None) or (ord in ('f', 'fro') and ndim == 2) or
                (ord == 2 and ndim == 1)):

            x = x.ravel()
            sqnorm = cupy.sum(x ** 2)
            ret = cupy.sqrt(sqnorm)
            if keepdims:
                ret = ret.reshape(ndim * [1])
            return ret

    # Normalize the `axis` argument to a tuple.
    nd = x.ndim
    if axis is None:
        axis = tuple(range(nd))
    elif not isinstance(axis, tuple):
        try:
            axis = int(axis)
        except Exception:
            raise TypeError(
                "'axis' must be None, an integer or a tuple of integers")
        axis = (axis,)

    if len(axis) == 1:
        if ord == numpy.inf:
            return abs(x).max(axis=axis, keepdims=keepdims)
        elif ord == -numpy.inf:
            return abs(x).min(axis=axis, keepdims=keepdims)
        elif ord == 0:
            # Zero norm
            return (x != 0).sum(axis=axis, keepdims=keepdims, dtype=x.dtype)
        elif ord == 1:
            # special case for speedup
            return abs(x).sum(axis=axis, keepdims=keepdims)
        elif ord is None or ord == 2:
            # special case for speedup
            s = x ** 2
            return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))
        else:
            try:
                float(ord)
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            absx = abs(x)
            absx **= ord
            return absx.sum(axis=axis, keepdims=keepdims) ** (1.0 / ord)
    elif len(axis) == 2:
        row_axis, col_axis = axis
        if row_axis < 0:
            row_axis += nd
        if col_axis < 0:
            col_axis += nd
        if not (0 <= row_axis < nd and 0 <= col_axis < nd):
            raise ValueError('Invalid axis %r for an array with shape %r' %
                             (axis, x.shape))
        if row_axis == col_axis:
            raise ValueError('Duplicate axes given.')
        if ord == 1:
            if col_axis > row_axis:
                col_axis -= 1
            ret = abs(x).sum(axis=row_axis).max(axis=col_axis)
        elif ord == numpy.inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = abs(x).sum(axis=col_axis).max(axis=row_axis)
        elif ord == -1:
            if col_axis > row_axis:
                col_axis -= 1
            ret = abs(x).sum(axis=row_axis).min(axis=col_axis)
        elif ord == -numpy.inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = abs(x).sum(axis=col_axis).min(axis=row_axis)
        elif ord in [None, 'fro', 'f']:
            ret = cupy.sqrt((x ** 2).sum(axis=axis))
        else:
            raise ValueError("Invalid norm order for matrices.")
        if keepdims:
            ret_shape = list(x.shape)
            ret_shape[axis[0]] = 1
            ret_shape[axis[1]] = 1
            ret = ret.reshape(ret_shape)
        return ret
    else:
        raise ValueError("Improper number of dimensions to norm.")
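
A few illustrative calls for norm() (not from the original source; they
assume the module-level cupy/numpy imports this function uses):

import cupy
import numpy

x = cupy.arange(9, dtype=cupy.float64).reshape(3, 3)
print(norm(x))                    # Frobenius norm of the 2-D array
print(norm(x, ord=1, axis=0))     # column-wise 1-norms
print(norm(x, ord=numpy.inf, axis=(0, 1)))  # maximum absolute row sum
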
Example #41
    def choice(self, a, size=None, replace=True, p=None):
        """Returns an array of random values from a given 1-D array.

        .. seealso::
            :func:`cupy.random.choice` for full document,
            :meth:`numpy.random.choice`

        """
        if a is None:
            raise ValueError('a must be 1-dimensional or an integer')
        if isinstance(a, cupy.ndarray) and a.ndim == 0:
            raise NotImplementedError
        if isinstance(a, six.integer_types):
            a_size = a
            if a_size <= 0:
                raise ValueError('a must be greater than 0')
        else:
            a = cupy.array(a, copy=False)
            if a.ndim != 1:
                raise ValueError('a must be 1-dimensional or an integer')
            else:
                a_size = len(a)
                if a_size == 0:
                    raise ValueError('a must be non-empty')

        if p is not None:
            p = cupy.array(p)
            if p.ndim != 1:
                raise ValueError('p must be 1-dimensional')
            if len(p) != a_size:
                raise ValueError('a and p must have same size')
            if not (p >= 0).all():
                raise ValueError('probabilities are not non-negative')
            p_sum = cupy.sum(p).get()
            if not numpy.allclose(p_sum, 1):
                raise ValueError('probabilities do not sum to 1')

        if not replace:
            raise NotImplementedError

        if size is None:
            raise NotImplementedError
        shape = size
        size = numpy.prod(shape)

        if p is not None:
            p = cupy.broadcast_to(p, (size, a_size))
            index = cupy.argmax(cupy.log(p) -
                                cupy.random.gumbel(size=(size, a_size)),
                                axis=1)
            if not isinstance(shape, six.integer_types):
                index = cupy.reshape(index, shape)
        else:
            index = cupy.random.randint(0, a_size, size=shape)
            # Align the dtype with NumPy
            index = index.astype(cupy.int64, copy=False)

        if isinstance(a, six.integer_types):
            return index

        if index.ndim == 0:
            return cupy.array(a[index], dtype=a.dtype)

        return a[index]
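
A sampling sketch for choice() (not from the original source; rs is a
hypothetical RandomState-like object exposing the method above):

import cupy

probs = cupy.asarray([0.1, 0.2, 0.7])
draws = rs.choice(3, size=1000, p=probs)
print(cupy.bincount(draws) / 1000.0)  # roughly [0.1, 0.2, 0.7]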