Example #1
    def forward(self, x_orig):
        xp = cupy.get_array_module(*x_orig)
        ldim, cdim, rdim = self._internal_shape(x_orig[0])
        x = x_orig[0].reshape(ldim, cdim, rdim)

        if self.use_batch_mean:
            mean = x.mean(axis=(0, 2), keepdims=True)
            var = x.var(axis=(0, 2), keepdims=True)
            var += self.eps
        else:
            mean = self.avg_mean
            var = self.avg_var

        self.std = xp.sqrt(var, dtype=var.dtype)
        x_mu = x - mean
        self.x_hat = x_mu / self.std
        y = self.gamma * self.x_hat
        y += self.beta

        # Compute exponential moving average
        if self.use_batch_mean and self.update_batch_estimations:
            if self.is_finetune:
                self.N[0] += 1
                decay = 1. / self.N[0]
            else:
                decay = self.decay

            m = ldim * rdim
            adjust = m / max(m - 1., 1.)  # unbiased estimation
            self.avg_mean *= decay
            self.avg_mean += (1 - decay) * mean
            self.avg_var *= decay
            self.avg_var += (1 - decay) * adjust * var

        return y.reshape(x_orig[0].shape),
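For readers skimming the arithmetic above, here is a minimal NumPy-only sketch of the same per-channel normalization and moving-average update. It is an illustration, not the class itself: gamma, beta, eps, decay and the running statistics are stand-alone variables invented here rather than the attributes (self.gamma, self.avg_mean, ...) used in the snippet.

import numpy as np

x = np.random.randn(8, 3, 16).astype(np.float32)      # (ldim, cdim, rdim)
gamma = np.ones((1, 3, 1), np.float32)
beta = np.zeros((1, 3, 1), np.float32)
eps, decay = 2e-5, 0.9
avg_mean = np.zeros((1, 3, 1), np.float32)
avg_var = np.zeros((1, 3, 1), np.float32)

mean = x.mean(axis=(0, 2), keepdims=True)              # per-channel statistics
var = x.var(axis=(0, 2), keepdims=True) + eps
y = gamma * (x - mean) / np.sqrt(var) + beta           # normalized output

m = x.shape[0] * x.shape[2]
adjust = m / max(m - 1.0, 1.0)                         # unbiased variance correction
avg_mean = decay * avg_mean + (1 - decay) * mean       # exponential moving averages
avg_var = decay * avg_var + (1 - decay) * adjust * var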
Example #2
def transform(in_data, train=True, crop_size=32, padding=4):
    img, label = in_data
    img = img.copy()
    xp = cp.get_array_module(img)

    # Random flip & crop
    if train:
        # Random flip
        if random.randint(0, 1):
            img = img[:, :, ::-1]

        # Random crop
        pad_img = xp.pad(img, [(0, 0), (padding, padding), (padding, padding)],
                         'constant')
        C, H, W = pad_img.shape
        top = random.randint(0, H - crop_size - 1)
        left = random.randint(0, W - crop_size - 1)
        bottom = top + crop_size
        right = left + crop_size
        img = pad_img[:, top:bottom, left:right]

    # Normalize
    mean = xp.array([0.485, 0.456, 0.406])
    std = xp.array([0.229, 0.224, 0.225])
    img -= mean[:, None, None]
    img /= std[:, None, None]

    return img, label
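A quick way to exercise the function, assuming CuPy is installed (cp.get_array_module simply returns numpy for NumPy inputs); the image and label below are invented for illustration:

import random
import numpy as np
import cupy as cp

img = np.random.rand(3, 32, 32).astype(np.float32)   # CHW image in [0, 1]
out, label = transform((img, 7), train=True)
print(out.shape, label)                               # (3, 32, 32) 7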
Example #3
def categorical_crossentropy(scores, labels):
    xp = get_array_module(scores)
    target = xp.zeros(scores.shape, dtype='float32')
    loss = 0.
    for i in range(len(labels)):
        target[i, int(labels[i])] = 1.
        loss += (1.0-scores[i, int(labels[i])])**2
    return scores - target, loss
Example #4
 def forward(self, inputs):
     """
     return (1/N) * \sum_i^N \sum_j^L [py_ij * log(py_ij) - py_ij * log(py_tilde_ij)]
     """
     xp = cupy.get_array_module(*inputs[0])
     py, py_tilde = inputs
     kl = py * (xp.log(py) - xp.log(py_tilde))
     kl_sum = kl.sum(axis=1, keepdims=True)
     return kl_sum.mean(keepdims=True).reshape(()),
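The returned scalar is the batch mean of KL(py ‖ py_tilde); a plain NumPy check of the formula in the docstring, with two invented rows of probabilities:

import numpy as np

py = np.array([[0.7, 0.3], [0.5, 0.5]], dtype=np.float32)
py_tilde = np.array([[0.6, 0.4], [0.5, 0.5]], dtype=np.float32)
kl = (py * (np.log(py) - np.log(py_tilde))).sum(axis=1).mean()
print(kl)   # small positive value; a row contributes 0 exactly when py == py_tilde there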
Example #5
def to_categorical(y, nb_classes=None):
    # From keras
    xp = get_array_module(y)
    if xp is cupy:
        y = y.get()
    y = numpy.array(y, dtype='int').ravel()
    if not nb_classes:
        nb_classes = numpy.max(y) + 1
    n = y.shape[0]
    categorical = numpy.zeros((n, nb_classes), dtype='float32')
    categorical[numpy.arange(n), y] = 1
    return xp.asarray(categorical)
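Usage sketch (class labels invented), assuming the names used by the snippet (numpy, cupy and get_array_module) are importable; with a CuPy input the one-hot matrix is built on the CPU and copied back to the device by the final xp.asarray:

labels = [0, 2, 1, 2]
onehot = to_categorical(labels, nb_classes=3)
# array([[1., 0., 0.],
#        [0., 0., 1.],
#        [0., 1., 0.],
#        [0., 0., 1.]], dtype=float32)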
Example #6
 def backward(self, inputs, grad_outputs):
     """
     (gradient w.r.t. py) = log(py) + 1 - log(py_tilde)
     (gradient w.r.t. py_tilde) = -py / py_tilde
     """
     xp = cupy.get_array_module(*inputs[0])
     py, py_tilde = inputs
     coeff = xp.asarray(grad_outputs[0] / py.shape[0], 'float32')
     if self.unchain_py:
         ret_py = None
     else:
         ret_py = coeff * (xp.log(py) - xp.log(py_tilde) + 1.0)
     ret_py_tilde = -coeff * py / py_tilde
     return ret_py, ret_py_tilde
Example #7
def mean_pool(X_lengths, drop=0.):
    X, lengths = X_lengths
    xp = get_array_module(X)
    output = xp.zeros((len(lengths), X.shape[1]), dtype='float32')
    start = 0
    for i, length in enumerate(lengths):
        end = start + length
        output[i] = X[start : end].mean(axis=0)
        start = end
    def finish_update(d_output, sgd=None):
        d_X = xp.zeros((X.shape[0], X.shape[1]), dtype='float32')
        start = 0
        for i, length in enumerate(lengths):
            end = start + length
            d_X[start : end] += d_output[i] / (end-start)
            start = end
        return d_X
    return output, finish_update
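A small sketch of how the pooled output and the returned callback behave, assuming get_array_module resolves as in the other examples (e.g. Thinc's helper); the data are invented:

import numpy as np

X = np.arange(12, dtype='float32').reshape(6, 2)   # 6 timesteps, 2 features
lengths = [2, 4]                                    # two sequences: rows 0-1 and 2-5
pooled, finish_update = mean_pool((X, lengths))
print(pooled[0])                                    # == X[:2].mean(axis=0)
d_X = finish_update(np.ones((2, 2), dtype='float32'))
print(d_X[0])                                       # [0.5 0.5]: 1 / length for sequence 0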
Example #8
def max_pool(X_lengths, drop=0.):
    X, lengths = X_lengths
    xp = get_array_module(X)
    maxes = xp.zeros((len(lengths), X.shape[1]), dtype='float32')
    start = 0
    for i, length in enumerate(lengths):
        end = start + length
        maxes[i] = X[start : end].max(axis=0)
        start = end
    def finish_update(d_maxes, sgd=None):
        d_X = xp.zeros((X.shape[0], X.shape[1]), dtype='float32')
        start = 0
        for i, length in enumerate(lengths):
            end = start + length
            # route the gradient to the rows of X that attained the per-column max
            d_X[start : end] += d_maxes[i] * (X[start : end] == maxes[i])
            start = end
        return d_X
    return maxes, finish_update
Example #9
def get_array_module(*args):
    """Gets an appropriate one from :mod:`numpy` or :mod:`cupy`.

    This is almost equivalent to :func:`cupy.get_array_module`. The only
    difference is that this function can be used even if CUDA is not available.

    Args:
        args: Values to determine whether NumPy or CuPy should be used.

    Returns:
        module: :mod:`cupy` or :mod:`numpy` is returned based on the types of
        the arguments.

    """
    if available:
        return cupy.get_array_module(*args)
    else:
        return numpy
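Typical dispatch pattern with this helper (in Chainer it is exposed as chainer.backends.cuda.get_array_module); the module-level names available, cupy and numpy are assumed to be set up as in the snippet:

import numpy

x_cpu = numpy.arange(4, dtype=numpy.float32)
xp = get_array_module(x_cpu)          # numpy here; cupy for a device array
y = xp.sqrt(x_cpu + 1.0)              # identical call for CPU and GPU inputs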
Example #10
def get_array_module(*args):
    """Gets an appropriate one from :mod:`numpy` or :mod:`cupy`.

    This is almost equivalent to :func:`cupy.get_array_module`. The differences
    are that this function can be used even if CUDA is not available and that
    it will return their data arrays' array module for
    :class:`~chainer.Variable` arguments.

    Args:
        args: Values to determine whether NumPy or CuPy should be used.

    Returns:
        module: :mod:`cupy` or :mod:`numpy` is returned based on the types of
        the arguments.

    """
    if available:
        args = [arg.data if isinstance(arg, chainer.Variable) else arg
                for arg in args]
        return cupy.get_array_module(*args)
    else:
        return numpy
Example #11
def get_array_module(*args):
    """Gets an appropriate one from :mod:`numpy` or :mod:`cupy`.

    This is almost equivalent to :func:`cupy.get_array_module`. The differences
    are that this function can be used even if CUDA is not available and that
    it will return their data arrays' array module for
    :class:`~chainer.Variable` arguments.

    Args:
        args: Values to determine whether NumPy or CuPy should be used.

    Returns:
        module: :mod:`cupy` or :mod:`numpy` is returned based on the types of
        the arguments.

    """
    if available:
        args = [
            arg.data if isinstance(arg, chainer.Variable) else arg
            for arg in args
        ]
        return cupy.get_array_module(*args)
    else:
        return numpy
Example #12
    def apply(self, M, sim_object, postime, t):
        if HAS_CUPY and self.use_gpu:
            xp = cp.get_array_module(M)
        else:
            xp = np

        postime.calc_pos(t)
        fov = xp.asarray(sim_object.fov[None, :], dtype=xp.float32)

        rr = (postime.pos * self.dirvec * fov).sum(1)
        theta = rr * self.M0 * 267.522

        # print([postime.pos.min(), postime.pos.max(), fov])
        # print([rr.min(), rr.max()])
        # print([theta.min(), theta.max()])

        M_new = xp.zeros_like(M)
        M_new[:, 0] = M[:, 0] * xp.cos(theta) - M[:, 1] * xp.sin(theta)
        M_new[:, 1] = M[:, 0] * xp.sin(theta) + M[:, 1] * xp.cos(theta)
        M_new[:, 2] = M[:, 2]

        M[:] = M_new

        return None
Example #13
def get_convolve(x):
    """Returns correct convolve module based on input

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Array

    Returns
    -------
    mod : :obj:`func`
        Module to be used to process array (:mod:`numpy` or :mod:`cupy`)

    """
    if not deps.cupy_enabled:
        return convolve

    if cp.get_array_module(x) == np:
        return convolve
    else:
        if deps.cusignal_enabled:
            return cusignal.convolution.convolve
        else:
            raise ModuleNotFoundError(cusignal_message)
Example #14
    def as_single_prec(self):
        """
        force single precision

        :rtype: SFTData
        """
        dict_single = dict()
        xp = cp.get_array_module(self.Yenc)
        for k, v in asdict(self).items():
            if v is None:
                continue

            if v.dtype == xp.float64:
                dict_single[k] = v.astype(xp.float32)
            elif v.dtype == xp.complex128:
                dict_single[k] = v.astype(xp.complex64)
            elif v.dtype == xp.float32:
                dict_single[k] = v
            elif v.dtype == xp.complex64:
                dict_single[k] = v
            else:
                raise NotImplementedError

        return SFTData(**dict_single)
Example #15
File: gmm.py Project: yuhc/ava-cupy
def draw(X, pred, means, covariances, output):
    xp = cupy.get_array_module(X)
    for i in six.moves.range(2):
        labels = X[pred == i]
        if xp is cupy:
            labels = labels.get()
        plt.scatter(labels[:, 0], labels[:, 1], c=np.random.rand(1, 3))
    if xp is cupy:
        means = means.get()
        covariances = covariances.get()
    plt.scatter(means[:, 0],
                means[:, 1],
                s=120,
                marker='s',
                facecolors='y',
                edgecolors='k')
    x = np.linspace(-5, 5, 1000)
    y = np.linspace(-5, 5, 1000)
    X, Y = np.meshgrid(x, y)
    for i in six.moves.range(2):
        dist = stats.multivariate_normal(means[i], covariances[i])
        Z = dist.pdf(np.stack([X, Y], axis=-1))
        plt.contour(X, Y, Z)
    plt.savefig(output)
Example #16
 def _init_proj_matrix(self, init_sample, compressed_dim, proj_method):
     """
         init the projection matrix
     """
     if gpu_config.use_gpu:
         xp = cp.get_array_module(init_sample[0])
     else:
         xp = np
     x = [xp.reshape(x, (-1, x.shape[2])) for x in init_sample]
     x = [z - z.mean(0) for z in x]
     proj_matrix_ = []
     if self.config.proj_init_method == 'pca':
         for x_, compressed_dim_ in zip(x, compressed_dim):
             proj_matrix, _, _ = xp.linalg.svd(x_.T.dot(x_))
             proj_matrix = proj_matrix[:, :compressed_dim_]
             proj_matrix_.append(proj_matrix)
     elif self.config.proj_init_method == 'rand_uni':
         for x_, compressed_dim_ in zip(x, compressed_dim):
             proj_matrix = xp.random.uniform(size=(x_.shape[1],
                                                   compressed_dim_))
             proj_matrix /= xp.sqrt(
                 xp.sum(proj_matrix**2, axis=0, keepdims=True))
             proj_matrix_.append(proj_matrix)
     return proj_matrix_
Example #17
    def __call__(self, arr: np.ndarray, gpu=False):
        if self.p is None or self.p == 0:
            return arr

        if gpu:
            arr = cp.asnumpy(arr)
        elif cp.get_array_module(arr) == cp:
            print("You forgot to set `gpu=True` for a Cupy array")
            gpu = True
            arr = cp.asnumpy(arr)
        else:
            arr = arr.copy()

        if arr.dtype == np.uint8:
            arr = np.apply_along_axis(self.intError, 1, arr)
        # elif arr.dtype == np.float32:
        #     result = self.floatError(arr)
        else:
            raise ValueError("Only uint8 allowed")

        if gpu:
            arr = cp.array(arr)

        return arr
Example #18
def sample_fs(xf, grid_sz=None):
    if gpu_config.use_gpu:
        xp = cp.get_array_module(xf)
    else:
        xp = np
    sz = xf.shape[:2]
    if grid_sz is None or sz == grid_sz:
        x = sz[0] * sz[1] * cifft2(xf)
    else:
        sz = np.array(sz)
        grid_sz = np.array(grid_sz)
        if np.any(grid_sz < sz):
            raise ValueError(
                "The grid size must be larger than or equal to the signal size"
            )
        tot_pad = grid_sz - sz
        pad_sz = np.ceil(tot_pad / 2).astype(np.int32)
        xf_pad = xp.pad(xf, tuple(pad_sz), 'constant')
        if np.any(tot_pad % 2 == 1):
            xf_pad = xf_pad[:xf_pad.shape[0] -
                            (tot_pad[0] % 2), :xf_pad.shape[1] -
                            (tot_pad[1] % 2)]
        x = grid_sz[0] * grid_sz[1] * cifft2(xf_pad)
    return x
Example #19
        def lf(*args, **kwargs):
            t0 = args[-1]
            xp = cupy.get_array_module(t0)
            t = xp.expand_dims(t0.astype(xp.float32), axis=1)
            #t = xp.eye(self.n_label, dtype=np.float32)[t0]
            x = args[0]

            mu, ln_var = self.encode(x, t)
            batchsize = len(mu.data)
            # reconstruction loss
            rec_loss = 0
            for l in six.moves.range(k):
                z = F.gaussian(mu, ln_var)
                rec_loss += F.bernoulli_nll(x, self.decode(z, t, sigmoid=False)) \
                            / (k * batchsize)
            self.rec_loss = rec_loss
            self.loss = self.rec_loss + \
                        C * gaussian_kl_divergence(mu, ln_var) / batchsize
            chainer.report(
                {'rec_loss': rec_loss, 'loss': self.loss}, observer=self)
            return self.loss
Example #20
def circ_mul_v(circ,v,eigs=None):
    ''' Multiply a multilevel circulant matrix A by a vector v.

    Args:
        circ (ndarray): representation of the multilevel circulant matrix A, i.e.
             the first column of A in proper shape.
        v (ndarray): vector to be multiplied. Should be reshaped to the same shape
             as circ, using the same ordering (column first/row first) as circ.
    Returns:
        result of the multiplication.
    '''
    if use_gpu > 0:
        import cupy
        xp = cupy.get_array_module(circ)
    else:
        xp = np
    
    if eigs is None:
        eigs = circ_eigs(circ)
    tmp = xp.real(xp.fft.ifft2(xp.fft.fft2(v, norm='ortho') * eigs, norm='ortho'))
    # `cupy` is only imported on the GPU path above, so compare against np
    # instead of referencing a possibly undefined name.
    if xp is not np:
        return tmp.astype(xp.float32)
    else:
        return tmp
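The FFT trick relies on circulant matrices being diagonalized by the Fourier basis. A one-level NumPy check of that identity (independent of the circ_eigs helper, which is not shown here):

import numpy as np

c = np.array([4., 1., 0., 1.])                                # first column of A
A = np.stack([np.roll(c, k) for k in range(len(c))], axis=1)  # full circulant matrix
v = np.array([1., 2., 3., 4.])
via_fft = np.real(np.fft.ifft(np.fft.fft(c) * np.fft.fft(v)))
assert np.allclose(A @ v, via_fft)                            # matches the direct product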
Example #21
def _gibbs_removal_1d(x, axis=0, n_points=3, xp=None):
    """Suppresses Gibbs ringing along a given axis using fourier sub-shifts.

    Parameters
    ----------
    x : 2D ndarray
        Matrix x.
    axis : int (0 or 1)
        Axis in which Gibbs oscillations will be suppressed.
        Default is set to 0.
    n_points : int, optional
        Number of neighbours to access local TV (see note).
        Default is set to 3.

    Returns
    -------
    xc : 2D ndarray
        Matrix with suppressed Gibbs oscillations along the given axis.

    Notes
    -----
    This function suppresses the effects of Gibbs oscillations based on the
    analysis of local total variation (TV). Although artefact correction is
    done based on two adjacent points for each voxel, total variation should be
    accessed in a larger range of neighbours. The number of neighbours to be
    considered in TV calculation can be adjusted using the parameter n_points.

    """
    if xp is None:
        xp = cp.get_array_module(x)
    float_dtype = xp.promote_types(x.dtype, np.float32)
    ssamp = xp.linspace(0.02, 0.9, num=45, dtype=float_dtype)

    xs = xp.moveaxis(x, axis, -1).copy()
    h = xp.ones(n_points, dtype=x.real.dtype)  # filter used in _image_tv

    # TV for shift zero (baseline)
    tvr, tvl = _image_tv(xs, h, axis=-1)
    tvp = xp.minimum(tvr, tvl)
    tvn = tvp.copy()

    # Find optimal shift for gibbs removal
    isp = xs.copy()
    isn = xs.copy()
    sp = xp.zeros(xs.shape, dtype=float_dtype)
    sn = xp.zeros(xs.shape, dtype=float_dtype)
    n = xs.shape[-1]
    c = xp.fft.fft(xs, axis=-1)
    k = xp.fft.fftfreq(n, 1 / (2.0j * np.pi))
    k = k.astype(xp.promote_types(xs.dtype, xp.complex64), copy=False)
    if xs.ndim == 2:
        k = k[np.newaxis, :]
        ssamp_nd = ssamp[:, np.newaxis]
    elif xs.ndim == 3:
        k = k[np.newaxis, np.newaxis, :]
        ssamp_nd = ssamp[:, np.newaxis, np.newaxis]
    all_eks = ssamp_nd * k
    xp.exp(all_eks, out=all_eks)
    for s, eks in zip(ssamp, all_eks):
        eks = eks[np.newaxis, ...]
        # Access positive shift for given s
        img_p = c * eks
        img_p = xp.fft.ifft(img_p, axis=-1)
        xp.abs(img_p, out=img_p)

        tvsr, tvsl = _image_tv(img_p, h, axis=-1)
        tvs_p = xp.minimum(tvsr, tvsl)

        # Access negative shift for given s
        img_n = c * xp.conj(eks)  # xp.exp(-ks)
        img_n = xp.fft.ifft(img_n, axis=-1)
        xp.abs(img_n, out=img_n)

        tvsr, tvsl = _image_tv(img_n, h, axis=-1)
        tvs_n = xp.minimum(tvsr, tvsl)

        maskp = tvp > tvs_p
        maskn = tvn > tvs_n

        # Update positive shift params
        isp[maskp] = img_p[maskp].real
        sp[maskp] = s
        tvp[maskp] = tvs_p[maskp]

        # Update negative shift params
        isn[maskn] = img_n[maskn].real
        sn[maskn] = s
        tvn[maskn] = tvs_n[maskn]

    # check non-zero sub-voxel shifts
    idx = xp.nonzero(sp + sn)

    # use positive and negative optimal sub-voxel shifts to interpolate to
    # original grid points
    sn_i = sn[idx]
    isn_i = isn[idx]
    tmp = isp[idx] - isn_i
    tmp /= sp[idx] + sn_i
    tmp *= sn_i
    tmp += isn_i
    xs[idx] = tmp

    return xp.moveaxis(xs, -1, axis)
Example #22
def normalize_axis1(x):
    xp = cupy.get_array_module(*x)
    abs_x = abs(x)
    x = x / (1e-6 + abs_x.max(axis=1, keepdims=True))
    x_norm_2 = x**2
    return x / xp.sqrt(1e-6 + x_norm_2.sum(axis=1, keepdims=True))
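A quick sanity check of the row normalization (cupy must be importable, since the function calls cupy.get_array_module even for NumPy input):

import numpy as np

x = np.random.randn(5, 3).astype(np.float32)
rows = normalize_axis1(x)
print(np.linalg.norm(rows, axis=1))   # all approximately 1, up to the 1e-6 damping terms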
Example #23
 def sample_function(x, y, z):
     xp = cupy.get_array_module(x, y, z)
     return xp.square(xp.add(x, y))
Example #24
 def f(x):
     xp = cupy.get_array_module(x)
     return xp.square(x)
Example #25
def flatten_sequences(sequences, drop=0.0):  # pragma: no cover
    xp = get_array_module(sequences[0])
    return xp.concatenate(sequences), None
Example #26
def L1_distance(vec1, vec2, labels, margin=0.2):
    xp = get_array_module(vec1)
    dist = xp.abs(vec1 - vec2).sum(axis=1)
    loss = (dist > margin) - labels
    # the original referenced undefined names sent1/sent2; use the actual arguments
    return (vec1 - vec2) * loss, (vec2 - vec1) * loss, loss
Example #27
def resample(x, sr_orig, sr_new, axis=-1, filter='kaiser_best', **kwargs):
    '''Resample a signal x from sr_orig to sr_new along a given axis.

    Parameters
    ----------
    x : np.ndarray, dtype=np.float*
        The input signal(s) to resample.

    sr_orig : int > 0
        The sampling rate of x

    sr_new : int > 0
        The target sampling rate of the output signal(s)

    axis : int
        The target axis along which to resample `x`

    filter : optional, str or callable
        The resampling filter to use.

        By default, uses the `kaiser_best` (pre-computed filter).

    kwargs
        additional keyword arguments provided to the specified filter

    Returns
    -------
    y : np.ndarray
        `x` resampled to `sr_new`

    Raises
    ------
    ValueError
        if `sr_orig` or `sr_new` is not positive

    TypeError
        if the input signal `x` has an unsupported data type.

    Examples
    --------
    >>> # Generate a sine wave at 440 Hz for 5 seconds
    >>> sr_orig = 44100.0
    >>> x = np.sin(2 * np.pi * 440.0 / sr_orig * np.arange(5 * sr_orig))
    >>> x
    array([ 0.   ,  0.063, ..., -0.125, -0.063])
    >>> # Resample to 22050 with default parameters
    >>> resampy.resample(x, sr_orig, 22050)
    array([ 0.011,  0.123, ..., -0.193, -0.103])
    >>> # Resample using the fast (low-quality) filter
    >>> resampy.resample(x, sr_orig, 22050, filter='kaiser_fast')
    array([ 0.013,  0.121, ..., -0.189, -0.102])
    >>> # Resample using a high-quality filter
    >>> resampy.resample(x, sr_orig, 22050, filter='kaiser_best')
    array([ 0.011,  0.123, ..., -0.193, -0.103])
    >>> # Resample using a Hann-windowed sinc filter
    >>> resampy.resample(x, sr_orig, 22050, filter='sinc_window',
    ...                  window=scipy.signal.hann)
    array([ 0.011,  0.123, ..., -0.193, -0.103])

    >>> # Generate stereo data
    >>> x_right = np.sin(2 * np.pi * 880.0 / sr_orig * np.arange(len(x)))
    >>> x_stereo = np.stack([x, x_right])
    >>> x_stereo.shape
    (2, 220500)
    >>> # Resample along the time axis (1)
    >>> y_stereo = resampy.resample(x_stereo, sr_orig, 22050, axis=1)
    >>> y_stereo.shape
    (2, 110250)
    '''

    if sr_orig <= 0:
        raise ValueError('Invalid sample rate: sr_orig={}'.format(sr_orig))

    if sr_new <= 0:
        raise ValueError('Invalid sample rate: sr_new={}'.format(sr_new))

    sample_ratio = float(sr_new) / sr_orig

    # Set up the output shape
    shape = list(x.shape)
    shape[axis] = int(shape[axis] * sample_ratio)

    if shape[axis] < 1:
        raise ValueError('Input signal length={} is too small to '
                         'resample from {}->{}'.format(x.shape[axis], sr_orig,
                                                       sr_new))

    # Preserve contiguity of input (if it exists)
    # If not, revert to C-contiguity by default
    if x.flags['F_CONTIGUOUS']:
        order = 'F'
    else:
        order = 'C'

    xp = cp.get_array_module(x)

    y = xp.zeros(shape, dtype=x.dtype, order=order)

    interp_win, precision, _ = get_filter(filter, **kwargs)
    interp_win = xp.asarray(interp_win)

    if sample_ratio < 1:
        interp_win *= sample_ratio

    interp_delta = xp.zeros_like(interp_win)
    interp_delta[:-1] = xp.diff(interp_win)

    # Construct 2d views of the data with the resampling axis on the first dimension
    x_2d = x.swapaxes(0, axis).reshape((x.shape[axis], -1))
    y_2d = y.swapaxes(0, axis).reshape((y.shape[axis], -1))
    resample_f(x_2d, y_2d, sample_ratio, interp_win, interp_delta,
               int(precision))

    return y
Example #28
 def argsort(self, a, axis=-1):
     if self.external:
         xp = cupy.get_array_module(a)
         return xp.argsort(a, axis=axis)
     else:
         return a.argsort(axis=axis)
Example #29
 def sum(self):
     return cp.get_array_module(self.values).sum(self.values)
Example #30
def wrap_take(array, *args, **kwargs):
    if get_array_module(array) == numpy:
        kwargs["mode"] = "wrap"

    return array.take(*args, **kwargs)
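With NumPy, mode="wrap" makes out-of-range indices wrap around instead of raising, which is presumably the CuPy behaviour the wrapper is matching; a small NumPy-only sketch, assuming get_array_module is in scope as in the other examples:

import numpy

a = numpy.array([10, 20, 30])
print(wrap_take(a, [1, 4]))   # [20 20] -- index 4 wraps to 4 % 3 == 1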
Example #31
def flatten_sequences(sequences, drop=0.): # pragma: no cover
    xp = get_array_module(sequences[0])
    return xp.concatenate(sequences), None
Example #32
 def init_gx(self, inputs):
     xp = cupy.get_array_module(*inputs.data)
     self.gx = as_mat(xp.zeros_like(inputs.data))
Example #33
    def collide(self, input_f):
        xp = cp.get_array_module(input_f)
        f = input_f
        vaxis = tuple(-(i + 1) for i in range(self.num_dim))

        return xp.sum(f * self.weights, axis=vaxis, keepdims=True) - f
Example #34
 def argpartition(self, a, kth, axis=-1):
     if self.external:
         xp = cupy.get_array_module(a)
         return xp.argpartition(a, kth, axis=axis)
     else:
         return a.argpartition(kth, axis=axis)
Example #35
def add_noise_c(h, sigma=0.2):
    xp = cp.get_array_module(h.data)
    if chainer.config.train:
        # randn(..., dtype=...) exists only in CuPy; cast afterwards so the
        # same line also works when xp is NumPy
        return h + sigma * xp.random.randn(*h.data.shape).astype(xp.float32)
    else:
        return h
Example #36
def wrap_take(array, *args, **kwargs):
    if get_array_module(array) == numpy:
        kwargs["mode"] = "wrap"

    return array.take(*args, **kwargs)
Example #37
File: gmm.py Project: zivzone/cupy
def predict(X, inv_cov, means, weights):
    xp = cupy.get_array_module(X)
    log_prob = estimate_log_prob(X, inv_cov, means)
    return (log_prob + xp.log(weights)).argmax(axis=1)
Example #38
 def dbeta(self, upstream_dx):
     xp = cp.get_array_module(upstream_dx)
     dbeta = xp.sum(upstream_dx, axis=self.av_axis)
     if self.input_dimension == 4:
         dbeta = dbeta[xp.newaxis, :, xp.newaxis, xp.newaxis]
     return dbeta
Example #39
 def forward(self, X):
     xp = cp.get_array_module(X)
     return 0.5*self.strength*xp.sum(xp.power(X,2))
Example #40
    def forward(self, X, test_mode=False, use_express=False):
        """
        X.shape = (batch_size, channel, height, width)
        Note that when following a convolution layer, we learn a (gamma, beta)
        for each of the channels (feature maps).
        """
        self.input_shape = X.shape
        xp = cp.get_array_module(X)

        if not test_mode:
            if len(X.shape) == 4 and not self.is_on_gpu:
                mean, var = batch_norm_stats_cy.channelwise_mean_and_var_4d(X)
            else:
                mean = xp.mean(X, axis=self.av_axis)
                var = xp.var(X, axis=self.av_axis)
            self.std = xp.sqrt(var + self.eps)
            if len(X.shape) == 4:
                self.std = self.std[xp.newaxis, :, xp.newaxis, xp.newaxis]
                mean = mean[xp.newaxis, :, xp.newaxis, xp.newaxis]
            self.X_demean = X - mean
            self.X_hat = self.X_demean / self.std

            if self.non_learned_params["running_mean"] is not None:
                self.non_learned_params["running_mean"] = (
                    self.run_momentum * self.non_learned_params["running_mean"]
                    + (1 - self.run_momentum) * mean)
            else:
                self.non_learned_params["running_mean"] = mean
            if self.non_learned_params["running_std"] is not None:
                self.non_learned_params["running_std"] = (
                    self.run_momentum * self.non_learned_params["running_std"]
                    + (1 - self.run_momentum) * self.std)
            else:
                self.non_learned_params["running_std"] = self.std
            if use_express:
                return (ne.evaluate("gamma*X_hat + beta",
                                    local_dict={
                                        **vars(self), 'gamma':
                                        self.learned_params['gamma'],
                                        'beta':
                                        self.learned_params['beta']
                                    }))
            else:
                return (self.learned_params['gamma'] * self.X_hat +
                        self.learned_params['beta'])
        else:  # test_mode
            if use_express:
                # X and X_hat are locals of forward(); once local_dict overrides
                # the frame namespace they must be passed in explicitly.
                X_hat = ne.evaluate(
                    "(X - running_mean)/running_std",
                    local_dict={
                        'X': X,
                        'running_mean': self.non_learned_params["running_mean"],
                        'running_std': self.non_learned_params["running_std"]
                    })
                return ne.evaluate(
                    "gamma*X_hat + beta",
                    local_dict={
                        'X_hat': X_hat,
                        'gamma': self.learned_params['gamma'],
                        'beta': self.learned_params['beta']
                    })
            else:
                X_hat = (X - self.non_learned_params["running_mean"]
                         ) / self.non_learned_params["running_std"]
                return (self.learned_params['gamma'] * X_hat +
                        self.learned_params['beta'])
Example #41
 def getNorm(self, psiSquared):
     """
     Finds norm for a given state of psi squared along the z-axis
     """
     xp = cp.get_array_module(psiSquared)
     return xp.sqrt(xp.sum(psiSquared*self.hz))
Example #42
def _gibbs_removal_2d_or_3d(image, n_points=3, G0=None, G1=None, *, xp=None):
    """ Suppress Gibbs ringing of a 2D image.

    Parameters
    ----------
    image : 2D ndarray
        Matrix containing the 2D image.
    n_points : int, optional
        Number of neighbours to access local TV (see note). Default is
        set to 3.
    G0 : 2D ndarray, optional.
        Weights for the image corrected along axis 0. If not given, the
        function estimates them using the function :func:`_weights`.
    G1 : 2D ndarray
        Weights for the image corrected along axis 1. If not given, the
        function estimates them using the function :func:`_weights`.

    Returns
    -------
    imagec : 2D ndarray
        Matrix with Gibbs oscillations reduced along axis a.

    Notes
    -----
    This function suppresses the effects of Gibbs oscillations based on the
    analysis of local total variation (TV). Although artefact correction is
    done based on two adjacent points for each voxel, total variation should be
    accessed in a larger range of neighbours. The number of neighbours to be
    considered in TV calculation can be adjusted using the parameter n_points.

    References
    ----------
    Please cite the following articles
    .. [1] Neto Henriques, R., 2018. Advanced Methods for Diffusion MRI Data
           Analysis and their Application to the Healthy Ageing Brain
           (Doctoral thesis). https://doi.org/10.17863/CAM.29356
    .. [2] Kellner E, Dhital B, Kiselev VG, Reisert M. Gibbs-ringing artifact
           removal based on local subvoxel-shifts. Magn Reson Med. 2016
           doi: 10.1002/mrm.26054.

    """
    if xp is None:
        xp = cp.get_array_module(image)
    if G0 is None or G1 is None:
        G0, G1 = _weights(image.shape[:2], image.dtype, xp=xp)
        if image.ndim > 2:
            G0 = G0[..., np.newaxis]
            G1 = G1[..., np.newaxis]

    if image.ndim not in [2, 3]:
        raise ValueError(
            "expected a 2D image or a 3D array corresponding to a batch of 2D "
            "images stacked along the last axis")
    img_c1 = _gibbs_removal_1d(image, axis=1, n_points=n_points)
    img_c0 = _gibbs_removal_1d(image, axis=0, n_points=n_points)

    C1 = xp.fft.fftn(img_c1, axes=(0, 1))
    C0 = xp.fft.fftn(img_c0, axes=(0, 1))
    imagec = xp.fft.fftshift(C1, axes=(0, 1)) * G1
    imagec += xp.fft.fftshift(C0, axes=(0, 1)) * G0
    imagec = xp.fft.ifftn(imagec, axes=(0, 1))
    imagec = xp.abs(imagec)
    return imagec
Example #43
 def g(x):
     xp = cupy.get_array_module(x)
     return xp.frexp(x)
Example #44
def gibbs_removal(vol,
                  slice_axis=2,
                  n_points=3,
                  inplace=False,
                  num_threads=None,
                  *,
                  xp=None):
    """Suppresses Gibbs ringing artefacts of images volumes.

    Parameters
    ----------
    vol : ndarray ([X, Y]), ([X, Y, Z]) or ([X, Y, Z, g])
        Matrix containing one volume (3D) or multiple (4D) volumes of images.
    slice_axis : int (0, 1, or 2)
        Data axis corresponding to the number of acquired slices.
        Default is set to the third axis.
    n_points : int, optional
        Number of neighbour points to access local TV (see note).
        Default is set to 3.
    inplace : bool, optional
        unimplemented option on the GPU
    num_threads : int or None, optional
        unsupported option on the GPU

    Returns
    -------
    vol : ndarray ([X, Y]), ([X, Y, Z]) or ([X, Y, Z, g])
        Matrix containing one volume (3D) or multiple (4D) volumes of corrected
        images.

    Notes
    -----
    For 4D matrix last element should always correspond to the number of
    diffusion gradient directions.

    References
    ----------
    Please cite the following articles
    .. [1] Neto Henriques, R., 2018. Advanced Methods for Diffusion MRI Data
           Analysis and their Application to the Healthy Ageing Brain
           (Doctoral thesis). https://doi.org/10.17863/CAM.29356
    .. [2] Kellner E, Dhital B, Kiselev VG, Reisert M. Gibbs-ringing artifact
           removal based on local subvoxel-shifts. Magn Reson Med. 2016
           doi: 10.1002/mrm.26054.

    """
    nd = vol.ndim

    if xp is None:
        xp = cp.get_array_module(vol)
    if xp is np:
        # The implementation here was refactored for the GPU
        # Dipy's version is faster on the CPU, so fall back to it in that case.
        from dipy.denoise.gibbs import gibbs_removal as gibbs_removal_cpu
        try:
            return gibbs_removal_cpu(vol,
                                     slice_axis=slice_axis,
                                     n_points=n_points,
                                     inplace=inplace,
                                     num_threads=num_threads)
        except TypeError:
            warnings.warn("inplace and num_threads arguments ignored")
            # older DIPY did not have inplace or num_threads kwargs
            return gibbs_removal_cpu(vol,
                                     slice_axis=slice_axis,
                                     n_points=n_points)

    if not isinstance(inplace, bool):
        raise TypeError("inplace must be a boolean.")

    if num_threads is not None:
        warnings.warn("num_threads is ignored by the GPU operation")

    # check the axis corresponding to different slices
    # 1) This axis cannot be larger than 2
    if slice_axis > 2:
        raise ValueError("Different slices have to be organized along" +
                         "one of the 3 first matrix dimensions")

    # 2) If this is not 2, swap axes so that different slices are ordered
    # along axis 2. Note that swapping is not required if data is already a
    # single image
    elif slice_axis < 2 and nd > 2:
        vol = xp.swapaxes(vol, slice_axis, 2)

    # check matrix dimension
    if nd == 4:
        inishap = vol.shape
        vol = vol.reshape((inishap[0], inishap[1], inishap[2] * inishap[3]))
    elif nd > 4:
        raise ValueError("Data have to be a 4D, 3D or 2D matrix")
    elif nd < 2:
        raise ValueError("Data is not an image")

    # Produce weighting functions for 2D Gibbs removal
    shap = vol.shape
    G0, G1 = _weights(shap[:2], vol.dtype, xp=xp)

    if inplace:
        raise NotImplementedError("inplace restoration not supported")

    # Run Gibbs removal of 2D images
    if nd > 2:
        G0 = G0[..., np.newaxis]
        G1 = G1[..., np.newaxis]
    vol = _gibbs_removal_2d_or_3d(vol, n_points=n_points, G0=G0, G1=G1, xp=xp)

    # Reshape data to original format
    if nd == 4:
        vol = vol.reshape(inishap)
    if slice_axis < 2 and nd > 2:
        vol = xp.swapaxes(vol, slice_axis, 2)

    return vol
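A usage sketch based only on the signature above, with CuPy and DIPY installed: a NumPy input takes the DIPY CPU fall-back, while a CuPy input runs the GPU path. The volume here is synthetic:

import numpy as np

vol = np.random.rand(64, 64, 8).astype(np.float32)   # [X, Y, Z] volume
vol_corrected = gibbs_removal(vol, slice_axis=2, n_points=3)
print(vol_corrected.shape)                            # (64, 64, 8)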
Example #45
def wrap_take(array, *args, **kwargs):
    if get_array_module(array) == numpy:
        kwargs['mode'] = 'wrap'

    return array.take(*args, **kwargs)
Example #46
def perturbation_with_max_norm_constraint(x, norm):
    xp = cupy.get_array_module(*x)
    return norm * xp.sign(x)
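In adversarial-training code this is typically applied to a gradient to obtain an L-infinity-bounded perturbation; a small sketch with an invented gradient (cupy must be importable, as in the snippet):

import numpy as np

grad = np.array([[0.3, -1.2, 0.05]], dtype=np.float32)
delta = perturbation_with_max_norm_constraint(grad, norm=0.01)
print(delta)   # [[ 0.01 -0.01  0.01]] -- every entry has magnitude exactly `norm`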