Code example #1
File: data_stream.py Project: bbrzycki/setigen
    def get_samples(self, num_samples):
        """
        Retrieve voltage samples, based on noise and signal source functions.
        
        If custom signals add complex voltages, the voltage array will be cast to
        complex type.
        
        Parameters
        ----------
        num_samples : int
            Number of samples to get
            
        Returns
        -------
        v : array
            Array of voltage samples
        """
        self._update_t(num_samples)

        for noise_func in self.noise_sources:
            self.v += noise_func(self.ts)

        for signal_func in self.signal_sources:
            # Ensure that the array is of the correct type
            signal_v = xp.array(signal_func(self.ts))
            # If there are complex voltages, make sure to cast self.v to complex
            if not xp.iscomplexobj(self.v) and xp.iscomplexobj(signal_v):
                self.v = self.v.astype(complex)
            self.v += signal_v

        self.start_obs = False

        return self.v
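A minimal sketch of the accumulation pattern above, using NumPy stand-ins for the stream state (the real class dispatches through ``xp``, which may be NumPy or CuPy, and keeps its sources in self.noise_sources / self.signal_sources); it shows how a complex-valued custom signal forces the voltage buffer to a complex dtype.

import numpy as np

ts = np.arange(8) / 8.0                              # stand-in for self.ts
v = np.zeros_like(ts)                                # stand-in for self.v (real)
noise_func = lambda t: 0.1 * np.random.standard_normal(t.shape)
signal_func = lambda t: np.exp(2j * np.pi * 3 * t)   # complex custom signal

v += noise_func(ts)
signal_v = np.array(signal_func(ts))
if not np.iscomplexobj(v) and np.iscomplexobj(signal_v):
    v = v.astype(complex)                            # cast before accumulating
v += signal_v
print(v.dtype)                                       # complex128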
Code example #2
def cmplx_sort(p):
    """Sort roots based on magnitude.

    Parameters
    ----------
    p : array_like
        The roots to sort, as a 1-D array.

    Returns
    -------
    p_sorted : ndarray
        Sorted roots.
    indx : ndarray
        Array of indices needed to sort the input `p`.

    Examples
    --------
    >>> import cusignal
    >>> vals = [1, 4, 1+1.j, 3]
    >>> p_sorted, indx = cusignal.cmplx_sort(vals)
    >>> p_sorted
    array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j])
    >>> indx
    array([0, 2, 3, 1])

    """
    p = cp.asarray(p)
    if cp.iscomplexobj(p):
        indx = cp.argsort(abs(p))
    else:
        indx = cp.argsort(p)
    return cp.take(p, indx, 0), indx
Code example #3
def hilbert_2D(Volume_spectra: cp.ndarray) -> cp.ndarray:
    """
    Compute the analytic signal, using the Hilbert transform. \
    The transformation is done along the last axis.

    Args:
        Volume_spectra::cp.ndarray
        2nd order tensor containing spectras raw data. Last dimension is depth encoding.

    Returns:
        Analytical signal of Volume_spectra::cp.ndarray
        :rtype: cp.ndarray

    Notes
    -----
    The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
    .. math:: x_a = F^{-1}(F(x) 2U) = x + i y

    """

    if cp.iscomplexobj(Volume_spectra):
        raise ValueError("x must be real.")

    # fftpack and Arguments are module-level names from the host project;
    # keep only the positive-frequency half of the spectrum along depth.
    Volume_spectra = fftpack.fft(Volume_spectra,
                                 axis=-1,
                                 overwrite_x=True)[:, :Arguments.dimension[2] // 2]

    # Zero-pad the negative-frequency half and transform back.
    padding = cp.zeros_like(Volume_spectra)
    Volume_spectra = cp.concatenate((Volume_spectra * 2, padding), axis=1)

    return cp.fft.ifft(Volume_spectra, axis=1)
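The routine above relies on two module-level names from its project (fftpack and Arguments.dimension); the hedged sketch below reproduces the same half-spectrum / zero-pad construction with plain CuPy, taking the depth from the input shape instead of Arguments.dimension[2].

import cupy as cp

spectra = cp.random.rand(4, 16)                       # (A-scans, depth), real
depth = spectra.shape[1]                              # stands in for Arguments.dimension[2]
half = cp.fft.fft(spectra, axis=-1)[:, :depth // 2]   # keep the positive-frequency half
analytic = cp.fft.ifft(
    cp.concatenate((2 * half, cp.zeros_like(half)), axis=1), axis=1)
print(analytic.shape, analytic.dtype)                 # (4, 16) complex128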
Code example #4
File: filtering.py Project: mfkiwl/cusignal
def hilbert2(x, N=None):
    """
    Compute the '2-D' analytic signal of `x`

    Parameters
    ----------
    x : array_like
        2-D signal data.
    N : int or tuple of two ints, optional
        Number of Fourier components. Default is ``x.shape``

    Returns
    -------
    xa : ndarray
        Analytic signal of `x` taken along axes (0,1).

    References
    ----------
    .. [1] Wikipedia, "Analytic signal",
        https://en.wikipedia.org/wiki/Analytic_signal

    """
    x = atleast_2d(x)
    if x.ndim > 2:
        raise ValueError("x must be 2-D.")
    if iscomplexobj(x):
        raise ValueError("x must be real.")
    if N is None:
        N = x.shape
    elif isinstance(N, int):
        if N <= 0:
            raise ValueError("N must be positive.")
        N = (N, N)
    elif len(N) != 2 or cp.any(cp.asarray(N) <= 0):
        raise ValueError(
            "When given as a tuple, N must hold exactly two positive integers"
        )

    Xf = fftpack.fft2(x, N, axes=(0, 1))
    h1 = zeros(N[0], "d")
    h2 = zeros(N[1], "d")
    # Build each 1-D weight vector in place: 1 at DC (and Nyquist for even
    # lengths), 2 for positive frequencies, 0 for negative frequencies.
    # The arrays are modified in place, so no eval/exec gymnastics are needed.
    for h, N1 in zip((h1, h2), N):
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1 : N1 // 2] = 2
        else:
            h[0] = 1
            h[1 : (N1 + 1) // 2] = 2

    h = h1[:, newaxis] * h2[newaxis, :]
    k = x.ndim
    while k > 2:
        h = h[:, newaxis]
        k -= 1
    x = fftpack.ifft2(Xf * h, axes=(0, 1))
    return x
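A short usage sketch for the function above, assuming its bare names (atleast_2d, iscomplexobj, zeros, newaxis, fftpack) resolve to their CuPy / cupyx.scipy equivalents in the host module (the validation path already uses cp directly).

import cupy as cp

x = cp.random.rand(8, 8)       # real 2-D input
xa = hilbert2(x)               # analytic signal along axes (0, 1)
print(xa.shape, xa.dtype)      # (8, 8) complex128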
Code example #5
def hilbert2(x, N=None):
    """
    Compute the '2-D' analytic signal of `x`

    Parameters
    ----------
    x : array_like
        2-D signal data.
    N : int or tuple of two ints, optional
        Number of Fourier components. Default is ``x.shape``

    Returns
    -------
    xa : ndarray
        Analytic signal of `x` taken along axes (0,1).

    References
    ----------
    .. [1] Wikipedia, "Analytic signal",
        https://en.wikipedia.org/wiki/Analytic_signal

    """
    x = cp.atleast_2d(x)
    if x.ndim > 2:
        raise ValueError("x must be 2-D.")
    if cp.iscomplexobj(x):
        raise ValueError("x must be real.")
    if N is None:
        N = x.shape
    elif isinstance(N, int):
        if N <= 0:
            raise ValueError("N must be positive.")
        N = (N, N)
    elif len(N) != 2 or cp.any(cp.asarray(N) <= 0):
        raise ValueError(
            "When given as a tuple, N must hold exactly two positive integers")

    Xf = cp.fft.fft2(x, N, axes=(0, 1))

    h1, h2 = _hilbert2_kernel(size=N[1])

    h = h1[:, cp.newaxis] * h2[cp.newaxis, :]
    k = x.ndim
    while k > 2:
        h = h[:, cp.newaxis]
        k -= 1
    x = cp.fft.ifft2(Xf * h, axes=(0, 1))
    return x
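Since _hilbert2_kernel is a private helper of the host project, the hedged sketch below only exercises the public argument validation of this variant.

import cupy as cp

x = cp.random.rand(8, 8)
try:
    hilbert2(x, N=(8, -1))
except ValueError as err:
    print(err)   # "When given as a tuple, N must hold exactly two positive integers"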
Code example #6
def calculate_fid(act1, act2, cuda_ind=0):
    with cp.cuda.Device(cuda_ind):
        act1, act2 = cp.array(act1), cp.array(act2)
        # calculate mean and covariance statistics
        mu1, sigma1 = act1.mean(axis=0), cp.cov(act1, rowvar=False)
        mu2, sigma2 = act2.mean(axis=0), cp.cov(act2, rowvar=False)
        # calculate sum squared difference between means
        ssdiff = cp.sum((mu1 - mu2)**2.0)
        # calculate sqrt of product between cov
        covmean = cp.array(sqrtm(sigma1.dot(sigma2).get()))
        # check and correct imaginary numbers from sqrt
        if cp.iscomplexobj(covmean):
            covmean = covmean.real
        # calculate score
        fid = ssdiff + cp.trace(sigma1 + sigma2 - 2.0 * covmean)
        return fid
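This computes the Fréchet inception distance, FID = ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2*sqrt(sigma1 @ sigma2)); the matrix square root is taken on the host (via .get()), so sqrtm is presumably scipy.linalg.sqrtm. A minimal call (requires a CUDA device) might look like:

import numpy as np
from scipy.linalg import sqrtm   # assumed source of sqrtm in the snippet above

act1 = np.random.rand(128, 64)   # (n_samples, n_features) activations
act2 = np.random.rand(128, 64)
fid = calculate_fid(act1, act2)  # CuPy scalar computed on device 0
print(float(fid))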
Code example #7
def corrcoef(a, y=None, rowvar=True, bias=None, ddof=None, *, dtype=None):
    """Returns the Pearson product-moment correlation coefficients of an array.

    Args:
        a (cupy.ndarray): Array to compute the Pearson product-moment
            correlation coefficients.
        y (cupy.ndarray): An additional set of variables and observations.
        rowvar (bool): If ``True``, then each row represents a variable, with
            observations in the columns. Otherwise, the relationship is
            transposed.
        bias (None): Has no effect, do not use.
        ddof (None): Has no effect, do not use.
        dtype: Data type specifier. By default, the return data-type will have
            at least `numpy.float64` precision.

    Returns:
        cupy.ndarray: The Pearson product-moment correlation coefficients of
        the input array.

    .. seealso:: :func:`numpy.corrcoef`

    """
    if bias is not None or ddof is not None:
        warnings.warn('bias and ddof have no effect and are deprecated',
                      DeprecationWarning)

    out = cov(a, y, rowvar, dtype=dtype)
    try:
        d = cupy.diag(out)
    except ValueError:
        return out / out

    stddev = cupy.sqrt(d.real)
    out /= stddev[:, None]
    out /= stddev[None, :]

    cupy.clip(out.real, -1, 1, out=out.real)
    if cupy.iscomplexobj(out):
        cupy.clip(out.imag, -1, 1, out=out.imag)

    return out
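This snippet appears to mirror CuPy's own implementation, so calling the public cupy.corrcoef shows the behaviour directly; two perfectly anti-correlated rows give the expected off-diagonal -1.

import cupy

x = cupy.array([[0.0, 1.0, 2.0],
                [2.0, 1.0, 0.0]])
print(cupy.corrcoef(x))
# [[ 1. -1.]
#  [-1.  1.]]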
Code example #8
def metadata(A):
    return (A.shape, A.ndim, A.dtype, _cp.iscomplexobj(A))
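A tiny usage sketch, assuming _cp is an alias for cupy (as the underscore-prefixed name suggests):

import cupy as _cp

A = _cp.ones((2, 3), dtype=_cp.complex64)
print(metadata(A))   # ((2, 3), 2, dtype('complex64'), True)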
Code example #9
File: deconvolution.py Project: MannyKayy/cupyimg
def unsupervised_wiener(image,
                        psf,
                        reg=None,
                        user_params=None,
                        is_real=True,
                        clip=True):
    """Unsupervised Wiener-Hunt deconvolution.

    Return the deconvolution with a Wiener-Hunt approach, where the
    hyperparameters are automatically estimated. The algorithm is a
    stochastic iterative process (Gibbs sampler) described in the
    reference below. See also ``wiener`` function.

    Parameters
    ----------
    image : (M, N) ndarray
       The input degraded image.
    psf : ndarray
       The impulse response (input image's space) or the transfer
       function (Fourier space). Both are accepted. The transfer
       function is automatically recognized as being complex
       (``cupy.iscomplexobj(psf)``).
    reg : ndarray, optional
       The regularisation operator. The Laplacian by default. It can
       be an impulse response or a transfer function, as for the psf.
    user_params : dict, optional
       Dictionary of parameters for the Gibbs sampler. See below.
    clip : boolean, optional
       True by default. If true, pixel values of the result above 1 or
       under -1 are thresholded for skimage pipeline compatibility.

    Returns
    -------
    x_postmean : (M, N) ndarray
       The deconvolved image (the posterior mean).
    chains : dict
       The keys ``noise`` and ``prior`` contain the chain list of
       noise and prior precision respectively.

    Other parameters
    ----------------
    The keys of ``user_params`` are:

    threshold : float
       The stopping criterion: the norm of the difference between two
       successive approximate solutions (the empirical mean of the
       object samples, see Notes section). 1e-4 by default.
    burnin : int
       The number of samples to ignore before starting to compute the
       mean. 15 by default.
    min_iter : int
       The minimum number of iterations. 30 by default.
    max_iter : int
       The maximum number of iterations if ``threshold`` is not
       satisfied. 200 by default.
    callback : callable (None by default)
       A user-provided callable that, if given, is passed the current
       image sample at each iteration, e.g. to store samples or compute
       moments other than the mean. It has no influence on the
       algorithm's execution and is for inspection only.

    Examples
    --------
    >>> import cupy as cp
    >>> import numpy as np
    >>> from skimage import color, data
    >>> from scipy.signal import convolve2d
    >>> img = color.rgb2gray(data.astronaut())
    >>> psf = np.ones((5, 5)) / 25
    >>> img = convolve2d(img, psf, 'same')
    >>> img += 0.1 * img.std() * np.random.standard_normal(img.shape)
    >>> deconvolved_img, chains = unsupervised_wiener(cp.asarray(img),
    ...                                               cp.asarray(psf))

    Notes
    -----
    The estimated image is the posterior mean of a probability law (from
    a Bayesian analysis). The mean is defined as a sum over all possible
    images weighted by their respective probability. Given the size of
    the problem, the exact sum is not tractable, so this algorithm uses
    MCMC to draw images from the posterior law. The practical idea is to
    draw mostly highly probable images, since they contribute the most to
    the mean, while less probable images are drawn less often because
    their contribution is low. The empirical mean of these samples then
    estimates the posterior mean; it would be exact with an infinite
    number of samples.

    References
    ----------
    .. [1] François Orieux, Jean-François Giovannelli, and Thomas
           Rodet, "Bayesian estimation of regularization and point
           spread function parameters for Wiener-Hunt deconvolution",
           J. Opt. Soc. Am. A 27, 1593-1607 (2010)

           https://www.osapublishing.org/josaa/abstract.cfm?URI=josaa-27-7-1593

           http://research.orieux.fr/files/papers/OGR-JOSA10.pdf
    """
    params = {
        "threshold": 1e-4,
        "max_iter": 200,
        "min_iter": 30,
        "burnin": 15,
        "callback": None,
    }
    params.update(user_params or {})

    if reg is None:
        reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
    if not cp.iscomplexobj(reg):
        reg = uft.ir2tf(reg, image.shape, is_real=is_real)

    if psf.shape != reg.shape:
        trans_fct = uft.ir2tf(psf, image.shape, is_real=is_real)
    else:
        trans_fct = psf

    # The mean of the object
    x_postmean = cp.zeros(trans_fct.shape)
    # The previous computed mean in the iterative loop
    prev_x_postmean = cp.zeros(trans_fct.shape)

    # Difference between two successive means
    delta = np.nan

    # Initial state of the chain
    gn_chain, gx_chain = [1], [1]

    # The correlation of the object in Fourier space (if size is big,
    # this can reduce computation time in the loop)
    areg2 = cp.abs(reg)
    areg2 *= areg2
    atf2 = cp.abs(trans_fct)
    atf2 *= atf2

    # The Fourier transform may change the image.size attribute, so we
    # store it.
    if is_real:
        data_spectrum = uft.urfft2(image.astype(float))
    else:
        data_spectrum = uft.ufft2(image.astype(float))

    # Gibbs sampling
    for iteration in range(params["max_iter"]):
        # Sample of Eq. 27 p(circX^k | gn^k-1, gx^k-1, y).

        # weighting (correlation in direct space)
        precision = gn_chain[-1] * atf2 + gx_chain[-1] * areg2  # Eq. 29
        excursion = (np.sqrt(0.5) / cp.sqrt(precision) *
                     (cp.random.standard_normal(data_spectrum.shape) +
                      1j * cp.random.standard_normal(data_spectrum.shape)))

        # mean Eq. 30 (RLS for fixed gn, gamma0 and gamma1 ...)
        wiener_filter = gn_chain[-1] * cp.conj(trans_fct) / precision

        # sample of X in Fourier space
        x_sample = wiener_filter * data_spectrum + excursion
        if params["callback"]:
            params["callback"](x_sample)

        # sample of Eq. 31 p(gn | x^k, gx^k, y)
        gn_chain.append(
            npr.gamma(
                image.size / 2,
                2 / uft.image_quad_norm(data_spectrum - x_sample * trans_fct),
            ))

        # sample of Eq. 31 p(gx | x^k, gn^k-1, y)
        gx_chain.append(
            npr.gamma((image.size - 1) / 2,
                      2 / uft.image_quad_norm(x_sample * reg)))

        # current empirical average
        if iteration > params["burnin"]:
            x_postmean = prev_x_postmean + x_sample

        if iteration > (params["burnin"] + 1):
            current = x_postmean / (iteration - params["burnin"])
            previous = prev_x_postmean / (iteration - params["burnin"] - 1)

            delta = (cp.sum(cp.abs(current - previous)) /
                     cp.sum(cp.abs(x_postmean)) /
                     (iteration - params["burnin"]))

        prev_x_postmean = x_postmean

        # stopping criterion
        if (iteration > params["min_iter"]) and (delta < params["threshold"]):
            break

    # Empirical average \approx POSTMEAN Eq. 44
    x_postmean = x_postmean / (iteration - params["burnin"])
    if is_real:
        x_postmean = uft.uirfft2(x_postmean, shape=image.shape)
    else:
        x_postmean = uft.uifft2(x_postmean)

    if clip:
        x_postmean[x_postmean > 1] = 1
        x_postmean[x_postmean < -1] = -1

    return (x_postmean, {"noise": gn_chain, "prior": gx_chain})
Code example #10
File: deconvolution.py Project: MannyKayy/cupyimg
def wiener(image, psf, balance, reg=None, is_real=True, clip=True):
    r"""Wiener-Hunt deconvolution

    Return the deconvolution with a Wiener-Hunt approach (i.e. with
    Fourier diagonalisation).

    Parameters
    ----------
    image : (M, N) ndarray
       Input degraded image
    psf : ndarray
       Point Spread Function. This is assumed to be the impulse
       response (input image space) if the data-type is real, or the
       transfer function (Fourier space) if the data-type is
       complex. There are no constraints on the shape of the impulse
       response. The transfer function must be of shape `(M, N)` if
       `is_real is True`, `(M, N // 2 + 1)` otherwise (see
       `cupy.fft.rfftn`).
    balance : float
       The regularisation parameter value that tunes the balance
       between the data adequacy that improves frequency restoration
       and the prior adequacy that reduces frequency restoration (to
       avoid noise artifacts).
    reg : ndarray, optional
       The regularisation operator. The Laplacian by default. It can
       be an impulse response or a transfer function, as for the
       psf. Shape constraint is the same as for the `psf` parameter.
    is_real : boolean, optional
       True by default. Specify whether ``psf`` and ``reg`` are provided
       under the Hermitian hypothesis, that is, only half of the
       frequency plane is provided (due to the redundancy of the Fourier
       transform of a real signal). This applies only if ``psf`` and/or
       ``reg`` are provided as a transfer function. For the Hermitian
       property, see the ``uft`` module or ``cupy.fft.rfftn``.
    clip : boolean, optional
       True by default. If True, pixel values of the result above 1 or
       under -1 are thresholded for skimage pipeline compatibility.

    Returns
    -------
    im_deconv : (M, N) ndarray
       The deconvolved image.

    Examples
    --------
    >>> import cupy as cp
    >>> import numpy as np
    >>> from skimage import color, data
    >>> from scipy.signal import convolve2d
    >>> img = color.rgb2gray(data.astronaut())
    >>> psf = np.ones((5, 5)) / 25
    >>> img = convolve2d(img, psf, 'same')
    >>> img += 0.1 * img.std() * np.random.standard_normal(img.shape)
    >>> deconvolved_img = wiener(cp.asarray(img), cp.asarray(psf), 1100)

    Notes
    -----
    This function applies the Wiener filter to a noisy and degraded
    image by an impulse response (or PSF). If the data model is

    .. math:: y = Hx + n

    where :math:`n` is noise, :math:`H` the PSF and :math:`x` the
    unknown original image, the Wiener filter is

    .. math::
       \hat x = F^\dagger (|\Lambda_H|^2 + \lambda |\Lambda_D|^2)^{-1}
       \Lambda_H^\dagger F y

    where :math:`F` and :math:`F^\dagger` are the Fourier and inverse
    Fourier transforms respectively, :math:`\Lambda_H` the transfer
    function (or the Fourier transform of the PSF, see [Hunt] below)
    and :math:`\Lambda_D` the filter to penalize the restored image
    frequencies (Laplacian by default, that is penalization of high
    frequency). The parameter :math:`\lambda` tunes the balance
    between the data (which tend to increase high frequencies, even
    those coming from noise) and the regularization.

    These methods are specific to a prior model, so the application, or
    the nature of the true image, must correspond to that prior model.
    By default, the prior model (Laplacian) introduces image smoothness
    or pixel correlation. It can also be interpreted as a high-frequency
    penalization that compensates for the instability of the solution
    with respect to the data (sometimes called noise amplification or
    "explosive" solution).

    Finally, the use of Fourier space implies a circulant property of
    :math:`H`, see [Hunt].

    References
    ----------
    .. [1] François Orieux, Jean-François Giovannelli, and Thomas
           Rodet, "Bayesian estimation of regularization and point
           spread function parameters for Wiener-Hunt deconvolution",
           J. Opt. Soc. Am. A 27, 1593-1607 (2010)

           https://www.osapublishing.org/josaa/abstract.cfm?URI=josaa-27-7-1593

           http://research.orieux.fr/files/papers/OGR-JOSA10.pdf

    .. [2] B. R. Hunt "A matrix theory proof of the discrete
           convolution theorem", IEEE Trans. on Audio and
           Electroacoustics, vol. au-19, no. 4, pp. 285-288, dec. 1971
    """
    if reg is None:
        reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
    if not cp.iscomplexobj(reg):
        reg = uft.ir2tf(reg, image.shape, is_real=is_real)

    if psf.shape != reg.shape:
        trans_func = uft.ir2tf(psf, image.shape, is_real=is_real)
    else:
        trans_func = psf

    atf2 = cp.abs(trans_func)
    atf2 *= atf2
    areg2 = cp.abs(reg)
    areg2 *= areg2
    wiener_filter = cp.conj(trans_func) / (atf2 + balance * areg2)
    if is_real:
        deconv = uft.uirfft2(wiener_filter * uft.urfft2(image),
                             shape=image.shape)
    else:
        deconv = uft.uifft2(wiener_filter * uft.ufft2(image))

    if clip:
        deconv[deconv > 1] = 1
        deconv[deconv < -1] = -1

    return deconv
Code example #11
def hilbert(x, N=None, axis=-1):
    """
    Compute the analytic signal, using the Hilbert transform.

    The transformation is done along the last axis by default.

    Parameters
    ----------
    x : array_like
        Signal data.  Must be real.
    N : int, optional
        Number of Fourier components.  Default: ``x.shape[axis]``
    axis : int, optional
        Axis along which to do the transformation.  Default: -1.

    Returns
    -------
    xa : ndarray
        Analytic signal of `x`, of each 1-D array along `axis`

    Notes
    -----
    The analytic signal ``x_a(t)`` of signal ``x(t)`` is:

    .. math:: x_a = F^{-1}(F(x) 2U) = x + i y

    where `F` is the Fourier transform, `U` the unit step function,
    and `y` the Hilbert transform of `x`. [1]_

    In other words, the negative half of the frequency spectrum is zeroed
    out, turning the real-valued signal into a complex signal.  The Hilbert
    transformed signal can be obtained from ``cp.imag(hilbert(x))``, and the
    original signal from ``cp.real(hilbert(x))``.

    Examples
    --------
    In this example we use the Hilbert transform to determine the amplitude
    envelope and instantaneous frequency of an amplitude-modulated signal.

    >>> import cupy as cp
    >>> import matplotlib.pyplot as plt
    >>> from cusignal import hilbert, chirp

    >>> duration = 1.0
    >>> fs = 400.0
    >>> samples = int(fs*duration)
    >>> t = cp.arange(samples) / fs

    We create a chirp of which the frequency increases from 20 Hz to 100 Hz and
    apply an amplitude modulation.

    >>> signal = chirp(t, 20.0, t[-1], 100.0)
    >>> signal *= (1.0 + 0.5 * cp.sin(2.0*cp.pi*3.0*t) )

    The amplitude envelope is given by magnitude of the analytic signal. The
    instantaneous frequency can be obtained by differentiating the
    instantaneous phase with respect to time. The instantaneous phase corresponds
    to the phase angle of the analytic signal.

    >>> analytic_signal = hilbert(signal)
    >>> amplitude_envelope = cp.abs(analytic_signal)
    >>> instantaneous_phase = cp.unwrap(cp.angle(analytic_signal))
    >>> instantaneous_frequency = (cp.diff(instantaneous_phase) /
    ...                            (2.0*cp.pi) * fs)

    >>> fig = plt.figure()
    >>> ax0 = fig.add_subplot(211)
    >>> ax0.plot(cp.asnumpy(t), cp.asnumpy(signal), label='signal')
    >>> ax0.plot(cp.asnumpy(t), cp.asnumpy(amplitude_envelope),
    ...          label='envelope')
    >>> ax0.set_xlabel("time in seconds")
    >>> ax0.legend()
    >>> ax1 = fig.add_subplot(212)
    >>> ax1.plot(cp.asnumpy(t[1:]), cp.asnumpy(instantaneous_frequency))
    >>> ax1.set_xlabel("time in seconds")
    >>> ax1.set_ylim(0.0, 120.0)

    References
    ----------
    .. [1] Wikipedia, "Analytic signal".
           https://en.wikipedia.org/wiki/Analytic_signal
    .. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
    .. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
           Processing, Third Edition, 2009. Chapter 12.
           ISBN 13: 978-1292-02572-8

    """
    x = cp.asarray(x)
    if cp.iscomplexobj(x):
        raise ValueError("x must be real.")
    if N is None:
        N = x.shape[axis]
    if N <= 0:
        raise ValueError("N must be positive.")

    Xf = cp.fft.fft(x, N, axis=axis)
    h = _hilbert_kernel(size=N)

    if x.ndim > 1:
        ind = [cp.newaxis] * x.ndim
        ind[axis] = slice(None)
        h = h[tuple(ind)]
    x = cp.fft.ifft(Xf * h, axis=axis)
    return x
Code example #12
                vect = cp.zeros(self.shape, dtype=self.dtype)
                vect[idxs] = v
                yield vect

    def randn(self):
        return cp.array(
            cp.random.randn(*self.shape)).astype(self.dtype) + 1.0j * cp.array(
                cp.random.randn(*self.shape)).astype(self.dtype)

    def _inner_prod(self, x, y):
        return cp.real(cp.dot(cp.conj(cp.ravel(x)), cp.ravel(y)))

    def _covector(self, x):
        return cp.conj(x)


VSpace.register(
    cp.ndarray,
    lambda x: ComplexArrayVSpace(x) if cp.iscomplexobj(x) else ArrayVSpace(x),
)

float_types = [float, cp.float64, cp.float32, cp.float16]

for type_ in float_types:
    ArrayVSpace.register(type_)

complex_types = [complex, np.complex64, np.complex128]

for type_ in complex_types:
    ComplexArrayVSpace.register(type_)