Code example #1
 def take_lagr(self, psi, phi, data, h, e, lamd, mu, alpha, rho, tau,
               model):
     lagr = cp.zeros(7, dtype="float32")
     # Lagrangian ptycho part by angles partitions
     for k in range(0, self.ptheta):
         ids = np.arange(k * self.tomoshapep[0],
                         (k + 1) * self.tomoshapep[0])
         self.cl_ptycho.setobj(self.scan[:, ids].data.ptr,
                               self.prb.data.ptr)
         fpsi = self.fwd_ptycho(psi[ids])
         datap = cp.array(data[ids])
         if (model == 'poisson'):
             lagr[0] += cp.sum(
                 cp.abs(fpsi)**2 - 2 * datap * self.mlog(cp.abs(fpsi)) -
                 (datap - 2 * datap * self.mlog(cp.sqrt(datap))))
         if (model == 'gaussian'):
             lagr[0] += cp.linalg.norm(cp.abs(fpsi) - cp.sqrt(datap))**2
     lagr[1] = alpha * cp.sum(
         cp.sqrt(cp.real(cp.sum(phi * cp.conj(phi), 0))))
     lagr[2] = 2 * cp.sum(cp.real(cp.conj(lamd) * (h - psi)))
     lagr[3] = rho * cp.linalg.norm(h - psi)**2
     lagr[4] = 2 * cp.sum(cp.real(cp.conj(mu) * (e - phi)))
     lagr[5] = tau * cp.linalg.norm(e - phi)**2
     lagr[6] = cp.sum(lagr[0:6])  # total: sum of all six terms above
     return lagr
Code example #2
def cupy_signal(signal):
    amp = cp.sqrt(cp.real(signal * cp.conj(signal)))
    phase = cp.angle(signal)
    real = cp.real(signal)
    imag = cp.imag(signal)

    return amp, phase, real, imag
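
A quick usage sketch for the decomposition helper above; the test signal is an illustrative unit-amplitude complex exponential, not taken from the original project:

import cupy as cp
import numpy as np

# amp should be ~1 everywhere and phase should reproduce t (no wrapping for t in [0, pi])
t = cp.linspace(0, np.pi, 8)
amp, phase, real, imag = cupy_signal(cp.exp(1j * t))
print(cp.asnumpy(amp), cp.asnumpy(phase))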
Code example #3
def besselj__n(n, z):
    if n < 0:
        # J_{-n}(z) = (-1)^n J_n(z)
        return (-1) ** (-n) * besselj__n(-n, z)
    if n == 0:
        return cupyx.scipy.special.j0(cp.real(z))
    elif n == 1:
        return cupyx.scipy.special.j1(cp.real(z))
    elif n >= 2:
        # upward recurrence: J_n(z) = 2(n-1)/z * J_{n-1}(z) - J_{n-2}(z)
        return (2 * (n - 1) * besselj__n(int(n) - 1, z) / cp.real(z)
                - besselj__n(int(n) - 2, z))
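
A minimal check of the recurrence above, assuming SciPy is available on the CPU for reference values (scipy.special.jv is not part of the snippet):

import cupy as cp
import cupyx.scipy.special
from scipy import special

z = cp.linspace(0.5, 10.0, 5)          # keep z away from 0: the recurrence divides by z
j3_gpu = besselj__n(3, z)              # upward recurrence evaluated on the GPU
j3_ref = special.jv(3, cp.asnumpy(z))  # CPU reference values
print(cp.asnumpy(j3_gpu) - j3_ref)     # differences should be small for moderate orders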
Code example #4
def julia_set_cp(zs, phase):
    ns = cp.zeros_like(zs, dtype=cp.float32)
    for i in range(n_iteration):
        # decompose the complex update into real and imaginary parts for cp.where
        zs_real = cp.where(
            cp.abs(zs) < R, cp.real(zs**2 + 0.7885 * cp.exp(phase)),
            cp.real(zs))
        zs_imag = cp.where(
            cp.abs(zs) < R, cp.imag(zs**2 + 0.7885 * cp.exp(phase)),
            cp.imag(zs))
        zs = zs_real + 1j * zs_imag
        not_diverged = cp.abs(zs) < R
        ns = ns + not_diverged.astype(cp.float32)

    return ns, zs
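
A hedged driver for the kernel above. The escape radius R, the iteration count n_iteration, the viewport, and the value passed as phase are all assumptions; the snippet itself only shows the iteration loop and reads R and n_iteration from module scope:

import cupy as cp

R = 2.0            # assumed escape radius
n_iteration = 200  # assumed iteration count

# complex grid over an assumed viewport of the complex plane
x = cp.linspace(-1.5, 1.5, 512, dtype=cp.float32)
y = cp.linspace(-1.5, 1.5, 512, dtype=cp.float32)
zs0 = (x[cp.newaxis, :] + 1j * y[:, cp.newaxis]).astype(cp.complex64)

# phase enters as c = 0.7885 * exp(phase), so a purely imaginary value keeps |c| at 0.7885
ns, zs_final = julia_set_cp(zs0, phase=cp.asarray(2.0j))
escape_counts = cp.asnumpy(ns)  # per-pixel iteration counts, e.g. for plotting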
Code example #5
File: sr.py Project: wangxinhua123/Level1
 def test_extend_conv(self):
     objsize = self.pf.shape
     inisize = self.gaussff.shape
     imxsize = objsize[0] + inisize[0]
     imysize = objsize[1] + inisize[1]
     cp.cuda.Device(0).use()
     # zero-filled buffers so the padding regions do not contain uninitialized memory
     objtmp = cp.zeros([imxsize, imysize])
     initmp = cp.zeros([imxsize, imysize])
     objtmp[0:objsize[0], 0:objsize[1]] = self.pf
     initmp[0:inisize[0], 0:inisize[1]] = self.gaussff
     objtmp = self.shift(objtmp, imxsize / 2 - objsize[0] / 2,
                         imysize / 2 - objsize[1] / 2)
     initmp = self.shift(initmp, imxsize / 2 - inisize[0] / 2,
                         imysize / 2 - inisize[1] / 2)
     objfft = self.shift(
         cp.fft.fft2(objtmp) * cp.sqrt(imxsize * imysize), imxsize / 2,
         imysize / 2)
     inifft = self.shift(
         cp.fft.fft2(initmp) * cp.sqrt(imxsize * imysize), imxsize / 2,
         imysize / 2)
     convfft = self.shift(
         cp.fft.ifft2(self.shift(objfft * inifft, imxsize / 2,
                                 imysize / 2)), imxsize / 2, imysize / 2)
     self.conv = cp.real(convfft)
     return float(self.conv)
Code example #6
    def run(self):
        max_shape = self._find_max_shape()

        # compute FT, assuming they are the same size
        fft1 = cp.asarray(self.image1, dtype=cp.complex64)
        fft2 = cp.asarray(self.image2, dtype=cp.complex64)

        plan = get_fft_plan(fft1, value_type="C2C")
        fft1 = fftn(fft1, overwrite_x=True, plan=plan)
        fft2 = fftn(fft2, overwrite_x=True, plan=plan)

        print(f"shape: {fft1.shape}, dtype: {fft1.dtype}")

        @cp.fuse
        def normalize(fft_image):
            re, im = cp.real(fft_image), cp.imag(fft_image)
            length = cp.sqrt(re * re + im * im)
            return fft_image / length

        fft1 = normalize(fft1)
        fft2 = cp.conj(normalize(fft2))

        # phase correlation spectrum
        pcm = fft1 * fft2
        pcm = ifftn(pcm, overwrite_x=True, plan=plan)
        pcm = cp.real(pcm)

        from skimage.morphology import disk
        from skimage.filters import median
        pcm = cp.asnumpy(pcm)
        pcm = median(pcm, disk(3))
        pcm = cp.asarray(pcm)

        peak_list = self._extract_correlation_peaks(pcm)
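
A standalone sketch of the phase-correlation core used in run() above, without the FFT plans and the median filtering; a and b are illustrative same-shaped images:

import cupy as cp

a = cp.random.rand(64, 64).astype(cp.float32)
b = cp.roll(a, shift=(5, -3), axis=(0, 1))

Fa = cp.fft.fftn(a)
Fb = cp.fft.fftn(b)
spectrum = (Fa / cp.abs(Fa)) * cp.conj(Fb / cp.abs(Fb))  # normalized cross-power spectrum
pcm = cp.real(cp.fft.ifftn(spectrum))                    # phase-correlation matrix
peak = cp.unravel_index(cp.argmax(pcm), pcm.shape)
print(peak)  # expected (59, 3): the negative of the applied shift, modulo the image size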
Code example #7
 def Hpower(self, x):
     x = np.fft.ifft2(self.H *
                      np.fft.fft2(np.expand_dims(x, -1), axes=(0, 1)),
                      axes=(0, 1))
     x = np.sum(self.mask * self.crop(np.real(x)), 2)
     x = self.pad(x)
     return x
Code example #8
def test_wiener(dtype):
    psf = np.ones((5, 5)) / 25
    data = signal.convolve2d(cp.asnumpy(test_img), psf, "same")
    np.random.seed(0)
    data += 0.1 * data.std() * np.random.standard_normal(data.shape)

    psf = cp.asarray(psf, dtype=dtype)
    data = cp.asarray(data, dtype=dtype)

    deconvolved = restoration.wiener(data, psf, 0.05)
    assert deconvolved.dtype == dtype

    path = fetch('restoration/tests/camera_wiener.npy')
    rtol = 1e-5 if dtype == np.float32 else 1e-12
    atol = rtol
    cp.testing.assert_allclose(deconvolved,
                               np.load(path),
                               rtol=rtol,
                               atol=atol)

    _, laplacian = uft.laplacian(2, data.shape)
    otf = uft.ir2tf(psf, data.shape, is_real=False)
    deconvolved = restoration.wiener(data,
                                     otf,
                                     0.05,
                                     reg=laplacian,
                                     is_real=False)
    cp.testing.assert_allclose(cp.real(deconvolved),
                               np.load(path),
                               rtol=rtol,
                               atol=atol)
Code example #9
    def log_likelihood_ratio(self):
        """ Calculates the real part of log-likelihood value

        Returns
        -------
        float: The real part of the log likelihood

        """
        waveform_polarizations = self.waveform_generator.frequency_domain_strain(
            self.parameters)
        if waveform_polarizations is None:
            return np.nan_to_num(-np.inf)

        d_inner_h = 0
        h_inner_h = 0

        for interferometer in self.interferometers:
            d_inner_h_ifo, h_inner_h_ifo = self.calculate_snrs(
                interferometer=interferometer,
                waveform_polarizations=waveform_polarizations,
            )
            d_inner_h += d_inner_h_ifo
            h_inner_h += h_inner_h_ifo

        if self.distance_marginalization:
            log_l = self.distance_marglinalized_likelihood(d_inner_h=d_inner_h,
                                                           h_inner_h=h_inner_h)
        else:
            log_l = -2 / self.duration * (h_inner_h - 2 * xp.real(d_inner_h))
        return float(log_l.real)
Code example #10
    def grad_ptycho(self, data, psi, prb, scan, zlamd, rho, niter):
        """Gradient solver for the ptychography problem |||FQpsi|-sqrt(data)||^2_2 + rho||psi-zlamd||^2_2"""
        # minimization functional
        def minf(fpsi, psi):
            f = cp.linalg.norm(cp.abs(fpsi) - cp.sqrt(data))**2
            if(rho > 0):
                f += rho*cp.linalg.norm(psi-zlamd)**2
            return f

        for i in range(niter):
            # compute the gradient
            fpsi = self.fwd_ptycho(psi, prb, scan)

            gradpsi = self.adj_ptycho(
                fpsi - cp.sqrt(data)*fpsi/(cp.abs(fpsi)+1e-32), prb, scan)

            # normalization coefficient for skipping the line search procedure
            afpsi = self.adj_ptycho(fpsi, prb, scan)
            norm_coeff = cp.real(cp.sum(psi*cp.conj(afpsi)) /
                                 (cp.sum(afpsi*cp.conj(afpsi))+1e-32))

            if(rho > 0):
                gradpsi += rho*(psi-zlamd)
                gradpsi *= min(1/rho, norm_coeff)/2
            else:
                gradpsi *= norm_coeff/2
            # update psi
            psi = psi - 0.5*gradpsi
            # check convergence
            # print(f'{i}) {minf(fpsi, psi).get():.2e} ')

        return psi
Code example #11
File: quantization.py Project: bbrzycki/setigen
    def quantize(self, voltages, custom_stds=None):
        """
        Quantize input complex voltages. Cache voltage means and standard deviations, per 
        polarization and per antenna.
        
        Parameters
        ----------
        voltages : array
            Array of complex voltages
        custom_stds : float, list, or array
            Custom standard deviation to use for scaling, instead of automatic calculation. Each quantizer will go
            from custom_stds values to self.target_std. Can either be a single value or an array-like object of length
            2, to set the custom standard deviation for real and imaginary parts.
            
        Returns
        -------
        q_voltages : array
            Array of complex quantized voltages
        """
        try:
            assert len(custom_stds) == 2
        except TypeError:
            custom_stds = [custom_stds] * 2

        q_r = self.quantizer_r.quantize(xp.real(voltages),
                                        custom_std=custom_stds[0])
        q_i = self.quantizer_i.quantize(xp.imag(voltages),
                                        custom_std=custom_stds[1])

        self.stats_cache_r = self.quantizer_r.stats_cache
        self.stats_cache_i = self.quantizer_i.stats_cache

        return q_r + q_i * 1j
Code example #12
def test_masked_registration_random_masks_non_equal_sizes():
    """masked_register_translation should be able to register
    translations between images that are not the same size even
    with random masks."""
    # Seed the random number generator for reproducible results
    np.random.seed(23)

    reference_image = cp.asarray(camera())
    shift = (-7, 12)
    shifted = cp.real(
        fft.ifft2(fourier_shift(fft.fft2(reference_image), shift)))

    # Crop the shifted image
    shifted = shifted[64:-64, 64:-64]

    # Random masks with 75% of pixels being valid
    ref_mask = np.random.choice([True, False],
                                reference_image.shape,
                                p=[3 / 4, 1 / 4])
    shifted_mask = np.random.choice([True, False],
                                    shifted.shape,
                                    p=[3 / 4, 1 / 4])

    reference_image = cp.asarray(reference_image)
    shifted = cp.asarray(shifted)
    measured_shift = masked_register_translation(
        reference_image,
        shifted,
        cp.ones(ref_mask.shape, dtype=ref_mask.dtype),
        cp.ones(shifted_mask.shape, dtype=shifted_mask.dtype),
    )
    cp.testing.assert_array_equal(measured_shift, -cp.asarray(shift))
Code example #13
def test_masked_registration_random_masks():
    """masked_register_translation should be able to register translations
    between images even with random masks."""
    # Seed the random number generator for reproducible results
    np.random.seed(23)

    reference_image = cp.array(camera())
    shift = (-7, 12)
    shifted = cp.real(fft.ifft2(fourier_shift(
        fft.fft2(reference_image), shift)))

    # Random masks with 75% of pixels being valid
    ref_mask = np.random.choice(
        [True, False], reference_image.shape, p=[3 / 4, 1 / 4])
    shifted_mask = np.random.choice(
        [True, False], shifted.shape, p=[3 / 4, 1 / 4])

    ref_mask = cp.asarray(ref_mask)
    shifted_mask = cp.asarray(shifted_mask)

    measured_shift = masked_register_translation(reference_image,
                                                 shifted,
                                                 reference_mask=ref_mask,
                                                 moving_mask=shifted_mask)

    cp.testing.assert_array_equal(measured_shift, -cp.asarray(shift))
Code example #14
def test_wiener():
    psf = np.ones((5, 5)) / 25
    data = convolve2d(test_img.get(), psf, "same")
    np.random.seed(0)
    data += 0.1 * data.std() * np.random.standard_normal(data.shape)

    psf = cp.asarray(psf)
    data = cp.asarray(data)

    deconvolved = restoration.wiener(data, psf, 0.05)

    if have_fetch:
        path = fetch("restoration/tests/camera_wiener.npy")
    else:
        path = pjoin(dirname(abspath(__file__)), "camera_wiener.npy")
    cp.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3)

    _, laplacian = uft.laplacian(2, data.shape)
    otf = uft.ir2tf(psf, data.shape, is_real=False)
    deconvolved = restoration.wiener(data,
                                     otf,
                                     0.05,
                                     reg=laplacian,
                                     is_real=False)
    cp.testing.assert_allclose(cp.real(deconvolved), np.load(path), rtol=1e-3)
Code example #15
File: solver.py Project: arbrefleur/tomocg
 def solve_reg(self, u, mu, tau, alpha):
     z = self.fwd_reg(u) + mu / tau
     # Soft-thresholding
     za = cp.sqrt(cp.real(cp.sum(z * cp.conj(z), 0)))
     z[:, za <= alpha / tau] = 0
     z[:, za > alpha/tau] -= alpha/tau * \
         z[:, za > alpha/tau]/(za[za > alpha/tau])
     return z
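
A toy check of the group soft-thresholding block in solve_reg, on a small hand-made array (shapes and values are purely illustrative):

import cupy as cp

# shape (2, n): a stack of components whose joint amplitude is taken along axis 0
z = cp.array([[3 + 4j, 0.1 + 0.1j],
              [0 + 0j, 0.05 - 0.05j]])
alpha, tau = 1.0, 1.0
za = cp.sqrt(cp.real(cp.sum(z * cp.conj(z), 0)))  # per-column amplitude: [5.0, ~0.16]
z[:, za <= alpha / tau] = 0
z[:, za > alpha / tau] -= alpha / tau * z[:, za > alpha / tau] / za[za > alpha / tau]
print(cp.asnumpy(z))  # first column shrinks from amplitude 5 to 4, second column is zeroed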
Code example #16
File: TVmin.py Project: yohschang/RBC_analysis
    def positive_constrain(self,dn_3D,dF_3D_2):
        n_med = 1.334
        wavelength = 532
        k2 = (2*np.pi*n_med/wavelength)**2
        n_med2 = n_med**2
        # dF_3D = cupy.multiply(cupy.subtract(cupy.divide(cupy.multiply(dn_3D,dn_3D),n_med2),1),-k2)
        # dF_3D = cupy.fft.fftn(dF_3D)
        # dF_3D[cupy.not_equal(dF_3D_2,0)] = dF_3D_2[cupy.not_equal(dF_3D_2,0)]
        # dF_3D   = cupy.fft.ifftn(dF_3D)    
        # dn_3D   = cupy.multiply(cupy.sqrt(cupy.add(cupy.divide(dF_3D,-k2), 1)), n_med)
        
        # dn_3D =  cupy.fft.fftshift(dn_3D);
        dn_3D[cupy.less(cupy.real(dn_3D),n_med)] = n_med+1j*cupy.imag(dn_3D[cupy.less(cupy.real(dn_3D),n_med)])
        dn_3D[cupy.less(cupy.imag(dn_3D),0)]     = cupy.real(dn_3D[cupy.less(cupy.imag(dn_3D), 0)])


        return dn_3D
Code example #17
def compensate_dispersion_3D(spectra: np.ndarray,
                             calibration: dict) -> cp.ndarray:

    calib = cp.asarray(calibration['dispersion'])

    Pdispersion = cp.asarray(calib * complex(0, 1) * Arguments.dispersion)

    return cp.real(hilbert_3D(spectra) * cp.exp(Pdispersion))
Code example #18
def spectrum_shift_2D(Volume_spectra: cp.ndarray) -> cp.ndarray:

    Volume_spectra = hilbert_2D(Volume_spectra)

    spectrum_shift = cp.exp(
        complex(0, 1) * cp.arange(start=0, stop=Arguments.dimension[2],
                                  dtype=cp.float64) * Arguments.shift)

    Volume_spectra = cp.multiply(Volume_spectra, spectrum_shift)

    return cp.real(Volume_spectra)
Code example #19
def compensate_dispersion_2D(Volume_spectra: cp.ndarray, dispersion) -> cp.ndarray:

    Pdispersion = cp.asarray(dispersion * complex(0, 1) * Arguments.dispersion)

    compensated_spectra = hilbert_2D(Volume_spectra) * cp.exp(Pdispersion)

    compensated_spectra = cp.real(compensated_spectra)

    return compensated_spectra
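
The spectral-processing snippets above call hilbert_2D / hilbert_3D helpers that are not shown. A minimal sketch of what hilbert_2D might look like, assuming it returns the analytic signal along the last axis via CuPy's FFT (an assumption, not the project's actual implementation):

import cupy as cp

def hilbert_2D(x: cp.ndarray) -> cp.ndarray:
    """Analytic signal along the last axis (assumed behaviour)."""
    n = x.shape[-1]
    Xf = cp.fft.fft(x, axis=-1)
    h = cp.zeros(n, dtype=Xf.real.dtype)
    # keep DC (and Nyquist for even n), double the positive frequencies
    if n % 2 == 0:
        h[0] = h[n // 2] = 1
        h[1:n // 2] = 2
    else:
        h[0] = 1
        h[1:(n + 1) // 2] = 2
    return cp.fft.ifft(Xf * h, axis=-1)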
Code example #20
 def inner_with_domain(sub_loads, ignore_domain=False):
     full_loads = cp.zeros(shape, dtype=dtype)
     full_loads[domain] = sub_loads
     fft_loads = forward_trans(full_loads, s=input_shape)
     full = norm_inv * cp.real(backward_trans(fft_loads * fft_im))
     full = full[:full_loads.shape[0], :full_loads.shape[1]]
     if ignore_domain:
         return full
     return full[domain]
Code example #21
def spectrum_shift_3D(temp: cp.ndarray):

    spectrum_shift = cp.exp(
        complex(0, 1) * cp.arange(start=0, stop=Arguments.dimension[2]) *
        Arguments.shift)

    temp = cp.multiply(temp, spectrum_shift)

    return cp.real(temp)
Code example #22
 def distance_marglinalized_likelihood(self, d_inner_h, h_inner_h):
     log_l = (
         -2 / self.duration *
         xp.real(h_inner_h * self.parameters["luminosity_distance"]**2 /
                 self.distance_array**2 -
                 2 * d_inner_h * self.parameters["luminosity_distance"] /
                 self.distance_array))
     log_l = xp.log(xp.sum(xp.exp(log_l) * self.distance_prior_array))
     return log_l
Code example #23
    def Hadj(self, x):
        x = np.expand_dims(x, -1)
        x = x * self.mask
        x = self.pad(x)

        x = np.fft.fft2(x, axes=(0, 1))
        x = np.fft.ifft2(self.Hconj * x, axes=(0, 1))
        x = np.real(x)
        return x
Code example #24
def Compute_fiedler_vector(G):
    nrom_laplacian_matrics = nx.normalized_laplacian_matrix(G, weight='weight')
    nrom_laplacian_matrics_cupy = cp.asarray(nrom_laplacian_matrics.toarray())
    w, v = cp.linalg.eigh(nrom_laplacian_matrics_cupy)
    # algebraic_connectivity, fiedler_vector = power_iteration(nrom_laplacian_matrics)
    algebraic_connectivity = w[1]  # neat measure of how tight the graph is
    fiedler_vector = v[:, 1].T
    fiedler_vector = torch.Tensor(cp.asarray(cp.real(fiedler_vector)))
    algebraic_connectivity = torch.Tensor(cp.asarray(algebraic_connectivity))
    return algebraic_connectivity, fiedler_vector
Code example #25
def magnetization(s, B, d):
    sz = cp.diag([Sz(conf, 0) for conf in range(0, d)])
    #   sz=cp.array([[0,1],[1,0]])
    mag = cp.array(0., dtype=np.float32)
    for i_bond in range(2):
        sB = cp.tensordot(cp.diag(s[np.mod(i_bond - 1, 2)]),
                          B[i_bond],
                          axes=(1, 1))
        C = cp.tensordot(sB, cp.conj(sB), axes=([0, 2], [0, 2]))
        mag += cp.real(cp.tensordot(C, sz, axes=([0, 1], [0, 1])).get())
    return mag * 0.5
Code example #26
 def take_lagr(self, data, psi, prb, scan, z, lamd, rho):
     """Compute Lagrangian"""
     lagr = cp.zeros(4, dtype='float32')
     for k in range(self.nnodes):
         ids = cp.arange(k*self.nscan, (k+1)*self.nscan)
         lagr[0] += cp.linalg.norm(cp.abs(self.fwd_ptycho(psi[k],
                                   prb, scan[:, ids]))-cp.sqrt(data[ids]))**2
     lagr[1] = 2*cp.sum(cp.real(cp.conj(lamd)*(psi-z)))
     lagr[2] = rho*cp.linalg.norm(psi-z)**2
     lagr[3] = cp.sum(lagr[:3])
     return lagr
Code example #27
 def inner_no_domain(full_loads):
     full_loads = cp.asarray(full_loads)
     if full_loads.shape == shape:
         flat = False
     else:
         full_loads = cp.reshape(full_loads, loads.shape)
         flat = True
     fft_loads = forward_trans(full_loads, s=input_shape)
     full = norm_inv * cp.real(backward_trans(fft_loads * fft_im))
     full = full[:full_loads.shape[0], :full_loads.shape[1]]
     if flat:
         full = full.flatten()
     return full
Code example #28
def dftUpsample(imageCorr, upsampleFactor, xyShift):
    """
    This performs a matrix-multiply DFT around a small neighboring region of the initial
    correlation peak. By using the matrix-multiply DFT to do the Fourier upsampling, the
    efficiency is greatly improved. This is adapted from the subfunction dftups found in
    the dftregistration function on the MATLAB File Exchange.

    https://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation

    The matrix multiplication DFT is from:

    Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup, "Efficient subpixel
    image registration algorithms," Opt. Lett. 33, 156-158 (2008).
    http://www.sciencedirect.com/science/article/pii/S0045790612000778

    Args:
        imageCorr (complex valued ndarray):
            Correlation image between two images in Fourier space.
        upsampleFactor (int):
            Scalar integer of how much to upsample.
        xyShift (list of 2 floats):
            Coordinates in the UPSAMPLED GRID around which to upsample.
            These must be single-pixel coordinates IN THE UPSAMPLED GRID.

    Returns:
        (ndarray):
            Upsampled image from region around correlation peak.
    """
    imageSize = imageCorr.shape
    pixelRadius = 1.5
    numRow = np.ceil(pixelRadius * upsampleFactor)
    numCol = numRow

    colKern = cp.exp(
        (-1j * 2 * cp.pi / (imageSize[1] * upsampleFactor))
        * cp.outer(
            (cp.fft.ifftshift((cp.arange(imageSize[1]))) - cp.floor(imageSize[1] / 2)),
            (cp.arange(numCol) - xyShift[1]),
        )
    )

    rowKern = cp.exp(
        (-1j * 2 * cp.pi / (imageSize[0] * upsampleFactor))
        * cp.outer(
            (cp.arange(numRow) - xyShift[0]),
            (cp.fft.ifftshift(cp.arange(imageSize[0])) - cp.floor(imageSize[0] / 2)),
        )
    )

    imageUpsample = cp.real(rowKern @ imageCorr @ colKern)
    return imageUpsample
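
A hedged usage sketch for dftUpsample; the images, the coarse peak search, and the choice of upsampleFactor are illustrative, not taken from the surrounding project. Real callers typically offset xyShift so the expected peak falls inside the small upsampled patch; here the coarse integer peak position is used directly:

import cupy as cp
import numpy as np  # dftUpsample itself uses np.ceil

im1 = cp.random.rand(128, 128).astype(cp.float32)
im2 = cp.roll(im1, shift=(3, -2), axis=(0, 1))

# cross-correlation in Fourier space and a coarse integer peak estimate
imageCorr = cp.fft.fft2(im1) * cp.conj(cp.fft.fft2(im2))
coarse = cp.unravel_index(cp.argmax(cp.abs(cp.fft.ifft2(imageCorr))), im1.shape)

upsampleFactor = 16
xyShift = [int(coarse[0]) * upsampleFactor, int(coarse[1]) * upsampleFactor]
patch = dftUpsample(imageCorr, upsampleFactor, xyShift)
local = cp.unravel_index(cp.argmax(patch), patch.shape)  # maximum within the upsampled patch
print(patch.shape, local)  # a small (24, 24) patch for upsampleFactor = 16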
Code example #29
def corrszsz(dist, s, B, d):
    sz = cp.diag([Sz(conf, 0) for conf in range(0, d)])
    corr = cp.array(0., dtype=np.float32)
    if dist == 0:
        sz2 = cp.tensordot(sz, sz, axes=(1, 0))
        for i_bond in range(2):
            sB = cp.tensordot(cp.diag(s[np.mod(i_bond - 1, 2)]),
                              B[i_bond],
                              axes=(1, 1))
            C = cp.tensordot(sB, cp.conj(sB), axes=([0, 2], [0, 2]))
            corr += cp.real(
                cp.tensordot(C, sz2, axes=([0, 1], [0, 1])) -
                cp.tensordot(C, sz, axes=([0, 1], [0, 1])) *
                cp.tensordot(C, sz, axes=([0, 1], [0, 1])))
        return 0.5 * corr

    if dist != 0:
        dist = abs(dist)  # dist is a host-side integer
        for i_bond in range(2):
            sB = cp.tensordot(cp.diag(s[np.mod(i_bond - 1, 2)]),
                              B[i_bond],
                              axes=(1, 1))
            C = cp.tensordot(sB, cp.conj(sB), axes=(0, 0))
            R = cp.tensordot(C, sz, axes=([0, 2], [0, 1]))
            mean1 = cp.trace(R)
            for i in range(dist - 1):
                T = cp.tensordot(R, B[np.mod(i_bond + 1 + i, 2)], axes=(0, 1))
                T = cp.tensordot(T,
                                 cp.conj(B[np.mod(i_bond + 1 + i, 2)]),
                                 axes=(0, 1))
                R = cp.trace(T, axis1=0, axis2=2)
            C = cp.tensordot(B[np.mod(i_bond + dist, 2)],
                             cp.conj(B[np.mod(i_bond + dist, 2)]),
                             axes=(2, 2))
            L = cp.tensordot(R, C, axes=([0, 1], [1, 3]))
            corr += cp.real(
                cp.tensordot(L, sz, axes=([0, 1], [0, 1])) - mean1 * mean1)
        return 0.5 * corr
Code example #30
def besselFirstKindOnGPU(order, arg):
    import cupy as cp
    from cupyx.scipy import special
    if order == 0:
        bess = special.j0(arg)
    elif order == 1:
        bess = special.j1(arg)
    elif order >= 2:
        bess = 2 * (float(order) - 1) * besselFirstKindOnGPU(
            int(order) - 1, arg) / cp.real(arg) - besselFirstKindOnGPU(
                int(order) - 2, arg)
        if not cp.all(arg):
            zero_idx = cp.where(arg == 0)
            bess[zero_idx] = 0
    return bess