Example #1
 def test_22(self):
     N = 32
     M = 4
     Nd = 8
     D = cp.random.randn(Nd, Nd, M)
     D /= cp.sqrt(cp.sum(D**2, axis=(0, 1)))
     X0 = cp.zeros((N, N, M))
     xr = cp.random.randn(N, N, M)
     xp = cp.abs(xr) > 3
     X0[xp] = cp.random.randn(X0[xp].size)
     S = cp.sum(sl.fftconv(D, X0), axis=2)
     lmbda = 1e-3
     opt = cbpdn.ConvBPDN.Options(
         {'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 1e-5,
          'rho': 5e-1, 'AutoRho': {'Enabled': False}})
     bp = cbpdn.ConvBPDN(D, S, lmbda, opt)
     Xp = bp.solve()
     epsilon = cp.linalg.norm(bp.reconstruct(Xp).squeeze() - S)
     opt = cbpdn.ConvMinL1InL2Ball.Options(
         {'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 1e-5,
          'rho': 2e2, 'RelaxParam': 1.0, 'AutoRho': {'Enabled': False}})
     bc = cbpdn.ConvMinL1InL2Ball(D, S, epsilon=epsilon, opt=opt)
     Xc = bc.solve()
     assert cp.linalg.norm(Xp - Xc) / cp.linalg.norm(Xp) < 1e-3
     assert(cp.abs(cp.linalg.norm(Xp.ravel(), 1) -
                   cp.linalg.norm(Xc.ravel(), 1)) < 1e-3)
Example #2
 def setup_method(self, method):
     np.random.seed(12345)
     N = 32
     L = 20
     self.U = cp.ones((N, N, N))
     self.U[:, 0:(N // 2)] = -1
     self.V = cp.asarray(np.random.randn(N, N, N))
     t = cp.sort(cp.abs(self.V).ravel())[self.V.size - L]
     self.V[cp.abs(self.V) < t] = 0
     self.D = self.U + self.V
Example #3
def test_feret_diameter_max():
    # comparator result is based on SAMPLE from manually-inspected computations
    comparator_result = 18
    test_result = regionprops(SAMPLE)[0].feret_diameter_max
    assert cp.abs(test_result - comparator_result) < 1
    # square, test that Feret diameter is sqrt(2) * square side
    img = cp.zeros((20, 20), dtype=cp.uint8)
    img[2:-2, 2:-2] = 1
    feret_diameter_max = regionprops(img)[0].feret_diameter_max
    assert cp.abs(feret_diameter_max - 16 * math.sqrt(2)) < 1
Example #4
def show_f(x, title='Figure'):
    """
    Shows the Fourier spectrum as well as the inverse Fourier transform. For debugging.
    """
    plt.suptitle(title)
    plt.subplot(121)
    plt.imshow(cp.asnumpy(cp.log(cp.abs(x) + 1)))

    plt.subplot(122)
    plt.imshow(cp.asnumpy(cp.abs(cp_inverse_fourier(x))))

    plt.show()
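A usage sketch, assuming show_f lives in the same module and that cp_inverse_fourier is a centred inverse-FFT helper (both the helper body and the example image below are placeholders, not from the original module):

import cupy as cp
import matplotlib.pyplot as plt

def cp_inverse_fourier(x):
    # hypothetical stand-in for the helper used by show_f above
    return cp.fft.ifft2(cp.fft.ifftshift(x))

img = cp.random.rand(128, 128)                # placeholder image
spectrum = cp.fft.fftshift(cp.fft.fft2(img))  # centred spectrum
show_f(spectrum, title='Random image spectrum')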
Example #5
File: generator.py Project: yanfuzhou/eGPU
    def multivariate_normal(self, mean, cov, size=None, check_valid='ignore',
                            tol=1e-8, dtype=float):
        """(experimental) Returns an array of samples drawn from the
        multivariate normal distribution.

        .. seealso::
            :func:`cupy.random.multivariate_normal` for full documentation,
            :meth:`numpy.random.RandomState.multivariate_normal
            <numpy.random.mtrand.RandomState.multivariate_normal>`
        """
        util.experimental('cupy.random.RandomState.multivariate_normal')
        mean = cupy.asarray(mean, dtype=dtype)
        cov = cupy.asarray(cov, dtype=dtype)
        if size is None:
            shape = ()
        elif isinstance(size, cupy.util.collections_abc.Sequence):
            shape = tuple(size)
        else:
            shape = size,

        if mean.ndim != 1:
            raise ValueError('mean must be 1 dimensional')
        if (cov.ndim != 2) or (cov.shape[0] != cov.shape[1]):
            raise ValueError('cov must be 2 dimensional and square')
        if len(mean) != len(cov):
            raise ValueError('mean and cov must have same length')
        shape += (len(mean),)

        x = self.standard_normal(size=shape, dtype=dtype)

        u, s, v = cupy.linalg.svd(cov)

        if check_valid != 'ignore':
            if check_valid != 'warn' and check_valid != 'raise':
                raise ValueError(
                    'check_valid must equal \'warn\', \'raise\', or '
                    '\'ignore\'')

            a = cupy.dot(v.T * s, v)
            b = cov
            psd = cupy.all(cupy.abs(a-b) <= tol*(1+cupy.abs(b)))
            if not psd:
                if check_valid == 'warn':
                    warnings.warn(
                        'covariance is not symmetric positive-semidefinite.',
                        RuntimeWarning)
                else:
                    raise ValueError(
                        'covariance is not symmetric positive-semidefinite.')

        x = cupy.dot(x, cupy.sqrt(s)[:, None] * v)
        x += mean
        return x
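A hedged usage sketch (the method is marked experimental above, so availability and warnings depend on the CuPy version). The SVD step works because the code verifies cov ≈ v.T @ diag(s) @ v, so A = diag(sqrt(s)) @ v satisfies A.T @ A = cov and the transform x @ A + mean yields samples with the requested covariance:

import cupy

rs = cupy.random.RandomState(seed=0)
mean = cupy.array([0.0, 2.0])
cov = cupy.array([[1.0, 0.3],
                  [0.3, 0.5]])
samples = rs.multivariate_normal(mean, cov, size=100000)
print(samples.shape)                    # (100000, 2)
print(cupy.cov(samples, rowvar=False))  # empirical covariance, close to cov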
Example #6
def corny(x, y, z, t):
    depth = 2
    R = 3
    mod = R / 3
    r = cp.abs(z) < R
    for _ in range(depth):
        r &= (x % mod < mod / 3) | (x % mod > 2 * mod / 3)
        r &= (y % mod < mod / 3) | (y % mod > 2 * mod / 3)
        r &= (z % mod < mod / 3) | (z % mod > 2 * mod / 3)
        mod /= 3
    return 12 * r + z * (z > R) / 20, 7 * r + cp.abs(z) * (
        z < 0) / 20, 1 * r + z * (z > R) / 20
Example #7
    def __vector_nonlinear_step(self, ux, uy, gamma_mankorv, step_length):
        '''

        :param ux: samples of x-pol
        :param uy: samples of y-pol
        :param gamma_mankorv: 8/9 * gamma, the Manakov-averaged nonlinear coefficient (average over the Poincaré sphere)
        :param step_length: length of the propagation step
        :return: samples after nonlinear propagation
        '''
        leff = self.leff(step_length)
        power = cp.abs(ux)**2 + cp.abs(uy)**2
        gamma_mankorv = gamma_mankorv * leff
        ux = ux * cp.exp(1j * gamma_mankorv * power)
        uy = uy * cp.exp(1j * gamma_mankorv * power)
        return ux, uy
Example #8
def wiener_deconvolution(image, psf, snr=30, add_pad=0):
    """ A GPU accelerated implementation of a linear Wiener filter. Some effort is made
    to allow processing even relatively large images, but some kind of block-based processing
     (as in the RL implementation) may be required in some cases."""
    assert isinstance(image, Image)
    assert isinstance(psf, Image)

    image_s = Image(image.copy(), image.spacing)
    orig_shape = image.shape

    if image.ndim != psf.ndim:
        raise ValueError("Image and psf dimensions do not match")

    if psf.spacing != image.spacing:
        psf = imops.zoom_to_spacing(psf, image.spacing)

    if add_pad != 0:
        new_shape = list(i + 2 * add_pad for i in image_s.shape)
        image_s = imops.zero_pad_to_shape(image_s, new_shape)

    if psf.shape != image_s.shape:
        psf = imops.zero_pad_to_shape(psf, image_s.shape)

    psf /= psf.max()
    psf = fftshift(psf)

    psf_dev = cp.asarray(psf.astype(np.complex64))
    with get_fft_plan(psf_dev):
        psf_dev = fftn(psf_dev, overwrite_x=True)

    below = cp.asnumpy(psf_dev)
    psf_abs = cp.abs(psf_dev)**2
    psf_abs /= (psf_abs + snr)
    above = cp.asnumpy(psf_abs)
    psf_abs = None
    psf_dev = None

    image_dev = cp.asarray(image_s.astype(np.complex64))
    with get_fft_plan(image_dev):
        image_dev = fftn(image_dev, overwrite_x=True)

    wiener_dev = cp.asarray(arrayops.safe_divide(above, below))

    image_dev *= wiener_dev

    result = cp.asnumpy(cp.abs(ifftn(image_dev, overwrite_x=True)).real)
    result = Image(result, image.spacing)

    return imops.remove_zero_padding(result, orig_shape)
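Stripped of the Image/padding plumbing, the filter above multiplies the image spectrum by conj(H) / (|H|^2 + snr), where H is the FFT of the normalised, shifted PSF and snr acts as a scalar regulariser. A minimal sketch of that core on plain CuPy arrays (function and array names are illustrative):

import cupy as cp

def wiener_core(image, psf, snr=30.0):
    # image and psf are same-shaped real arrays, psf roughly centred
    H = cp.fft.fftn(cp.fft.fftshift(psf / psf.max()))  # transfer function
    G = cp.conj(H) / (cp.abs(H) ** 2 + snr)            # Wiener filter with scalar regulariser
    return cp.abs(cp.fft.ifftn(cp.fft.fftn(image) * G))

img = cp.random.rand(64, 64)
psf = cp.zeros((64, 64))
psf[30:34, 30:34] = 1.0
print(wiener_core(img, psf).shape)  # (64, 64)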
Example #9
def julia_set_cp(zs, phase):
    ns = cp.zeros_like(zs, dtype=cp.float32)
    for i in range(n_iteration):
        # cupy doesn't support complex dtypes in where, so decompose into real and imaginary parts
        zs_real = cp.where(
            cp.abs(zs) < R, cp.real(zs**2 + 0.7885 * cp.exp(phase)),
            cp.real(zs))
        zs_imag = cp.where(
            cp.abs(zs) < R, cp.imag(zs**2 + 0.7885 * cp.exp(phase)),
            cp.imag(zs))
        zs = zs_real + 1j * zs_imag
        not_diverged = cp.abs(zs) < R
        ns = ns + not_diverged.astype(cp.float32)

    return ns, zs
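julia_set_cp relies on module-level globals (the escape radius R and the iteration count n_iteration); a hedged driver sketch, assuming everything lives in one script so the function picks up these names (the values and grid extent below are illustrative):

import cupy as cp

R = 4.0            # escape radius (assumed value)
n_iteration = 200  # iteration count (assumed value)

x = cp.linspace(-1.5, 1.5, 800)
y = cp.linspace(-1.5, 1.5, 800)
grid = x[cp.newaxis, :] + 1j * y[:, cp.newaxis]  # complex-plane sample points

ns, zs = julia_set_cp(grid, phase=0.6j)  # phase enters through exp(phase)
counts = cp.asnumpy(ns)                  # per-pixel iteration counts before divergence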
Example #10
File: polyutils.py Project: toslunar/cupy
def trimcoef(c, tol=0):
    """Removes small trailing coefficients from a polynomial.

    Args:
        c(cupy.ndarray): 1d array of coefficients from lowest to highest order.
        tol(number, optional): trailing coefficients whose absolute value are
            less than or equal to ``tol`` are trimmed.

    Returns:
        cupy.ndarray: trimmed 1d array.

    .. seealso:: :func:`numpy.polynomial.polyutils.trimcoef`

    """
    if tol < 0:
        raise ValueError('tol must be non-negative')
    if c.size == 0:
        raise ValueError('Coefficient array is empty')
    if c.ndim > 1:
        raise ValueError('Coefficient array is not 1-d')
    if c.dtype.kind == 'b':
        raise ValueError('bool inputs are not allowed')
    if c.ndim == 0:
        c = c.ravel()
    c = c.astype(cupy.common_type(c), copy=False)
    filt = (cupy.abs(c) > tol)[::-1]
    ind = c.size - cupy._manipulation.add_remove._first_nonzero_krnl(
        filt, c.size).item()
    if ind == 0:
        return cupy.zeros_like(c[:1])
    return c[: ind]
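A worked example of the trimming behaviour (mirroring numpy.polynomial.polyutils.trimcoef): only the trailing run of coefficients with absolute value at or below tol is removed, interior zeros are kept, and an all-zero input collapses to a single zero.

import cupy

c = cupy.array([0.0, 0.0, 3.0, 0.0, 5.0e-6, 0.0])
print(trimcoef(c))              # [0. 0. 3. 0. 5.e-06]  (only the exact trailing zero dropped)
print(trimcoef(c, tol=1e-3))    # [0. 0. 3.]
print(trimcoef(cupy.zeros(5)))  # [0.]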
Example #11
    def calculate_drift_offset(image_mat: cp.array):
        frame_n, h, w = image_mat.shape[:3]
        x = np.arange(w)
        y = np.arange(h)
        bounds = [(0, h), (0, w)]

        def find_max_loc_in_map(cr_map: np.array, rough_yx: cp.array):
            # use SciPy interpolation and optimization to find the sub-pixel maximum
            precise_max_loc = np.empty((2, frame_n), dtype=cp.float32)
            for i in range(frame_n):
                np_cr_map = cr_map[i]
                F2 = interpolate.interp2d(x, y, -np_cr_map, kind="cubic")
                X0 = rough_yx[i]
                precise_max_loc[:, i] = optimize.minimize(lambda arg: F2(*arg),
                                                          X0,
                                                          bounds=bounds).x
            precise_max_loc[0, :] -= w // 2  # X
            precise_max_loc[1, :] -= h // 2  # Y
            return precise_max_loc

        result = cp.abs(
            cp.fft.fftshift(
                cp.fft.ifft2(cp.fft.fft2(image_mat) * base_frame_fft)))
        rough_max_loc = np.array(
            cp.unravel_index(cp.argmax(result.reshape(frame_n, -1), axis=1),
                             shape)).T
        result = find_max_loc_in_map(cp.asnumpy(result), rough_max_loc)
        avg_offset = cp.average(cp.array(result), axis=0)
        print("average offset: x:", avg_offset[0], ", y:", avg_offset[1])
        return result
Example #12
    def calc_vecs(self, num_vecs=100):
        '''Calculate principal component vectors by diagonalizing covariance matrix'''
        if self.cov is None:
            self._load_cov()

        if len(self.cov.shape) == 3:
            self.cov = self._get_allcov(self.cov)

        # Diagonalizing (3N, 3N) matrix
        sys.stderr.write('Diagonalizing...')
        vals, vecs = cp.linalg.eigh(self.cov)
        sys.stderr.write('done\n')

        # Note that eigenvalues are sorted in INCREASING order with sign
        # To get sorting acc. to absolute value with max first...
        sorter = cp.abs(vals).argsort()[::-1]

        if self.f_weighting:
            # We need to remove the scattering f-weighting from the eigenvectors
            vecs = (vecs.T / cp.tile(self.dgen.atom_f0.get(), 3)).T

        # Select first N eigenvectors
        vals_n = vals[sorter[:num_vecs]].astype('f4')
        vecs_n = vecs[:, sorter[:num_vecs]].astype('f4')

        # Save to file
        print('Saving PC vectors to', self.vecs_fname)
        with h5py.File(self.vecs_fname, 'w') as fptr:
            fptr['vecs'] = vecs_n.get()
            fptr['cov_weights'] = np.diag(np.ones(num_vecs) * 10.)
            fptr['orig_vals'] = vals_n.get()
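For reference, cp.linalg.eigh returns eigenvalues in ascending order, which is why the absolute-value argsort above is reversed to put the dominant components first; a small illustration:

import cupy as cp

A = cp.array([[2.0, 1.0],
              [1.0, 2.0]])
vals, vecs = cp.linalg.eigh(A)
print(vals)                           # [1. 3.]  (ascending)
sorter = cp.abs(vals).argsort()[::-1]
print(vals[sorter])                   # [3. 1.]  (largest magnitude first)
print(vecs[:, sorter])                # eigenvector columns reordered to match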
Example #13
 def test_10(self):
     N = 64
     M = 4
     Nd = 8
     D = cp.random.randn(Nd, Nd, M)
     X0 = cp.zeros((N, N, M))
     xr = cp.random.randn(N, N, M)
     xp = cp.abs(xr) > 3
     X0[xp] = cp.random.randn(X0[xp].size)
     S = cp.sum(fftconv(D, X0, axes=(0, 1)), axis=2)
     lmbda = 1e-4
     rho = 1e-1
     opt = cbpdn.ConvBPDN.Options({
         'Verbose': False,
         'MaxMainIter': 500,
         'RelStopTol': 1e-3,
         'rho': rho,
         'AutoRho': {
             'Enabled': False
         }
     })
     b = cbpdn.ConvBPDN(D, S, lmbda, opt)
     b.solve()
     X1 = b.Y.squeeze()
     assert rrs(X0, X1) < 5e-5
     Sr = b.reconstruct().squeeze()
     assert rrs(S, Sr) < 1e-4
Example #14
def kernel_coherence_cu(fft_data, ch_it, fft_config):
    """Defines a kernel that calculates the coherence between two channels.

    Args:
        fft_data (ndarray, float):
            Contains the fourier-transformed data.
            dim0: channel, dim1: Fourier coefficients, dim2: STFT (bins in fluctana code)
        ch_it (iterable):
            Iterator over a list of channels we wish to perform our computation on
        fft_config (dict):
            parameters of the fourier-transformed data

    Returns:
        Gxy (ndarray, float):
            Coherence
    """
    fft_data_cu = cp.asarray(fft_data)

    Gxy_cu = cp.zeros([len(ch_it), fft_data.shape[1]], dtype=fft_data.dtype)

    for idx, ch_pair in enumerate(ch_it):
        X = fft_data_cu[ch_pair.ch1.get_idx(), :, :]
        Y = fft_data_cu[ch_pair.ch2.get_idx(), :, :]
        Pxx = X * X.conj()
        Pyy = Y * Y.conj()
        Gxy_cu[idx, :] = ((X * Y.conj()) / cp.sqrt(Pxx * Pyy)).mean(axis=1)

    # Gxy_cu = cp.abs(Gxy_cu)
    Gxy = cp.asnumpy(cp.abs(Gxy_cu).real)

    return Gxy
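The per-pair quantity is the cross-spectrum normalised by the auto-spectra and averaged over STFT bins, Gxy = mean( X Y* / sqrt(Pxx Pyy) ). A standalone sketch on synthetic spectra (array names and sizes are illustrative, not from the original module):

import cupy as cp

n_freq, n_bins = 256, 64
X = cp.random.standard_normal((n_freq, n_bins)) + 1j * cp.random.standard_normal((n_freq, n_bins))
noise = cp.random.standard_normal((n_freq, n_bins)) + 1j * cp.random.standard_normal((n_freq, n_bins))
Y = 0.8 * X + 0.2 * noise

Pxx = X * X.conj()
Pyy = Y * Y.conj()
Gxy = ((X * Y.conj()) / cp.sqrt(Pxx * Pyy)).mean(axis=1)  # average over STFT bins
print(float(cp.abs(Gxy).mean()))  # close to 1: X and Y are strongly correlated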
Example #15
 def predict_normalized(self, x):  # x:shape=[2]
     Psi = cp.exp(-cp.sum(self.theta * cp.power(
         (cp.abs(self.X - x[cp.newaxis, :])), self.pl),
                          axis=1))
     ccc = Psi.T.dot(self.bbb)
     fff = self.mu + ccc
     return fff
Example #16
    def calculate_snrs(self, interferometer, waveform_polarizations):
        name = interferometer.name
        signal_ifo = xp.sum(
            xp.vstack([
                waveform_polarizations[mode] * float(
                    interferometer.antenna_response(
                        self.parameters["ra"],
                        self.parameters["dec"],
                        self.parameters["geocent_time"],
                        self.parameters["psi"],
                        mode,
                    )) for mode in waveform_polarizations
            ]),
            axis=0,
        )[interferometer.frequency_mask]

        time_delay = (self.parameters["geocent_time"] -
                      interferometer.strain_data.start_time +
                      interferometer.time_delay_from_geocenter(
                          self.parameters["ra"],
                          self.parameters["dec"],
                          self.parameters["geocent_time"],
                      ))

        signal_ifo *= xp.exp(-2j * np.pi * time_delay * self.frequency_array)

        d_inner_h = xp.sum(
            xp.conj(signal_ifo) * self.strain[name] / self.psds[name])
        h_inner_h = xp.sum(xp.abs(signal_ifo)**2 / self.psds[name])
        return d_inner_h, h_inner_h
Example #17
def corr(input, axes=(-1, -2), norm=False, returngpu=False, **kwargs):
    """
    simple autocorrelation of input along axes (default: last two) using gpu
    axes: axes to correlate along, defaults to last two
    norm: do normalisation along non correlation axes and normalise for pair count
    returngpu: return a cupy array
    """

    axes = sorted([input.ndim + a if a < 0 else a for a in axes])
    fftshape = [_fastlen(2 * input.shape[ax]) for ax in axes]
    dinput = _cp.array(input)
    if norm:
        dinput *= 1 / dinput.mean(axis=[i for i in range(input.ndim) if i not in axes] or None)
    ret = _cp.fft.rfftn(dinput, fftshape)
    ret = _cp.abs(ret) ** 2
    ret = _cp.fft.irfftn(ret, axes=axes)
    ret = _cp.fft.fftshift(ret, axes=axes)[
        tuple((Ellipsis, *(slice(ps // 2 - input.shape[ax], ps // 2 + input.shape[ax]) for ax, ps in zip(axes, fftshape))))
    ]
    if norm:
        n = corr(_cp.ones(tuple(input.shape[ax] for ax in axes)), returngpu=True)
        ret /= n
        ret[(...,) + (n < 0.9).nonzero()] = _np.nan
    if not returngpu:
        ret = _cp.asnumpy(ret)
        _cp.get_default_memory_pool().free_all_blocks()
    return ret
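A usage sketch for corr (the _cp/_np aliases and the _fastlen helper come from the original module; the input stack below is illustrative):

import numpy as np

frames = np.random.rand(10, 128, 128)        # stack of 2-D frames
ac = corr(frames, axes=(-2, -1), norm=True)  # per-frame autocorrelation
print(ac.shape)                              # (10, 256, 256); a NumPy array since returngpu=False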
Example #18
def genetic_algorithm_convergence(evaluation_best_chr, evaluation_overall_gen,
                                  gen, population, optim, BEST_CHR_CONV_LIM,
                                  BEST_CHR_CONV_CTR, PERCENT_CHR_CONV_LIM):
    stop = False
    cause = list()
    for conv_method in ("best_chr", "percentage", "generation"):
        if conv_method == 'best_chr':
            gen_eval = evaluate_best_chromosome(population, optim)
            evaluation_best_chr[gen] = gen_eval
            if evaluation_best_chr[gen] == evaluation_best_chr[gen - 1]:
                BEST_CHR_CONV_CTR += 1
                if BEST_CHR_CONV_CTR > BEST_CHR_CONV_LIM:
                    stop = True
                    cause.append('best_chr')
            else:
                # reset the stagnation counter when the best evaluation changes
                BEST_CHR_CONV_CTR = 0
        elif conv_method == 'percentage':
            gen_eval = evaluate_generation(population, optim)
            evaluation_overall_gen[gen] = gen_eval
            if cupy.abs(evaluation_overall_gen[gen] -
                        evaluation_overall_gen[gen - 1]) < PERCENT_CHR_CONV_LIM:
                stop = True
                cause.append('percentage')
        elif conv_method == 'generation':
            # if gen == 0:
            #   print('Warning [4]: Algorithm does not perform early stopping [at genetic algorithm convergence]')
            return stop, evaluation_best_chr, evaluation_overall_gen, BEST_CHR_CONV_CTR, cause
        else:
            print(
                'Error [9]: Invalid convergence method [at genetic algorithm convergence]\nExiting...'
            )
            exit()
    return stop, evaluation_best_chr, evaluation_overall_gen, BEST_CHR_CONV_CTR, cause
Example #19
File: test_canny.py Project: grlee77/cucim
 def test_01_01_circle(self):
     """Test that the Canny filter finds the outlines of a circle"""
     i, j = cp.mgrid[-200:200, -200:200].astype(float) / 200
     c = cp.abs(cp.sqrt(i * i + j * j) - 0.5) < 0.02
     result = feature.canny(c.astype(float), 4, 0, 0,
                            cp.ones(c.shape, bool))
     #
     # erode and dilate the circle to get rings that should contain the
     # outlines
     #
     # TODO: grlee77: only implemented brute_force=True, so added that to
     #                these tests
     cd = binary_dilation(c, iterations=3, brute_force=True)
     ce = binary_erosion(c, iterations=3, brute_force=True)
     cde = cp.logical_and(cd, cp.logical_not(ce))
     self.assertTrue(cp.all(cde[result]))
     #
     # The circle has a radius of 100. There are two rings here, one
     # for the inside edge and one for the outside. So that's
     # 100 * 2 * 2 * 3 for those places where pi is still 3.
     # The edge contains both pixels if there's a tie, so we
     # bump the count a little.
     point_count = cp.sum(result)
     self.assertTrue(point_count > 1200)
     self.assertTrue(point_count < 1600)
Example #20
 def test_11(self):
     N = 63
     M = 4
     Nd = 8
     D = cp.random.randn(Nd, Nd, M)
     X0 = cp.zeros((N, N, M))
     xr = cp.random.randn(N, N, M)
     xp = cp.abs(xr) > 3
     X0[xp] = cp.random.randn(X0[xp].size)
     S = cp.sum(ifftn(
         fftn(D, (N, N), (0, 1)) * fftn(X0, None, (0, 1)), None,
         (0, 1)).real,
                axis=2)
     lmbda = 1e-2
     L = 1e3
     opt = cbpdn.ConvBPDN.Options({
         'Verbose': False,
         'MaxMainIter': 2000,
         'RelStopTol': 1e-9,
         'L': L,
         'BackTrack': {
             'Enabled': False
         }
     })
     b = cbpdn.ConvBPDN(D, S, lmbda, opt)
     b.solve()
     X1 = b.X.squeeze()
     assert rrs(X0, X1) < 5e-4
     Sr = b.reconstruct().squeeze()
     assert rrs(S, Sr) < 2e-4
Example #21
File: cepstrum.py Project: mfkiwl/cusignal
def real_cepstrum(x, n=None, axis=-1):
    r"""
    Calculates the real cepstrum of an input sequence x where the cepstrum is
    defined as the inverse Fourier transform of the log magnitude DFT
    (spectrum) of a signal. It's primarily used for source/speaker separation
    in speech signal processing

    Parameters
    ----------
    x : ndarray
        Input sequence, if x is a matrix, return cepstrum in direction of axis
    n : int
        Size of Fourier transform; if None, the length of the input array is used
    axis: int
        Direction for cepstrum calculation

    Returns
    -------
    ceps : ndarray
        Real cepstrum result
    """
    x = cp.asarray(x)

    spectrum = fft.fft(x, n=n, axis=axis)
    ceps = fft.ifft(cp.log(cp.abs(spectrum)), n=n, axis=axis).real

    return ceps
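A usage sketch (the fft name inside real_cepstrum is assumed to be cupyx.scipy.fft, as in cuSignal; a little noise is added so the log of the magnitude spectrum stays finite):

import cupy as cp

fs = 8000
t = cp.arange(0, 1.0, 1.0 / fs)
x = cp.sin(2 * cp.pi * 440 * t) + 0.05 * cp.random.standard_normal(t.size)
ceps = real_cepstrum(x)
print(ceps.shape, ceps.dtype)  # (8000,) float64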
Example #22
    def inverse(self, spectrum, in_phase=None):
        if in_phase is None:
            in_phase = self.phase
        else:
            in_phase = cp.array(in_phase)
        spectrum = cp.array(spectrum)
        self.spectrum_buffer[:, -1] = spectrum * in_phase
        self.absolute_buffer[:, -1] = spectrum

        for _ in range(self.loop_num):
            self.overwrap_buf *= 0
            waves = cp.fft.ifft(self.spectrum_buffer, axis=2).real
            last = self.spectrum_buffer

            for i in range(self.buffer_size):
                self.overwrap_buf[:, i * self.wave_dif:i * self.wave_dif +
                                  self.wave_len] += waves[:, i]
            waves = cp.stack([
                self.overwrap_buf[:, i * self.wave_dif:i * self.wave_dif +
                                  self.wave_len] * self.window
                for i in range(self.buffer_size)
            ],
                             axis=1)

            spectrum = cp.fft.fft(waves, axis=2)
            self.spectrum_buffer = self.absolute_buffer * spectrum / (
                cp.abs(spectrum) + 1e-10)
            self.spectrum_buffer += 0.5 * (self.spectrum_buffer - last)

        dst = cp.asnumpy(self.spectrum_buffer[:, 0])
        self.absolute_buffer = cp.roll(self.absolute_buffer, -1, axis=1)
        self.spectrum_buffer = cp.roll(self.spectrum_buffer, -1, axis=1)

        return dst
Example #23
 def test_18(self):
     N = 64
     M = 2 * N
     L = 8
     cp.random.seed(12345)
     D = cp.random.randn(N, M)
     x0 = cp.zeros((M, 1))
     si = cp.random.permutation(M)
     x0[si[0:L]] = cp.random.randn(L, 1)
     s = D.dot(x0)
     lmbda = 5e-2
     opt = bpdn.BPDN.Options({
         'Verbose': False,
         'MaxMainIter': 300,
         'RelStopTol': 1e-5,
         'AutoRho': {
             'Enabled': False
         }
     })
     bp = bpdn.BPDN(D, s, lmbda=lmbda, opt=opt)
     Xp = bp.solve()
     epsilon = cp.linalg.norm(D.dot(Xp) - s)
     opt = bpdn.MinL1InL2Ball.Options({
         'Verbose': False,
         'MaxMainIter': 300,
         'RelStopTol': 1e-5,
         'rho': 2e1,
         'AutoRho': {
             'Enabled': False
         }
     })
     bc = bpdn.MinL1InL2Ball(D, s, epsilon=epsilon, opt=opt)
     Xc = bc.solve()
     assert cp.linalg.norm(Xp - Xc) / cp.linalg.norm(Xp) < 1e-3
     assert cp.abs(cp.linalg.norm(Xp, 1) - cp.linalg.norm(Xc, 1)) < 1e-3
Example #24
 def accuracy(self, x, t, is_training):
     y = self.predict(x, is_training)
     mask = (t == 0.0)
     t[mask] += 1e-7
     accuracy = np.abs((y - t) / t)
     accuracy = np.sum(accuracy) / y.size
     return accuracy
Example #25
def kernel_crosspower_cu(fft_data, ch_it, fft_config):
    """Defines a kernel that calculates the cross-power between two channels.

    Args:
        fft_data (ndarray, float):
            Contains the fourier-transformed data.
            dim0: channel, dim1: Fourier coefficients, dim2: STFT (bins in fluctana code)
        ch_it (iterable):
            Iterator over a list of channels we wish to perform our computation on
        fft_config (dict):
            parameters of the fourier-transformed data

    Returns:
        cross-power (ndarray, float):
            Cross-power
    """
    fft_data_cu = cp.asarray(fft_data)

    res = cp.zeros([len(ch_it), fft_data.shape[1]], dtype=fft_data.dtype)
    for idx, ch_pair in enumerate(ch_it):
        res[idx, :] = (fft_data_cu[ch_pair.ch1.get_idx(), :, :] *
                       fft_data_cu[ch_pair.ch2.get_idx(), :, :].conj()).mean(axis=1) /\
            fft_config["win_factor"]

    crosspower = cp.asnumpy(cp.abs(res).real)
    np.savez("crosspower_cu.npz", crosspower=crosspower)

    return crosspower
Example #26
    def grad_ptycho(self, data, psi, prb, scan, zlamd, rho, niter):
        """Gradient solver for the ptychography problem |||FQpsi|-sqrt(data)||^2_2 + rho||psi-zlamd||^2_2"""
        # minimization functional
        def minf(fpsi, psi):
            f = cp.linalg.norm(cp.abs(fpsi) - cp.sqrt(data))**2
            if(rho > 0):
                f += rho*cp.linalg.norm(psi-zlamd)**2
            return f

        for i in range(niter):
            # compute the gradient
            fpsi = self.fwd_ptycho(psi, prb, scan)

            gradpsi = self.adj_ptycho(
                fpsi - cp.sqrt(data)*fpsi/(cp.abs(fpsi)+1e-32), prb, scan)

            # normalization coefficient for skipping the line search procedure
            afpsi = self.adj_ptycho(fpsi, prb, scan)
            norm_coeff = cp.real(cp.sum(psi*cp.conj(afpsi)) /
                                 (cp.sum(afpsi*cp.conj(afpsi))+1e-32))

            if(rho > 0):
                gradpsi += rho*(psi-zlamd)
                gradpsi *= min(1/rho, norm_coeff)/2
            else:
                gradpsi *= norm_coeff/2
            # update psi
            psi = psi - 0.5*gradpsi
            # check convergence
            # print(f'{i}) {minf(fpsi, psi).get():.2e} ')

        return psi
Example #27
    def detect(self, x_in):
        # Compute the instantaneous power for the current buffer
        x_envelope = cp.abs(x_in)
        # Filter and decimate the envelope to a lower data rate
        self._envelope[:] = cusignal.upfirdn(self._win,
                                             x_envelope)[self._filter_roi]
        # Update threshold
        # Add summation of current envelope to the threshold fifo array
        self._bkg_sum_arr[self._fifo_index] = cp.sum(self._envelope)
        # Update fifo index for next detection window
        self._fifo_index = (self._fifo_index + 1) % self._fifo_len
        # Calculate avg background power level for the previous buffers in fifo
        bkg_avg = np.sum(self._bkg_sum_arr) / (self._fifo_len * self._buff_len)
        # Calculate new threshold value
        self._thresh = bkg_avg * self._thresh_offset

        # Calc index vector where power is above the threshold
        envelope_det_idx = self._envelope > self._thresh
        n_detections = cp.sum(envelope_det_idx)
        # Make sure at least samp_above_thresh are higher than the threshold
        if n_detections > self._samp_above_thresh:
            # Copy to cupy array as workaround to issue cuSignal #178
            x_out = cp.array(x_in)
            x_out[~envelope_det_idx] = 0  # Zero out samples below threshold
        else:
            x_out = None
        return x_out
Example #28
    def cg_ptycho(self, data, init, h, lamd, rho, piter, model):
        # minimization functional
        def minf(psi, fpsi):
            if model == 'gaussian':
                f = cp.linalg.norm(cp.abs(fpsi) - cp.sqrt(data))**2
            elif model == 'poisson':
                f = cp.sum(
                    cp.abs(fpsi)**2 - 2 * data * self.mlog(cp.abs(fpsi)))
            f += rho * cp.linalg.norm(h - psi + lamd / rho)**2
            return f

        psi = init.copy()
        gamma = 8  # init gamma as a large value
        for i in range(piter):
            fpsi = self.fwd_ptycho(psi)
            if model == 'gaussian':
                grad = self.adj_ptycho(fpsi - cp.sqrt(data) *
                                       cp.exp(1j * cp.angle(fpsi)))
            elif model == 'poisson':
                grad = self.adj_ptycho(fpsi - data * fpsi /
                                       (cp.abs(fpsi)**2 + 1e-32))
            grad -= rho * (h - psi + lamd / rho)
            # Dai-Yuan direction
            if i == 0:
                d = -grad
            else:
                d = -grad+cp.linalg.norm(grad)**2 / \
                    ((cp.sum(cp.conj(d)*(grad-grad0))))*d
            grad0 = grad
            # line search
            fd = self.fwd_ptycho(d)
            gamma = self.line_search(minf, gamma, psi, fpsi, d, fd)
            psi = psi + gamma * d
            # print(i,minf(psi,fpsi))
        return psi
Example #29
    def __init__(self,
                 parallel,
                 wave_len=254,
                 wave_dif=64,
                 buffer_size=5,
                 loop_num=5,
                 window=np.hanning(254)):
        self.wave_len = wave_len
        self.wave_dif = wave_dif
        self.buffer_size = buffer_size
        self.loop_num = loop_num
        self.parallel = parallel
        self.window = cp.array([window for _ in range(parallel)])

        self.wave_buf = cp.zeros((parallel, wave_len + wave_dif), dtype=float)
        self.overwrap_buf = cp.zeros(
            (parallel, wave_dif * buffer_size + (wave_len - wave_dif)),
            dtype=float)
        self.spectrum_buffer = cp.ones(
            (parallel, self.buffer_size, self.wave_len), dtype=complex)
        self.absolute_buffer = cp.ones(
            (parallel, self.buffer_size, self.wave_len), dtype=complex)

        self.phase = cp.zeros((parallel, self.wave_len), dtype=complex)
        self.phase += cp.random.random(
            (parallel, self.wave_len)) - 0.5 + cp.random.random(
                (parallel, self.wave_len)) * 1j - 0.5j
        self.phase[self.phase == 0] = 1
        self.phase /= cp.abs(self.phase)
Example #30
def fiedler(a):
    """Returns a symmetric Fiedler matrix

    Given a sequence of numbers ``a``, Fiedler matrices have the structure
    ``F[i, j] = np.abs(a[i] - a[j])``, and hence zero diagonals and nonnegative
    entries. A Fiedler matrix has a dominant positive eigenvalue and other
    eigenvalues are negative. Although not valid generally, for certain inputs,
    the inverse and the determinant can be derived explicitly.

    Args:
        a (cupy.ndarray): coefficient array

    Returns:
        cupy.ndarray: the symmetric Fiedler matrix

    .. seealso:: :func:`cupyx.scipy.linalg.circulant`
    .. seealso:: :func:`cupyx.scipy.linalg.toeplitz`
    .. seealso:: :func:`scipy.linalg.fiedler`
    """
    if a.ndim != 1:
        raise ValueError('Input `a` must be a 1D array.')
    if a.size == 0:
        return cupy.zeros(0)
    if a.size == 1:
        return cupy.zeros((1, 1))
    a = a[:, None] - a
    return cupy.abs(a, out=a)
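A worked example (the function is exposed as cupyx.scipy.linalg.fiedler in CuPy versions that ship it; the values match the SciPy docstring example):

import cupy

a = cupy.array([1, 4, 12, 45, 77])
print(fiedler(a))
# [[ 0  3 11 44 76]
#  [ 3  0  8 41 73]
#  [11  8  0 33 65]
#  [44 41 33  0 32]
#  [76 73 65 32  0]]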
Example #31
def test_gabor_kernel_theta():
    for sigma_x in range(1, 10, 2):
        for sigma_y in range(1, 10, 2):
            for frequency in range(0, 10, 2):
                for theta in range(0, 10, 2):
                    kernel0 = gabor_kernel(frequency + 0.1,
                                           theta=theta,
                                           sigma_x=sigma_x,
                                           sigma_y=sigma_y)
                    kernel180 = gabor_kernel(frequency + 0.1,
                                             theta=theta + np.pi,
                                             sigma_x=sigma_x,
                                             sigma_y=sigma_y)

                    assert_array_almost_equal(cp.abs(kernel0),
                                              cp.abs(kernel180))
Example #32
 def test_02(self):
     lmbda = 1e-1
     opt = tvl2.TVL2Deconv.Options(
         {'Verbose': False, 'gEvalY': False, 'MaxMainIter': 250})
     b = tvl2.TVL2Deconv(cp.ones((1)), self.D, lmbda, opt, axes=(0, 1, 2))
     X = b.solve()
     assert cp.abs(b.itstat[-1].ObjFun - 567.72425227) < 1e-3
     assert sm.mse(self.U, X) < 1e-3
Example #33
 def test_02(self):
     lmbda = 1e-1
     opt = tvl2.TVL2Deconv.Options(
         {'Verbose': False, 'gEvalY': False, 'MaxMainIter': 250})
     b = tvl2.TVL2Deconv(cp.ones((1)), self.D, lmbda, opt)
     X = b.solve()
     assert cp.abs(b.itstat[-1].ObjFun - 45.45958573088) < 1e-3
     assert sm.mse(self.U, X) < 1e-3
Example #34
def rand_noise_write(_filename: str, _seconds: int, _framerate: int = 44100) -> None:
    """Generate a 16-bit mono WAV file containing random noise & write it to a file"""
    _data = xp.random.uniform(-1, 1, _framerate * _seconds)
    _scaled = xp.int16(_data / xp.max(xp.abs(_data)) * 32767)
    _wav_data: bytes = bytes(_scaled)
    with wave.open(_filename, mode=r'wb') as _file:
        _file.setparams((1, 2, _framerate, _scaled.shape[0], r'NONE', r'not compressed'))  # pylint: disable=E1101,E1136
        _file.writeframes(_wav_data)  # pylint: disable=E1101
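A usage sketch (xp is the module's array-backend alias, e.g. numpy or cupy; the file names are illustrative):

rand_noise_write('noise.wav', 2)                      # 2 s of noise at the default 44.1 kHz
rand_noise_write('noise_8k.wav', 2, _framerate=8000)  # lower sample rate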
Example #35
 def test_01(self):
     lmbda = 1e-1
     opt = tvl2.TVL2Denoise.Options(
         {'Verbose': False, 'gEvalY': False, 'MaxMainIter': 250,
          'rho': 10 * lmbda})
     b = tvl2.TVL2Denoise(self.D, lmbda, opt, axes=(0, 1, 2))
     X = b.solve()
     assert cp.abs(b.itstat[-1].ObjFun - 366.04267554965134) < 1e-3
     assert sm.mse(self.U, X) < 1e-3
Example #36
 def test_01(self):
     lmbda = 1e-1
     opt = tvl2.TVL2Denoise.Options(
         {'Verbose': False, 'gEvalY': False, 'MaxMainIter': 300,
          'rho': 75 * lmbda})
     b = tvl2.TVL2Denoise(self.D, lmbda, opt)
     X = b.solve()
     assert cp.abs(b.itstat[-1].ObjFun - 32.875710674129564) < 1e-3
     assert sm.mse(self.U, X) < 1e-3
Example #37
 def check_usv(self, array, dtype):
     a_cpu = numpy.asarray(array, dtype=dtype)
     a_gpu = cupy.asarray(array, dtype=dtype)
     result_cpu = numpy.linalg.svd(a_cpu, full_matrices=self.full_matrices)
     result_gpu = cupy.linalg.svd(a_gpu, full_matrices=self.full_matrices)
     self.assertEqual(len(result_cpu), len(result_gpu))
     for b_cpu, b_gpu in zip(result_cpu, result_gpu):
          # Singular vectors are determined only up to sign, so compare absolute values
         cupy.testing.assert_allclose(
             numpy.abs(b_cpu), cupy.abs(b_gpu), atol=1e-4)
Example #38
 def test_08(self):
     N = 64
     M = 2 * N
     L = 4
     np.random.seed(12345)
     D = cp.array(np.random.randn(N, M))
     x0 = cp.zeros((M, 1))
     si = cp.array(np.random.permutation(M))
     x0[si[0:L]] = cp.array(np.random.randn(L, 1))
     s0 = D.dot(x0)
     lmbda = 5e-3
     opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500,
                              'RelStopTol': 5e-4})
     b = bpdn.BPDN(D, s0, lmbda, opt)
     b.solve()
     x1 = b.Y
     assert cp.abs(b.itstat[-1].ObjFun - 0.012009) < 1e-5
     assert cp.abs(b.itstat[-1].DFid - 1.9636082e-06) < 1e-5
     assert cp.abs(b.itstat[-1].RegL1 - 2.401446) < 1e-5
     assert cp.linalg.norm(x1 - x0) < 1e-3
Example #39
def rand_noise_wav(_seconds: int, _framerate: int = 44100) -> dict:
    """Generate a 16-bit mono WAV file containing random noise & return the data"""
    _data = xp.random.uniform(-1, 1, _framerate * _seconds)
    _scaled = xp.int16(_data / xp.max(xp.abs(_data)) * 32767)
    _out: dict = {
        r'num_frames': _scaled.shape[0],  # pylint: disable=E1136
        r'frame_rate': _framerate,
        r'num_channels': 1,
        r'sample_width': 2,
        r'data': bytes(_scaled)
    }
    return _out
Example #40
 def test_10(self):
     N = 64
     M = 4
     Nd = 8
     D = cp.random.randn(Nd, Nd, M)
     X0 = cp.zeros((N, N, M))
     xr = cp.random.randn(N, N, M)
     xp = cp.abs(xr) > 3
     X0[xp] = cp.random.randn(X0[xp].size)
     S = cp.sum(sl.fftconv(D, X0), axis=2)
     lmbda = 1e-4
     rho = 1e-1
     opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 500,
                                   'RelStopTol': 1e-3, 'rho': rho,
                                   'AutoRho': {'Enabled': False}})
     b = cbpdn.ConvBPDN(D, S, lmbda, opt)
     b.solve()
     X1 = b.Y.squeeze()
     assert sl.rrs(X0, X1) < 5e-5
     Sr = b.reconstruct().squeeze()
     assert sl.rrs(S, Sr) < 1e-4
Example #41
 def test_11(self):
     N = 63
     M = 4
     Nd = 8
     D = cp.random.randn(Nd, Nd, M)
     X0 = cp.zeros((N, N, M))
     xr = cp.random.randn(N, N, M)
     xp = cp.abs(xr) > 3
     X0[xp] = cp.random.randn(X0[xp].size)
     S = cp.sum(sl.ifftn(sl.fftn(D, (N, N), (0, 1)) *
                         sl.fftn(X0, None, (0, 1)), None, (0, 1)).real,
                axis=2)
     lmbda = 1e-2
     L = 1e3
     opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 2000,
                                   'RelStopTol': 1e-9, 'L': L,
                                   'BackTrack': {'Enabled': False}})
     b = cbpdn.ConvBPDN(D, S, lmbda, opt)
     b.solve()
     X1 = b.X.squeeze()
     assert sl.rrs(X0, X1) < 5e-4
     Sr = b.reconstruct().squeeze()
     assert sl.rrs(S, Sr) < 2e-4
Example #42
 def test_18(self):
     N = 64
     M = 2 * N
     L = 8
     cp.random.seed(12345)
     D = cp.random.randn(N, M)
     x0 = cp.zeros((M, 1))
     si = cp.random.permutation(M)
     x0[si[0:L]] = cp.random.randn(L, 1)
     s = D.dot(x0)
     lmbda = 5e-2
     opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 300,
                              'RelStopTol': 1e-5, 'AutoRho':
                              {'Enabled': False}})
     bp = bpdn.BPDN(D, s, lmbda=lmbda, opt=opt)
     Xp = bp.solve()
     epsilon = cp.linalg.norm(D.dot(Xp) - s)
     opt = bpdn.MinL1InL2Ball.Options(
         {'Verbose': False, 'MaxMainIter': 300, 'RelStopTol': 1e-5,
          'rho': 2e1, 'AutoRho': {'Enabled': False}})
     bc = bpdn.MinL1InL2Ball(D, s, epsilon=epsilon, opt=opt)
     Xc = bc.solve()
     assert cp.linalg.norm(Xp - Xc) / cp.linalg.norm(Xp) < 1e-3
     assert cp.abs(cp.linalg.norm(Xp, 1) - cp.linalg.norm(Xc, 1)) < 1e-3