Example No. 1
    def convolve(self, first, second):
        # full linear-convolution length
        shape = first.shape[0] + second.shape[0] - 1
        # round up to the next power of two for a fast FFT
        best_shape = int(2**cp.ceil(cp.log2(shape)))

        first_f = cp.fft.rfft(first, best_shape)
        second_f = cp.fft.rfft(second, best_shape)
        # pointwise product in the frequency domain, trimmed to the true length
        return cp.fft.irfft(first_f * second_f, best_shape)[:shape]
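A minimal usage sketch (with `self` dropped so the method runs standalone, and `cp` assumed to be CuPy); the FFT result should agree with CuPy's direct convolution:

import cupy as cp

def convolve(first, second):
    shape = first.shape[0] + second.shape[0] - 1
    best_shape = int(2**cp.ceil(cp.log2(shape)))
    first_f = cp.fft.rfft(first, best_shape)
    second_f = cp.fft.rfft(second, best_shape)
    return cp.fft.irfft(first_f * second_f, best_shape)[:shape]

a = cp.array([1.0, 2.0, 3.0])
b = cp.array([0.0, 1.0, 0.5, 0.25])
# FFT-based linear convolution should agree with the direct method
assert cp.allclose(convolve(a, b), cp.convolve(a, b))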
Example No. 2
def fft_conv1d_cupy(a, b):
    # full linear-convolution length
    n = len(a) + len(b) - 1
    # next power of two >= n (overshoots by 2x when n is already a power of two)
    N = 2**(int(cp.log2(n)) + 1)
    A = cp.fft.fft(a, N)
    B = cp.fft.fft(b, N)
    return cp.fft.ifft(A * B)[:n]
Example No. 3
def log2(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.log2 <numpy.log2>`.

    See its docstring for more information.
    """
    if x.dtype not in _floating_dtypes:
        raise TypeError("Only floating-point dtypes are allowed in log2")
    return Array._new(np.log2(x._array))
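Usage sketch. This wrapper appears to come from NumPy's `numpy.array_api` reference namespace; that import path is an assumption (and is deprecated in newer NumPy):

import numpy.array_api as xp  # assumed host namespace

a = xp.asarray([1.0, 2.0, 8.0])
print(xp.log2(a))  # Array([0., 1., 3.], dtype=float64)

try:
    xp.log2(xp.asarray([1, 2, 4]))  # integer dtype is rejected
except TypeError as err:
    print(err)  # Only floating-point dtypes are allowed in log2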
Example No. 4
    def __init__(self, codes, channel):
        """
        Args:
            codes (numpy.ndarray): the input codebook, which is an (Nc, M, T)-sized tensor.
            channel (imtoolkit.Channel): the channel class used for simulations.
        """
        self.codes = self.toXpArray(codes)  # Copy codes to the GPU memory
        self.Nc = len(codes)  # The number of codewords
        self.B = xp.log2(self.Nc)  # The bitwidth per codeword
        self.channel = channel  # The specified channel generator
Example No. 5
        def _pl(self, image, context):
            h, w = image.shape[0], image.shape[1]

            # largest powers of two that fit within each dimension
            wpow = int(cup.log2(w))
            hpow = int(cup.log2(h))

            # offsets that center the power-of-two crop
            offx = (w - 2**wpow) // 2
            offy = (h - 2**hpow) // 2

            if w > 2**wpow:
                w = 2**wpow
            if h > 2**hpow:
                h = 2**hpow
            # crop to center
            image = image[offy:offy + h, offx:offx + w]

            return image
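A quick check of the centered power-of-two crop (a sketch with the method lifted out of its class; `cup` is assumed to be the CuPy module):

import cupy as cup

def crop_pow2(image):
    h, w = image.shape[0], image.shape[1]
    wpow = int(cup.log2(w))
    hpow = int(cup.log2(h))
    offx = (w - 2**wpow) // 2
    offy = (h - 2**hpow) // 2
    w = min(w, 2**wpow)
    h = min(h, 2**hpow)
    return image[offy:offy + h, offx:offx + w]

img = cup.zeros((300, 500))
print(crop_pow2(img).shape)  # (256, 256): largest centered power-of-two crop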
Example No. 6
    def _pyczt_cupy(self, x, k=None, w=None, a=None):
        olddim = x.ndim

        if olddim == 1:
            x = x[:, cp.newaxis]

        (m, n) = x.shape
        oldm = m

        if m == 1:
            x = x.transpose()
            (m, n) = x.shape

        if k is None:
            k = len(x)
        if w is None:
            w = cp.exp(-1j * 2 * pi / k)
        if a is None:
            a = 1.

        # %------- Length for power-of-two fft.

        nfft = int(2**cp.ceil(cp.log2(abs(m + k - 1))))

        # %------- Premultiply data.

        kk = cp.arange(-m + 1, max(k, m))[:, cp.newaxis]
        kk2 = (kk**2) / 2
        ww = w**kk2  # <----- Chirp filter is 1./ww
        nn = cp.arange(0, m)[:, cp.newaxis]
        aa = a**(-nn)
        aa = aa * ww[m + nn - 1, 0]
        y = (x * aa).astype(np.complex64)

        # %------- Fast convolution via FFT.

        fy = cp.fft.fft(y, nfft, axis=0)
        fv = cp.fft.fft(1 / ww[0:k - 1 + m], nfft,
                        axis=0)  # <----- Chirp filter.
        fy = fy * fv
        g = cp.fft.ifft(fy, axis=0)

        # %------- Final multiply.

        g = g[m - 1:m + k - 1, :] * ww[m - 1:m + k - 1]

        if oldm == 1:
            g = g.transpose()

        if olddim == 1:
            g = g.squeeze()

        return g
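With the default parameters (k = m, w = exp(-2j*pi/m), a = 1) the chirp z-transform reduces to the DFT. Below is a compact, self-contained sketch of the same Bluestein fast-convolution idea, checked against cp.fft.fft (assuming `cp` is CuPy):

import math
import cupy as cp

def bluestein_dft(x):
    m = x.shape[0]
    nfft = int(2**cp.ceil(cp.log2(2 * m - 1)))
    kk = cp.arange(-m + 1, m)
    ww = cp.exp(-1j * math.pi * (kk**2) / m)  # chirp: w**(kk**2/2) with w = exp(-2j*pi/m)
    y = x * ww[m - 1:]                        # premultiply
    fv = cp.fft.fft(1 / ww, nfft)             # chirp filter
    g = cp.fft.ifft(cp.fft.fft(y, nfft) * fv)
    return g[m - 1:2 * m - 1] * ww[m - 1:]    # final multiply

x = cp.random.rand(7) + 1j * cp.random.rand(7)
assert cp.allclose(bluestein_dft(x), cp.fft.fft(x))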
Example No. 7
def sk_flag(data,
            metadata,
            n_sigma_upper=5,
            n_sigma_lower=5,
            flag_upper=True,
            flag_lower=True):
    """ Apply spectral kurtosis flagging 
    
    Args:
        data (np.array): Numpy array with shape (N_timestep, N_beam, N_channel)
        metadata (dict): Metadata dictionary, should contain 'df' and 'dt'
                         (frequency and time resolution)
        boxcar_mode (str): Boxcar mode to apply. mean/sum/gaussian.
        n_sigma_upper (float): Number of stdev above SK estimate to flag (upper bound)
        n_sigma_lower (float): Number of stdev below SK estmate to flag (lower bound)
        flag_upper (bool): Flag channels with large SK (highly variable signals)
        flag_lower (bool): Flag channels with small SK (very stable signals)
        return_space ('cpu' or 'gpu'): Returns array in CPU or GPU space
    
    Returns:
        mask (np.array, bool): Array of True/False flags per channel
    
    Notes:
        sk_flag upper and lower stdev is computed on log2(sk), as the minimum
        spectral kurtosis (for a CW signal) approaches 0. 
    """
    Fs = (1.0 / metadata['frequency_step'] / 2)
    samps_per_sec = np.abs(Fs.to('s').value)  # Nyq sample rate for channel
    N_acc = int(metadata['time_step'].to('s').value / samps_per_sec)

    sk = spectral_kurtosis(data, metadata)

    #var_theoretical = 2.0 / np.sqrt(N_acc)
    #std_theoretical = np.sqrt(var_theoretical)
    log_sk = cp.log2(sk)
    std_log = cp.std(log_sk)
    mean_log = cp.mean(log_sk)

    # mask marks channels *within* bounds; flagged channels are its complement
    if flag_upper and flag_lower:
        mask = log_sk < mean_log + (std_log * n_sigma_upper)
        mask &= log_sk > mean_log - (std_log * n_sigma_lower)
    elif flag_upper and not flag_lower:
        mask = log_sk < mean_log + (std_log * n_sigma_upper)
    elif flag_lower and not flag_upper:
        mask = log_sk > mean_log - (std_log * n_sigma_lower)
    else:
        raise RuntimeError(
            "No flags to process: need to flag upper and/or lower!")
    return ~mask
Example No. 8
    def __compute_entropy(self, values):
        """Builds a dictionary of values with total probabilities and returns entropy H:
        H = -sum(p(i)*log2(p(i)))  bit/symbol"""
        from collections import Counter
        if args.cupy:
            values = np.asnumpy(values)  # move data to the host (np.asnumpy implies np is CuPy here)

        value_counter = Counter(values)
        H = 0
        n_values = len(values)
        for count in value_counter.values():
            pi = count / n_values
            H = H + pi * np.log2(pi)
        H = -round(float(H), 3)
        return H
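The same computation as a self-contained plain-NumPy sketch (hypothetical helper name), with two easily checked inputs:

from collections import Counter
import numpy as np

def shannon_entropy(values):
    # H = -sum(p_i * log2(p_i)), in bits per symbol
    counts = Counter(values)
    n = len(values)
    return -sum((c / n) * np.log2(c / n) for c in counts.values())

print(shannon_entropy([0, 0, 1, 1]))  # 1.0 (one fair bit)
print(shannon_entropy([0, 1, 2, 3]))  # 2.0 (four equiprobable symbols)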
Example No. 9
def logneg(wave, n, partition):

    L, la, lb, lc1, lc2 = int(partition[0]), int(partition[1]), int(
        partition[2]), int(partition[3]), int(partition[4])

    # region A
    ps = cp.reshape(wave, (2**lc1, 2**la, 2**lc2, 2**lb))
    ps = cp.moveaxis(ps, 0, 1)
    ps = cp.reshape(ps, (2**la, 2**(L - la)))
    # entanglement entropy in region A
    en = ent(ps, n, L, la)
    # sa and sar stand for von-Neumann and Renyi entanglement entropies
    sa, sar = en[0], en[1]

    # region B
    ps = cp.reshape(wave, (2**(L - lb), 2**lb))
    en = ent(ps, n, L, L - lb)
    sb, sbr = en[0], en[1]

    # region C
    # since C is composed of c1 and c2, we rearrange the indices to combine
    # c1 and c2 into a connected region
    ps = cp.reshape(wave, (2**lc1, 2**la, 2**lc2, 2**lb))
    ps = cp.moveaxis(ps, 1, 2)
    ps = cp.reshape(ps, (2**(lc1 + lc2), 2**(la + lb)))
    en = ent(ps, n, L, lc1 + lc2)
    sc, scr = en[0], en[1]

    # log(negativity)
    rab = cp.dot(ps.T, cp.conj(ps))  # reduced density matrix by tracing out C
    # reshape the reduced density matrix to have 4 indices to facilitate partial transpose
    rab = cp.reshape(rab, (2**la, 2**lb, 2**la, 2**lb))

    # partial transpose on A
    pab = cp.moveaxis(rab, 0, 2)
    # rearrange indices to make pab into a matrix
    pab = pab.reshape(2**(la + lb), 2**(la + lb))
    # SVD of partial transposed density matrix
    sp = cp.linalg.svd(pab, compute_uv=False)
    # definition of logarithmic negativity
    logn = cp.log2(cp.sum(sp))
    tol = 1e-10
    # return the logarithmic negativity and the two mutual-information values;
    # float() pulls the CuPy scalars back to the host before building the array
    result = np.array([float(logn), float(sa + sb - sc), float(sar + sbr - scr)])
    # chop small values to zero
    result[abs(result) < tol] = 0.0

    return result
Example No. 10
def adjust_log(image, gain=1, inv=False):
    """Performs Logarithmic correction on the input image.

    This function transforms the input image pixelwise according to the
    equation ``O = gain*log2(1 + I)`` after scaling each pixel to the range
    0 to 1.

    For inverse logarithmic correction, the equation is
    ``O = gain*(2**I - 1)``.

    Parameters
    ----------
    image : ndarray
        Input image.
    gain : float, optional
        The constant multiplier. Default value is 1.
    inv : bool, optional
        If True, it performs inverse logarithmic correction,
        else correction will be logarithmic. Defaults to False.

    Returns
    -------
    out : ndarray
        Logarithm corrected output image.

    See Also
    --------
    adjust_gamma

    References
    ----------
    .. [1] http://www.ece.ucsb.edu/Faculty/Manjunath/courses/ece178W03/EnhancePart1.pdf

    """  # noqa
    _assert_non_negative(image)
    dtype = image.dtype.type
    scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])

    if inv:
        out = (2**(image / scale) - 1) * scale * gain
        return out.astype(dtype, copy=False)

    out = cp.log2(1 + image / scale) * scale * gain
    return out.astype(dtype, copy=False)
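Usage sketch. This appears to be cucim's port of skimage.exposure, so the import path below (and the helpers `_assert_non_negative` and `dtype_limits` coming from that module) is an assumption:

import cupy as cp
from cucim.skimage.exposure import adjust_log  # assumed import path

img = cp.array([0, 63, 127, 255], dtype=cp.uint8)
print(adjust_log(img))            # [  0  81 148 255]: compresses highlights
print(adjust_log(img, inv=True))  # [  0  47 105 255]: the inverse correction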
Example No. 11
def ent(wave, n, L, la):
    lb = L - la
    # convert the wavefunction into a matrix for SVD
    temp = cp.reshape(wave, (2**la, 2**lb))
    # SVD for entanglement entropy, only singular values calculated
    sp = cp.linalg.svd(temp, compute_uv=False)
    tol = 1e-10
    # chop small singular values to zero to avoid numerical instability
    sp[abs(sp) < tol] = 0.0
    # keep only non-zero values to avoid feeding zeros to the log function
    sp = sp[cp.nonzero(sp)]
    el = sp**2
    von = -cp.dot(el, cp.log2(el))
    ren = (1 / (1 - n)) * cp.log2(cp.sum(el**n))
    # chop small values to zero
    if abs(von) < tol:
        von = 0
    if abs(ren) < tol:
        ren = 0
    # EE in log2 base
    return von, ren
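A quick sanity check with `ent` as above (a sketch assuming `cp` is CuPy): a two-qubit Bell state carries exactly one bit of entanglement, for both the von Neumann and the Renyi (n = 2) entropies:

import cupy as cp

bell = cp.array([1.0, 0.0, 0.0, 1.0]) / cp.sqrt(2.0)
von, ren = ent(bell, n=2, L=2, la=1)
print(float(von), float(ren))  # 1.0 1.0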
Example No. 12
def expm(A, delta=1e-10):
    # scaling: divide A by 2**j so that ||A / 2**j|| <= 1
    # (cp.int was removed from CuPy; use the builtin int)
    j = max(0, int(1 + cp.log2(cp.linalg.norm(A, cp.inf))))
    A = A / (2**j)
    # Pade approximation order q giving error below delta
    q = u_nb.expm_eps_less_than(delta)
    n = A.shape[0]
    I = cp.eye(n)
    D = I
    N = I
    X = I
    c = 1
    sign = 1
    for k in range(1, q + 1):
        c = c * (q - k + 1) / ((2 * q - k + 1) * k)
        X = A @ X
        N = N + c * X
        sign = -1 * sign
        D = D + sign * c * X

    F = cp.linalg.solve(D, N)
    # squaring: undo the scaling, exp(A) = (exp(A / 2**j))**(2**j)
    for _ in range(j):
        F = F @ F

    return F
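A check under the assumption that `u_nb.expm_eps_less_than` merely returns a Pade order (stubbed to 7 here, a common choice): the exponential of a nilpotent matrix is exact, so expm([[0,1],[0,0]]) should be [[1,1],[0,1]]:

import cupy as cp

class u_nb:  # stand-in for the module used above (assumption)
    @staticmethod
    def expm_eps_less_than(delta):
        return 7

A = cp.array([[0.0, 1.0], [0.0, 0.0]])
print(expm(A))  # [[1. 1.] [0. 1.]]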
Example No. 13
def entanglement_entropy_from_state(state,
                                    chosen: list,
                                    sparse: bool = True,
                                    gpu: bool = False) -> float:
    """
        Compute entanglement entropy of state according to chosen bipartition of qubits

    :param state:   array representing state of the system of qubits, can be scipy.sparse or numpy depending on sparse
    :param chosen:  selected qubits
    :param sparse: True if dense representation (state is np.ndarray), False if state is a scipy.sparse.coo_matrix
    :return: S
    """

    notchosen = bip.notchosen(chosen, int(log2(state.shape[0])))

    if sparse:
        W = W_from_state_sparse(state, chosen, notchosen)
        svds = bip.sparsesvd(W, k=min(np.shape(W)) - 1,
                             which='LM', return_singular_vectors=False)
        svds = svds**2
        svds = svds[svds > 1e-6]
        return -np.sum(svds * np.log2(svds))

    if gpu:
        W = cp.array(W_from_state_numba(state, chosen, notchosen))
        rho = W.dot(W.conj().T)
        eig = gpu_eigh(rho)
        eig = eig[eig > 1e-5]
        a = cp.log2(eig)
        return cp.asnumpy(-cp.sum(eig * a))

    rho = density_matrix_from_state_dense(state, chosen, notchosen)
    eig = eigh(rho)
    eig = eig[eig > 1e-15]
    a = np.log2(eig)
    return -np.sum(eig * a)
Example No. 14
def find_prob(measured_qubits, sub_state, states):

    # Measured qubit numbers in ascending order (a sorted copy, so the
    # caller's list is not mutated)
    qubits = sorted(measured_qubits)

    # Make a copy of given states in order not to alter them
    a = states.copy()
    d1, d2 = a.shape  # d1 = number of circuit runs, d2 = 2 ** N
    N = int(rint(log2(d2)))

    # Reshape to rank-(N+1) tensor
    a = a.reshape([d1] + [2] * N)

    # K = number of measured qubits, M = number of qubits not measured
    K = len(qubits)
    M = N - K

    # Reorder qubit number axes
    for i in range(K):
        a = swapaxes(a, i + 1, qubits[i] + 1)

    # Flatten arrays for 2 groups of qubits
    a = a.reshape([d1] + [2**K] + [2**M])

    # Broadcast multiply coefficients
    a = swapaxes(a, 0, 1)
    a = multiply(a.T, sub_state).T

    # Sum over coefficients
    a = a.sum(axis=0)
    a = abs(a)**2
    a = a.sum(axis=1)

    # Return probability of measuring a substate for all circuit runs
    return a
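Intended call pattern (a sketch; `rint`, `log2`, `swapaxes`, and `multiply` are assumed to be NumPy names imported at the module level): the probability of finding qubit 0 of a Bell state in |0> is 0.5:

from numpy import array, sqrt

# One circuit run of the two-qubit Bell state (|00> + |11>)/sqrt(2)
states = array([[1.0, 0.0, 0.0, 1.0]]) / sqrt(2.0)
sub_state = array([1.0, 0.0])  # amplitudes of the measured substate |0>
print(find_prob([0], sub_state, states))  # [0.5]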
Example No. 15
def apply(gate, states, global_phase=False):

    # A shorthand for the original states
    a = states
    # d1 = number of circuit runs with noise, d2 = 2 ** N = dimension of state vector

    d1, d2 = states.shape
    N = int(rint(log2(d2)))

    # A copy of state a, to be flipped by qubit-wise Pauli operations
    b = copy(a)

    # print("d1 = ", d1)
    # print("d2 = ", d2)
    # print("N = ", N)
    # Reshape to rank-(N+1) tensor
    b = b.reshape([d1] + [2] * N)

    for k in range(len(gate[0])):

        basis = gate[0][k]
        q = gate[1][k]

        if basis == identity:
            pass

        if basis == x:
            b = roll(b, 1, q + 1)

        if basis == y:
            b = roll(b, 1, q + 1)
            b = swapaxes(b, 0, q + 1)
            b[0] *= -1j
            b[1] *= 1j
            b = swapaxes(b, 0, q + 1)

        if basis == s_phi:
            phi = array(gate[3][k])
            b = roll(b, 1, q + 1)
            b = swapaxes(b, 0, q + 1)
            b = swapaxes(b, N, q + 1)
            phase1 = cos(phi) + 1j * sin(phi)
            phase2 = cos(phi) - 1j * sin(phi)
            b[0] = multiply(phase2, b[0])
            b[1] = multiply(phase1, b[1])
            b = swapaxes(b, N, q + 1)
            b = swapaxes(b, 0, q + 1)

        if basis == z:
            b = swapaxes(b, 0, q + 1)
            b[1] *= -1
            b = swapaxes(b, 0, q + 1)

    b = b.reshape(d1, d2)
    angles = array(gate[2][0])

    states = (cos(angles / 2) * a.T - 1j * sin(angles / 2) * b.T).T

    # Remove global phase (may be awkward if first amplitude is close to zero)

    if global_phase == False:
        pass  # global-phase removal is not implemented; states are returned unchanged

    return states
Example No. 16
def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
    """
    Function to add random noise of various types to a floating-point image.

    Parameters
    ----------
    image : ndarray
        Input image data. Will be converted to float.
    mode : str, optional
        One of the following strings, selecting the type of noise to add:

        - 'gaussian'  Gaussian-distributed additive noise.
        - 'localvar'  Gaussian-distributed additive noise, with specified
                      local variance at each point of `image`.
        - 'poisson'   Poisson-distributed noise generated from the data.
        - 'salt'      Replaces random pixels with 1.
        - 'pepper'    Replaces random pixels with 0 (for unsigned images) or
                      -1 (for signed images).
        - 's&p'       Replaces random pixels with either 1 or `low_val`, where
                      `low_val` is 0 for unsigned images or -1 for signed
                      images.
        - 'speckle'   Multiplicative noise using out = image + n*image, where
                      n is Gaussian noise with specified mean & variance.
    seed : int, optional
        If provided, this will set the random seed before generating noise,
        for valid pseudo-random comparisons.
    clip : bool, optional
        If True (default), the output will be clipped after the noise is
        applied, for modes `'speckle'`, `'poisson'`, and `'gaussian'`. This is
        needed to maintain the proper image data range. If False, clipping
        is not applied, and the output may extend beyond the range [-1, 1].
    mean : float, optional
        Mean of random distribution. Used in 'gaussian' and 'speckle'.
        Default : 0.
    var : float, optional
        Variance of random distribution. Used in 'gaussian' and 'speckle'.
        Note: variance = (standard deviation) ** 2. Default : 0.01
    local_vars : ndarray, optional
        Array of positive floats, same shape as `image`, defining the local
        variance at every image point. Used in 'localvar'.
    amount : float, optional
        Proportion of image pixels to replace with noise on range [0, 1].
        Used in 'salt', 'pepper', and 'salt & pepper'. Default : 0.05
    salt_vs_pepper : float, optional
        Proportion of salt vs. pepper noise for 's&p' on range [0, 1].
        Higher values represent more salt. Default : 0.5 (equal amounts)

    Returns
    -------
    out : ndarray
        Output floating-point image data on range [0, 1] or [-1, 1] if the
        input `image` was unsigned or signed, respectively.

    Notes
    -----
    Speckle, Poisson, Localvar, and Gaussian noise may generate noise outside
    the valid image range. The default is to clip (not alias) these values,
    but they may be preserved by setting `clip=False`. Note that in this case
    the output may contain values outside the ranges [0, 1] or [-1, 1].
    Use this option with care.

    Because of the prevalence of exclusively positive floating-point images in
    intermediate calculations, it is not possible to intuit if an input is
    signed based on dtype alone. Instead, negative values are explicitly
    searched for. Only if found does this function assume signed input.
    Unexpected results only occur in rare, poorly exposed cases (e.g. if all
    values are above 50 percent gray in a signed `image`). In this event,
    manually scaling the input to the positive domain will solve the problem.

    The Poisson distribution is only defined for positive integers. To apply
    this noise type, the number of unique values in the image is found and
    the next round power of two is used to scale up the floating-point result,
    after which it is scaled back down to the floating-point image range.

    To generate Poisson noise against a signed image, the signed image is
    temporarily converted to an unsigned image in the floating point domain,
    Poisson noise is generated, then it is returned to the original range.

    """
    mode = mode.lower()

    # Detect if a signed image was input
    if image.min() < 0:
        low_clip = -1.0
    else:
        low_clip = 0.0

    image = img_as_float(image)
    if seed is not None:
        cp.random.seed(seed=seed)

    allowedtypes = {
        'gaussian': 'gaussian_values',
        'localvar': 'localvar_values',
        'poisson': 'poisson_values',
        'salt': 'sp_values',
        'pepper': 'sp_values',
        's&p': 's&p_values',
        'speckle': 'gaussian_values'
    }

    kwdefaults = {
        'mean': 0.0,
        'var': 0.01,
        'amount': 0.05,
        'salt_vs_pepper': 0.5,
        'local_vars': cp.zeros_like(image) + 0.01
    }

    allowedkwargs = {
        'gaussian_values': ['mean', 'var'],
        'localvar_values': ['local_vars'],
        'sp_values': ['amount'],
        's&p_values': ['amount', 'salt_vs_pepper'],
        'poisson_values': []
    }

    for key in kwargs:
        if key not in allowedkwargs[allowedtypes[mode]]:
            raise ValueError('%s keyword not in allowed keywords %s' %
                             (key, allowedkwargs[allowedtypes[mode]]))

    # Set kwarg defaults
    for kw in allowedkwargs[allowedtypes[mode]]:
        kwargs.setdefault(kw, kwdefaults[kw])

    if mode == 'gaussian':
        noise = cp.random.normal(kwargs['mean'], kwargs['var']**0.5,
                                 image.shape)
        out = image + noise

    elif mode == 'localvar':
        # Ensure local variance input is correct
        if (kwargs['local_vars'] <= 0).any():
            raise ValueError('All values of `local_vars` must be > 0.')

        # Safe shortcut usage broadcasts kwargs['local_vars'] as a ufunc

        # CuPy Backend: Must supply size argument to get around a CuPy bug
        #       https://github.com/cupy/cupy/pull/4457
        out = image + cp.random.normal(0, kwargs["local_vars"]**0.5,
                                       kwargs["local_vars"].shape)

    elif mode == 'poisson':
        # Determine unique values in image & calculate the next power of two
        vals = len(cp.unique(image))
        vals = 2**cp.ceil(cp.log2(vals))

        # Ensure image is exclusively positive
        if low_clip == -1.0:
            old_max = image.max()
            image = (image + 1.0) / (old_max + 1.0)

        # Generating noise for each unique value in image.
        out = cp.random.poisson(image * vals) / float(vals)

        # Return image to original range if input was signed
        if low_clip == -1.0:
            out = out * (old_max + 1.0) - 1.0

    elif mode == 'salt':
        # Re-call function with mode='s&p' and p=1 (all salt noise)
        out = random_noise(image,
                           mode='s&p',
                           seed=seed,
                           amount=kwargs['amount'],
                           salt_vs_pepper=1.)

    elif mode == 'pepper':
        # Re-call function with mode='s&p' and p=1 (all pepper noise)
        out = random_noise(image,
                           mode='s&p',
                           seed=seed,
                           amount=kwargs['amount'],
                           salt_vs_pepper=0.)

    elif mode == 's&p':
        out = image.copy()
        p = kwargs['amount']
        q = kwargs['salt_vs_pepper']
        flipped = cp.random.choice([True, False],
                                   size=image.shape,
                                   p=[p, 1 - p])
        salted = cp.random.choice([True, False],
                                  size=image.shape,
                                  p=[q, 1 - q])
        peppered = ~salted
        out[flipped & salted] = 1
        out[flipped & peppered] = low_clip

    elif mode == 'speckle':
        noise = cp.random.normal(kwargs['mean'], kwargs['var']**0.5,
                                 image.shape)
        out = image + image * noise

    # Clip back to original range, if necessary
    if clip:
        out = cp.clip(out, low_clip, 1.0)

    return out
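Usage sketch. This looks like cucim's port of skimage.util.random_noise (the import path below, and `img_as_float` coming from the surrounding module, are assumptions):

import cupy as cp
from cucim.skimage.util import random_noise  # assumed import path

img = cp.full((4, 4), 0.5, dtype=cp.float64)
noisy = random_noise(img, mode='gaussian', seed=42, var=0.01)
print(float(noisy.min()) >= 0.0 and float(noisy.max()) <= 1.0)  # True: clipped to [0, 1]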
Example No. 17
def make_sweep(freq, sr, duration):
    # exponential sweep: log-spaced per-sample frequencies (base 2), with the
    # phase accumulated as a cumulative sum of angular increments
    return cp.sin(
        cp.cumsum(2 * cp.pi * cp.logspace(cp.log2(2.0 / sr),
                                          cp.log2(float(freq) / sr),
                                          num=int(duration * sr),
                                          base=2.0)))
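For example (a sketch assuming `cp` is CuPy and that `cp.logspace` accepts the 0-d arrays `cp.log2` returns), one second of an exponential sweep up to 440 Hz at an 8 kHz sample rate:

sr = 8000
sweep = make_sweep(440.0, sr, duration=1.0)
print(sweep.shape)  # (8000,)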
Example No. 18
        number_qubits = k + L
        bipartitions = [bip.random_bipartition(range(number_qubits), number_qubits // 2) for i in range(number_of_bip)]
        constr_times = []
        eigh_times = []
        for chosen in bipartitions:
            local_start = time()
            notchosen = bip.notchosen(chosen, number_qubits)
            W = W_from_state_numba(current_state, chosen, notchosen)
            if flag: constr_times.append(time() - local_start)
            else: pass
            local_start = time()
            W = cp.array(W)
            rho = W.dot(W.conj().T)
            eig = gpu_eigh(rho)
            eig = eig[eig > 1e-5]
            a = cp.log2(eig)
            entr = cp.asnumpy(- cp.sum(eig * a))
            if flag: eigh_times.append(time() - local_start)
            else: flag = True       

        constr_means[i][k - 1] = np.mean(constr_times)
        constr_stds[i][k - 1] = np.sqrt(np.var(constr_times))
        eigh_means[i][k - 1] = np.mean(eigh_times)
        eigh_stds[i][k - 1] = np.sqrt(np.var(eigh_times))

    current_state = apply_IQFT(L, current_state)
    bipartitions = [bip.random_bipartition(range(number_qubits), number_qubits // 2) for i in range(number_of_bip)]
    constr_times = []
    eigh_times = []
    for chosen in bipartitions:
        local_start = time()
Example No. 19
def run_cupy(price, strike, t, rate, vol):
    import math
    import cupy as cp

    # Allocate temporary arrays
    size = len(price)
    tmp = cp.empty(size, dtype='float64')
    vol_sqrt = cp.empty(size, dtype='float64')
    rsig = cp.empty(size, dtype='float64')
    d1 = cp.empty(size, dtype='float64')
    d2 = cp.empty(size, dtype='float64')

    # Outputs
    call = cp.empty(size, dtype='float64')
    put = cp.empty(size, dtype='float64')

    # Transfer inputs to the GPU
    price = cp.array(price)
    strike = cp.array(strike)
    t = cp.array(t)
    rate = cp.array(rate)
    vol = cp.array(vol)

    # Define an erf ufunc, since CuPy does not expose one here (this uses the
    # private cp.core API; newer CuPy provides cupyx.scipy.special.erf)
    cp_erf = cp.core.create_ufunc('cupyx_scipy_erf', ('f->f', 'd->d'),
                                  'out0 = erf(in0)',
                                  doc='''Error function.
        .. seealso:: :meth:`scipy.special.erf`
        ''')

    # Begin computation
    c05 = 0.5  # the names c05 and c10 denote the constants 0.5 and 1.0
    c10 = 1.0
    invsqrt2 = 1.0 / math.sqrt(2.0)

    cp.multiply(vol, vol, out=rsig)
    cp.multiply(rsig, c05, out=rsig)
    cp.add(rsig, rate, out=rsig)

    cp.sqrt(t, out=vol_sqrt)
    cp.multiply(vol_sqrt, vol, out=vol_sqrt)

    cp.multiply(rsig, t, out=tmp)
    cp.divide(price, strike, out=d1)
    # NOTE: Black-Scholes d1 uses the natural log; cp.log2 differs by a constant
    # factor 1/ln(2) and is kept here as in the original benchmark
    cp.log2(d1, out=d1)
    cp.add(d1, tmp, out=d1)

    cp.divide(d1, vol_sqrt, out=d1)
    cp.subtract(d1, vol_sqrt, out=d2)

    # d1 = c05 + c05 * erf(d1 * invsqrt2)
    cp.multiply(d1, invsqrt2, out=d1)
    cp_erf(d1, out=d1)
    cp.multiply(d1, c05, out=d1)
    cp.add(d1, c05, out=d1)

    # d2 = c05 + c05 * erf(d2 * invsqrt2)
    cp.multiply(d2, invsqrt2, out=d2)
    cp_erf(d2, out=d2)
    cp.multiply(d2, c05, out=d2)
    cp.add(d2, c05, out=d2)

    # Reuse existing buffers
    e_rt = vol_sqrt
    tmp2 = rsig

    # e_rt = exp(-rate * t)
    cp.multiply(rate, -1.0, out=e_rt)
    cp.multiply(e_rt, t, out=e_rt)
    cp.exp(e_rt, out=e_rt)

    # call = price * d1 - e_rt * strike * d2
    #
    # tmp = price * d1
    # tmp2 = e_rt * strike * d2
    # call = tmp - tmp2
    cp.multiply(price, d1, out=tmp)
    cp.multiply(e_rt, strike, out=tmp2)
    cp.multiply(tmp2, d2, out=tmp2)
    cp.subtract(tmp, tmp2, out=call)

    # put = e_rt * strike * (c10 - d2) - price * (c10 - d1)
    # tmp = e_rt * strike
    # tmp2 = (c10 - d2)
    # put = tmp * tmp2
    # tmp = c10 - d1
    # tmp = price * tmp
    # put = put - tmp
    cp.multiply(e_rt, strike, out=tmp)
    cp.subtract(c10, d2, out=tmp2)
    cp.multiply(tmp, tmp2, out=put)
    cp.subtract(c10, d1, out=tmp)
    cp.multiply(price, tmp, out=tmp)
    cp.subtract(put, tmp, out=put)

    # Transfer outputs back to CPU
    call = cp.asnumpy(call)
    put = cp.asnumpy(put)

    return call, put
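Usage sketch with synthetic NumPy inputs (the values are arbitrary; assumes a CuPy version where cp.core.create_ufunc is still available):

import numpy as np

n = 1024
price = np.random.uniform(80.0, 120.0, n)
strike = np.full(n, 100.0)
t = np.full(n, 1.0)       # one year to expiry
rate = np.full(n, 0.02)
vol = np.full(n, 0.2)

call, put = run_cupy(price, strike, t, rate, vol)
print(call[:3], put[:3])  # host-side NumPy arrays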
Example No. 20
def train():

    n_iterations = 1000
    n_moves = 1000

    learning_rate = 0.01
    mutation_scale = 0.1
    size = [2 * 4**2 + 12 * 2, 40, 10, 4]

    n_organisms = 100

    plt.ion()
    plt.show()

    best_loss_iteration = []
    mean_loss_iteration = []
    worst_loss_iteration = []

    initial_organism = init_organism(1, size)
    flat_initial_organism = flatten_organism(initial_organism)
    loc = flatten_organism(initial_organism)
    previous_loc = loc
    N = cp.random.normal(loc=0,
                         scale=mutation_scale,
                         size=(int(n_organisms / 2),
                               flat_initial_organism.shape[1]))
    flat_organism = cp.concatenate((loc + N, loc - N), axis=0)
    organism = reform_organism(flat_organism, size)

    for iteration in range(1, n_iterations + 1):

        game = game_2048.Games(n_boards=n_organisms)
        board = game.boards

        is_game_over = np.repeat(False, n_organisms)
        action = np.zeros((n_organisms, ))
        for move in range(n_moves):
            board = cp.asarray(board)
            is_empty = (board == 0).astype(cp.float64)  # cp.float was removed from CuPy
            vertical = (board[:, :-1] == board[:, 1:])
            horisontal = (board[:, :, :-1] == board[:, :, 1:])
            board[board == 0] = 1
            board = cp.log2(board)
            data = cp.concatenate((
                board.reshape((-1, 16)),  # board is already log2-transformed above
                is_empty.reshape((-1, 16)),
                vertical.reshape((-1, 12)),
                horisontal.reshape((-1, 12)),
            ),
                                  axis=1)
            output = apply_organism(get_organism(organism, ~is_game_over),
                                    data[~is_game_over])
            new_action = cp.reshape(output, (-1, 4))
            new_action = cp.argmax(new_action, axis=1)
            # new_action = cp.exp(new_action)
            # new_action = new_action/new_action.sum(axis=1, keepdims=True)
            # p = cp.random.rand(new_action.shape[0], 1)
            # c = cp.cumsum(new_action, axis=1)
            # new_action = (p <= c).sum(axis=1)
            new_action = cp.asnumpy(new_action)
            action[~is_game_over] = new_action
            board, reward, is_game_over = game.step(action)

            if is_game_over.all():
                break

        loss = -cp.asarray(reward)
        organism_loss = loss
        best_loss_iteration.append(cp.asnumpy(organism_loss.min()))
        mean_loss_iteration.append(cp.asnumpy(organism_loss.mean()))
        worst_loss_iteration.append(cp.asnumpy(organism_loss.max()))

        print('iteration: {}, loss: {}, std: {:.2f}, gradient norm: {:.4f}'.
              format(iteration, organism_loss.min(),
                     cp.asnumpy(organism_loss).std(),
                     cp.asnumpy(cp.linalg.norm(loc - previous_loc))))

        #print('best final board')
        #print(board[best_organism_index])
        if iteration % 1 == 0:
            best_organism_index = cp.argmin(organism_loss)

            plt.clf()
            # plt.subplot(2, 2, 3)
            # #plt.yscale('log')
            # plt.plot(best_board_iteration)
            # plt.plot(mean__iteration)
            # plt.plot(worst_loss_iteration)

            plt.subplot(2, 2, 3)
            #plt.yscale('log')
            plt.plot(best_loss_iteration)
            plt.plot(mean_loss_iteration)
            plt.plot(worst_loss_iteration)

            plt.draw()
            plt.pause(0.001)

        previous_loc = loc
        organism, loc, N = evolve(organism, organism_loss, mutation_scale,
                                  size, learning_rate)
Example No. 21
def _project_cupy(reference_sources, estimated_source, flen, nsrc):
    """Least-squares projection of estimated source on the subspace spanned by
    delayed versions of reference sources, with delays between 0 and flen-1
    """
    # nsrc = tf.shape(reference_sources)[0]
    nsampl = reference_sources.shape[1]
    typ = reference_sources.dtype

    # computing coefficients of least squares problem via FFT ##
    # zero padding and FFT of input data
    reference_sources = cp.concatenate(
        (reference_sources, cp.zeros([nsrc, flen - 1], dtype=typ)), 1)

    estimated_source = cp.concatenate(
        (estimated_source, cp.zeros([flen - 1], dtype=typ)), 0)

    n_fft = cp.power(2., cp.ceil(cp.log2(nsampl + flen - 1))).astype('i')

    sf = cp.fft.fft(reference_sources, n=int(n_fft), axis=1)
    sef = cp.fft.fft(estimated_source, n=int(n_fft))

    # inner products between delayed versions of reference_sources
    G = cp.empty([nsrc * flen, nsrc * flen])
    for i in range(nsrc):
        for j in range(nsrc):
            ssf = sf[i] * cp.conj(sf[j])
            ssf = cp.real(cp.fft.ifft(ssf))
            ss = toeplitz_cupy(
                cp.concatenate((cp.reshape(ssf[0], [1]), ssf[-1:-flen:-1]), 0),
                ssf[:flen])
            G[i * flen:(i + 1) * flen, j * flen:(j + 1) * flen] = ss
            G[j * flen:(j + 1) * flen,
              i * flen:(i + 1) * flen] = cp.transpose(ss)

    # inner products between estimated_source and delayed versions of
    # reference_sources
    D = cp.empty([nsrc * flen])
    for i in range(nsrc):
        ssef = sf[i] * cp.conj(sef)
        ssef = cp.real(cp.fft.ifft(ssef))
        conc = cp.concatenate(
            [cp.reshape(ssef[0], [1]),
             cp.flip(ssef[-flen + 1:], 0)], 0)
        D[i * flen:(i + 1) * flen] = conc

    # Computing projection
    # Distortion filters

    s = cp.linalg.solve(G, cp.expand_dims(D, 1))
    if nsrc == 2:
        C = cp.concatenate((s[:flen], s[flen:]), 1)
    else:
        C = cp.reshape(s, (flen, nsrc))

    # Filtering
    sproj = cp.zeros([nsampl + flen - 1], dtype=cp.float64)

    for i in range(nsrc):
        fshape = C[:, i].shape[0] + reference_sources[i].shape[0] - 1
        fft1 = cp.fft.rfftn(C[:, i], (fshape, ))
        fft2 = cp.fft.rfftn(reference_sources[i], (fshape, ))
        ifft = cp.fft.irfftn(fft1 * fft2, (fshape, ))
        sproj += ifft[:nsampl + flen - 1]
    return sproj
Example No. 22
def qmf(hk):
    """
    Return high-pass qmf filter from low-pass

    Parameters
    ----------
    hk : array_like
        Coefficients of low-pass filter.

    """
    N = len(hk) - 1
    asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
    return hk[::-1] * cp.array(asgn)
    """
    Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.

    Parameters
    ----------
    hk : array_like
        Coefficients of low-pass filter.
    J : int, optional
        Values will be computed at grid points ``K/2**J``. Default is 7.

    Returns
    -------
    x : ndarray
        The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
        ``len(hk) = len(gk) = N+1``.
    phi : ndarray
        The scaling function ``phi(x)`` at `x`:
        ``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
    psi : ndarray, optional
        The wavelet function ``psi(x)`` at `x`:
        ``psi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
        `psi` is only returned if `gk` is not None.

    Notes
    -----
    The algorithm uses the vector cascade algorithm described by Strang and
    Nguyen in "Wavelets and Filter Banks".  It builds a dictionary of values
    and slices for quick reuse.  Then inserts vectors into final vector at the
    end.

    """
    N = len(hk) - 1

    if (J > 30 - cp.log2(N + 1)):
        raise ValueError("Too many levels.")
    if (J < 1):
        raise ValueError("Too few levels.")

    # construct matrices needed
    nn, kk = cp.ogrid[:N, :N]
    s2 = cp.sqrt(2)
    # append a zero so that take works
    thk = cp.r_[hk, 0]
    gk = qmf(hk)
    tgk = cp.r_[gk, 0]

    indx1 = cp.clip(2 * nn - kk, -1, N + 1)
    indx2 = cp.clip(2 * nn - kk + 1, -1, N + 1)
    m = cp.zeros((2, 2, N, N), 'd')
    m[0, 0] = cp.take(thk, indx1, 0)
    m[0, 1] = cp.take(thk, indx2, 0)
    m[1, 0] = cp.take(tgk, indx1, 0)
    m[1, 1] = cp.take(tgk, indx2, 0)
    m *= s2

    # construct the grid of points
    x = cp.arange(0, N * (1 << J), dtype=float) / (1 << J)
    phi = 0 * x

    psi = 0 * x

    # find phi0, and phi1
    lam, v = eig(m[0, 0])
    ind = cp.argmin(cp.absolute(lam - 1))
    # a dictionary with a binary representation of the
    #   evaluation points x < 1 -- i.e. position is 0.xxxx
    v = cp.real(v[:, ind])
    # need scaling function to integrate to 1 so find
    #  eigenvector normalized to sum(v,axis=0)=1
    sm = cp.sum(v)
    if sm < 0:  # need scaling function to integrate to 1
        v = -v
        sm = -sm
    bitdic = {'0': v / sm}
    bitdic['1'] = cp.dot(m[0, 1], bitdic['0'])
    step = 1 << J
    phi[::step] = bitdic['0']
    phi[(1 << (J - 1))::step] = bitdic['1']
    psi[::step] = cp.dot(m[1, 0], bitdic['0'])
    psi[(1 << (J - 1))::step] = cp.dot(m[1, 1], bitdic['0'])
    # descend down the levels inserting more and more values
    #  into bitdic -- store the values in the correct location once we
    #  have computed them -- stored in the dictionary
    #  for quicker use later.
    prevkeys = ['1']
    for level in range(2, J + 1):
        newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
        fac = 1 << (J - level)
        for key in newkeys:
            # convert key to number
            num = 0
            for pos in range(level):
                if key[pos] == '1':
                    num += (1 << (level - 1 - pos))
            pastphi = bitdic[key[1:]]
            ii = int(key[0])
            temp = cp.dot(m[0, ii], pastphi)
            bitdic[key] = temp
            phi[num * fac::step] = temp
            psi[num * fac::step] = cp.dot(m[1, ii], pastphi)
        prevkeys = newkeys

    return x, phi, psi
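A quick check of `qmf` (a sketch assuming `cp` is CuPy), using the Haar low-pass filter: the returned high-pass filter is the reversed input with alternating signs:

import cupy as cp

haar = cp.array([1.0, 1.0]) / cp.sqrt(2.0)
print(qmf(haar))  # [ 0.70710678 -0.70710678]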
Example No. 23
def train():

    n_iterations = 1000000
    n_moves = 10000

    mutation_scale = 0.1
    n_children = 1
    size = [16 + 2, 10, 4]
    food_amount = 2000
    n_initial_organisms = int(food_amount/10)

    if not args.no_graphics:
        plt.ion()
        plt.show()

    best_loss_iteration = []
    mean_loss_iteration = []
    worst_loss_iteration = []
    best_food_loss_iteration = []
    best_food_organism_iteration = []

    organism = init_organism(n_initial_organisms, size)
    food = np.ones(n_initial_organisms)
    for iteration in range(1, n_iterations+1):
        n_organisms = len(food)
        game = game_2048.Games(n_boards=n_organisms)
        board = game.boards
        is_game_over = np.repeat(False, n_organisms)
        action = np.zeros((n_organisms,))
        for move in range(n_moves):
            board = cp.asarray(board)
            is_empty = (board == 0).astype(cp.float64)  # cp.float was removed from CuPy
            vertical = (board[:, :-1] == board[:, 1:])
            horisontal = (board[:, :, :-1] == board[:, :, 1:])
            board[board == 0] = 1
            board = cp.log2(board)
            data = cp.concatenate((
                #board.reshape((-1, 16)),  # board is already log2-transformed above
                is_empty.reshape((-1, 16)),
                #vertical.reshape((-1, 12)),
                #horisontal.reshape((-1, 12)),
                vertical.sum(axis=(1,2)).reshape((-1, 1)),
                horisontal.sum(axis=(1,2)).reshape((-1, 1))
            ), axis=1)
            output = apply_organism(get_organism(organism, ~is_game_over), data[~is_game_over])
            new_action = cp.reshape(output, (-1, 4))
            new_action = cp.argmax(new_action, axis=1)
            # new_action = cp.exp(new_action)
            # new_action = new_action/new_action.sum(axis=1, keepdims=True)
            # p = cp.random.rand(new_action.shape[0], 1)
            # c = cp.cumsum(new_action, axis=1)
            # new_action = (p <= c).sum(axis=1)
            new_action = cp.asnumpy(new_action)
            action[~is_game_over] = new_action
            board, reward, is_game_over = game.step(action)
           
            if is_game_over.all():
                break

        loss = -cp.asarray(reward)
        
        organism_loss = loss
        
        food += distribute_food(loss, amount=food_amount - food.sum())
        food -= 1

        best_loss_iteration.append(cp.asnumpy(organism_loss.min()))
        mean_loss_iteration.append(cp.asnumpy(organism_loss.mean()))
        worst_loss_iteration.append(cp.asnumpy(organism_loss.max()))
        best_food_loss_iteration.append(cp.asnumpy(organism_loss[np.argsort(food)[-10:]].mean()))
        best_food_organism_iteration.append(get_organism(organism, np.argmax(food)))

        is_living = (food >= 0)
        survivor = get_organism(organism, is_living)
        food = food[is_living].copy()

        is_new_parent = (food >= 1)
        if is_new_parent.sum() >= 1:
            parent = get_organism(survivor, is_new_parent)
            children = mutate(parent, n_children, mutation_scale=mutation_scale)
            organism = concatenate_organism(survivor, children)
            n_new_children = int(is_new_parent.sum()*n_children)
            food[is_new_parent] -= 1
            food = np.concatenate((food, np.ones(n_new_children)))
        else:
            organism = survivor

        print('iteration: {}, loss: {}, std: {:.2f}, n_organisms: {}, food: {:.2f}, std: {:.2f}, dead: {:.2f}, children: {}'.format(
            iteration,
            organism_loss.min(),
            cp.asnumpy(organism_loss).std(),
            loss.shape[0],
            food.mean(),
            food.std(),
            (~is_living).sum()/loss.shape[0],
            is_new_parent.sum()*n_children
        ))

        best_organism_index = np.argmin(cp.asnumpy(organism_loss))
        print('best final board')
        print(board[best_organism_index])

        if iteration % 1 == 0 and not args.no_graphics:
            best_organism_index = cp.argmin(organism_loss)

            plt.clf()
            plt.subplot(2, 1, 1)
            plt.title('Loss (neg. sum of board)')
            plt.xlabel('Iterations')
            plt.ylabel('Loss')
            plt.plot(best_loss_iteration)
            plt.plot(mean_loss_iteration)
            plt.plot(worst_loss_iteration)
            plt.plot(best_food_loss_iteration)

            plt.subplot(2, 1, 2)
            plt.title('Food')
            plt.xlabel('Food')
            plt.ylabel('n_organisms')
            plt.hist(food)

            plt.draw()
            plt.pause(0.001)

            plt.savefig('loss.png')

        if iteration % 10 == 0:
            with open('loss_iteration','wb') as fp:
                pickle.dump({
                    'best_loss_iteration': best_loss_iteration,
                    'mean_loss_iteration': mean_loss_iteration,
                    'worst_loss_iteration': worst_loss_iteration,
                    'best_food_loss_iteration': best_food_loss_iteration,
                }, fp)

            with open('organism_iteration','wb') as fp:
                pickle.dump({
                    'best_food_organism_iteration': best_food_organism_iteration
                }, fp)