def __call__(self, hs, path_through, test=False):
     """
     z has shape (1, 512, 2, 2), so 512 channels appears to be the maximum.
     path_through has shape (256,), which is nowhere near enough: repeat it
     4x, reshape it into 2x2 blocks, and splice it into the feature map.
     """
     path_through_4 = cp.repeat(path_through, 4).astype('float32')
     path_through_2x2 = chainer.cuda.to_gpu(cp.reshape(path_through_4, (1, 256, 2, 2)))
     hs[-1] = F.concat((hs[-1], Variable(path_through_2x2)))
     """
     - device: <CUDA Device 0>
     - volatile: OFF
     - backend: <class 'cupy.core.core.ndarray'>
     - shape: (1, 512, 2, 2)
     - dtype: float32
     """
     #cupy.concat(hs[-1],) 
     """
     import numpy as np
     import cupy as cp
     import cupy.manipulation.join as cujoin
     from chainer import Variable
     cucat = cujoin.concatenate
     path_through
     for i in range(len(path_through)-4):
         p1, p2, p3, p4 = path_through[i:i+4]
         sample = cp.array([[[[p1, p2], [p3, p4]]]]).astype('float32')
         break
     #print( sample.shape )
     vsample = Variable(sample)
     import chainer.functions.array.concat as cat
     hs[-1] = cat.concat([hs[-1], vsample])
     """
     #print("hs", hs[-1], hs[-1].__len__(), hs[-1].debug_print())
     h = self.c0(hs[-1], test=test)
     for i in range(1,8):
         h = F.concat([h, hs[-i-1]])
         if i<7:
             h = self['c%d'%i](h, test=test)
         else:
             h = self.c7(h)
     return h
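A minimal standalone sketch of the repeat-and-reshape trick above (hypothetical shapes; hs[-1] is assumed to be a (1, 256, 2, 2) feature map, consistent with the (1, 512, 2, 2) debug output after the concat):

import cupy as cp

path_through = cp.arange(256, dtype=cp.float32)           # shape (256,)
feature_map = cp.zeros((1, 256, 2, 2), dtype=cp.float32)  # stand-in for hs[-1]

# Repeat each element 4 times: (256,) -> (1024,) -> (1, 256, 2, 2).
tiled = cp.repeat(path_through, 4).reshape(1, 256, 2, 2)

# Concatenate along the channel axis, as F.concat does by default.
merged = cp.concatenate((feature_map, tiled), axis=1)
print(merged.shape)  # (1, 512, 2, 2)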
    def __atmospheric_distort(self, img: np.ndarray) -> Tuple[np.ndarray, float, float, int, int]:
        pupil_mask = self.__circular_aperture(img)
        phase_screen = self.phase_screen_container.phase_screen

        aperture_size = phase_screen.aperture_size
        fried_param = phase_screen.fried_param
        outer_scale = phase_screen.outer_scale
        stencil_length_factor = phase_screen.stencil_length_factor

        if using_cupy:
            phase_screen = np.asarray(phase_screen, dtype=np.float32)

        # this sets each satellite pixel to max brightness. Remove this line if it interferes with dynamic brightness range
        img[img > 0] = 255
        a = ifft2(pupil_mask * np.exp(1j * phase_screen))
        h = abs(abs(a) ** 2)
        img = ifft2(fft2(h) * fft2(img[:,:,1])).real

        img /= np.max(img)
        img *= 255

        return np.repeat(img[:,:,np.newaxis], 3, axis=2), aperture_size, fried_param, outer_scale, stencil_length_factor
Example #3
 def __call__(self, coordinates, groups=None):
     """
     Interpolates new input values `coordinates` using the `.c` DataFrame
     or map of DataFrames.
     """
     if isinstance(self.y, Series):
         if groups is not None:
             self.groups = groups.astype("int32")
         else:
             self.groups = Series(
                 cp.repeat(cp.array(0), len(self.t))
             ).astype("int32")
         result = _cubic_spline_fit(
             coordinates, self.groups, self.prefix, self.t, self.c
         )
         return Series(result)
     else:
         result = DataFrame()
         for col in self.y.columns:
             result[col] = Series(
                 _cubic_spline_fit(self.c[col], coordinates)
             )
         return result
def Generate_S1(J, T):
    S = cp.random.normal(0, .01, J * 5)
    S = S.reshape(5, 1, J)
    EPS = cp.random.normal(0, .01, 3 * J * (T + 1))
    EPS = EPS.reshape(3, T + 1, J)
    Lx = cp.vstack(
        (cp.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
         cp.array([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
         cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])))
    Li = cp.vstack(  # was np.vstack: keep everything on the GPU
        (cp.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
         cp.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
         cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0])))
    Lpi = cp.vstack(
        (cp.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
         cp.array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
         cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0])))
    R = cp.reshape(
        cp.vstack(  # was np.vstack: keep everything on the GPU
            (cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]),
             cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]),
             cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                       0]))), [3, 18])
    U = cp.reshape(
        cp.vstack(  # was np.vstack: keep everything on the GPU
            (cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0]),
             cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]),
             cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                       1]))), [3, 18])
    I = cp.reshape(
        cp.vstack(
            (cp.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
             cp.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
             cp.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                       0]))), [3, 18])
    I = cp.repeat(I[:, :, cp.newaxis], J, axis=2)
    return EPS, S, Lx, Lpi, Li, I, R, U
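The stacked rows in Generate_S1 are one-hot selectors: multiplying a vector by Lx, Li, or Lpi extracts fixed components of an 18-element state vector. A small illustrative check (not part of the original):

import cupy as cp

E = cp.eye(18)
Lx = cp.vstack((E[3], E[6], E[9]))   # same rows as in Generate_S1
v = cp.arange(18, dtype=cp.float64)
print(Lx @ v)                        # [3. 6. 9.] -- picks components 3, 6 and 9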
Example #5
def getData(balance_ones=True, Ntest=1000):
    """
    Get's the facial expression data
    optional params:
    balance_ones: whether or not to handle the class imbalance
    Ntest = number of elements for test set
    """
    Y = []
    X = []
    i = 0
    for line in open('fer2013.csv'):
        if i == 0:
            i = 1
        else:
            row = line.split(',')
            Y.append(int(row[0]))
            X.append([int(p) for p in row[1].split()])

    X = cp.array(X) / 255.0
    Y = cp.array(Y)

    # shuffle data and split into training and test
    X, Y = shuffle(X, Y)
    Xtrain, Ytrain = X[:-Ntest], Y[:-Ntest]
    Xvalid, Yvalid = X[-Ntest:], Y[-Ntest:]

    if balance_ones:
        # balance the 1 class since it has an imbalanced # of samples
        # used for logistic regression mostly but also when classifying all 7 labels
        X0, Y0 = Xtrain[Ytrain != 1, :], Ytrain[Ytrain != 1]
        X1 = Xtrain[Ytrain==1, :]
        X1 = cp.repeat(X1, 9, axis=0)
        Xtrain = cp.vstack([X0, X1])
        Ytrain = cp.concatenate((Y0, cp.ones(len(X1), dtype=Y0.dtype)))

    return Xtrain, Ytrain, Xvalid, Yvalid
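The balancing branch above oversamples class 1 by duplicating each of its rows 9 times with cp.repeat along axis 0. A minimal sketch of the same idea on toy data:

import cupy as cp

X1 = cp.array([[1.0, 2.0], [3.0, 4.0]])    # two minority-class samples
X1_over = cp.repeat(X1, 9, axis=0)         # each row repeated 9 times
print(X1_over.shape)                       # (18, 2)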
Example #6
def lstsq(a, b, rcond=1e-15):
    """Return the least-squares solution to a linear matrix equation.

    Solves the equation `a x = b` by computing a vector `x` that
    minimizes the Euclidean 2-norm `|| b - a x ||^2`.  The equation may
    be under-, well-, or over- determined (i.e., the number of
    linearly independent rows of `a` can be less than, equal to, or
    greater than its number of linearly independent columns).  If `a`
    is square and of full rank, then `x` (but for round-off error) is
    the "exact" solution of the equation.

    Args:
        a (cupy.ndarray): "Coefficient" matrix with dimension ``(M, N)``
        b (cupy.ndarray): "Dependent variable" values with dimension ``(M,)``
            or ``(M, K)``
        rcond (float): Cutoff parameter for small singular values.
            For stability it computes the largest singular value denoted by
            ``s``, and sets all singular values smaller than ``s`` to zero.

    Returns:
        tuple:
            A tuple of ``(x, residuals, rank, s)``. ``x`` is the
            least-squares solution with shape ``(N,)`` or ``(N, K)``
            depending on whether ``b`` was two-dimensional. ``residuals``
            holds the squared Euclidean 2-norm for each column in
            ``b - a*x``; it is an empty array if the rank of ``a`` is < N
            or M <= N, a ``(1,)``-shaped array if ``b`` is one-dimensional,
            and ``(K,)``-shaped otherwise. ``rank`` is the integer rank of
            matrix ``a``, and ``s`` contains the singular values of ``a``.

    .. warning::
        This function calls one or more cuSOLVER routine(s) which may yield
        invalid results if input conditions are not met.
        To detect these invalid results, you can set the `linalg`
        configuration to a value that is not `ignore` in
        :func:`cupyx.errstate` or :func:`cupyx.seterr`.

    .. seealso:: :func:`numpy.linalg.lstsq`
    """
    util._assert_cupy_array(a, b)
    util._assert_rank2(a)
    if b.ndim > 2:
        raise linalg.LinAlgError('{}-dimensional array given. Array must be at'
                                 ' most two-dimensional'.format(b.ndim))
    m, n = a.shape[-2:]
    m2 = b.shape[0]
    if m != m2:
        raise linalg.LinAlgError('Incompatible dimensions')

    u, s, vt = cupy.linalg.svd(a, full_matrices=False)
    # number of singular values and matrix rank
    cutoff = rcond * s.max()
    s1 = 1 / s
    sing_vals = s <= cutoff
    s1[sing_vals] = 0
    rank = s.size - sing_vals.sum()

    if b.ndim == 2:
        s1 = cupy.repeat(s1.reshape(-1, 1), b.shape[1], axis=1)
    # Solve the least-squares solution
    z = core.dot(u.transpose(), b) * s1
    x = core.dot(vt.transpose(), z)
    # Calculate squared Euclidean 2-norm for each column in b - a*x
    if rank != n or m <= n:
        resids = cupy.array([], dtype=a.dtype)
    elif b.ndim == 2:
        e = b - core.dot(a, x)
        resids = cupy.sum(cupy.square(e), axis=0)
    else:
        e = b - cupy.dot(a, x)
        resids = cupy.dot(e.T, e).reshape(-1)
    return x, resids, rank, s
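An illustrative call, assuming cupy.linalg.lstsq is available (it mirrors numpy.linalg.lstsq): fit a line y ≈ c + m*t by least squares.

import cupy

t = cupy.array([0.0, 1.0, 2.0, 3.0])
y = cupy.array([-1.0, 0.2, 0.9, 2.1])
A = cupy.vstack([cupy.ones_like(t), t]).T        # design matrix, shape (4, 2)
x, residuals, rank, s = cupy.linalg.lstsq(A, y, rcond=1e-15)
print(x)                                         # close to [-0.95, 1.0]: intercept and slope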
Example #7
 def test_func(self):
     a = testing.shaped_arange((2, 3, 4), cupy)
     repeats = cupy.array([2, 3], dtype=cupy.int32)
     with pytest.raises(ValueError, match=r'repeats'):
         cupy.repeat(a, repeats)
Example #8
def forward_pool(A_previous, stride, f, mode = "max"):
    '''
    A forward pool step
    Output shape: 1 + (x - f) / stride

    Parameters
    ----------
    A_previous : cp.array(examples, height, width, depth)
        Input images from the previous layer.
    stride : int
        Stride number.
    f : int
        Square filter dimension.
    mode : string, optional
        Pool mode 'mean' or 'max'. The default is "max".

    Returns
    -------
    A : cp.array(examples, 1 + (height - f) / stride, 1 + (width - f) / stride, depth)
        Output layer image.

    '''
    
    (m, n_H_prev, n_W_prev, n_C_prev) = A_previous.shape
    
    n_H = int(1 + (n_H_prev - f) / stride)
    n_W = int(1 + (n_W_prev - f) / stride)
    n_C = n_C_prev

    A = cp.zeros((m, n_H, n_W, n_C))       

    i0 = cp.repeat(cp.arange(f), f)
    i1 = stride * cp.repeat(cp.arange(n_W), n_H)
    j0 = cp.tile(cp.arange(f), f)
    j1 = stride * cp.tile(cp.arange(n_H), n_W)
    i = cp.reshape(i0, (-1, 1))+cp.reshape(i1, (1, -1))
    j = cp.reshape(j0, (-1, 1))+cp.reshape(j1, (1, -1))
    R = A_previous[:, i, j, :]
    if mode == "max":
        pl = cp.max(R, axis=1)
    elif mode == "mean":
        pl = cp.mean(R, axis=1)
    A = cp.reshape(pl, (m, n_H, n_W, n_C))
    '''
    Naive reference implementation (unvectorized):
    for i in range(m):
        for h in range(n_H):                   
            vert_start = h*stride
            vert_end = h*stride+f
            
            for w in range(n_W):        
                horiz_start = w*stride
                horiz_end = w*stride+f
                
                for c in range (n_C):
                    a_prev_slice = A_previous[i, vert_start:vert_end, horiz_start:horiz_end, c]
                    if mode == "max":
                        A[i, h, w, c] = cp.max(a_prev_slice)
                    elif mode == "mean":
                        A[i, h, w, c] = cp.mean(a_prev_slice)
    '''
    return A
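A quick shape check of the formula 1 + (x - f) / stride quoted above, assuming forward_pool is in scope:

import cupy as cp

A_prev = cp.random.rand(2, 6, 6, 3)          # (examples, height, width, depth)
A = forward_pool(A_prev, stride=2, f=2, mode="max")
print(A.shape)                               # (2, 3, 3, 3): 1 + (6 - 2) / 2 = 3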
Example #9
 def test_select_choicelist_condlist_broadcast(self, xp, dtype):
     a = cupy.arange(10, dtype=dtype)
     b = cupy.arange(20, dtype=dtype).reshape(2, 10)
     condlist = [a < 4, b > 8]
     choicelist = [cupy.repeat(a, 2).reshape(2, 10), b]
     return cupy.select(condlist, choicelist)
Example #10
def calc_pgh(ispec, wavelengths, psfparams):
    '''
    Calculate the pixelated Gauss Hermite for all wavelengths of a single spectrum

    ispec : integer spectrum number
    wavelengths : array of wavelengths to evaluate
    psfparams : dictionary of PSF parameters returned by evalcoeffs

    returns pGHx, pGHy

    where pGHx[ghdeg+1, nwave, nbinsx] contains the pixel-integrated Gauss-Hermite polynomial
    for all degrees at all wavelengths across nbinsx bins spanning the PSF spot, and similarly
    for pGHy.  The core PSF will then be evaluated as

    PSFcore = sum_ij c_ij outer(pGHy[j], pGHx[i])
    '''

    #- shorthand
    p = psfparams

    #- spot size (ny,nx)
    nx = 2 * p['HSIZEX'] + 1
    ny = 2 * p['HSIZEY'] + 1
    nwave = len(wavelengths)
    #- convert to cupy arrays
    for k in ['X', 'Y', 'GHSIGX', 'GHSIGY']:
        p[k] = cp.asarray(p[k])

    #- x and y edges of bins that span the center of the PSF spot
    xedges = cp.repeat(cp.arange(nx + 1) - nx // 2 - 0.5,
                       nwave).reshape(nx + 1, nwave)
    yedges = cp.repeat(cp.arange(ny + 1) - ny // 2 - 0.5,
                       nwave).reshape(ny + 1, nwave)

    #- Shift to be relative to the PSF center and normalize
    #- by the PSF sigma (GHSIGX, GHSIGY).
    #- Note: x,y = 0,0 is center of pixel 0,0 not corner
    #- Dimensions: xedges[nx+1, nwave], yedges[ny+1, nwave]
    dx = (p['X'][ispec] + 0.5) % 1 - 0.5
    dy = (p['Y'][ispec] + 0.5) % 1 - 0.5
    xedges = ((xedges - dx) / p['GHSIGX'][ispec])
    yedges = ((yedges - dy) / p['GHSIGY'][ispec])

    #- Degree of the Gauss-Hermite polynomials
    ghdegx = p['GHDEGX']
    ghdegy = p['GHDEGY']

    #- Evaluate the Hermite polynomials at the pixel edges
    #- HVx[ghdegx+1, nwave, nx+1]
    #- HVy[ghdegy+1, nwave, ny+1]
    HVx = hermevander(xedges, ghdegx).T
    HVy = hermevander(yedges, ghdegy).T

    #- Evaluate the Gaussians at the pixel edges
    #- Gx[nwave, nx+1]
    #- Gy[nwave, ny+1]
    Gx = cp.exp(-0.5 * xedges**2).T / cp.sqrt(2. * cp.pi)
    Gy = cp.exp(-0.5 * yedges**2).T / cp.sqrt(2. * cp.pi)

    #- Combine into Gauss*Hermite
    GHx = HVx * Gx
    GHy = HVy * Gy

    #- Integrate over the pixels using the relationship
    #  Integral{ H_k(x) exp(-0.5 x^2) dx} = -H_{k-1}(x) exp(-0.5 x^2) + const

    #- pGHx[ghdegx+1, nwave, nx]
    #- pGHy[ghdegy+1, nwave, ny]
    pGHx = cp.zeros((ghdegx + 1, nwave, nx))
    pGHy = cp.zeros((ghdegy + 1, nwave, ny))
    pGHx[0] = 0.5 * cp.diff(cupyx.scipy.special.erf(xedges / cp.sqrt(2.)).T)
    pGHy[0] = 0.5 * cp.diff(cupyx.scipy.special.erf(yedges / cp.sqrt(2.)).T)
    pGHx[1:] = GHx[:ghdegx, :, 0:nx] - GHx[:ghdegx, :, 1:nx + 1]
    pGHy[1:] = GHy[:ghdegy, :, 0:ny] - GHy[:ghdegy, :, 1:ny + 1]

    return pGHx, pGHy
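The pixel integration above relies on the identity Integral{ H_k(x) exp(-0.5 x^2) dx } = -H_{k-1}(x) exp(-0.5 x^2) + const, where H_k is the probabilists' Hermite polynomial He_k. A small NumPy check of that identity on one interval (illustrative only; uses numpy.polynomial.hermite_e):

import numpy as np
from numpy.polynomial.hermite_e import hermeval

k = 3
lo, hi = -0.7, 1.2
x = np.linspace(lo, hi, 20001)

# Left side: numerical integral of He_k(x) * exp(-x^2 / 2) over [lo, hi].
ck = np.zeros(k + 1); ck[k] = 1.0
lhs = np.trapz(hermeval(x, ck) * np.exp(-0.5 * x**2), x)

# Right side: -He_{k-1}(x) * exp(-x^2 / 2) evaluated at the endpoints.
ckm1 = np.zeros(k); ckm1[k - 1] = 1.0
antideriv = lambda t: -hermeval(t, ckm1) * np.exp(-0.5 * t**2)
print(np.isclose(lhs, antideriv(hi) - antideriv(lo)))  # True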
Example #11
def MRVFLpredict(testX, testY, model):
    [n_sample, n_dims] = testX.shape

    w = model.w
    b = model.b
    beta = model.beta
    mu = model.mu
    sigma = model.sigma
    L = model.L
    sfi = model.sfi
    bi = model.bi
    TestingAccuracy = cp.zeros(L)

    A = []
    A_input = testX
    fs = []

    time_start = time.time()

    for i in range(L):
        A_ = cp.matmul(A_input, w[i])
        A_ = (A_ - mu[i]) / sigma[i]
        A_ = A_ + cp.repeat(b[i], n_sample, 0)
        A_ = selu(A_)  # Replace Relu to selu
        if i == 0:
            A_tmp = cp.concatenate([testX, A_, cp.ones((n_sample, 1))], axis=1)
        else:
            A_tmp = cp.concatenate(
                [testX, A_, sf_tmp, cp.ones((n_sample, 1))], axis=1)

        A.append(A_tmp)
        A_except_testX = A_tmp[:, n_dims:-1]
        A_ = A_except_testX[:, bi[i]]
        A_select = A_except_testX[:, sfi[i]]
        fs.append(A_select)

        sf_tmp = A_select
        #sf_tmp = cp.concatenate(fs, axis=1)

        ############ SETTING
        A_input = cp.concatenate([testX, sf_tmp, A_], axis=1)
        # A_input = cp.concatenate([testX, A_], axis=1)

        pred_result = cp.zeros((n_sample, i + 1))
        for j in range(i + 1):
            Ai = A[j]
            beta_temp = beta[j]
            predict_score = cp.matmul(Ai, beta_temp)
            predict_index = cp.argmax(predict_score, axis=1).ravel()
            # indx=indx.reshape(n_sample,1)
            pred_result[:, j] = predict_index
        TestingAccuracy_temp = majorityVoting(testY, pred_result)
        TestingAccuracy[i] = TestingAccuracy_temp
    '''
    pred_result = cp.zeros((n_sample, L))
    for i in range(L):
        Ai = A[i]
        beta_temp = beta[i]
        predict_score = cp.matmul(Ai, beta_temp)
        predict_index = cp.argmax(predict_score, axis=1).ravel()
        # indx=indx.reshape(n_sample,1)
        pred_result[:, i] = predict_index

    TestingAccuracy = majorityVoting(testY, predict_idx)
    '''
    time_end = time.time()

    Testing_time = time_end - time_start

    return TestingAccuracy, Testing_time
Example #12
def ELM1(X_train, X_test, y_train, y_test, ini, fin, inter, xp, repeticiones):

    # Fix the data proportion at 100%
    proporcion_datos = 1

    clases = int(max(y_train) + 1)
    n_train = int(X_train.shape[0] * proporcion_datos)
    n_test = int(X_test.shape[0] * proporcion_datos)
    neuronas = []
    acierto_medio = []
    tiempo_medio = []

    # Robustness
    if ini != fin:
        numero_neuronas = np.arange(ini, fin + 1, inter).astype('int')

    if ini == fin and inter == 0:
        numero_neuronas = []
        numero_neuronas.append(ini)

    for i in numero_neuronas:

        acierto = 0
        tiempo = 0
        neuronas.append(i)

        for j in range(repeticiones):
            ind_train = np.random.choice(X_train.shape[0],
                                         size=n_train,
                                         replace=False)
            ind_test = np.random.choice(X_test.shape[0],
                                        size=n_test,
                                        replace=False)

            X_train1 = X_train[ind_train, :]
            y_train1 = y_train[ind_train].astype('int')
            X_test1 = X_test[ind_test, :]
            y_test1 = y_test[ind_test].astype('int')

            neuronas_entrada = X_train1.shape[1]  # 784

            #---------------------GPU---------------------------------------------
            if (xp == 0):
                X_train1 = cp.asarray(X_train1)
                X_test1 = cp.asarray(X_test1)
                Y_train1 = cp.asarray(to_categorical(y_train1, clases))

                # Randomly generate the input weights and biases
                cp.cuda.runtime.deviceSynchronize()
                tiempo_inicial = time()
                # Input weights
                Win = cp.asarray(
                    (np.random.random([neuronas_entrada, i]) * 2. -
                     1.).astype('float32'))
                Bias = cp.asarray(np.random.random([1, i]) * 2. - 1.)
                temp_H = cp.dot(X_train1, Win)
                del X_train1
                # Tile the bias matrix to match the dimensions of H
                BiasMatrix = cp.repeat(Bias, temp_H.shape[0], axis=0)

                # Add the biases
                temp_H = temp_H + BiasMatrix

                # ReLU activation.
                H = cp.maximum(temp_H, 0, temp_H)
                del temp_H

                # Compute the output weights via the Moore-Penrose pseudoinverse
                if (cp.linalg.det(cp.dot(cp.transpose(H), H))) == 0:
                    Y_train1 = to_categorical(y_train1, clases)
                    H = cp.asnumpy(H)
                    Y_train1 = cp.asnumpy(Y_train1)

                    Wout = np.dot(np.linalg.pinv(H), Y_train1)
                    del Y_train1
                    tiempo_final = time()
                    del H
                    tiempo = tiempo + (tiempo_final - tiempo_inicial)

                    # Test set
                    X_test1 = cp.asnumpy(X_test1)
                    Win = cp.asnumpy(Win)
                    temp_H_test = np.dot(X_test1, Win)
                    del Win
                    del X_test1

                    # Tile the bias matrix so the dimensions match
                    BiasMatrix = np.repeat(Bias, temp_H_test.shape[0], axis=0)
                    del Bias
                    BiasMatrix = cp.asnumpy(BiasMatrix)
                    temp_H_test = temp_H_test + BiasMatrix
                    del BiasMatrix

                    H_test = np.maximum(temp_H_test, 0, temp_H_test)
                    del temp_H_test

                    # Test prediction
                    Y_pred_test = np.dot(H_test, Wout)
                    Y_pred_test = np.argmax(Y_pred_test, axis=1)
                    del H_test
                    del Wout

                    aciertos = np.sum(Y_pred_test == y_test1)
                    del Y_pred_test
                    acierto = acierto + aciertos / y_test1.size

                else:
                    X_test1 = cp.asarray(X_test1)
                    Y_train1 = cp.asarray(to_categorical(y_train1, clases))

                    Wout = cp.dot(
                        cp.dot(cp.linalg.inv(cp.dot(cp.transpose(H), H)),
                               cp.transpose(H)), Y_train1)
                    del Y_train1

                    cp.cuda.runtime.deviceSynchronize()
                    tiempo_final = time()
                    del H
                    tiempo = tiempo + (tiempo_final - tiempo_inicial)  # accumulate across repetitions, matching the other branches

                    # Test set
                    temp_H_test = cp.dot(X_test1, Win)
                    del Win
                    del X_test1

                    # Tile the bias matrix so the dimensions match
                    BiasMatrix = cp.repeat(Bias, temp_H_test.shape[0], axis=0)
                    del Bias
                    temp_H_test = temp_H_test + BiasMatrix
                    del BiasMatrix
                    H_test = cp.maximum(temp_H_test, 0, temp_H_test)
                    del temp_H_test

                    # Test prediction
                    Y_pred_test = cp.dot(H_test, Wout)
                    Y_pred_test = cp.asnumpy(Y_pred_test)
                    Y_pred_test = np.argmax(Y_pred_test, axis=1)
                    del H_test
                    del Wout
                    aciertos = np.sum(Y_pred_test == y_test1)
                    del Y_pred_test
                    acierto = acierto + aciertos / y_test1.size

        #---------------------CPU---------------------------------------------

            if (xp == 1):

                Y_train1 = (to_categorical(y_train1, clases))

                # Randomly generate the input weights and biases
                tiempo_inicial = time()
                # Input weights
                Win = ((np.random.random([neuronas_entrada, i]) * 2. -
                        1.).astype('float32'))
                Bias = (np.random.random([1, i]))
                temp_H = np.dot(X_train1, Win)

                # Tile the bias matrix to match the dimensions of H
                BiasMatrix = np.repeat(Bias, temp_H.shape[0], axis=0)

                # Add the biases
                temp_H = temp_H + BiasMatrix

                # ReLU activation.
                H = np.maximum(temp_H, 0, temp_H)

                # Compute the output weights via the Moore-Penrose pseudoinverse
                if (np.linalg.det(np.dot(np.transpose(H), H))) == 0:
                    Wout = np.dot(np.linalg.pinv(H), Y_train1)

                else:
                    Wout = np.dot(
                        np.dot(np.linalg.inv(np.dot(np.transpose(H), H)),
                               np.transpose(H)), Y_train1)

                del H
                tiempo_final = time()

                tiempo = tiempo + (tiempo_final - tiempo_inicial)

                # Test set
                temp_H_test = np.dot(X_test1, Win)

                # Tile the bias matrix so the dimensions match
                BiasMatrix = np.repeat(Bias, temp_H_test.shape[0], axis=0)

                temp_H_test = temp_H_test + BiasMatrix

                H_test = np.maximum(temp_H_test, 0, temp_H_test)

                # Test prediction
                Y_pred_test = np.dot(H_test, Wout)
                Y_pred_test = np.argmax(Y_pred_test, axis=1)
                aciertos = np.sum(Y_pred_test == y_test1)
                acierto = acierto + aciertos / y_test1.size

        acierto_medio.append(acierto / repeticiones)
        tiempo_medio.append(tiempo / repeticiones)

    return neuronas, acierto_medio, tiempo_medio
Example #13
def ELM(X_train, X_test, y_train, y_test, neuronas_ocultas, proporcion_datos,
        xp):

    # Infer the number of classes from the data
    clases = int(max(y_train) + 1)

    # Optionally use less than 100% of the available data to speed up computation
    n_train = int(X_train.shape[0] * proporcion_datos)
    n_test = int(X_test.shape[0] * proporcion_datos)
    ind_train = np.random.choice(X_train.shape[0], size=n_train, replace=False)
    ind_test = np.random.choice(X_test.shape[0], size=n_test, replace=False)

    # Define the datasets used for training
    global Y_test1, Y_pred_test
    X_train1 = X_train[ind_train, :]
    y_train1 = y_train[ind_train].astype('int')
    X_test1 = X_test[ind_test, :]
    y_test1 = y_test[ind_test].astype('int')

    # The input size is inferred from the number of columns of X_train1
    neuronas_entrada = X_train1.shape[1]

    #---------------------GPU---------------------------------------------
    if (xp == 0):

        # Randomly generate the input weights and biases
        cp.cuda.runtime.deviceSynchronize()
        tiempo_inicial = time()

        # Input weights and biases
        Win = cp.asarray(
            (np.random.random([neuronas_entrada, neuronas_ocultas]) * 2. -
             1.).astype('float32'))
        Bias = cp.asarray(np.random.random([1, neuronas_ocultas]) * 2 - 1)

        # Convert to the format required by CUDA (CuPy)
        X_train1 = cp.asarray(X_train1)

        # Compute a preliminary H
        temp_H = cp.dot(X_train1, Win)

        # Tile the bias matrix to match the dimensions of H
        BiasMatrix = cp.repeat(Bias, temp_H.shape[0], axis=0)

        # Add the biases to the preliminary H
        temp_H = temp_H + BiasMatrix

        # ReLU activation.
        H = cp.maximum(temp_H, 0, temp_H)

        # Compute the output weights via the Moore-Penrose pseudoinverse
        # Check that the determinant is nonzero
        if (cp.linalg.det(cp.dot(cp.transpose(H), H))) == 0:
            Y_train1 = to_categorical(y_train1, clases)
            H = cp.asnumpy(H)
            Y_train1 = cp.asnumpy(Y_train1)
            Wout = np.dot(np.linalg.pinv(H), Y_train1)
            del Y_train1
            tiempo_final = time()
            del H
            tiempo = tiempo_final - tiempo_inicial

            # Test set
            X_test1 = cp.asnumpy(X_test1)
            Win = cp.asnumpy(Win)
            temp_H_test = np.dot(X_test1, Win)
            del X_test1

            # Tile the bias matrix so the dimensions match
            BiasMatrix = np.repeat(Bias, temp_H_test.shape[0], axis=0)
            BiasMatrix = cp.asnumpy(BiasMatrix)
            temp_H_test = temp_H_test + BiasMatrix
            del BiasMatrix

            H_test = np.maximum(temp_H_test, 0, temp_H_test)
            del temp_H_test

            # Test prediction
            Y_pred_test = np.dot(H_test, Wout)
            Y_pred_test = np.argmax(Y_pred_test, axis=1)
            del H_test

            aciertos = np.sum(Y_pred_test == y_test1)

            precision_test = aciertos / y_test1.size

        else:
            X_test1 = cp.asarray(X_test1)
            Y_train1 = cp.asarray(to_categorical(y_train1, clases))

            Wout = cp.dot(
                cp.dot(cp.linalg.inv(cp.dot(cp.transpose(H), H)),
                       cp.transpose(H)), Y_train1)

            cp.cuda.runtime.deviceSynchronize()
            tiempo_final = time()

            tiempo = tiempo_final - tiempo_inicial

            # Test set
            temp_H_test = cp.dot(X_test1, Win)

            # Tile the bias matrix so the dimensions match
            BiasMatrix = cp.repeat(Bias, temp_H_test.shape[0], axis=0)

            # Add the biases
            temp_H_test = temp_H_test + BiasMatrix

            # ReLU activation
            H_test = cp.maximum(temp_H_test, 0, temp_H_test)

            # Test prediction
            Y_pred_test = cp.dot(H_test, Wout)
            Y_pred_test = cp.asnumpy(Y_pred_test)
            Y_pred_test = np.argmax(Y_pred_test, axis=1)
            aciertos = np.sum(Y_pred_test == y_test1)
            precision_test = aciertos / y_test1.size

    #---------------------CPU---------------------------------------------

    if (xp == 1):

        Y_train1 = (to_categorical(y_train1, clases))

        # Randomly generate the input weights and biases
        tiempo_inicial = time()

        # Input weights and biases
        Win = ((np.random.random([neuronas_entrada, neuronas_ocultas]) * 2. -
                1.).astype('float32'))
        Bias = (np.random.random([1, neuronas_ocultas]) * 2 -
                1).astype('float32')
        temp_H = np.dot(X_train1, Win).astype('float32')
        del X_train1

        # Tile the bias matrix to match the dimensions of H
        BiasMatrix = np.repeat(Bias, temp_H.shape[0], axis=0)

        # Add the biases
        temp_H = temp_H + BiasMatrix

        # ReLU activation.
        H = np.maximum(temp_H, 0, temp_H)
        del temp_H

        # Compute the output weights via the Moore-Penrose pseudoinverse
        # Check that the determinant is nonzero

        if (np.linalg.det(np.dot(np.transpose(H), H))) == 0:
            Wout = np.dot(np.linalg.pinv(H), Y_train1)

        else:
            Wout = np.dot(
                np.dot(np.linalg.inv(np.dot(np.transpose(H), H)),
                       np.transpose(H)), Y_train1)

        del Y_train1
        tiempo_final = time()
        del H

        tiempo = tiempo_final - tiempo_inicial

        # Test set

        temp_H_test = np.dot(X_test1, Win)
        del X_test1

        # Tile the bias matrix so the dimensions match
        BiasMatrix = np.repeat(Bias, temp_H_test.shape[0], axis=0)
        temp_H_test = temp_H_test + BiasMatrix
        del BiasMatrix

        H_test = np.maximum(temp_H_test, 0, temp_H_test)
        del temp_H_test

        # Test prediction
        Y_pred_test = np.dot(H_test, Wout)
        Y_pred_test = np.argmax(Y_pred_test, axis=1)
        del H_test

        aciertos = np.sum(Y_pred_test == y_test1)

        precision_test = aciertos / y_test1.size

    return neuronas_ocultas, precision_test, tiempo, Win, Wout, Bias, y_test1, Y_pred_test
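Both ELM variants above reduce to the same closed form: random input weights, a ReLU hidden layer, and output weights from the Moore-Penrose pseudoinverse. A compact CuPy sketch of that core computation on toy data (not the benchmarking harness above; note that broadcasting makes the explicit BiasMatrix tiling via cp.repeat unnecessary):

import cupy as cp

n, d, h, c = 100, 8, 32, 3                   # samples, features, hidden units, classes
X = cp.random.rand(n, d).astype(cp.float32)
Y = cp.eye(c, dtype=cp.float32)[cp.random.randint(0, c, n)]  # one-hot targets

Win = cp.random.rand(d, h).astype(cp.float32) * 2 - 1        # random input weights
Bias = cp.random.rand(1, h).astype(cp.float32) * 2 - 1
H = cp.maximum(cp.dot(X, Win) + Bias, 0)                     # ReLU hidden layer

Wout = cp.dot(cp.linalg.pinv(H), Y)                          # pseudoinverse solution
pred = cp.argmax(cp.dot(H, Wout), axis=1)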
Example #14
 def F(a, depth, minx=1):
     ax = a[:, cp.newaxis, :, :]
     return cp.concatenate(
         (cp.repeat(ax, depth - 2,
                    axis=1), cp.sqrt(ax + minx), cp.log(ax + minx + 1)),
         axis=1)
Example #15
 def NID(a, depth, minx=None):
     return cp.repeat(a[:, cp.newaxis, :, :] / depth**.5, depth, axis=1)
Example #16
 def ID(a, depth, minx=None):
     return cp.repeat(a[:, cp.newaxis, :, :], depth, axis=1)
Example #17
 def __init__(self,
              kernel: Callable,
              lower: Union[float, int],
              upper: Union[float, int],
              grid_size: int,
              observations: np.ndarray,
              sample_size: int,
              adjoint: bool = False,
              quadrature: str = 'rectangle',
              **kwargs):
     """
     Instance of Landweber solver for inverse problem in Poisson noise with integral operator.
     :param kernel: Kernel of the integral operator.
     :type kernel: Callable
     :param lower: Lower end of the interval on which the operator is defined.
     :type lower: float
     :param upper: Upper end of the interval on which the operator is defined.
     :type upper: float
      :param grid_size: Size of the grid used to approximate the operator.
     :type grid_size: int
     :param observations: Observations used for the estimation.
     :type observations: numpy.ndarray
     :param sample_size: Theoretical sample size (n).
     :type sample_size: int
     :param adjoint: Whether the operator is adjoint (True) or not (False).
     :type adjoint: boolean (default: False)
     :param quadrature: Type of quadrature used to approximate integrals.
      :type quadrature: str (default: rectangle)
      :param kwargs: Possible arguments:
          - max_iter: The maximum number of iterations of the algorithm (int, default: 100).
          - tau: Parameter used to rescale the obtained values of estimated noise level (float or int, default: 1).
          - initial_guess: Initial guess for the solution (cupy.ndarray, default: 0).
          - relaxation: Step size (omega) used in the iterations of the algorithm. The approximate squared norm
           of the operator is divided by the value of the relaxation parameter (float or int, default: 0.5).
     """
     Operator.__init__(self, kernel, lower, upper, grid_size, adjoint,
                       quadrature)
     EstimatorDiscretize.__init__(self, kernel, lower, upper, grid_size,
                                  observations, sample_size, quadrature)
     self.max_iter: int = kwargs.get('max_iter', 100)
     self.__tau: Union[float, int] = kwargs.get('tau', 1.)
     assert isinstance(self.__tau, (int, float)), 'tau must be a number'
     self.initial: cp.ndarray = kwargs.get(
         'initial_guess',
         cp.repeat(cp.array([0]), self.grid_size).astype(cp.float64))
     try:
         assert isinstance(self.initial, cp.ndarray)
     except AssertionError:
         warn(
             'Initial guess is not a cupy array, falling back to default value',
             RuntimeWarning)
         self.initial: cp.ndarray = cp.repeat(cp.array(
             [0]), self.grid_size).astype(cp.float64)
     self.previous: cp.ndarray = cp.empty(self.grid_size, dtype=cp.float64)
     self.current: cp.ndarray = cp.empty(self.grid_size, dtype=cp.float64)
     Operator.approximate(self)
     self.__KHK: cp.ndarray = self.__premultiplication(self.KH, self.K)
     self.__relaxation: Union[float, int] = kwargs.get('relaxation', 0.5)
     assert isinstance(
         self.__relaxation,
         (int, float)), 'Relaxation parameter must be a number'
     self.__relaxation = self.__relaxation / (np.max(
         np.linalg.svd(
             cp.asnumpy(self.KHK), compute_uv=False, hermitian=True)))
     EstimatorDiscretize.estimate_q(self)
     EstimatorDiscretize.estimate_delta(self)
     self.__grid: np.ndarray = getattr(super(), quadrature + '_grid')()
Example #18
def MRVFLtrain(trainX, trainY, option):
    fs_mode = 'INF'
    rand_seed = np.random.RandomState(2)

    [n_sample, n_dims] = trainX.shape
    N = option.N
    L = option.L
    C = option.C
    s = option.scale
    mode = option.mode
    ratio = option.ratio
    drop = option.drop

    TrainingAccuracy = cp.zeros(L)

    if mode == 'merged':
        drop_amount = int(cp.floor(drop * N))        # cp.int was removed in newer CuPy
        selected_amount = int(cp.floor(ratio * N))
        bi = []

    A = []
    beta = []
    weights = []
    biases = []
    mu = []
    sigma = []
    sfi = []
    fs = []

    A_input = trainX

    time_start = time.time()

    for i in range(L):

        if i == 0:
            w = s * 2 * cp.asarray(rand_seed.rand(n_dims, N)) - 1

        elif mode == 'merged':
            ######################### SETTING
            # w = s * 2 * cp.asarray(rand_seed.rand(n_dims - drop_amount + N, N)) - 1
            w = s * 2 * cp.asarray(
                rand_seed.rand(n_dims + selected_amount - drop_amount + N,
                               N)) - 1
            # w = s * 2 * cp.asarray(rand_seed.rand(n_dims + selected_amount*i - drop_amount + N, N)) - 1

        b = s * cp.asarray(rand_seed.rand(1, N))
        weights.append(w)
        biases.append(b)

        A_ = cp.matmul(A_input, w)  # A_ should be 100 at any loop
        # layer normalization
        A_mean = cp.mean(A_, axis=0)
        A_std = cp.std(A_, axis=0)
        A_ = (A_ - A_mean) / A_std
        mu.append(A_mean)
        sigma.append(A_std)

        A_ = A_ + cp.repeat(b, n_sample, 0)
        A_ = selu(A_)
        if i == 0:
            A_tmp = cp.concatenate(
                [trainX, A_, cp.ones((n_sample, 1))], axis=1)
        else:
            A_tmp = cp.concatenate(
                [trainX, sf, A_, cp.ones((n_sample, 1))], axis=1)
        beta_ = l2_weights(A_tmp, trainY, C, n_sample)

        if fs_mode == 'LASSO':
            significance = cp.linalg.norm(beta_, ord=1, axis=1)
            ranked_index = cp.argsort(significance[n_dims:-1])
        elif fs_mode == 'RIDGE':  # was a bare `if`, which broke the elif chain below
            significance = cp.linalg.norm(beta_, ord=2, axis=1)
            ranked_index = cp.argsort(significance[n_dims:-1])
        elif fs_mode == 'MI':
            at = cp.asnumpy(A_tmp[:, n_dims:-1])
            ty = cp.asnumpy(cp.asarray([cp.argmax(i) for i in trainY]))
            mis = mi(at, ty)
            # with joblib.parallel_backend('loky'):
            # mis = Parallel(10)(delayed(mi)(at[:, i].reshape(-1, 1), ty) for i in range(N))
            # ranked_index = cp.argsort(cp.asarray(mis).ravel())
            ranked_index = cp.argsort(cp.asarray(mis))  # mis is a NumPy array; move it to the GPU first
        elif fs_mode == 'INF':
            at = cp.asnumpy(A_tmp[:, n_dims:-1])
            rank, score = inf_fs(at)
            ranked_index = rank

        A.append(A_tmp)
        beta.append(beta_)

        # chosen features, reused in the next layers
        selected_index = ranked_index[:selected_amount]

        sfi.append(selected_index)
        left_amount = N - drop_amount
        left_index = ranked_index[:left_amount]
        A_except_trainX = A_tmp[:, n_dims:-1]
        A_selected = A_except_trainX[:, selected_index]
        fs.append(A_selected)
        A_ = A_except_trainX[:, left_index]

        ################### SETTING
        sf = A_selected
        # sf = cp.concatenate(fs, axis=1)

        ################### SETTING
        A_input = cp.concatenate([trainX, sf, A_], axis=1)
        # A_input = cp.concatenate([trainX,  A_], axis=1)

        bi.append(left_index)

        pred_result = cp.zeros((n_sample, i + 1))
        for j in range(i + 1):
            Ai = A[j]
            beta_temp = beta[j]
            predict_score = cp.matmul(Ai, beta_temp)
            predict_index = cp.argmax(predict_score, axis=1).ravel()
            # indx=indx.reshape(n_sample,1)
            pred_result[:, j] = predict_index
        TrainingAccuracy_temp = majorityVoting(trainY, pred_result)
        TrainingAccuracy[i] = TrainingAccuracy_temp
    '''    
    ## Calculate the training accuracy
    pred_result = cp.zeros((n_sample, L))
    for i in range(L):
        Ai = A[i]
        beta_temp = beta[i]
        predict_score = cp.matmul(Ai, beta_temp)
        predict_index = cp.argmax(predict_score, axis=1).ravel()
        # indx=indx.reshape(n_sample,1)
        pred_result[:, i] = predict_index
        
    TrainingAccuracy = majorityVoting(trainY, pred_result)
    '''

    time_end = time.time()
    Training_time = time_end - time_start

    model = mod(L, weights, biases, beta, mu, sigma, sfi, bi)

    return model, TrainingAccuracy, Training_time
Example #19
def batch_correct(adata,
                  batch_key,
                  layer="X",
                  depth_scale=1e3,
                  device="cpu",
                  inplace=True):
    """\
    batch correction of the count matrix.

    Code has been translated from pagoda2 R function setCountMatrix (plain model).

    Parameters
    ----------
    adata
        Annotated data matrix.
    batch_key
        Column name to use for batch.
    layer
        Which layer to correct
    depth_scale
        Depth scale.
    device
        Run method on either `cpu` or on `gpu`.
    inplace
        If True, write the corrected matrix back to `adata`; otherwise return it.
    Returns
    -------
    adata : anndata.AnnData
        if `inplace=False` it returns the corrected matrix, else it update field to `adata`:

        `.X`
            batch-corrected count matrix.

    """

    if layer == "X":
        X = adata.X.copy()
    else:
        X = adata.layers[layer].copy()
    logg.info("Performing pagoda2 batch correction", reset=True)
    if adata.obs[batch_key].dtype.name != "category":
        adata.obs[batch_key] = adata.obs[batch_key].astype("category")
    batches = adata.obs[batch_key].cat.categories
    nbatches = len(batches)

    if device == "cpu":
        depth = X.sum(axis=1)
        depth = np.array(depth).ravel()

        gene_av = (np.array(X.sum(axis=0)).ravel() +
                   len(batches)) / (depth.sum() + len(batches))
        tc = np.vstack(
            [X[adata.obs[batch_key] == b, :].sum(axis=0) for b in batches])
        tc = np.log(tc + 1) - np.log(
            np.array([
                depth[adata.obs[batch_key].values == b].sum() for b in batches
            ]) + 1).reshape(-1, 1)
        bc = np.exp(tc - np.log(gene_av.astype(np.float64)))
        bc = pd.DataFrame(np.transpose(bc), columns=batches)
        X = csr_matrix(X.transpose())

        batch = adata.obs[batch_key].cat.rename_categories(range(nbatches))
        count_gene = np.repeat(np.arange(X.shape[0]), np.diff(X.indptr))
        acc = np.transpose(np.vstack([count_gene, batch[X.indices].values]))
        X.data = X.data / bc.values[acc[:, 0], acc[:, 1]]
        logg.info("    depth scaling")
        X = X.transpose()
        d = depth / depth_scale
        X = X.multiply(1.0 / d[None, :].T)

    elif device == "gpu":
        import cupy as cp
        import cudf
        from cupyx.scipy.sparse import csr_matrix as csr_matrix_gpu

        X = csr_matrix_gpu(X)
        depth = X.sum(axis=1)
        depth = cp.array(depth).ravel()

        gene_av = (cp.array(X.sum(axis=0)).ravel() +
                   len(batches)) / (depth.sum() + len(batches))
        tc = cp.vstack(
            [X[adata.obs[batch_key] == b, :].sum(axis=0) for b in batches])
        tc = cp.log(tc + 1) - cp.log(
            cp.array([
                depth[adata.obs[batch_key].values == b].sum() for b in batches
            ]) + 1).reshape(-1, 1)
        bc = cp.exp(tc - cp.log(gene_av.astype(cp.float64)))
        bc = cudf.DataFrame(np.transpose(bc.get()), columns=batches)
        X = csr_matrix_gpu(X.transpose())

        batch = adata.obs[batch_key].cat.rename_categories(range(nbatches))
        count_gene = cp.repeat(cp.arange(X.shape[0]),
                               cp.diff(X.indptr).get().tolist())
        batch_to_stack = cp.array(batch.values[X.indices.get()])
        acc = cp.transpose(cp.vstack([count_gene, batch_to_stack]))
        X.data = X.data / bc.values[acc[:, 0], acc[:, 1]]
        X = X.transpose()
        logg.info("    depth scaling")
        d = depth / depth_scale
        X = X.multiply(1.0 / d[None, :].T)
        X = X.get()

    logg.info("    finished",
              time=True,
              end=" " if settings.verbosity > 2 else "\n")

    if inplace:
        if layer == "X":
            adata.X = csr_matrix(X)
            logg.hint("updated \n" "    .X, batch corrected matrix.")
        else:
            adata.layers[layer] = csr_matrix(X)
            logg.hint("updated \n"
                      "    .layer['" + layer + "'], batch corrected matrix.")

    else:
        return csr_matrix(X)
Example #20
def repmat(M, n1, n2=None):
    # Replicate M (m1, m2) into an array of shape (n1, m1, m2), or (n2, n1, m1, m2) if n2 is given
    N = cp.repeat(M[cp.newaxis, :, :], n1, axis=0)
    if n2 is not None:
        N = cp.repeat(N[cp.newaxis, :, :, :], n2, axis=0)
    return N
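Shape behavior of repmat (the replication axes are prepended, per the corrected comment above):

import cupy as cp

M = cp.arange(6).reshape(2, 3)
print(repmat(M, 4).shape)      # (4, 2, 3)
print(repmat(M, 4, 5).shape)   # (5, 4, 2, 3)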
Example #21
                G_kernel = G(X, Y, SW_ips)
                G_kernel = cp.asarray(G_kernel, dtype="float32")
                G_kernel = G_kernel / cp.amax(G_kernel)

                G_kernel = cp.fft.ifftshift(G_kernel)

                cp_structure_factor.real = G_kernel * cp_structure_factor.real
                cp_structure_factor.imag = G_kernel * cp_structure_factor.imag

                G_dens = cp.fft.ifftn(cp_structure_factor,
                                      axes=(1, 2),
                                      norm="ortho")

                threshold = SW_delta * cp.amax(cp.real(G_dens), axis=(1, 2))
                threshold_3D = cp.repeat(threshold, int(col) * int(row))
                threshold_3D = threshold_3D.reshape(sta_dens, int(row),
                                                    int(col))

                cp_sup = cp.where(
                    cp.real(G_dens) >= threshold_3D, float(1), float(0))

                cp_sup = cp_sup.astype(cp.float32)

                SW_ips = SW_ips * float(SW_ips_step)

                if (SW_sup_output_flag == 1):
                    cp_sup = cp.asnumpy(cp_sup)
                    with mrcfile.new(header + "_" + str(i + 1).zfill(6) +
                                     '_sup.mrc',
                                     overwrite=True) as mrc:
Example #22
    def __init__(
        self,
        dim_x,
        dim_z,
        dim_u=0,
        points=1,
        dtype=cp.float32,
    ):

        self.points = points

        if dim_x < 1:
            raise ValueError("dim_x must be 1 or greater")
        if dim_z < 1:
            raise ValueError("dim_z must be 1 or greater")
        if dim_u < 0:
            raise ValueError("dim_u must be 0 or greater")

        self.dim_x = dim_x
        self.dim_z = dim_z
        self.dim_u = dim_u

        # Create data arrays
        self.x = cp.zeros(
            (
                self.points,
                dim_x,
                1,
            ),
            dtype=dtype,
        )  # state

        self.P = cp.repeat(
            cp.identity(dim_x, dtype=dtype)[cp.newaxis, :, :],
            self.points,
            axis=0,
        )  # uncertainty covariance

        self.Q = cp.repeat(
            cp.identity(dim_x, dtype=dtype)[cp.newaxis, :, :],
            self.points,
            axis=0,
        )  # process uncertainty

        self.B = None  # control transition matrix

        self.F = cp.repeat(
            cp.identity(dim_x, dtype=dtype)[cp.newaxis, :, :],
            self.points,
            axis=0,
        )  # state transition matrix

        self.H = cp.zeros(
            (
                self.points,
                dim_z,
                dim_x,
            ),
            dtype=dtype,
        )  # Measurement function (maps state space to measurement space)

        self.R = cp.repeat(
            cp.identity(dim_z, dtype=dtype)[cp.newaxis, :, :],
            self.points,
            axis=0,
        )  # measurement uncertainty

        self._alpha_sq = cp.ones(
            (
                self.points,
                1,
                1,
            ),
            dtype=dtype,
        )  # fading memory control

        self.z = cp.empty(
            (
                self.points,
                dim_z,
                1,
            ),
            dtype=dtype,
        )

        # Allocate GPU resources
        numSM = _get_numSM()
        threads_z_axis = 16
        threadsperblock = (self.dim_x, self.dim_x, threads_z_axis)
        blockspergrid = (1, 1, numSM * 20)

        max_threads_per_block = self.dim_x * self.dim_x * threads_z_axis

        # Only need to populate cache once
        # At class initialization
        _filters_cuda._populate_kernel_cache(
            self.x.dtype,
            threads_z_axis,
            self.dim_x,
            self.dim_z,
            self.dim_u,
            max_threads_per_block,
        )

        # Retrieve kernel from cache
        self.predict_kernel = _filters_cuda._get_backend_kernel(
            self.x.dtype,
            blockspergrid,
            threadsperblock,
            "predict",
        )

        self.update_kernel = _filters_cuda._get_backend_kernel(
            self.x.dtype,
            blockspergrid,
            threadsperblock,
            "update",
        )

        _print_atts(self.predict_kernel)
        _print_atts(self.update_kernel)
Example #23
def forward_conv(A_previous, Filter, Bias, pad, stride, 
                 function = 'identity', verbose = False):
    '''
    A forward convolution step.
    Output shape: ((x - f + 2*pad) / stride) + 1

    Parameters
    ----------
    A_previous : cp.array(examples, height, width, depth)
        Input images from the previous layer.
    Filter : cp.array(f, f, depth, number of filter)
        Filter to convolve with the input image.
    Bias : cp.array(1, 1, 1, number of filter)
        Bias for each filter.
    pad : int
        Padding edge width.
    stride : int
        Stride number.

    Returns
    -------
    Z : cp.array(examples, ((h-f+2*pad)/stride)+1, ((w-f+2*pad)/stride)+1, number of filter)
        Output layer image.

    '''
    
    (m, n_H_prev, n_W_prev, n_C_prev) = A_previous.shape

    (f, f, n_C_prev, n_C) = Filter.shape
    
    mu = cp.mean(Filter)
    s = cp.std(Filter)
    Filter = (Filter-mu)/(s+1e-5)
    
    n_H = int(((n_H_prev-f+2*pad)/stride)+1)
    n_W = int(((n_W_prev-f+2*pad)/stride)+1)

    Z = cp.zeros([m, n_H, n_W, n_C])
    
    A_prev_pad = cp.pad(A_previous, ((0,0), (pad,pad), (pad,pad), (0,0),), mode='constant', constant_values = (0,0))

    i0 = cp.repeat(cp.arange(f), f)
    i1 = stride * cp.repeat(cp.arange(n_W), n_H)
    j0 = cp.tile(cp.arange(f), f)
    j1 = stride * cp.tile(cp.arange(n_H), n_W)
    i = cp.reshape(i0, (-1, 1))+cp.reshape(i1, (1, -1))
    j = cp.reshape(j0, (-1, 1))+cp.reshape(j1, (1, -1))
    k = cp.reshape(cp.repeat(cp.arange(n_C_prev), f**2), (-1, 1))
    Ztest = A_prev_pad[:, i, j, :]
    weights = cp.reshape(Filter, (f**2, n_C_prev, n_C))
    conV = cp.tensordot(weights, Ztest, ((0, 1), (1, 3)))
    Z = cp.reshape(cp.transpose(conV, (1, 2, 0)), (m, n_H, n_W, n_C)) + Bias
    Z = activation('forward', function, Z)
    if(verbose):
        print("Filter :")
        print(Filter)
        print("Weights :")
        print(weights)
        print("Z :")
        print(Ztest)
        print("Conv :")
        print(conV)
        print("Result :")
        print(Z)
    '''
    Naive reference implementation (unvectorized):
    for i in range(m):
        a_prev_pad = A_prev_pad[i, :, :, :]             
        for h in range(n_H):     
            vert_start = h*stride
            vert_end = h*stride+f
            
            for w in range(n_W):       
                horiz_start = w*stride
                horiz_end = w*stride+f
                
                a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
                for c in range(n_C):  
                    Z[i, h, w, c] = cp.squeeze(cp.sum(a_slice_prev*Filter[:, :, :, c])+Bias[:, :, :, c])
    '''               
    return Z
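A shape check of the formula ((x - f + 2*pad) / stride) + 1 quoted above, assuming forward_conv and the module's activation helper are in scope:

import cupy as cp

A_prev = cp.random.rand(2, 8, 8, 3)          # (examples, height, width, depth)
Filter = cp.random.rand(3, 3, 3, 16)         # (f, f, depth, number of filters)
Bias = cp.zeros((1, 1, 1, 16))
Z = forward_conv(A_prev, Filter, Bias, pad=1, stride=1)
print(Z.shape)                               # (2, 8, 8, 16): ((8 - 3 + 2) / 1) + 1 = 8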
Example #24
    def predict(self, u=None, B=None, F=None, Q=None):
        """
        Predict next state (prior) using the Kalman filter state propagation
        equations.

        Parameters
        ----------
        u : ndarray, default 0
            Optional control vector.

        B : array(points, dim_x, dim_u), or None
            Optional control transition matrix; a value of None
            will cause the filter to use `self.B`.

        F : array(points, dim_x, dim_x), or None
            Optional state transition matrix; a value of None
            will cause the filter to use `self.F`.

        Q : array(points, dim_x, dim_x), scalar, or None
            Optional process noise matrix; a value of None will cause the
            filter to use `self.Q`.

        """

        # B will be ignored until implemented
        if u is not None:
            raise NotImplementedError(
                "Control Matrix implementation in process")

        # if u is not None:
        #     u = cp.asarray(u)

        if B is None:
            B = self.B
        else:
            B = cp.asarray(B)

        if F is None:
            F = self.F
        else:
            F = cp.asarray(F)

        if Q is None:
            Q = self.Q
        elif cp.isscalar(Q):
            Q = cp.repeat(
                (cp.identity(self.dim_x, dtype=self.x.dtype) *
                 Q)[cp.newaxis, :, :],
                self.points,
                axis=0,
            )
        else:
            Q = cp.asarray(Q)

        self.predict_kernel(
            self._alpha_sq,
            self.x,
            u,
            B,
            F,
            self.P,
            Q,
        )
Example #25
def backward_conv(dZ, A_previous, Filter, Bias, pad, stride, function = 'identity'):
    '''
    A backward convolution step

    Parameters
    ----------
    dZ : cp.array(examples, ((h-f+2*pad)/stride)+1, ((w-f+2*pad)/stride)+1, number of filter)
        Cost derivative from the l+1 layer.
    A_previous : cp.array(examples, height, width, depth)
        Output image from the l-1 layer.
    Filter : cp.array(f, f, depth, number of filter)
        Convolutional filter.
    Bias : cp.array(1, 1, 1, number of filter)
        Bias respective to each filter.
    pad : int
        Padding parameter.
    stride : int
        Stride parameter.

    Returns
    -------
    dA : cp.array(examples, height, width, depth)
        Cost derivative from the current layer.
    dFilter : cp.array(f, f, depth, number of filter)
        Cost derivative from filter.
    dBias : cp.array(1, 1, 1, number of filter)
        Cost derivative from Bias.

    '''
    dZ = activation('backward', function, 1, dZ)
    
    (m, n_H_prev, n_W_prev, n_C_prev) = A_previous.shape
    
    (f, f, n_C_prev, n_C) = Filter.shape
    
    (m, n_H, n_W, n_C) = dZ.shape
    
    dA = cp.zeros((m, n_H_prev, n_W_prev, n_C_prev))                           
    dFilter = cp.zeros((f, f, n_C_prev, n_C))
    dBias = cp.zeros((1, 1, 1, n_C))
    dBias = cp.sum(dZ, axis=(0, 1, 2))
    
    A_prev_pad = cp.pad(A_previous, ((0,0), (pad,pad), (pad,pad), (0,0),), mode='constant', constant_values = (0,0))
    dA_prev_pad = cp.pad(dA, ((0,0), (pad,pad), (pad,pad), (0,0),), mode='constant', constant_values = (0,0))
    i0 = cp.repeat(cp.arange(f), f)
    i1 = stride * cp.repeat(cp.arange(n_W), n_H)
    j0 = cp.tile(cp.arange(f), f)
    j1 = stride * cp.tile(cp.arange(n_H), n_W)
    i = cp.reshape(i0, (-1, 1))+cp.reshape(i1, (1, -1))
    j = cp.reshape(j0, (-1, 1))+cp.reshape(j1, (1, -1))
    Ztest = A_prev_pad[:, i, j, :]
    dZtest = cp.reshape(dZ, (m, -1, n_C))
    dFiltertest = cp.tensordot(dZtest, cp.transpose(Ztest, (1, 0, 2, 3)), ((0, 1), (1, 2)))
    dFilter = cp.reshape(cp.transpose(dFiltertest, (1, 2, 0)), (f, f, n_C_prev, n_C))
    dZ = cp.reshape(cp.transpose(dZ, (3, 1, 2, 0)), (n_C, -1))
    weights = cp.reshape(cp.transpose(Filter, (3, 1, 2, 0)), (n_C, -1))
    dA_prev_pad = cp.dot(weights.T, dZ)
    strPad = "same"
    if(pad==0):
        strPad = "valid"
    dA = Utils.column_to_image(dA_prev_pad, (m, n_C_prev, n_H_prev, n_W_prev), (f, f), stride, strPad)
    
    '''
    Intuitive way (Really not optimized)
    for i in range(m):                     
        a_prev_pad = A_prev_pad[i, :, :, :]
        da_prev_pad = dA_prev_pad[i, :, :, :]
        
        for h in range(n_H):
            vert_start = h*stride
            vert_end = h*stride + f
            for w in range(n_W):
                horiz_start = w*stride
                horiz_end = w*stride + f
                a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
                for c in range(n_C):
                    da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += Filter[:,:,:,c] * dZ[i, h, w, c]
                    #dFilter[:,:,:,c] += a_slice * dZ[i, h, w, c]
                    #dBias[:,:,:,c] += dZ[i, h, w, c]
        dA[i, :, :, :] = da_prev_pad[pad:da_prev_pad.shape[0]-pad, pad:da_prev_pad.shape[1]-pad, :]
    '''
    return dA, dFilter, dBias
Example #26
    def transform(self, X):
        """Impute all missing values in X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data to complete.
        """
        check_is_fitted(self)

        output_type = get_input_type(X)
        X = self._validate_input(X, in_fit=False)
        X_indicator = super()._transform_indicator(X)

        statistics = self.statistics_

        if X.shape[1] != statistics.shape[0]:
            raise ValueError("X has %d features per sample, expected %d" %
                             (X.shape[1], self.statistics_.shape[0]))

        # Delete the invalid columns if strategy is not constant
        if self.strategy == "constant":
            valid_statistics = statistics
        else:
            # same as np.isnan but also works for object dtypes
            invalid_mask = _get_mask(statistics, np.nan)
            valid_mask = np.logical_not(invalid_mask)
            valid_statistics = statistics[valid_mask]
            valid_statistics_indexes = np.flatnonzero(valid_mask)

            if invalid_mask.any():
                missing = np.arange(X.shape[1])[invalid_mask]
                if self.verbose:
                    warnings.warn("Deleting features without "
                                  "observed values: %s" % missing)
                X = X[:, valid_statistics_indexes]

        # Do actual imputation
        if sparse.issparse(X):
            if self.missing_values == 0:
                raise ValueError("Imputation not possible when missing_values "
                                 "== 0 and input is sparse. Provide a dense "
                                 "array instead.")
            else:
                mask = _get_mask(X.data, self.missing_values)
                indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int64),
                                    np.diff(X.indptr).tolist())[mask]

                X.data[mask] = valid_statistics[indexes].astype(X.dtype,
                                                                copy=False)
        else:
            mask = _get_mask(X, self.missing_values)
            if self.strategy == "constant":
                X[mask] = valid_statistics[0]
            else:
                for i, vi in enumerate(valid_statistics_indexes):
                    feature_idxs = np.flatnonzero(mask[:, vi])
                    X[feature_idxs, vi] = valid_statistics[i]

        X = super()._concatenate_indicator(X, X_indicator)
        X = to_output_type(X, output_type)
        return X
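
For context, a minimal sketch of how this transform method is typically driven, assuming the class is cuML's SimpleImputer (the exact import path varies between cuML versions):

import cupy as cp
from cuml.preprocessing import SimpleImputer  # import path assumed

X = cp.asarray([[1.0, cp.nan], [4.0, 3.0], [cp.nan, 5.0]])
imp = SimpleImputer(strategy="mean").fit(X)
# transform() replaces each nan with its column mean:
# [[1.0, 4.0], [4.0, 3.0], [2.5, 5.0]]
print(imp.transform(X))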
def generate_chunk(i_chunk, local_size, num_chunks, chunk_type, frac_match,
                   gpu):
    # Setting a seed that triggers max amount of comm in the two-GPU case.
    if gpu:
        import cupy as xp

        import cudf as xdf
    else:
        import numpy as xp
        import pandas as xdf

    xp.random.seed(2**32 - 1)

    chunk_type = chunk_type or "build"
    frac_match = frac_match or 1.0
    if chunk_type == "build":
        # Build dataframe
        #
        # "key" column is a unique sample within [0, local_size * num_chunks)
        #
        # "shuffle" column is a random selection of partitions (used for shuffle)
        #
        # "payload" column is a random permutation of the chunk_size

        start = local_size * i_chunk
        stop = start + local_size

        parts_array = xp.arange(num_chunks, dtype="int64")
        shuffle_array = xp.repeat(parts_array,
                                  math.ceil(local_size / num_chunks))

        df = xdf.DataFrame({
            "key":
            xp.arange(start, stop=stop, dtype="int64"),
            "shuffle":
            xp.random.permutation(shuffle_array)[:local_size],
            "payload":
            xp.random.permutation(xp.arange(local_size, dtype="int64")),
        })
    else:
        # Other dataframe
        #
        # "key" column matches values from the build dataframe
        # for a fraction (`frac_match`) of the entries. The matching
        # entries are perfectly balanced across each partition of the
        # "base" dataframe.
        #
        # "payload" column is a random permutation of the chunk_size

        # Step 1. Choose values that DO match
        sub_local_size = local_size // num_chunks
        sub_local_size_use = max(int(sub_local_size * frac_match), 1)
        arrays = []
        for i in range(num_chunks):
            bgn = (local_size * i) + (sub_local_size * i_chunk)
            end = bgn + sub_local_size
            ar = xp.arange(bgn, stop=end, dtype="int64")
            arrays.append(xp.random.permutation(ar)[:sub_local_size_use])
        key_array_match = xp.concatenate(tuple(arrays), axis=0)

        # Step 2. Add values that DON'T match
        missing_size = local_size - key_array_match.shape[0]
        start = local_size * num_chunks + local_size * i_chunk
        stop = start + missing_size
        key_array_no_match = xp.arange(start, stop=stop, dtype="int64")

        # Step 3. Combine and create the final dataframe chunk (dask_cudf partition)
        key_array_combine = xp.concatenate(
            (key_array_match, key_array_no_match), axis=0)
        df = xdf.DataFrame({
            "key":
            xp.random.permutation(key_array_combine),
            "payload":
            xp.random.permutation(xp.arange(local_size, dtype="int64")),
        })
    return df
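
A small CPU-side run of the helper above; gpu=False selects the numpy/pandas path, and math is assumed to be imported at module level since the function relies on math.ceil:

df = generate_chunk(i_chunk=0, local_size=8, num_chunks=2,
                    chunk_type="build", frac_match=1.0, gpu=False)
# 8 rows with a unique "key", a "shuffle" partition id, and a permuted "payload"
print(df)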
Example #28
def cuda(y,
         t,
         freq_centre,
         half_width,
         resolution,
         chunk_size=100,
         dtype=None,
         silent=False):
    """
    Calculate the power spectrum using matrix multiplication with cupy (CUDA numpy)
    """
    # # Uses cupy (cuda numpy) instead of numpy # #
    import cupy as cu
    if dtype is None:
        dtype = cu.double

    pi = math.pi
    t1 = tm.time()

    # # # # Prepare input for use # # # #
    # Convert input to cupy array (if not already)
    y = cu.asarray(y)
    t = cu.asarray(t)

    # Change to angular frequencies (assumes input is in cyclic frequencies)
    freq_centre[:] = [x * (2 * pi) for x in freq_centre]
    half_width = half_width * 2 * pi
    resolution = resolution * 2 * pi

    # Data mean subtraction, to reduce potentially constant elements
    y_mean = cu.mean(y, dtype=dtype)
    y = y - y_mean
    t = t - cu.min(t)  # To get first element in t to be 0

    # # # # Prepare for for-loop # # # #
    # Preload results list
    results = [None] * len(freq_centre)

    # Set amount of steps (might be subject to rounding error)
    step_amnt = int((2 * half_width) / resolution)

    # # # # Run for loop for all frequencies indicated by the list freq_centre # # # #
    for k in range(0, len(freq_centre)):
        freq_centre_current = freq_centre[k]
        # Create frequency steps
        freq = cu.linspace(freq_centre_current - half_width,
                           freq_centre_current + half_width,
                           step_amnt,
                           dtype=dtype)
        # Reshape freq in order to do matrix multiplication
        freq = cu.reshape(freq, (len(freq), 1))
        lent = len(t)

        # # Prepare to calculate sine and cosine function parts of power spectrum estimate # #
        # These are host-side (NumPy) accumulators; each chunk of GPU results
        # is copied back into them via .get() in the loop below.
        sin = np.zeros(step_amnt)
        cos = np.zeros(step_amnt)
        sin2 = np.zeros(step_amnt)
        cos2 = np.zeros(step_amnt)
        sincos = np.zeros(step_amnt)

        freq = cu.ascontiguousarray(freq)
        t = cu.ascontiguousarray(t)
        # Recurrence sine and cosine difference product
        s_diff = cu.sin(resolution * t, dtype=dtype)
        c_diff = cu.cos(resolution * t, dtype=dtype)

        # # # # # Calculation matrices # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # use [c0, s0][c_diff, s_diff; -s_diff, c_diff]  (inverted in calc for .T)         #
        # Recurrence based on s_m = c_(m-1)*sin(deltaf * t) + s_(m-1)*cos(deltaf * t) and  #
        # c_m = c_(m-1)*cos(deltaf * t) - s_(m-1)*sin(deltaf * t) from T. Ponman 1981      #
        # # # # # # # # # #  # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

        # # Prepare linear operation matrix with initial operation # #
        calc_base = cu.array([[c_diff, -s_diff], [s_diff, c_diff]]).T
        calc_mat = cu.zeros((chunk_size, lent, 2, 2))
        calc_mat[0, :, :, :] = calc_base
        # # Generate linear operation matrices for recurrence multiplication # #
        for i in range(1, chunk_size):
            calc_mat[i, :, :, :] = cu.matmul(calc_mat[i - 1, :, :, :],
                                             calc_base)

        # Convert large matrix arrays to contiguous arrays
        calc_mat = cu.ascontiguousarray(calc_mat)

        # # Calculate sine and cosine function parts of power spectrum estimation # #
        for i in range(0, step_amnt, chunk_size):
            end = i + chunk_size
            if end > step_amnt:
                # Final chunk is shorter: trim the operation matrix directly on
                # the GPU instead of round-tripping through NumPy with np.delete.
                dif = end - step_amnt
                calc_mat = cu.ascontiguousarray(calc_mat[:chunk_size - dif])
                end = step_amnt
                chunk_size = end - i

            if not silent:
                print('Current chunk ', i, ':', end, ' of ', step_amnt)
            # Original point calculation
            s0 = cu.sin(freq[i] * t, dtype=dtype)
            c0 = cu.cos(freq[i] * t, dtype=dtype)
            # Sine/cosine vector initialization (for matmul calculation, see c0, s0 before loop)
            trig_vec = cu.zeros((1, lent, 1, 2))
            trig_vec[0, :, 0, 0] = c0
            trig_vec[0, :, 0, 1] = s0
            trig_vec = cu.repeat(trig_vec, chunk_size, axis=0)
            # Matrix calculations
            matrix_result = cu.matmul(trig_vec, calc_mat)
            sin_temp = matrix_result[:, :, 0, 1]
            cos_temp = matrix_result[:, :, 0, 0]

            # # Sum and save results # #
            sin[i:end] = cu.sum(y * sin_temp, 1).get()
            cos[i:end] = cu.sum(y * cos_temp, 1).get()
            sin2[i:end] = cu.sum(sin_temp**2, 1).get()
            cos2[i:end] = cu.sum(cos_temp**2, 1).get()
            sincos[i:end] = cu.sum(sin_temp * cos_temp, 1).get()

        # # # # Calculate alpha and beta components of spectrum, and from them, power of spectrum # # # #
        alpha = (sin * cos2 - cos * sincos) / (sin2 * cos2 - sincos**2)
        beta = (cos * sin2 - sin * sincos) / (sin2 * cos2 - sincos**2)
        power = alpha**2 + beta**2

        # # # # Last loop steps # # # #
        # Convert frequency back to cyclic units
        freq = freq.get() / (2 * pi)
        # Save data in results
        results[k] = [freq, power, alpha, beta]
    t2 = tm.time()
    # Change freq_centre back to cyclic frequencies
    freq_centre[:] = [x / (2 * pi) for x in freq_centre]
    if not silent:
        print('Total time elapsed create_pspectrum_cuda: ', t2 - t1,
              ' seconds')
    return results
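
An end-to-end sanity check with a made-up signal: a single 0.1 Hz sine should produce a power peak near 0.1 in the returned cyclic frequencies (all parameter values below are illustrative):

import numpy as np

t = np.linspace(0, 1000, 5000)            # 1000 s of evenly sampled data
y = np.sin(2 * np.pi * 0.1 * t)           # pure 0.1 Hz tone
results = cuda(y, t, freq_centre=[0.1], half_width=0.05,
               resolution=1e-4, chunk_size=50, silent=True)
freq, power, alpha, beta = results[0]
print(freq[np.argmax(power)])             # expected to be close to 0.1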
Example #29
def _var_network(graph,
                 add_noise=True,
                 inno_cov=None,
                 invert_inno=False,
                 T=100,
                 initial_values=None):
    """Returns a vector-autoregressive process with correlated innovations.

    Useful for testing.

    Example:
        graph=numpy.array([[[0.2,0.,0.],[0.5,0.,0.]],
                           [[0.,0.1,0. ],[0.3,0.,0.]]])

        represents a process

        X_1(t) = 0.2 X_1(t-1) + 0.5 X_2(t-1) + eps_1(t)
        X_2(t) = 0.3 X_2(t-1) + 0.1 X_1(t-2) + eps_2(t)

        with inv_inno_cov being the negative (except for diagonal) inverse
        covariance matrix of (eps_1(t), eps_2(t)) OR inno_cov being
        the covariance. Initial values can also be provided.


    Parameters
    ----------
    graph : array
        Lagged connectivity matrices. Shape is (n_nodes, n_nodes, max_delay+1)

    add_noise : bool, optional (default: True)
        Flag to add random noise or not

    inno_cov : array, optional (default: None)
        Covariance matrix of innovations.

    invert_inno : bool, optional (default: False)
        Flag to negate off-diagonal elements of inno_cov and invert it before
        using it as the covariance matrix of innovations

    T : int, optional (default: 100)
        Sample size.

    initial_values : array, optional (default: None)
        Initial values for each node. Shape is (n_nodes, max_delay+1), i.e. must
        be of shape (graph.shape[1], graph.shape[2]).

    Returns
    -------
    X : array
        Array of realizations, shape (T, n_nodes).
    """
    n_nodes, _, period = graph.shape

    time = T
    # Test stability
    _check_stability(graph)

    # Generate the returned data
    data = np.random.randn(n_nodes, time)
    # Load the initial values
    if initial_values is not None:
        # Check the shape of the initial values
        _check_initial_values(initial_values, data[:, :period].shape)
        # Input the initial values
        data[:, :period] = initial_values

    # Check if we are adding noise
    noise = None
    if add_noise:
        # Use inno_cov if it was provided
        if inno_cov is not None:
            noise = _generate_noise(inno_cov,
                                    time=time,
                                    use_inverse=invert_inno)
        # Otherwise just use uncorrelated random noise
        else:
            noise = np.random.randn(time, n_nodes)

    for a_time in range(period, time):
        data_past = np.repeat(data[:, a_time - period:a_time][:, ::-1].reshape(
            1, n_nodes, period),
                              n_nodes,
                              axis=0)
        data[:, a_time] = (data_past * graph).sum(axis=2).sum(axis=1)
        if add_noise:
            data[:, a_time] += noise[a_time]

    return data.transpose()
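
Using the docstring's own two-node graph, a short realization can be generated as below; this assumes the private helpers _check_stability and _generate_noise are available in the same module:

import numpy as np

graph = np.array([[[0.2, 0.0, 0.0], [0.5, 0.0, 0.0]],
                  [[0.0, 0.1, 0.0], [0.3, 0.0, 0.0]]])
X = _var_network(graph, add_noise=True, T=500)
print(X.shape)  # (500, 2): T samples of the 2-node process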
Example #30
def findLocalHomography_SVD_CUDA(src_pts, dst_pts, grids_coordi):
    '''
    Calculate a location-dependent homography matrix for each grid cell.
    src_pts : paired key points' x, y coordinates in the target image; an Nx2 matrix, where N is the number of matching pairs
    dst_pts : paired key points' x, y coordinates in the reference image; an Nx2 matrix, where N is the number of matching pairs
    grids_coordi : x, y coordinates of the grid centres; each grid's weights are computed from the distance of src_pts to its coordinate
    '''
    print(grids_coordi.shape)

    src_pts_unnormalized = np.copy(src_pts)
    src_pts_unnormalized = cp.asarray(src_pts_unnormalized)
    grids_coordi = cp.asarray(grids_coordi)
    grid_num = len(grids_coordi)
    src_mean = np.mean(src_pts, axis=0)
    src_std = max(np.std(src_pts, axis=0))
    C1 = np.array([[1 / src_std, 0, -src_mean[0] / src_std],
                   [0, 1 / src_std, -src_mean[1] / src_std], [0, 0, 1]])
    src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
    src_pts = src_pts @ C1.T

    dst_mean = np.mean(dst_pts, axis=0)
    dst_std = max(np.std(dst_pts, axis=0))
    C2 = np.array([[1 / dst_std, 0, -dst_mean[0] / dst_std],
                   [0, 1 / dst_std, -dst_mean[1] / dst_std], [0, 0, 1]])
    dst_pts = np.hstack((dst_pts, np.ones((len(dst_pts), 1))))
    dst_pts = dst_pts @ C2.T

    A = np.zeros((2 * len(src_pts), 9))
    for i in range(len(src_pts)):
        x1, y1, _ = src_pts[i]
        x2, y2, _ = dst_pts[i]
        A[i * 2, :] = [0, 0, 0, -x1, -y1, -1, y2 * x1, y2 * y1, y2]
        A[i * 2 + 1, :] = [x1, y1, 1, 0, 0, 0, -x2 * x1, -x2 * y1, -x2]

    local_Hs = np.zeros((grid_num, 3, 3))
    A = cp.asarray(A)
    scale_factor = 20
    src_pts = cp.asarray(src_pts)
    matchNum = src_pts.shape[0]

    u, s, v = cp.linalg.svd(A)
    H = v[-1, :].reshape((3, 3))
    H = cp.asnumpy(H)
    H = np.linalg.inv(C2) @ H @ C1
    H = H / H[-1, -1]
    H_unweighted = H.copy()  # global, unweighted H kept as a fallback for skipped grids
    skip = 0
    unskip_grids = []
    for idx in range(grid_num):
        print(
            f'SVD {idx:8d}/{grid_num}({idx/grid_num*100:8.1f}%)  Current skip {skip} times. Current Skip rate is {skip/grid_num:5.3%}',
            end='\r')
        grid_coordi = grids_coordi[idx]

        weight = cp.exp((-1) * cp.sum(
            (src_pts_unnormalized - grid_coordi)**2, axis=1) / scale_factor**2)

        if cp.amax(weight) < 0.025:
            skip += 1
            local_Hs[idx, :, :] = H_unweighted
            continue
        unskip_grids.append(grid_coordi)
        weight = cp.repeat(weight, 2)
        weight[weight < 0.025] = 0.025
        weight = weight.reshape((2 * matchNum, 1))
        weighted_A = cp.multiply(weight, A)
        u, s, v = cp.linalg.svd(weighted_A)
        H = v[-1, :].reshape((3, 3))
        H = cp.asnumpy(H)
        H = np.linalg.inv(C2) @ H @ C1
        H = H / H[-1, -1]
        local_Hs[idx, :, :] = H
    print()
    print(f'Skip {skip} times. Skip rate is {skip/grid_num:5.3%}')

    return local_Hs, unskip_grids
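
A synthetic check for the function above: when every match is a pure translation, each local homography should come out close to that translation (point counts and grid spacing below are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
src = rng.uniform(0, 100, size=(40, 2))
dst = src + np.array([5.0, 3.0])   # ground truth: translate by (5, 3)
xs, ys = np.meshgrid(np.arange(0, 100, 25), np.arange(0, 100, 25))
grids = np.stack([xs.ravel(), ys.ravel()], axis=1)
local_Hs, used = findLocalHomography_SVD_CUDA(src, dst, grids)
print(local_Hs.shape)              # (16, 3, 3)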
Example #31
    def partial_fit(self, X, y=None, check_input=True) -> "IncrementalPCA":
        """
        Incremental fit with X. All of X is processed as a single batch.

        Parameters
        ----------

        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        check_input : bool
            Run check_array on X.
        y : Ignored

        Returns
        -------

        self : object
            Returns the instance itself.

        """
        if check_input:
            if scipy.sparse.issparse(X) or cupyx.scipy.sparse.issparse(X):
                raise TypeError(
                    "IncrementalPCA.partial_fit does not support "
                    "sparse input. Either convert data to dense "
                    "or use IncrementalPCA.fit to do so in batches.")

            self._set_output_type(X)

            X, n_samples, n_features, self.dtype = \
                input_to_cupy_array(X, order='K',
                                    check_dtype=[cp.float32, cp.float64])
        else:
            n_samples, n_features = X.shape

        if not hasattr(self, 'components_'):
            self.components_ = None

        if self.n_components is None:
            if self.components_ is None:
                self.n_components_ = min(n_samples, n_features)
            else:
                self.n_components_ = self.components_.shape[0]
        elif not 1 <= self.n_components <= n_features:
            raise ValueError("n_components=%r invalid for n_features=%d, need "
                             "more rows than columns for IncrementalPCA "
                             "processing" % (self.n_components, n_features))
        elif not self.n_components <= n_samples:
            raise ValueError("n_components=%r must be less or equal to "
                             "the batch number of samples "
                             "%d." % (self.n_components, n_samples))
        else:
            self.n_components_ = self.n_components

        if (self.components_ is not None) and (self.components_.shape[0] !=
                                               self.n_components_):
            raise ValueError("Number of input features has changed from %i "
                             "to %i between calls to partial_fit! Try "
                             "setting n_components to a fixed value." %
                             (self.components_.shape[0], self.n_components_))
        # This is the first partial_fit
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = 0
            self.mean_ = .0
            self.var_ = .0

        # Update stats - they are 0 if this is the first step
        col_mean, col_var, n_total_samples = \
            _incremental_mean_and_var(
                X, last_mean=self.mean_, last_variance=self.var_,
                last_sample_count=cp.repeat(cp.asarray([self.n_samples_seen_]),
                                            X.shape[1]))
        n_total_samples = n_total_samples[0]

        # Whitening
        if self.n_samples_seen_ == 0:
            # If it is the first step, simply whiten X
            X = X - col_mean
        else:
            col_batch_mean = cp.mean(X, axis=0)
            X = X - col_batch_mean
            # Build matrix of combined previous basis and new data
            mean_correction = \
                cp.sqrt((self.n_samples_seen_ * n_samples) /
                        n_total_samples) * (self.mean_ - col_batch_mean)
            X = cp.vstack((self.singular_values_.reshape(
                (-1, 1)) * self.components_, X, mean_correction))

        U, S, V = cp.linalg.svd(X, full_matrices=False)
        U, V = _svd_flip(U, V, u_based_decision=False)
        explained_variance = S**2 / (n_total_samples - 1)
        explained_variance_ratio = S**2 / cp.sum(col_var * n_total_samples)

        self.n_rows = n_total_samples
        self.n_samples_seen_ = n_total_samples
        self.components_ = V[:self.n_components_]
        self.singular_values_ = S[:self.n_components_]
        self.mean_ = col_mean
        self.var_ = col_var
        self.explained_variance_ = explained_variance[:self.n_components_]
        self.explained_variance_ratio_ = \
            explained_variance_ratio[:self.n_components_]
        if self.n_components_ < n_features:
            self.noise_variance_ = \
                explained_variance[self.n_components_:].mean()
        else:
            self.noise_variance_ = 0.

        return self
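
A minimal sketch of batched fitting with this method, assuming cuML's IncrementalPCA (the import path may differ across versions):

import cupy as cp
from cuml.decomposition import IncrementalPCA  # import path assumed

X = cp.random.rand(200, 10)
ipca = IncrementalPCA(n_components=3)
for batch in (X[:100], X[100:]):   # two partial_fit calls over one dataset
    ipca.partial_fit(batch)
print(ipca.explained_variance_ratio_)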
Example #32
    def test_repeat_failure(self):
        # A negative repeat count is invalid; pytest (assumed imported in this
        # test module) verifies that cupy raises ValueError.
        x = testing.shaped_arange((2, 3, 4))
        with pytest.raises(ValueError):
            cupy.repeat(x, -3)