Example #1
    def cross_validation_error(self):
        error_per_lambda = xp.zeros(len(self._lambda_vals))

        for i, lambduh in enumerate(self._lambda_vals):
            if self._logging:
                print('lambduh = {} ({} of {})'.format(
                    lambduh, i + 1, len(self._lambda_vals)))
            error_per_fold = xp.zeros(self._n_folds)
            for j in range(self._n_folds):
                fold_size = int(self._n / self._n_folds)
                indices = xp.arange(self._n)
                fold_indices = ((indices >= fold_size * j) &
                                (indices < fold_size * (j + 1)))
                x_train = self._x[fold_indices]
                y_train = self._y[fold_indices]
                x_test = self._x[~fold_indices]
                y_test = self._y[~fold_indices]

                # Flatten targets to 1-D before fitting
                y_train = xp.ravel(y_train)
                y_test = xp.ravel(y_test)

                svm = SVM(self._kernel, self._kernel_params, lambduh,
                          self._max_iter, self._classification_strategy)
                svm.fit(x_train, y_train)
                error_per_fold[j] = svm.compute_misclassification_error(
                    x_test, y_test)

            error_per_lambda[i] = xp.mean(error_per_fold)

        return error_per_lambda.tolist()
Example #2
def append(arr, values, axis=None):
    # this code is basically copied from numpy/lib/function_base.py's append
    arr = array(arr)
    if axis is None:
        if ndim(arr) != 1:
            arr = _cp.ravel(arr)
        values = _cp.ravel(array(values))
        axis = ndim(arr) - 1
    return concatenate((arr, values), axis=axis)
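
For reference, this mirrors numpy.append: with axis=None both inputs are raveled before concatenation. A minimal usage sketch (hypothetical arrays; assumes the public cupy.append, which exposes the same behavior):

import cupy as cp

a = cp.array([[1, 2], [3, 4]])
print(cp.append(a, cp.array([5, 6])))            # axis=None: raveled -> [1 2 3 4 5 6]
print(cp.append(a, cp.array([[5, 6]]), axis=0))  # plain concatenate along axis 0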
Example #3
    def getConnectionMatrix(self) -> csr_matrix:
        # Move the neighbor distance (D) and index (I) tensors to CuPy via DLPack
        distances = cupy.ravel(cupy.fromDlpack(self.D.to_dlpack()))
        indices = cupy.ravel(cupy.fromDlpack(self.I.to_dlpack()))
        n_samples = indices.shape[0]
        n_nonzero = n_samples * self.nneighbors
        rowptr = cupy.arange(0, n_nonzero + 1, self.nneighbors)
        knn_graph = cupyx.scipy.sparse.csr_matrix((distances, indices, rowptr),
                                                  shape=(n_samples, n_samples))
        print(f"Completed KNN, sparse graph shape = {knn_graph.shape}")
        return knn_graph
Example #4
def inject_error(weight, mask0, mask1, num_bits=32):
    if num_bits == 32:
        dtype = cp.uint32
        ftype = cp.float32
    else:
        raise ValueError('inject_error only supports num_bits=32')
    shape = weight.shape
    weight_flatten = cp.ravel(weight).view(dtype)
    mask0, mask0_bit = mask0
    mask1, mask1_bit = mask1
    zero = cp.zeros(1, dtype=dtype)

    if len(mask0) != 0 or len(mask1) != 0:
        for b in range(num_bits):
            fault = cp.full(weight_flatten.size, 2**b, dtype=dtype)
            bit_loc0 = cp.where(mask0_bit == b, mask0, zero).nonzero()[0]
            bit_loc1 = cp.where(mask1_bit == b, mask1, zero).nonzero()[0]
            uniform0 = cp.zeros(weight_flatten.size, dtype=dtype)
            uniform1 = cp.zeros(weight_flatten.size, dtype=dtype)
            # Inject bit error
            if len(bit_loc0) > 0:
                cp.put(uniform0, mask0[bit_loc0], fault)
            if len(bit_loc1) > 0:
                cp.put(uniform1, mask1[bit_loc1], fault)
            # Stuck at 0: clear the faulty bits
            weight_flatten = cp.bitwise_and(weight_flatten, cp.invert(uniform0))
            # Stuck at 1: set the faulty bits
            weight_flatten = cp.bitwise_or(weight_flatten, uniform1)
        weight_float = weight_flatten.view(ftype)
        return cp.reshape(weight_float, shape)
    else:
        return weight
Example #5
def _build_laplacian(data, spacing, mask, beta, multichannel):
    l_x, l_y, l_z = data.shape[:3]
    edges = _make_graph_edges_3d(l_x, l_y, l_z)
    weights = _compute_weights_3d(data, spacing, beta=beta, eps=1.e-10,
                                  multichannel=multichannel)
    assert weights.dtype == data.dtype
    if mask is not None:
        # Remove edges of the graph connected to masked nodes, as well
        # as corresponding weights of the edges.
        mask0 = cp.concatenate([mask[..., :-1].ravel(), mask[:, :-1].ravel(),
                                mask[:-1].ravel()])
        mask1 = cp.concatenate([mask[..., 1:].ravel(), mask[:, 1:].ravel(),
                                mask[1:].ravel()])
        ind_mask = cp.logical_and(mask0, mask1)
        edges, weights = edges[:, ind_mask], weights[ind_mask]

        # Reassign edges labels to 0, 1, ... edges_number - 1
        _, inv_idx = cp.unique(edges, return_inverse=True)
        edges = inv_idx.reshape(edges.shape)

    # Build the sparse linear system
    pixel_nb = l_x * l_y * l_z
    i_indices = edges.ravel()
    j_indices = edges[::-1].ravel()
    data = cp.concatenate((weights, weights))
    lap = sparse.coo_matrix((data, (i_indices, j_indices)),
                            shape=(pixel_nb, pixel_nb))
    # need CSR instead of COO for indexing used later in _build_linear_system
    lap = lap.tocsr()
    lap.setdiag(-cp.ravel(lap.sum(axis=0)))
    return lap
Example #6
def setdiff1d(ar1, ar2, assume_unique=False):
    """Find the set difference of two arrays. It returns unique
    values in `ar1` that are not in `ar2`.

    Parameters
    ----------
    ar1 : cupy.ndarray
        Input array
    ar2 : cupy.ndarray
        Input array for comparison
    assume_unique : bool
        By default, False, i.e. input arrays are not unique.
        If True, input arrays are assumed to be unique. This can
        speed up the calculation.

    Returns
    -------
    setdiff1d : cupy.ndarray
        Returns a 1D array of values in `ar1` that are not in `ar2`.
        The result is sorted when `assume_unique=False`, but otherwise
        only sorted if the input is sorted.

    See Also
    --------
    numpy.setdiff1d

    """
    if assume_unique:
        ar1 = cupy.ravel(ar1)
    else:
        ar1 = cupy.unique(ar1)
        ar2 = cupy.unique(ar2)
    return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
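
A minimal usage sketch of the function above (hypothetical inputs; with the default assume_unique=False the inputs are deduplicated, so the result comes back sorted):

import cupy

a = cupy.array([3, 1, 2, 3, 4, 1])
b = cupy.array([3, 4, 5, 6])
print(setdiff1d(a, b))  # [1 2] -- unique values of a not in b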
Example #7
    def forward(self, a_prev: cp.ndarray, training=True) -> cp.ndarray:
        """
        :param a_prev - ND tensor with shape (n, ..., channels)
        :output - 2D tensor with shape (n, features)
        ------------------------------------------------------------------------
        n - number of examples in batch
        """
        self._shape = a_prev.shape
        # Flatten everything except the batch dimension
        return cp.ravel(a_prev).reshape(a_prev.shape[0], -1)
Example #8
def ECP(error_weight, orig_weight, set_map):
    orig_shape = error_weight.shape
    error_weight = cp.ravel(error_weight.view(cp.uint32))
    orig_weight = cp.ravel(orig_weight.view(cp.uint32))
    # Reshape so each row holds 16 32-bit words
    shape = (int(len(error_weight) / 16), 16)
    error_weight = cp.reshape(error_weight, shape)
    orig_weight = cp.reshape(orig_weight, shape)
    # Calculate stuck bits
    stuck_bits = cp.bitwise_xor(error_weight, orig_weight)
    stuck_bits_sum = cp.sum(stuck_bits, axis=1)
    error = cp.concatenate(cp.in1d(stuck_bits_sum, set_map).nonzero())

    if len(error) > 0:
        # Rows flagged by set_map fall back to the original weights
        error_weight[error, :] = orig_weight[error, :]
    return cp.reshape(error_weight, orig_shape).view(cp.float32)
Example #9
def wiener(im, mysize=None, noise=None):
    """
    Perform a Wiener filter on an N-dimensional array.

    Apply a Wiener filter to the N-dimensional array `im`.

    Parameters
    ----------
    im : ndarray
        An N-dimensional array.
    mysize : int or array_like, optional
        A scalar or an N-length list giving the size of the Wiener filter
        window in each dimension.  Elements of mysize should be odd.
        If mysize is a scalar, then this scalar is used as the size
        in each dimension.
    noise : float, optional
        The noise-power to use. If None, then noise is estimated as the
        average of the local variance of the input.

    Returns
    -------
    out : ndarray
        Wiener filtered result with the same shape as `im`.

    """
    im = asarray(im)
    if mysize is None:
        mysize = [3] * im.ndim
    mysize = np.asarray(mysize)
    if mysize.shape == ():
        mysize = np.repeat(mysize.item(), im.ndim)

    # Estimate the local mean
    lMean = correlate(im, ones(mysize), "same") / prod(mysize, axis=0)

    # Estimate the local variance
    lVar = (
        correlate(im ** 2, ones(mysize), "same") / prod(mysize, axis=0)
        - lMean ** 2
    )

    # Estimate the noise power if needed.
    if noise is None:
        noise = mean(ravel(lVar), axis=0)

    res = im - lMean
    res *= 1 - noise / lVar
    res += lMean
    out = where(lVar < noise, lMean, res)

    return out
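
A minimal usage sketch for the filter above (hypothetical data; assumes the bare names used in the excerpt -- asarray, correlate, ones, prod, mean, ravel, where -- resolve to CuPy equivalents at module level):

import cupy as cp

noisy = cp.ones((64, 64)) + 0.1 * cp.random.standard_normal((64, 64))
smoothed = wiener(noisy, mysize=5)  # 5x5 window; noise power estimated from local variance
assert smoothed.shape == noisy.shape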
Example #10
File: gpu_ssb.py Project: dxm447/ptychogpu
def ssb_kernel(processed4D, real_calibration, aperture, voltage, chunks=12):
    data_size = np.asarray(processed4D.shape)
    processed4D = np.reshape(
        processed4D, (data_size[0], data_size[1], data_size[2] * data_size[3]))
    wavelength = wavelength_pm(voltage)
    cutoff = aperture / (1000 * wavelength)
    four_y = cp.fft.fftshift(cp.fft.fftfreq(data_size[0], real_calibration))
    four_x = cp.fft.fftshift(cp.fft.fftfreq(data_size[1], real_calibration))
    Four_X, Four_Y = cp.meshgrid(four_x, four_y)
    FourXY = cp.sqrt((Four_Y**2) + (Four_X**2))
    yy, xx = cp.mgrid[0:data_size[0], 0:data_size[1]]
    rsize = cp.zeros((np.size(yy), 2), dtype=int)
    rsize[:, 0] = cp.ravel(yy)
    rsize[:, 1] = cp.ravel(xx)

    left_imGPU, rightimGPU = lobe_calc(processed4D, Four_Y, Four_X, FourXY,
                                       rsize, cutoff, chunks)
    left_image = cp.asnumpy(cp.fft.ifft2(left_imGPU))
    rightimage = cp.asnumpy(cp.fft.ifft2(rightimGPU))

    del four_y, four_x, Four_X, Four_Y, FourXY, yy, xx, rsize, left_imGPU, rightimGPU

    return left_image, rightimage
Example #11
def wiener(im, mysize=None, noise=None):
    """
    Perform a Wiener filter on an N-dimensional array.

    Apply a Wiener filter to the N-dimensional array `im`.

    Parameters
    ----------
    im : ndarray
        An N-dimensional array.
    mysize : int or array_like, optional
        A scalar or an N-length list giving the size of the Wiener filter
        window in each dimension.  Elements of mysize should be odd.
        If mysize is a scalar, then this scalar is used as the size
        in each dimension.
    noise : float, optional
        The noise-power to use. If None, then noise is estimated as the
        average of the local variance of the input.

    Returns
    -------
    out : ndarray
        Wiener filtered result with the same shape as `im`.

    """
    im = cp.asarray(im)
    if mysize is None:
        mysize = [3] * im.ndim
    mysize = np.asarray(mysize)
    if mysize.shape == ():
        mysize = np.repeat(mysize.item(), im.ndim)

    lprod = np.prod(mysize)
    lMean = correlate(im, cp.ones(mysize), "same")
    lVar = correlate(im**2, cp.ones(mysize), "same")

    lMean, lVar = _wiener_prep_kernel(lMean, lVar, lprod)

    # Estimate the noise power if needed.
    if noise is None:
        noise = cp.mean(cp.ravel(lVar), axis=0)

    return _wiener_post_kernel(im, lMean, lVar, noise)
Example #12
def go_cupy(signal, gpuR, gpuW):
    """Run demodulation on the GPU.

    First store the reference and window data on the GPU using the
    init_gpu() function. The objects it returns are required by this
    function.

    Returns:
    - An (M, N) numpy array (np.float64) holding the average of the
      convolution result along the second dimension of the signal
      data, i.e. the demodulation result for each demodulation channel.
    """
    N, k = signal.shape
    M = gpuR.shape[0]

    gpuS = cp.asarray(signal)

    results = np.zeros((M, N))
    for i in range(M):
        buffer = cp.multiply(gpuS, gpuR[i,:])
        buffer = cp.ravel(buffer)
        buffer = cp.convolve(buffer, gpuW, mode='same')
        buffer = cp.reshape(buffer, signal.shape)
        buffer = cp.mean(buffer, axis=1)
        results[i,:] = cp.asnumpy(buffer)

    return results
Example #13
# However, it's a good opportunity to get familiar with the API
source_df: cudf.DataFrame = cudf.read_csv(
    '/att/nobackup/tpmaxwel/data/fashion-mnist-csv/fashion_train.csv')
data = source_df.loc[:, source_df.columns[:-1]]
target = source_df[source_df.columns[-1]]
n_neighbors = 5

# fit model
model = NearestNeighbors(n_neighbors=n_neighbors)
model.fit(data)

# get nearest neighbors
dist_mlarr, ind_mlarr = model.kneighbors(data, return_distance=True)

# create sparse matrix
distances = cupy.ravel(cupy.fromDlpack(dist_mlarr.to_dlpack()))
indices = cupy.ravel(cupy.fromDlpack(ind_mlarr.to_dlpack()))
print(
    f"Computed KNN graph, distances shape = {distances.shape}, indices shape = {indices.shape}, distances[0:5]= {distances[0:5]}, indices[0:5]= {indices[0:5]}"
)
n_samples = indices.shape[0]
n_nonzero = n_samples * n_neighbors
rowptr = cupy.arange(0, n_nonzero + 1, n_neighbors)
knn_graph = cupyx.scipy.sparse.csr_matrix((distances, indices, rowptr),
                                          shape=(n_samples, n_samples))

print(f"Completed KNN, graph shape = {knn_graph.shape}")

reducer = cuml.UMAP(n_neighbors=15,
                    n_components=3,
                    n_epochs=500)
Example #14
def weighted_mode(a, w, *, axis=0):
    """Returns an array of the weighted modal (most common) value in a

    If there is more than one such value, only the first is returned.
    The bin-count for the modal bins is also returned.

    This is an extension of the algorithm in scipy.stats.mode.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    w : array_like
        n-dimensional array of weights for each value
    axis : int, optional
        Axis along which to operate. Default is 0, i.e. the first axis.

    Returns
    -------
    vals : ndarray
        Array of modal values.
    score : ndarray
        Array of weighted counts for each mode.

    Examples
    --------
    >>> from sklearn.utils.extmath import weighted_mode
    >>> x = [4, 1, 4, 2, 4, 2]
    >>> weights = [1, 1, 1, 1, 1, 1]
    >>> weighted_mode(x, weights)
    (array([4.]), array([3.]))

    The value 4 appears three times: with uniform weights, the result is
    simply the mode of the distribution.

    >>> weights = [1, 3, 0.5, 1.5, 1, 2]  # deweight the 4's
    >>> weighted_mode(x, weights)
    (array([2.]), array([3.5]))

    The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.5.

    See Also
    --------
    scipy.stats.mode
    """
    if axis is None:
        a = np.ravel(a)
        w = np.ravel(w)
        axis = 0
    else:
        a = np.asarray(a)
        w = np.asarray(w)

    if a.shape != w.shape:
        w = np.full(a.shape, w, dtype=w.dtype)

    scores = np.unique(np.ravel(a))  # get ALL unique values
    testshape = list(a.shape)
    testshape[axis] = 1
    oldmostfreq = np.zeros(testshape)
    oldcounts = np.zeros(testshape)
    for score in scores:
        template = np.zeros(a.shape)
        ind = (a == score)
        template[ind] = w[ind]
        counts = np.expand_dims(np.sum(template, axis), axis)
        mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
        oldcounts = np.maximum(counts, oldcounts)
        oldmostfreq = mostfrequent
    return mostfrequent, oldcounts
Example #15
    def _inner_prod(self, x, y):
        # Real part of the complex inner product of the flattened arrays
        return cp.real(cp.dot(cp.conj(cp.ravel(x)), cp.ravel(y)))
Example #16
    def _inner_prod(self, x, y):
        # Inner product of the flattened (real) arrays
        return cp.dot(cp.ravel(x), cp.ravel(y))
Example #17
def backward_pool(dA, A_previous, stride, f, mode = "max"):
    '''
    A backward pool step

    Parameters
    ----------
    dA : cp.array(examples, 1 + (height - f) / stride, 1 + (width - f) / stride, depth)
        Cost derivative from l+1 layer.
    A_previous : cp.array(examples, height, width, depth)
        Output image from the l-1 layer.
    stride : int
        Stride parameter.
    f : int
        Square filter dimension.
    mode : string, optional
        Filter type. The default is "max".

    Returns
    -------
    dA_prev : cp.array(examples, height, width, depth)
        Cost derivative from the current layer.

    '''
    
    m, n_H_prev, n_W_prev, n_C_prev = A_previous.shape
    m, n_H, n_W, n_C = dA.shape
    
    dA = cp.ravel(cp.transpose(dA, (1, 2, 0, 3)))
    dA_prev = cp.zeros((f**2, dA.size))
    A_previous = cp.reshape(cp.transpose(A_previous, (0, 3, 1, 2)), (m*n_C_prev, 1, n_H_prev, n_W_prev))
    A_prev = Utils.image_to_column(A_previous, (f, f), stride)
    
    if mode == "max":
        mask = cp.argmax(A_prev, axis=0).flatten()
        dA_prev[mask, cp.linspace(0, dA.size-1, dA.size, dtype=int)] = dA
    elif mode == "mean":
        dA_prev[:,  cp.linspace(0, dA.size-1, dA.size, dtype=int)] = 1. / dA_prev.shape[0] * dA
        
    dA_prev = cp.reshape(dA_prev, (n_H_prev, n_W_prev, m, n_C_prev))
    dA_prev = cp.transpose(dA_prev, (2, 0, 1, 3))
    
    '''
    Intuitive way (Really not optimized)
    for i in range(m):               
        a_prev = A_previous[i, :, :, :]
        
        for h in range(n_H):
            vert_start = h*stride
            vert_end = h*stride + f                 
            for w in range(n_W):
                horiz_start = w*stride
                horiz_end = w*stride + f
                for c in range(n_C):           
                    
                    if mode == "max":
                        a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]
                        mask = (a_prev_slice == cp.max(a_prev_slice))
                        dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += mask*dA[i,h,w,c]
                        
                    elif mode == "mean":

                        dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += dA[i,h,w,c] * cp.ones((f, f))/f**2
    '''
    return dA_prev
Example #18
    def set(self, vect, name, value):
        """Writes `value` into the subset of `vect` indexed by `name`."""
        idxs, _ = self.idxs_and_shapes[name]
        vect[idxs] = np.ravel(value)
Example #19
    def forward(self, prev_arr):
        # Takes the N-dimensional output of the conv/pooling layers and flattens it to one row per example.
        self.prev_shape = prev_arr.shape

        return np.ravel(prev_arr).reshape(prev_arr.shape[0], -1)
Example #20
        if types == 'all':
            histogram.append(list(spacinginfo[0]))
            allspacings = spacinginfo[1]

        elif types == 'median':
            spacinghistograms = spacinginfo

        else:
            # Creating histogram data
            spacinghistograms = spacinginfo
            x = range(len(spacinginfo))
            x_coord = cp.repeat(cp.asarray(x), len(spacinginfo[0]))
            cp.cuda.Stream.null.synchronize()
            spacing_array = cp.array(spacinginfo)
            cp.cuda.Stream.null.synchronize()
            allhists = cp.ravel(spacing_array)
            cp.cuda.Stream.null.synchronize()

            #Saving data to excel spreadsheet for batching
            workbook = xlsxwriter.Workbook('Frech_Christian_cupyprobdensitydata.xlsx')
            worksheet = workbook.add_worksheet()

            bold = workbook.add_format({'bold': True})
            col = findIndex(upperlimit,upperlimit_list)
            
            worksheet.write(0, col, upperlimit, bold)
            for row in range(len(stdevvalues)):
                worksheet.write((row+1), col, allhists[row])

            # Create a figure for plotting the data as a 3D histogram.
            fig, ax = plt.subplots(1,1)